source: trunk/kernel/mm/mapper.c @ 624

Last change on this file since 624 was 624, checked in by alain, 5 years ago

Fix several bugs so that the instruction MMU is used in kernel mode
instead of the instruction address extension register,
and remove the "kentry" segment.

This version runs on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase register defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor-Interrupts.

File size: 24.9 KB
1/*
2 * mapper.c - Kernel cache for FS files or directories (implementation).
3 *
4 * Authors   Mohamed Lamine Karaoui (2015)
5 *           Alain Greiner (2016,2017,2018,2019)
6 *
7 * Copyright (c)  UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_special.h>
28#include <hal_uspace.h>
29#include <grdxt.h>
30#include <string.h>
31#include <rwlock.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <thread.h>
35#include <core.h>
36#include <process.h>
37#include <kmem.h>
38#include <kcm.h>
39#include <ppm.h>
40#include <page.h>
41#include <cluster.h>
42#include <vfs.h>
43#include <mapper.h>
44#include <dev_ioc.h>
45
46
47//////////////////////////////////////////////
48mapper_t * mapper_create( vfs_fs_type_t type )
49{
50    mapper_t * mapper;
51    kmem_req_t req;
52    error_t    error;
53
54    // allocate memory for mapper
55    req.type  = KMEM_MAPPER;
56    req.size  = sizeof(mapper_t);
57    req.flags = AF_KERNEL | AF_ZERO;
58    mapper    = (mapper_t *)kmem_alloc( &req );
59
60    if( mapper == NULL )
61    {
62        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
63        return NULL;
64    }
65
66    // initialize refcount & inode
67    mapper->refcount = 0;
68    mapper->inode    = NULL;
69
70    // initialize radix tree
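    // (the three CONFIG_MAPPER_GRDXT_W* widths give the number of index bits
    // used at each level of the three-level radix tree : a page index is
    // decomposed into three fields, one per level)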
71    error = grdxt_init( &mapper->rt,
72                        CONFIG_MAPPER_GRDXT_W1,
73                        CONFIG_MAPPER_GRDXT_W2,
74                        CONFIG_MAPPER_GRDXT_W3 );
75
76    if( error )
77    {
78        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
79        req.type  = KMEM_MAPPER;
80        req.ptr   = mapper;
81        kmem_free( &req );
82        return NULL;
83    }
84
85    // initialize mapper type
86    mapper->type = type;
87
88    // initialize mapper lock
89    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
90
91    // initialize waiting threads xlist (empty)
92    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );
93
94    // initialize vsegs xlist (empty)
95    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );
96
97    return mapper;
98
99}  // end mapper_create()
100
101////////////////////////////////////////
102void mapper_destroy( mapper_t * mapper )
103{
104    page_t   * page;
105    uint32_t   found_index = 0;
106    uint32_t   start_index = 0;
107    kmem_req_t req;
108
109    // scan radix tree
110    do
111    {
112        // get page from radix tree
113        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
114
115        // release registered pages to PPM
116        if( page != NULL )
117        {
118            // remove page from mapper and release to PPM
119            mapper_release_page( mapper , page );
120
121            // update start_index value for next page
122            start_index = found_index;
123        }
124    }
125    while( page != NULL );
126
127    // release the memory allocated to radix tree itself
128    grdxt_destroy( &mapper->rt );
129
130    // release memory for mapper descriptor
131    req.type = KMEM_MAPPER;
132    req.ptr  = mapper;
133    kmem_free( &req );
134
135}  // end mapper_destroy()
136
137////////////////////////////////////////////////////
138xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
139                                uint32_t  page_id )
140{
141    error_t       error;
142    mapper_t    * mapper_ptr;
143    cxy_t         mapper_cxy;
144    xptr_t        lock_xp;        // extended pointer on mapper lock
145    xptr_t        page_xp;        // extended pointer on searched page descriptor
146    xptr_t        rt_xp;          // extended pointer on radix tree in mapper
147
148    thread_t * this = CURRENT_THREAD;
149
150    // get mapper cluster and local pointer
151    mapper_ptr = GET_PTR( mapper_xp );
152    mapper_cxy = GET_CXY( mapper_xp );
153
154#if DEBUG_MAPPER_GET_PAGE
155uint32_t cycle = (uint32_t)hal_get_cycles();
156char          name[CONFIG_VFS_MAX_NAME_LENGTH];
157vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
158vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
159if( DEBUG_MAPPER_GET_PAGE < cycle )
160printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
161__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
162#endif
163
164    // check thread can yield
165    thread_assert_can_yield( this , __FUNCTION__ );
166
167    // build extended pointer on mapper lock and mapper rt
168    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
169    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
170
171    // take mapper lock in READ_MODE
172    remote_rwlock_rd_acquire( lock_xp );
173
174    // search page in radix tree
175    page_xp  = grdxt_remote_lookup( rt_xp , page_id );
176
177    // test mapper miss
178    if( page_xp == XPTR_NULL )                  // miss => try to handle it
179    {
180        // release the lock in READ_MODE and take it in WRITE_MODE
181        remote_rwlock_rd_release( lock_xp );
182        remote_rwlock_wr_acquire( lock_xp );
183
184        // test again for the missing page, because the page status can be modified
185        // by another thread while the lock is upgraded from READ_MODE to WRITE_MODE.
186        // From this point there are no concurrent accesses to the mapper.
187        page_xp = grdxt_remote_lookup( rt_xp , page_id );
188
189        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
190        {
191
192            if( mapper_cxy == local_cxy )   // mapper is local
193            {
194
195#if (DEBUG_MAPPER_GET_PAGE & 1)
196if( DEBUG_MAPPER_GET_PAGE < cycle )
197printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
198#endif
199                 error = mapper_handle_miss( mapper_ptr,
200                                             page_id, 
201                                             &page_xp );
202            } 
203            else
204            {
205
206#if (DEBUG_MAPPER_GET_PAGE & 1)
207if( DEBUG_MAPPER_GET_PAGE < cycle )
208printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
209#endif
210                 rpc_mapper_handle_miss_client( mapper_cxy,
211                                                mapper_ptr,
212                                                page_id,
213                                                &page_xp,
214                                                &error );
215            }
216
217            if ( error )
218            {
219                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
220                __FUNCTION__ , this->process->pid, this->trdid );
221                remote_rwlock_wr_release( lock_xp );
222                return XPTR_NULL;
223            }
224        }
225       
226        // release mapper lock from WRITE_MODE
227        remote_rwlock_wr_release( lock_xp );
228    }
229    else                                              // hit
230    {
231        // release mapper lock from READ_MODE
232        remote_rwlock_rd_release( lock_xp );
233    }
234
235#if DEBUG_MAPPER_GET_PAGE
236cycle = (uint32_t)hal_get_cycles();
237if( DEBUG_MAPPER_GET_PAGE < cycle )
238printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
239__FUNCTION__, this->process->pid, this->trdid, 
240page_id, name, ppm_page2ppn( page_xp ), cycle );
241#endif
242
243    return page_xp;
244
245}  // end mapper_remote_get_page()
246
247//////////////////////////////////////////////
248error_t mapper_handle_miss( mapper_t * mapper,
249                            uint32_t   page_id,
250                            xptr_t   * page_xp )
251{
252    kmem_req_t   req;
253    page_t     * page;
254    error_t      error;
255
256    thread_t * this = CURRENT_THREAD;
257
258#if DEBUG_MAPPER_HANDLE_MISS
259uint32_t cycle = (uint32_t)hal_get_cycles();
260char          name[CONFIG_VFS_MAX_NAME_LENGTH];
261vfs_inode_t * inode = mapper->inode;
262vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
263if( DEBUG_MAPPER_HANDLE_MISS < cycle )
264printk("\n[%s] enter for page %d in <%s> / cycle %d",
265__FUNCTION__, page_id, name, cycle );
266if( DEBUG_MAPPER_HANDLE_MISS & 1 )
267grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
268#endif
269
270    // allocate one page from the local cluster
271    req.type  = KMEM_PAGE;
272    req.size  = 0;
273    req.flags = AF_NONE;
274    page = kmem_alloc( &req );
275
276    if( page == NULL )
277    {
278        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
279        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
280        return -1;
281    }
282
283    // initialize the page descriptor
284    page_init( page );
285    page_set_flag( page , PG_INIT );
286    page_refcount_up( page );
287    page->mapper = mapper;
288    page->index  = page_id;
289
290    // insert page in mapper radix tree
291    error = grdxt_insert( &mapper->rt , page_id , page );
292
293    if( error )
294    {
295        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
296        __FUNCTION__ , this->process->pid, this->trdid );
297        // the page was not registered in the mapper : release it directly to PPM
298        req.ptr  = page;
299        req.type = KMEM_PAGE;
300        kmem_free(&req);
301        return -1;
302    }
303
304    // launch I/O operation to load page from device to mapper
305    error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ );
306
307    if( error )
308    {
309        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
310        __FUNCTION__ , this->process->pid, this->trdid );
311        // remove the page from the mapper radix tree and release it to PPM :
312        // mapper_release_page() already frees the page,
313        // so no additional kmem_free() is needed here
314        mapper_release_page( mapper , page );
315        return -1;
316    }
317
318    // set extended pointer on allocated page
319    *page_xp = XPTR( local_cxy , page );
320
321#if DEBUG_MAPPER_HANDLE_MISS
322cycle = (uint32_t)hal_get_cycles();
323if( DEBUG_MAPPER_HANDLE_MISS < cycle )
324printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
325__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
326if( DEBUG_MAPPER_HANDLE_MISS & 1 )
327grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
328#endif
329
330    return 0;
331
332}  // end mapper_handle_miss()
333
334////////////////////////////////////////////
335void mapper_release_page( mapper_t * mapper,
336                          page_t   * page )
337{
338    // build extended pointer on mapper lock
339    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );
340
341    // take mapper lock in WRITE_MODE
342    remote_rwlock_wr_acquire( mapper_lock_xp );
343
344    // remove physical page from radix tree
345    grdxt_remove( &mapper->rt , page->index );
346
347    // release mapper lock from WRITE_MODE
348    remote_rwlock_wr_release( mapper_lock_xp );
349
350    // release page to PPM
351    kmem_req_t   req;
352    req.type  = KMEM_PAGE;
353    req.ptr   = page;
354    kmem_free( &req );
355
356}  // end mapper_release_page()
357
358///////////////////////////////////////////////
359error_t mapper_move_user( xptr_t     mapper_xp,
360                          bool_t     to_buffer,
361                          uint32_t   file_offset,
362                          void     * buffer,
363                          uint32_t   size )
364{
365    uint32_t   page_offset;    // first byte to move to/from a mapper page
366    uint32_t   page_count;     // number of bytes to move to/from a mapper page
367    uint32_t   page_id;        // current mapper page index
368    uint32_t   done;           // number of moved bytes
369    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
370
371#if DEBUG_MAPPER_MOVE_USER
372uint32_t   cycle = (uint32_t)hal_get_cycles();
373thread_t * this  = CURRENT_THREAD;
374if( DEBUG_MAPPER_MOVE_USER < cycle )
375printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
376__FUNCTION__, this->process->pid, this->trdid,
377to_buffer, buffer, size, file_offset, cycle );
378#endif
379
380    // compute offsets of first and last bytes in file
381    uint32_t min_byte = file_offset;
382    uint32_t max_byte = file_offset + size - 1;
383
384    // compute indexes of pages for first and last byte in mapper
385    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
386    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
387
388#if (DEBUG_MAPPER_MOVE_USER & 1)
389if( DEBUG_MAPPER_MOVE_USER < cycle )
390printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
391__FUNCTION__, this->process->pid, this->trdid, first, last );
392#endif
393
394    done = 0;
395
396    // loop on pages in mapper
397    for( page_id = first ; page_id <= last ; page_id++ )
398    {
399        // compute page_offset
400        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
401        else                   page_offset = 0;
402
403        // compute number of bytes in page
404        if      ( first   == last  ) page_count = size;
405        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
406        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
407        else                         page_count = CONFIG_PPM_PAGE_SIZE;
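        // example, assuming 4 Kbytes pages : for file_offset = 5000 and size = 6000,
        // min_byte = 5000 / max_byte = 10999 => first = 1 / last = 2 :
        // page 1 gets page_offset = 904 and page_count = 3192,
        // page 2 gets page_offset = 0 and page_count = 2808 (3192 + 2808 = 6000)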
408
409#if (DEBUG_MAPPER_MOVE_USER & 1)
410if( DEBUG_MAPPER_MOVE_USER < cycle )
411printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
412__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
413#endif
414
415        // get extended pointer on page descriptor
416        page_xp = mapper_remote_get_page( mapper_xp , page_id ); 
417
418        if ( page_xp == XPTR_NULL ) return -1;
419
420#if (DEBUG_MAPPER_MOVE_USER & 1)
421if( DEBUG_MAPPER_MOVE_USER < cycle )
422printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
423__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
424#endif
425
426        // compute pointer in mapper
427        xptr_t    base_xp = ppm_page2base( page_xp );
428        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;
429
430        // compute pointer in buffer
431        uint8_t * buf_ptr = (uint8_t *)buffer + done;
432
433        // move fragment
434        if( to_buffer )
435        {
436            hal_copy_to_uspace( buf_ptr , map_ptr , page_count ); 
437        }
438        else
439        {
440            ppm_page_do_dirty( page_xp ); 
441            hal_copy_from_uspace( map_ptr , buf_ptr , page_count ); 
442        }
443
444        done += page_count;
445    }
446
447#if DEBUG_MAPPER_MOVE_USER
448cycle = (uint32_t)hal_get_cycles();
449if( DEBUG_MAPPER_MOVE_USER < cycle )
450printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
451__FUNCTION__, this->process->pid, this->trdid, cycle );
452#endif
453
454    return 0;
455
456}  // end mapper_move_user()
457
458////////////////////////////////////////////////
459error_t mapper_move_kernel( xptr_t    mapper_xp,
460                            bool_t    to_buffer,
461                            uint32_t  file_offset,
462                            xptr_t    buffer_xp,
463                            uint32_t  size )
464{
465    uint32_t   page_offset;    // first byte to move to/from a mapper page
466    uint32_t   page_count;     // number of bytes to move to/from a mapper page
467    uint32_t   page_id;        // current mapper page index
468    uint32_t   done;           // number of moved bytes
469    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
470
471    uint8_t  * src_ptr;        // source buffer local pointer
472    cxy_t      src_cxy;        // source cluster
473    uint8_t  * dst_ptr;        // destination buffer local pointer
474    cxy_t      dst_cxy;        // destination cluster
475
476    // get buffer cluster and local pointer
477    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
478    uint8_t * buffer_ptr = GET_PTR( buffer_xp );
479
480    // get mapper cluster
481    cxy_t     mapper_cxy = GET_CXY( mapper_xp );
482
483#if DEBUG_MAPPER_MOVE_KERNEL
484uint32_t   cycle = (uint32_t)hal_get_cycles();
485thread_t * this  = CURRENT_THREAD;
486if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
487printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
488__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
489#endif
490
491    // compute offsets of first and last bytes in file
492    uint32_t min_byte = file_offset;
493    uint32_t max_byte = file_offset + size - 1;
494
495    // compute indexes for first and last pages in mapper
496    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
497    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
498
499#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
500if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
501printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
502#endif
503
504    // compute source and destination clusters
505    if( to_buffer )
506    {
507        dst_cxy = buffer_cxy;
508        src_cxy = mapper_cxy;
509    }
510    else
511    {
512        src_cxy = buffer_cxy;
513        dst_cxy = mapper_cxy;
514    }
515
516    done = 0;
517
518    // loop on pages in mapper
519    for( page_id = first ; page_id <= last ; page_id++ )
520    {
521        // compute page_offset
522        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
523        else                   page_offset = 0;
524
525        // compute number of bytes to move in page
526        if      ( first == last  )   page_count = size;
527        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
528        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
529        else                         page_count = CONFIG_PPM_PAGE_SIZE;
530
531#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
532if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
533printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
534__FUNCTION__ , page_id , page_offset , page_count );
535#endif
536
537        // get extended pointer on page descriptor
538        page_xp = mapper_remote_get_page( mapper_xp , page_id );
539
540        if ( page_xp == XPTR_NULL ) return -1;
541
542        // get page base address
543        xptr_t    base_xp  = ppm_page2base( page_xp );
544        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );
545
546        // compute source and destination pointers
547        if( to_buffer )
548        {
549            dst_ptr = buffer_ptr + done;
550            src_ptr = base_ptr + page_offset;
551        }
552        else
553        {
554            src_ptr = buffer_ptr + done;
555            dst_ptr = base_ptr + page_offset;
556
557            ppm_page_do_dirty( page_xp );
558        }
559
560#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
561if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
562printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
563__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
564#endif
565
566        // move fragment
567        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );
568
569        done += page_count;
570    }
571
572#if DEBUG_MAPPER_MOVE_KERNEL
573cycle = (uint32_t)hal_get_cycles();
574if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
575printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
576__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
577#endif
578
579    return 0;
580
581}  // end mapper_move_kernel()
582
583///////////////////////////////////////////////////
584error_t mapper_remote_get_32( xptr_t     mapper_xp,
585                              uint32_t   word_id,
586                              uint32_t * p_value )
587{
588    uint32_t   page_id;      // page index in file
589    uint32_t   local_id;     // word index in page
590    xptr_t     page_xp;      // extended pointer on searched page descriptor
591    xptr_t     base_xp;      // extended pointer on searched page base
592
593   
594    // get page index and local word index
595    page_id  = word_id >> 10;
596    local_id = word_id & 0x3FF;
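    // note : the shift by 10 and the 0x3FF mask assume 4 Kbytes pages,
    // i.e. 1024 32-bit words per page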
597
598    // get page containing the searched word
599    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
600
601    if( page_xp == XPTR_NULL )  return -1;
602   
603    // get page base
604    base_xp = ppm_page2base( page_xp );
605
606    // get the value from mapper
607    *p_value = hal_remote_l32( base_xp + (local_id<<2) ); 
608
609    return 0;
610
611}  // end mapper_remote_get_32()
612
613///////////////////////////////////////////////////
614error_t mapper_remote_set_32( xptr_t     mapper_xp,
615                              uint32_t   word_id,
616                              uint32_t   value )
617{
618   
619    uint32_t   page_id;      // page index in file
620    uint32_t   local_id;     // word index in page
621    xptr_t     page_xp;      // extended pointer on searched page descriptor
622    xptr_t     base_xp;      // extended pointer on searched page base
623
624    // get page index and local word index
625    page_id  = word_id >> 10;
626    local_id = word_id & 0x3FF;
627
628    // get page containing the searched word
629    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
630
631    if( page_xp == XPTR_NULL ) return -1;
632
633    // get page base
634    base_xp = ppm_page2base( page_xp );
635
636    // set value to mapper
637    hal_remote_s32( (base_xp + (local_id << 2)) , value );
638
639    // set the dirty flag
640    ppm_page_do_dirty( page_xp );
641
642    return 0;
643
644}  // end mapper_remote_set_32()
645
646/////////////////////////////////////////
647error_t mapper_sync( mapper_t *  mapper )
648{
649    page_t   * page;                // local pointer on current page descriptor
650    xptr_t     page_xp;             // extended pointer on current page descriptor
651    grdxt_t  * rt;                  // pointer on radix_tree descriptor
652    uint32_t   start_key;           // start page index in mapper
653    uint32_t   found_key;           // current page index in mapper
654    error_t    error;
655
656#if DEBUG_MAPPER_SYNC
657thread_t * this  = CURRENT_THREAD;
658uint32_t   cycle = (uint32_t)hal_get_cycles();
659char       name[CONFIG_VFS_MAX_NAME_LENGTH];
660vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
661#endif
662
663    // get pointer on radix tree
664    rt        = &mapper->rt;
665
666    // initialise loop variable
667    start_key = 0;
668
669    // scan radix-tree until last page found
670    while( 1 )
671    {
672        // get page descriptor from radix tree
673        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
674         
675        if( page == NULL ) break;
676
677assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
678assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );
679
680        // build extended pointer on page descriptor
681        page_xp = XPTR( local_cxy , page );
682
683        // synchronize page if dirty
684        if( (page->flags & PG_DIRTY) != 0 )
685        {
686
687#if DEBUG_MAPPER_SYNC
688if( cycle > DEBUG_MAPPER_SYNC )
689printk("\n[%s] thread[%x,%x] synchronise page %d of <%s> to device\n",
690__FUNCTION__, this->process->pid, this->trdid, page->index, name );
691#endif
692            // copy page to file system
693            error = vfs_fs_move_page( page_xp , IOC_WRITE );
694
695            if( error )
696            {
697                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n", 
698                __FUNCTION__, page->index );
699                return -1;
700            }
701
702            // remove page from PPM dirty list
703            ppm_page_undo_dirty( page_xp ); 
704        } 
705        else
706        {
707
708#if DEBUG_MAPPER_SYNC
709if( cycle > DEBUG_MAPPER_SYNC )
710printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
711__FUNCTION__, this->process->pid, this->trdid, page->index, name );
712#endif
713        }
714
715        // update loop variable
716        start_key = page->index + 1;
717    }  // end while
718
719    return 0;
720
721}  // end mapper_sync()
722
723//////////////////////////////////////////////////
724error_t mapper_display_page( xptr_t     mapper_xp,
725                             uint32_t   page_id,
726                             uint32_t   nbytes )
727{
728    xptr_t        page_xp;        // extended pointer on page descriptor
729    xptr_t        base_xp;        // extended pointer on page base
730    char          buffer[4096];   // local buffer
731    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
732    char        * tabc;           // pointer on char to scan buffer
733    uint32_t      line;           // line index
734    uint32_t      word;           // word index
735    uint32_t      n;              // char index
736    cxy_t         mapper_cxy;     // mapper cluster identifier
737    mapper_t    * mapper_ptr;     // mapper local pointer
738    vfs_inode_t * inode_ptr;      // inode local pointer
739 
740    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
741
742    if( nbytes > 4096)
743    {
744        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
745        __FUNCTION__, nbytes );
746        return -1;
747    }
748   
749    // get extended pointer on page descriptor
750    page_xp = mapper_remote_get_page( mapper_xp , page_id );
751
752    if( page_xp == XPTR_NULL)
753    {
754        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
755        __FUNCTION__, page_id );
756        return -1;
757    }
758
759    // get cluster and local pointer
760    mapper_cxy = GET_CXY( mapper_xp );
761    mapper_ptr = GET_PTR( mapper_xp );
762
763    // get inode
764    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
765
766    // get inode name
767    if( inode_ptr == NULL ) strcpy( name , "fat" );
768    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
769   
770    // get extended pointer on page base
771    base_xp = ppm_page2base( page_xp );
772   
773    // copy remote page to local buffer
774    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
775
776    // display 8 words per line
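    // note : only complete 32-bytes lines are displayed ; if nbytes is not
    // a multiple of 32, the last (nbytes % 32) bytes are not shown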
777    tabi = (uint32_t *)buffer;
778    tabc = (char *)buffer;
779    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
780    for( line = 0 ; line < (nbytes >> 5) ; line++ )
781    {
782        printk("%X : ", line );
783        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
784        printk(" | ");
785        for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
786        printk("\n");
787    }
788
789    return 0;
790
791}  // end mapper_display_page()
792
793