source: trunk/kernel/mm/mapper.c @ 625

Last change on this file since 625 was 625, checked in by alain, 5 years ago

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel whenever
the target process descriptor was in the reference cluster.
These physical pages should be released only when the page
forks counter is zero.
All other modifications are cosmetic.
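
As an illustration of that rule, here is a toy model of the release policy; it is not
the actual vmm.c code, and the type and field names below are assumptions made for
clarity only:

/* Toy model: a physical page backing a user DATA vseg may be returned to the
 * kernel allocator only when its forks counter is zero, i.e. when no child
 * copy-on-write reference to the page remains. */
#include <stdbool.h>

typedef struct
{
    unsigned forks;      /* pending child references (assumed field name) */
    bool     is_data;    /* page belongs to a user DATA vseg (assumed)    */
} model_page_t;

static bool model_must_release( const model_page_t * page )
{
    return page->is_data && (page->forks == 0);
}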

File size: 26.0 KB
1/*
2 * mapper.c - Implementation of the kernel cache for FS files and directories.
3 *
4 * Authors   Mohamed Lamine Karaoui (2015)
5 *           Alain Greiner (2016,2017,2018,2019)
6 *
7 * Copyright (c)  UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_special.h>
28#include <hal_uspace.h>
29#include <grdxt.h>
30#include <string.h>
31#include <rwlock.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <thread.h>
35#include <core.h>
36#include <process.h>
37#include <kmem.h>
38#include <kcm.h>
39#include <ppm.h>
40#include <page.h>
41#include <cluster.h>
42#include <vfs.h>
43#include <mapper.h>
44#include <dev_ioc.h>
45
46
47//////////////////////////////////////////////
48mapper_t * mapper_create( vfs_fs_type_t type )
49{
50    mapper_t * mapper;
51    kmem_req_t req;
52    error_t    error;
53
54    // allocate memory for mapper
55    req.type  = KMEM_MAPPER;
56    req.size  = sizeof(mapper_t);
57    req.flags = AF_KERNEL | AF_ZERO;
58    mapper    = (mapper_t *)kmem_alloc( &req );
59
60    if( mapper == NULL )
61    {
62        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
63        return NULL;
64    }
65
66    // initialize refcount & inode
67    mapper->refcount = 0;
68    mapper->inode    = NULL;
69
70    // initialize radix tree
71    error = grdxt_init( &mapper->rt,
72                        CONFIG_MAPPER_GRDXT_W1,
73                        CONFIG_MAPPER_GRDXT_W2,
74                        CONFIG_MAPPER_GRDXT_W3 );
75
76    if( error )
77    {
78        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
79        req.type  = KMEM_MAPPER;
80        req.ptr   = mapper;
81        kmem_free( &req );
82        return NULL;
83    }
84
85    // initialize mapper type
86    mapper->type = type;
87
88    // initialize mapper lock
89    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
90
91    // initialize waiting threads xlist (empty)
92    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );
93
94    // initialize vsegs xlist (empty)
95    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );
96
97    return mapper;
98
99}  // end mapper_create()
100
101////////////////////////////////////////
102void mapper_destroy( mapper_t * mapper )
103{
104    page_t   * page;
105    uint32_t   found_index = 0;
106    uint32_t   start_index = 0;
107    kmem_req_t req;
108
109    // scan radix tree
110    do
111    {
112        // get page from radix tree
113        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
114
115        // release registered pages to PPM
116        if( page != NULL )
117        {
118            // remove page from mapper and release to PPM
119            mapper_release_page( mapper , page );
120
121            // update start_index for the next lookup
122            start_index = found_index;
123        }
124    }
125    while( page != NULL );
126
127    // release the memory allocated to radix tree itself
128    grdxt_destroy( &mapper->rt );
129
130    // release memory for mapper descriptor
131    req.type = KMEM_MAPPER;
132    req.ptr  = mapper;
133    kmem_free( &req );
134
135}  // end mapper_destroy()
136
137////////////////////////////////////////////////////
138xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
139                                uint32_t  page_id )
140{
141    error_t       error;
142    mapper_t    * mapper_ptr;
143    cxy_t         mapper_cxy;
144    xptr_t        lock_xp;        // extended pointer on mapper lock
145    xptr_t        page_xp;        // extended pointer on searched page descriptor
146    xptr_t        rt_xp;          // extended pointer on radix tree in mapper
147
148    thread_t * this = CURRENT_THREAD;
149
150    // get mapper cluster and local pointer
151    mapper_ptr = GET_PTR( mapper_xp );
152    mapper_cxy = GET_CXY( mapper_xp );
153
154#if DEBUG_MAPPER_GET_PAGE
155vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
156uint32_t      cycle = (uint32_t)hal_get_cycles();
157char          name[CONFIG_VFS_MAX_NAME_LENGTH];
158if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )  // FAT mapper
159{
160    printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
161    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
162}
163if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )  // file mapper
164{
165    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
166    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
167    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
168}
169#endif
170
171    // check thread can yield
172    thread_assert_can_yield( this , __FUNCTION__ );
173
174    // build extended pointer on mapper lock and mapper rt
175    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
176    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
177
178    // take mapper lock in READ_MODE
179    remote_rwlock_rd_acquire( lock_xp );
180
181    // search page in radix tree
182    page_xp  = grdxt_remote_lookup( rt_xp , page_id );
183
184    // test mapper miss
185    if( page_xp == XPTR_NULL )                  // miss => try to handle it
186    {
187        // release the lock in READ_MODE and take it in WRITE_MODE
188        remote_rwlock_rd_release( lock_xp );
189        remote_rwlock_wr_acquire( lock_xp );
190
191        // re-check for the missing page: its status may have been modified by
192        // another thread while the lock was released between READ_MODE and WRITE_MODE.
193        // From this point there are no concurrent accesses to the mapper.
194        page_xp = grdxt_remote_lookup( rt_xp , page_id );
195
196        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
197        {
198
199            if( mapper_cxy == local_cxy )   // mapper is local
200            {
201
202#if (DEBUG_MAPPER_GET_PAGE & 1)
203if( DEBUG_MAPPER_GET_PAGE < cycle )
204printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
205#endif
206                 error = mapper_handle_miss( mapper_ptr,
207                                             page_id, 
208                                             &page_xp );
209            } 
210            else
211            {
212
213#if (DEBUG_MAPPER_GET_PAGE & 1)
214if( DEBUG_MAPPER_GET_PAGE < cycle )
215printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
216#endif
217                 rpc_mapper_handle_miss_client( mapper_cxy,
218                                                mapper_ptr,
219                                                page_id,
220                                                &page_xp,
221                                                &error );
222            }
223
224            if ( error )
225            {
226                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
227                __FUNCTION__ , this->process->pid, this->trdid );
228                remote_rwlock_wr_release( lock_xp );
229                return XPTR_NULL;
230            }
231        }
232       
233        // release mapper lock from WRITE_MODE
234        remote_rwlock_wr_release( lock_xp );
235    }
236    else                                              // hit
237    {
238        // release mapper lock from READ_MODE
239        remote_rwlock_rd_release( lock_xp );
240    }
241
242#if DEBUG_MAPPER_GET_PAGE
243cycle = (uint32_t)hal_get_cycles();
244if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
245{
246    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
247    __FUNCTION__, this->process->pid, this->trdid, page_id,
248    name, ppm_page2ppn(page_xp), cycle );
249}
250if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
251{
252    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
253    __FUNCTION__, this->process->pid, this->trdid, page_id,
254    ppm_page2ppn(page_xp), cycle );
255}
256#endif
257
258    return page_xp;
259
260}  // end mapper_remote_get_page()
261
262//////////////////////////////////////////////
263error_t mapper_handle_miss( mapper_t * mapper,
264                            uint32_t   page_id,
265                            xptr_t   * page_xp )
266{
267    kmem_req_t   req;
268    page_t     * page;
269    error_t      error;
270
271    thread_t * this = CURRENT_THREAD;
272
273#if DEBUG_MAPPER_HANDLE_MISS
274uint32_t      cycle = (uint32_t)hal_get_cycles();
275char          name[CONFIG_VFS_MAX_NAME_LENGTH];
276vfs_inode_t * inode = mapper->inode;
277if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
278{
279    vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
280    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d",
281    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
282   if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
283}
284if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
285{
286    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d",
287    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
288   if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
289}
290#endif
291
292    // allocate one page from the local cluster
293    req.type  = KMEM_PAGE;
294    req.size  = 0;
295    req.flags = AF_NONE;
296    page = kmem_alloc( &req );
297
298    if( page == NULL )
299    {
300        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
301        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
302        return -1;
303    }
304
305    // initialize the page descriptor
306    page_init( page );
307    page_set_flag( page , PG_INIT );
308    page_refcount_up( page );
309    page->mapper = mapper;
310    page->index  = page_id;
311
312    // insert page in mapper radix tree
313    error = grdxt_insert( &mapper->rt , page_id , page );
314
315    if( error )
316    {
317        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
318        __FUNCTION__ , this->process->pid, this->trdid );
319        mapper_release_page( mapper , page );
320        req.ptr  = page;
321        req.type = KMEM_PAGE;
322        kmem_free(&req);
323        return -1;
324    }
325
326    // launch I/O operation to load page from device to mapper
327    error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ );
328
329    if( error )
330    {
331        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
332        __FUNCTION__ , this->process->pid, this->trdid );
333        mapper_release_page( mapper , page );
334        req.ptr  = page;
335        req.type = KMEM_PAGE;
336        kmem_free( &req );
337        return -1;
338    }
339
340    // set extended pointer on allocated page
341    *page_xp = XPTR( local_cxy , page );
342
343#if DEBUG_MAPPER_HANDLE_MISS
344cycle = (uint32_t)hal_get_cycles();
345if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
346{
347    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
348    __FUNCTION__, this->process->pid, this->trdid,
349    page_id, name, ppm_page2ppn( *page_xp ), cycle );
350    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
351}
352if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
353{
354    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
355    __FUNCTION__, this->process->pid, this->trdid,
356    page_id, ppm_page2ppn( *page_xp ), cycle );
357    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
358}
359#endif
360
361    return 0;
362
363}  // end mapper_handle_miss()
364
365////////////////////////////////////////////
366void mapper_release_page( mapper_t * mapper,
367                          page_t   * page )
368{
369    // build extended pointer on mapper lock
370    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );
371
372    // take mapper lock in WRITE_MODE
373    remote_rwlock_wr_acquire( mapper_lock_xp );
374
375    // remove physical page from radix tree
376    grdxt_remove( &mapper->rt , page->index );
377
378    // release mapper lock from WRITE_MODE
379    remote_rwlock_wr_release( mapper_lock_xp );
380
381    // release page to PPM
382    kmem_req_t   req;
383    req.type  = KMEM_PAGE;
384    req.ptr   = page;
385    kmem_free( &req );
386
387}  // end mapper_release_page()
388
389///////////////////////////////////////////////
390error_t mapper_move_user( xptr_t     mapper_xp,
391                          bool_t     to_buffer,
392                          uint32_t   file_offset,
393                          void     * buffer,
394                          uint32_t   size )
395{
396    uint32_t   page_offset;    // first byte to move to/from a mapper page
397    uint32_t   page_count;     // number of bytes to move to/from a mapper page
398    uint32_t   page_id;        // current mapper page index
399    uint32_t   done;           // number of moved bytes
400    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
401
402#if DEBUG_MAPPER_MOVE_USER
403uint32_t   cycle = (uint32_t)hal_get_cycles();
404thread_t * this  = CURRENT_THREAD;
405if( DEBUG_MAPPER_MOVE_USER < cycle )
406printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
407__FUNCTION__, this->process->pid, this->trdid,
408to_buffer, buffer, size, file_offset, cycle );
409#endif
410
411    // compute offsets of first and last bytes in file
412    uint32_t min_byte = file_offset;
413    uint32_t max_byte = file_offset + size - 1;
414
415    // compute indexes of pages for first and last byte in mapper
416    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
417    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
418
419#if (DEBUG_MAPPER_MOVE_USER & 1)
420if( DEBUG_MAPPER_MOVE_USER < cycle )
421printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
422__FUNCTION__, this->process->pid, this->trdid, first, last );
423#endif
424
425    done = 0;
426
427    // loop on pages in mapper
428    for( page_id = first ; page_id <= last ; page_id++ )
429    {
430        // compute page_offset
431        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
432        else                   page_offset = 0;
433
434        // compute number of bytes in page
435        if      ( first   == last  ) page_count = size;
436        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
437        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
438        else                         page_count = CONFIG_PPM_PAGE_SIZE;
439
440#if (DEBUG_MAPPER_MOVE_USER & 1)
441if( DEBUG_MAPPER_MOVE_USER < cycle )
442printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
443__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
444#endif
445
446        // get extended pointer on page descriptor
447        page_xp = mapper_remote_get_page( mapper_xp , page_id ); 
448
449        if ( page_xp == XPTR_NULL ) return -1;
450
451#if (DEBUG_MAPPER_MOVE_USER & 1)
452if( DEBUG_MAPPER_MOVE_USER < cycle )
453printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
454__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
455#endif
456
457        // compute pointer in mapper
458        xptr_t    base_xp = ppm_page2base( page_xp );
459        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;
460
461        // compute pointer in buffer
462        uint8_t * buf_ptr = (uint8_t *)buffer + done;
463
464        // move fragment
465        if( to_buffer )
466        {
467            hal_copy_to_uspace( buf_ptr , map_ptr , page_count ); 
468        }
469        else
470        {
471            ppm_page_do_dirty( page_xp ); 
472            hal_copy_from_uspace( map_ptr , buf_ptr , page_count ); 
473        }
474
475        done += page_count;
476    }
477
478#if DEBUG_MAPPER_MOVE_USER
479cycle = (uint32_t)hal_get_cycles();
480if( DEBUG_MAPPER_MOVE_USER < cycle )
481printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
482__FUNCTION__, this->process->pid, this->trdid, cycle );
483#endif
484
485    return 0;
486
487}  // end mapper_move_user()
488
489////////////////////////////////////////////////
490error_t mapper_move_kernel( xptr_t    mapper_xp,
491                            bool_t    to_buffer,
492                            uint32_t  file_offset,
493                            xptr_t    buffer_xp,
494                            uint32_t  size )
495{
496    uint32_t   page_offset;    // first byte to move to/from a mapper page
497    uint32_t   page_count;     // number of bytes to move to/from a mapper page
498    uint32_t   page_id;        // current mapper page index
499    uint32_t   done;           // number of moved bytes
500    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
501
502    uint8_t  * src_ptr;        // source buffer local pointer
503    cxy_t      src_cxy;        // source cluster
504    uint8_t  * dst_ptr;        // destination buffer local pointer
505    cxy_t      dst_cxy;        // destination cluster
506
507    // get buffer cluster and local pointer
508    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
509    uint8_t * buffer_ptr = GET_PTR( buffer_xp );
510
511    // get mapper cluster
512    cxy_t     mapper_cxy = GET_CXY( mapper_xp );
513
514#if DEBUG_MAPPER_MOVE_KERNEL
515char          name[CONFIG_VFS_MAX_NAME_LENGTH];
516uint32_t      cycle  = (uint32_t)hal_get_cycles();
517thread_t    * this   = CURRENT_THREAD;
518mapper_t    * mapper = GET_PTR( mapper_xp );
519vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
520vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
521if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
522printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
523__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
524#endif
525
526    // compute offsets of first and last bytes in file
527    uint32_t min_byte = file_offset;
528    uint32_t max_byte = file_offset + size -1;
529
530    // compute indexes for first and last pages in mapper
531    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
532    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
533
534    // compute source and destination clusters
535    if( to_buffer )
536    {
537        dst_cxy = buffer_cxy;
538        src_cxy = mapper_cxy;
539    }
540    else
541    {
542        src_cxy = buffer_cxy;
543        dst_cxy = mapper_cxy;
544    }
545
546    done = 0;
547
548    // loop on pages in mapper
549    for( page_id = first ; page_id <= last ; page_id++ )
550    {
551        // compute page_offset
552        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
553        else                   page_offset = 0;
554
555        // compute number of bytes to move in page
556        if      ( first == last  )   page_count = size;
557        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
558        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
559        else                         page_count = CONFIG_PPM_PAGE_SIZE;
560
561        // get extended pointer on page descriptor
562        page_xp = mapper_remote_get_page( mapper_xp , page_id );
563
564        if ( page_xp == XPTR_NULL ) return -1;
565
566        // get page base address
567        xptr_t    base_xp  = ppm_page2base( page_xp );
568        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );
569
570        // compute source and destination pointers
571        if( to_buffer )
572        {
573            dst_ptr = buffer_ptr + done;
574            src_ptr = base_ptr + page_offset;
575        }
576        else
577        {
578            src_ptr = buffer_ptr + done;
579            dst_ptr = base_ptr + page_offset;
580
581            ppm_page_do_dirty( page_xp );
582        }
583
584#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
585if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
586{
587    if( to_buffer )
588    printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
589    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_count );
590    else
591    printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
592    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_count );
593}
594#endif
595
596        // move fragment
597        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );
598
599        done += page_count;
600    }
601
602#if DEBUG_MAPPER_MOVE_KERNEL
603cycle  = (uint32_t)hal_get_cycles();
604if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
605printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
606__FUNCTION__, this->process->pid, this->trdid, cycle );
607#endif
608
609    return 0;
610
611}  // end mapper_move_kernel()
612
613///////////////////////////////////////////////////
614error_t mapper_remote_get_32( xptr_t     mapper_xp,
615                              uint32_t   word_id,
616                              uint32_t * p_value )
617{
618    uint32_t   page_id;      // page index in file
619    uint32_t   local_id;     // word index in page
620    xptr_t     page_xp;      // extended pointer on searched page descriptor
621    xptr_t     base_xp;      // extended pointer on searched page base
622
623   
624    // get page index and local word index
625    page_id  = word_id >> 10;
626    local_id = word_id & 0x3FF;
627
628    // get page containing the searched word
629    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
630
631    if( page_xp == XPTR_NULL )  return -1;
632   
633    // get page base
634    base_xp = ppm_page2base( page_xp );
635
636    // get the value from mapper
637    *p_value = hal_remote_l32( base_xp + (local_id<<2) ); 
638
639    return 0;
640
641}  // end mapper_remote_get_32()
642
643///////////////////////////////////////////////////
644error_t mapper_remote_set_32( xptr_t     mapper_xp,
645                              uint32_t   word_id,
646                              uint32_t   value )
647{
648   
649    uint32_t   page_id;      // page index in file
650    uint32_t   local_id;     // word index in page
651    xptr_t     page_xp;      // extended pointer on searched page descriptor
652    xptr_t     base_xp;      // extended pointer on searched page base
653
654    // get page index and local word index
655    page_id  = word_id >> 10;
656    local_id = word_id & 0x3FF;
657
658    // get page containing the searched word
659    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
660
661    if( page_xp == XPTR_NULL ) return -1;
662
663    // get page base
664    base_xp = ppm_page2base( page_xp );
665
666    // set value to mapper
667    hal_remote_s32( (base_xp + (local_id << 2)) , value );
668
669    // set the dirty flag
670    ppm_page_do_dirty( page_xp );
671
672    return 0;
673
674}  // end mapper_remote_set_32()
675
676/////////////////////////////////////////
677error_t mapper_sync( mapper_t *  mapper )
678{
679    page_t   * page;                // local pointer on current page descriptor
680    xptr_t     page_xp;             // extended pointer on current page descriptor
681    grdxt_t  * rt;                  // pointer on radix_tree descriptor
682    uint32_t   start_key;           // start page index in mapper
683    uint32_t   found_key;           // current page index in mapper
684    error_t    error;
685
686#if DEBUG_MAPPER_SYNC
687thread_t * this  = CURRENT_THREAD;
688uint32_t   cycle = (uint32_t)hal_get_cycles();
689char       name[CONFIG_VFS_MAX_NAME_LENGTH];
690vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
691#endif
692
693    // get pointer on radix tree
694    rt = &mapper->rt;
695
696    // initialise loop variable
697    start_key = 0;
698
699    // scan radix-tree until last page found
700    while( 1 )
701    {
702        // get page descriptor from radix tree
703        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
704         
705        if( page == NULL ) break;
706
707assert( (page->index == found_key ), "wrong page descriptor index" );
708assert( (page->order == 0),          "mapper page order must be 0" );
709
710        // build extended pointer on page descriptor
711        page_xp = XPTR( local_cxy , page );
712
713        // synchronize page if dirty
714        if( (page->flags & PG_DIRTY) != 0 )
715        {
716
717#if DEBUG_MAPPER_SYNC
718if( cycle > DEBUG_MAPPER_SYNC )
719printk("\n[%s] thread[%x,%x] synchronize page %d of <%s> to device\n",
720__FUNCTION__, this->process->pid, this->trdid, page->index, name );
721#endif
722            // copy page to file system
723            error = vfs_fs_move_page( page_xp , IOC_WRITE );
724
725            if( error )
726            {
727                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n", 
728                __FUNCTION__, page->index );
729                return -1;
730            }
731
732            // remove page from PPM dirty list
733            ppm_page_undo_dirty( page_xp ); 
734        } 
735        else
736        {
737
738#if DEBUG_MAPPER_SYNC
739if( cycle > DEBUG_MAPPER_SYNC )
740printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
741__FUNCTION__, this->process->pid, this->trdid, page->index, name );
742#endif
743        }
744
745        // update loop variable
746        start_key = page->index + 1;
747    }  // end while
748
749    return 0;
750
751}  // end mapper_sync()
752
753//////////////////////////////////////////////////
754error_t mapper_display_page( xptr_t     mapper_xp,
755                             uint32_t   page_id,
756                             uint32_t   nbytes )
757{
758    xptr_t        page_xp;        // extended pointer on page descriptor
759    xptr_t        base_xp;        // extended pointer on page base
760    char          buffer[4096];   // local buffer
761    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
762    uint32_t      line;           // line index
763    uint32_t      word;           // word index
764    cxy_t         mapper_cxy;     // mapper cluster identifier
765    mapper_t    * mapper_ptr;     // mapper local pointer
766    vfs_inode_t * inode_ptr;      // inode local pointer
767 
768    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
769
770    if( nbytes > 4096)
771    {
772        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
773        __FUNCTION__, nbytes );
774        return -1;
775    }
776   
777    // get extended pointer on page descriptor
778    page_xp = mapper_remote_get_page( mapper_xp , page_id );
779
780    if( page_xp == XPTR_NULL)
781    {
782        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
783        __FUNCTION__, page_id );
784        return -1;
785    }
786
787    // get cluster and local pointer
788    mapper_cxy = GET_CXY( mapper_xp );
789    mapper_ptr = GET_PTR( mapper_xp );
790
791    // get inode
792    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
793
794    // get inode name
795    if( inode_ptr == NULL ) strcpy( name , "fat" );
796    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
797   
798    // get extended pointer on page base
799    base_xp = ppm_page2base( page_xp );
800   
801    // copy remote page to local buffer
802    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
803
804    // display 8 words per line
805    tabi = (uint32_t *)buffer;
806    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
807    for( line = 0 ; line < (nbytes >> 5) ; line++ )
808    {
809        printk("%X : ", line << 5 );
810        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
811        printk("\n");
812    }
813
814    return 0;
815
816}  // end mapper_display_page()
817
818
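
For context, a minimal usage sketch of the kernel-to-kernel API implemented above:
it reads 64 bytes at file offset 1024 from a file mapper into a local buffer. How the
caller obtains mapper_xp (here from a local inode field) is an assumption; only the
mapper_move_kernel() call itself follows the prototype defined in this file.

    uint8_t  buf[64];                                // local kernel buffer
    xptr_t   buf_xp = XPTR( local_cxy , buf );       // extended pointer on buffer

    // extended pointer on the mapper of a local inode (field name assumed)
    xptr_t   mapper_xp = XPTR( local_cxy , inode->mapper );

    // move 64 bytes from mapper to buffer (to_buffer == true)
    error_t  error = mapper_move_kernel( mapper_xp,
                                         true,       // to_buffer
                                         1024,       // file offset in bytes
                                         buf_xp,
                                         64 );       // size in bytes

    if( error ) printk("\n[ERROR] cannot read data through the mapper\n");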