source: trunk/kernel/mm/mapper.c @ 611

Last change on this file since 611 was 611, checked in by alain, 5 years ago

Introduce significant modifications in VFS to support the <ls> command,
and the . and .. directory entries.

File size: 21.9 KB
/*
 * mapper.c - Kernel cache for FS files and directories (implementation).
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->rt,
                        CONFIG_MAPPER_GRDXT_W1,
                        CONFIG_MAPPER_GRDXT_W2,
                        CONFIG_MAPPER_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

////////////////////////////////////////
void mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;

    // scan radix tree
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );

        // release registered pages to PPM
        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            mapper_release_page( mapper , page );

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix tree itself
    grdxt_destroy( &mapper->rt );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

}  // end mapper_destroy()
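
/* Usage sketch (illustration only, not the actual VFS code): a mapper is
 * typically created when an inode is created, attached to it, and destroyed
 * when the inode is released. The "fs_type" and "inode" variables below are
 * assumed to be provided by the caller:
 *
 *     mapper_t * mapper = mapper_create( fs_type );
 *     if( mapper == NULL ) return -1;
 *     mapper->inode = inode;      // back-link to the inode (assumed to be set by the caller)
 *     ...
 *     mapper_destroy( mapper );
 */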
134
[606]135////////////////////////////////////////////////////
136xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
137                                uint32_t  page_id )
[1]138{
[183]139    error_t       error;
[606]140    mapper_t    * mapper_ptr;
141    cxy_t         mapper_cxy;
142    xptr_t        lock_xp;        // extended pointer on mapper lock
143    xptr_t        page_xp;        // extended pointer on searched page descriptor
144    xptr_t        rt_xp;          // extended pointer on radix tree in mapper
[1]145
[606]146    thread_t * this = CURRENT_THREAD;
147
148    // get mapper cluster and local pointer
149    mapper_ptr = GET_PTR( mapper_xp );
150    mapper_cxy = GET_CXY( mapper_xp );
151
[438]152#if DEBUG_MAPPER_GET_PAGE
[435]153uint32_t cycle = (uint32_t)hal_get_cycles();
[606]154char          name[CONFIG_VFS_MAX_NAME_LENGTH];
155vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
156vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
[438]157if( DEBUG_MAPPER_GET_PAGE < cycle )
[606]158printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
159__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
[435]160#endif
[204]161
[581]162    // check thread can yield
163    thread_assert_can_yield( this , __FUNCTION__ );
164
[606]165    // build extended pointer on mapper lock and mapper rt
166    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
167    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
168
[1]169    // take mapper lock in READ_MODE
[606]170    remote_rwlock_rd_acquire( lock_xp );
[1]171
172    // search page in radix tree
[606]173    page_xp  = grdxt_remote_lookup( rt_xp , page_id );
[1]174
[606]175    // test mapper miss
176    if( page_xp == XPTR_NULL )                  // miss => try to handle it
[1]177    {
178        // release the lock in READ_MODE and take it in WRITE_MODE
[606]179        remote_rwlock_rd_release( lock_xp );
180        remote_rwlock_wr_acquire( lock_xp );
[1]181
        // test again for the missing page, because the page status can have been
        // modified by another thread while the lock was released between
        // READ_MODE and WRITE_MODE. From this point there are no concurrent
        // accesses to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {

            if( mapper_cxy == local_cxy )   // mapper is local
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
#endif
                error = mapper_handle_miss( mapper_ptr,
                                            page_id,
                                            &page_xp );
            }
            else
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
#endif
                rpc_mapper_handle_miss_client( mapper_cxy,
                                               mapper_ptr,
                                               page_id,
                                               &page_xp,
                                               &error );
            }

            if ( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
page_id, name, ppm_page2ppn( page_xp ), cycle );
#endif

    return page_xp;

}  // end mapper_remote_get_page()
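
/* Usage sketch (illustration only): a client thread, possibly running in a
 * remote cluster, can read one word of a cached page as follows. The
 * "mapper_xp" and "page_id" variables are assumed to be provided by the caller:
 *
 *     xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );
 *     if( page_xp == XPTR_NULL ) return -1;
 *     xptr_t   base_xp = ppm_page2base( page_xp );
 *     uint32_t word    = hal_remote_l32( base_xp );
 *
 * This is essentially what mapper_remote_get_32() below does.
 */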

//////////////////////////////////////////////
error_t mapper_handle_miss( mapper_t * mapper,
                            uint32_t   page_id,
                            xptr_t   * page_xp )
{
    kmem_req_t   req;
    page_t     * page;
    error_t      error;

    thread_t * this = CURRENT_THREAD;

#if DEBUG_MAPPER_HANDLE_MISS
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = mapper->inode;
vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
__FUNCTION__, page_id, name, cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
#endif

    // allocate one page from the local cluster
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_NONE;
    page = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
        return -1;
    }

    // initialize the page descriptor
    page_init( page );
    page_set_flag( page , PG_INIT );
    page_refcount_up( page );
    page->mapper = mapper;
    page->index  = page_id;

    // insert page in mapper radix tree
    error = grdxt_insert( &mapper->rt , page_id , page );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        mapper_release_page( mapper , page );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free(&req);
        return -1;
    }

    // launch I/O operation to load page from device to mapper
    error = vfs_fs_move_page( XPTR( local_cxy , page ) , true );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        mapper_release_page( mapper , page );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free( &req );
        return -1;
    }

    // set extended pointer on allocated page
    *page_xp = XPTR( local_cxy , page );

#if DEBUG_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
#endif

    return 0;

}  // end mapper_handle_miss()

////////////////////////////////////////////
void mapper_release_page( mapper_t * mapper,
                          page_t   * page )
{
    // build extended pointer on mapper lock
    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );

    // take mapper lock in WRITE_MODE
    remote_rwlock_wr_acquire( mapper_lock_xp );

    // remove physical page from radix tree
    grdxt_remove( &mapper->rt , page->index );

    // release mapper lock from WRITE_MODE
    remote_rwlock_wr_release( mapper_lock_xp );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

}  // end mapper_release_page()

///////////////////////////////////////////////
error_t mapper_move_user( xptr_t     mapper_xp,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
to_buffer, buffer, size, file_offset, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
__FUNCTION__, this->process->pid, this->trdid, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page
        if      ( first   == last  ) page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
#endif

        // compute pointer in mapper
        xptr_t    base_xp = ppm_page2base( page_xp );
        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page_xp );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    return 0;

}  // end mapper_move_user()
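
/* Usage sketch (illustration only, not taken from the syscall code): a read()
 * implementation could copy "size" bytes from the file cache to a user buffer
 * as follows, where "inode", "file_offset", "user_buf" and "size" are assumed
 * to come from the file descriptor and the syscall arguments:
 *
 *     xptr_t mapper_xp = XPTR( local_cxy , inode->mapper );   // assumed inode field
 *     if( mapper_move_user( mapper_xp , true , file_offset , user_buf , size ) )
 *         return -1;
 */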

////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  )   page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
#endif

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
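
/* Usage sketch (illustration only): a file system driver could read a 32-bit
 * word from the cache into a local kernel variable as follows, where
 * "mapper_xp" and "offset" are assumed to be provided by the caller:
 *
 *     uint32_t buf;
 *     error_t  error = mapper_move_kernel( mapper_xp,
 *                                          true,                       // to kernel buffer
 *                                          offset,                     // byte offset in file
 *                                          XPTR( local_cxy , &buf ),   // buffer extended pointer
 *                                          4 );                        // number of bytes
 */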

///////////////////////////////////////////////////
error_t mapper_remote_get_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t * p_value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;
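    // note : the hard-coded shift and mask assume 4 Kbytes pages (1024 32-bit words per page)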

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )  return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // get the value from mapper
    *p_value = hal_remote_l32( base_xp + (local_id<<2) );

    return 0;

}  // end mapper_remote_get_32()

///////////////////////////////////////////////////
error_t mapper_remote_set_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t   value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;
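    // note : same 4 Kbytes page assumption as in mapper_remote_get_32()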

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // set value to mapper
    hal_remote_s32( (base_xp + (local_id << 2)) , value );

    // set the dirty flag
    ppm_page_do_dirty( page_xp );

    return 0;

}  // end mapper_remote_set_32()

//////////////////////////////////////////////////
error_t mapper_display_page( xptr_t     mapper_xp,
                             uint32_t   page_id,
                             uint32_t   nbytes,
                             char     * string )
{
    xptr_t     page_xp;        // extended pointer on page descriptor
    xptr_t     base_xp;        // extended pointer on page base
    char       buffer[4096];   // local buffer
    uint32_t * tab;            // pointer on uint32_t to scan the buffer
    uint32_t   line;           // line index
    uint32_t   word;           // word index

    if( nbytes > 4096 )
    {
        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
        __FUNCTION__, nbytes );
        return -1;
    }

    // get extended pointer on page descriptor
    page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )
    {
        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
        __FUNCTION__, page_id );
        return -1;
    }

    // get extended pointer on page base
    base_xp = ppm_page2base( page_xp );

    // copy remote page to local buffer
    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );

    // display 8 words per line
    tab = (uint32_t *)buffer;
    printk("\n***** %s : first %d bytes of page %d *****\n", string, nbytes, page_id );
    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    {
        printk("%X : ", line );
        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
        printk("\n");
    }

    return 0;

}  // end mapper_display_page()
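
/* Usage sketch (illustration only, all arguments are assumptions): dump the
 * first 64 bytes of page 0 of a mapper, e.g. while debugging a directory:
 *
 *     mapper_display_page( XPTR( local_cxy , mapper ) , 0 , 64 , "root dir" );
 */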