source: trunk/kernel/mm/mapper.c @ 610

Last change on this file since 610 was 610, checked in by alain, 5 years ago

Fix several bugs in VFS to support the following
ksh commands : cp, mv, rm, mkdir, cd, pwd

File size: 20.3 KB
/*
 * mapper.c - Kernel cache for FS files or directories implementation.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>


//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->rt,
                        CONFIG_MAPPER_GRDXT_W1,
                        CONFIG_MAPPER_GRDXT_W2,
                        CONFIG_MAPPER_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

////////////////////////////////////////
void mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;

    // scan radix tree
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );

        // release registered pages to PPM
        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            mapper_release_page( mapper , page );

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix tree itself
    grdxt_destroy( &mapper->rt );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

}  // end mapper_destroy()
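
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : a hypothetical helper named
// example_mapper_lifecycle(), showing the expected lifecycle of a mapper, using only
// the mapper_create() and mapper_destroy() functions defined above.
// It is a minimal sketch with trivial error handling, not actual VFS code.
/////////////////////////////////////////////////////////////////////////////////////
static error_t example_mapper_lifecycle( vfs_fs_type_t type )
{
    // create a mapper for the requested FS type
    mapper_t * mapper = mapper_create( type );

    if( mapper == NULL ) return -1;

    // ... the VFS would normally link this mapper to an inode, and access the
    // cached pages through mapper_remote_get_page() or the move functions below ...

    // release all cached pages and the mapper descriptor itself
    mapper_destroy( mapper );

    return 0;
}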

////////////////////////////////////////////////////
xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
                                uint32_t  page_id )
{
    error_t       error;
    mapper_t    * mapper_ptr;
    cxy_t         mapper_cxy;
    xptr_t        lock_xp;        // extended pointer on mapper lock
    xptr_t        page_xp;        // extended pointer on searched page descriptor
    xptr_t        rt_xp;          // extended pointer on radix tree in mapper

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_ptr = GET_PTR( mapper_xp );
    mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
#endif

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => try to handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // test again for the missing page, because the page status can be modified
        // by another thread while the lock is upgraded from READ_MODE to WRITE_MODE.
        // From this point, there are no concurrent accesses to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {

            if( mapper_cxy == local_cxy )   // mapper is local
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
#endif
                 error = mapper_handle_miss( mapper_ptr,
                                             page_id,
                                             &page_xp );
            }
            else
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
#endif
                 rpc_mapper_handle_miss_client( mapper_cxy,
                                                mapper_ptr,
                                                page_id,
                                                &page_xp,
                                                &error );
            }

            if ( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
page_id, name, ppm_page2ppn( page_xp ), cycle );
#endif

    return page_xp;

}  // end mapper_remote_get_page()
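
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : a hypothetical helper named
// example_get_page_base(), showing how a caller can obtain the base address of one
// page of a local mapper, by combining mapper_remote_get_page() above with the
// ppm_page2base() function already used in this file.
// It returns XPTR_NULL when the page cannot be loaded from the file system.
/////////////////////////////////////////////////////////////////////////////////////
static xptr_t example_get_page_base( mapper_t * mapper,
                                     uint32_t   page_id )
{
    // get extended pointer on the page descriptor (loads it from the FS on a miss)
    xptr_t page_xp = mapper_remote_get_page( XPTR( local_cxy , mapper ) , page_id );

    if( page_xp == XPTR_NULL ) return XPTR_NULL;

    // convert the page descriptor to the base address of the page frame
    return ppm_page2base( page_xp );
}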

//////////////////////////////////////////////
error_t mapper_handle_miss( mapper_t * mapper,
                            uint32_t   page_id,
                            xptr_t   * page_xp )
{
    kmem_req_t   req;
    page_t     * page;
    error_t      error;

    thread_t * this = CURRENT_THREAD;

#if DEBUG_MAPPER_HANDLE_MISS
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = mapper->inode;
vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
__FUNCTION__, page_id, name, cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
#endif

    // allocate one page from the local cluster
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_NONE;
    page = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
        return -1;
    }

    // initialize the page descriptor
    page_init( page );
    page_set_flag( page , PG_INIT );
    page_refcount_up( page );
    page->mapper = mapper;
    page->index  = page_id;

    // insert page in mapper radix tree
    error = grdxt_insert( &mapper->rt , page_id , page );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        // the page has not been registered in the radix tree :
        // release it directly to PPM (a single release, to avoid a double free)
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free(&req);
        return -1;
    }

    // launch I/O operation to load page from device to mapper
    error = vfs_fs_move_page( XPTR( local_cxy , page ) , true );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        // remove the page from the radix tree (the caller already holds the mapper
        // lock in WRITE_MODE), and release it once to PPM
        grdxt_remove( &mapper->rt , page_id );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free( &req );
        return -1;
    }

    // set extended pointer on allocated page
    *page_xp = XPTR( local_cxy , page );

#if DEBUG_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
#endif

    return 0;

}  // end mapper_handle_miss()

////////////////////////////////////////////
void mapper_release_page( mapper_t * mapper,
                          page_t   * page )
{
    // build extended pointer on mapper lock
    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );

    // take mapper lock in WRITE_MODE
    remote_rwlock_wr_acquire( mapper_lock_xp );

    // remove physical page from radix tree
    grdxt_remove( &mapper->rt , page->index );

    // release mapper lock from WRITE_MODE
    remote_rwlock_wr_release( mapper_lock_xp );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

}  // end mapper_release_page()

///////////////////////////////////////////////
error_t mapper_move_user( xptr_t     mapper_xp,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
to_buffer, buffer, size, file_offset, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
__FUNCTION__, this->process->pid, this->trdid, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page
        if      ( first   == last  ) page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
#endif

        // compute pointer in mapper
        xptr_t    base_xp = ppm_page2base( page_xp );
        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page_xp );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    return 0;

}  // end mapper_move_user()
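
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : a hypothetical helper named
// example_file_read(), showing how a file read path could use mapper_move_user()
// above to copy <size> bytes, starting at <file_offset>, from a local file mapper
// to a user buffer. The to_buffer argument selects the mapper -> user direction.
/////////////////////////////////////////////////////////////////////////////////////
static error_t example_file_read( mapper_t * mapper,
                                  uint32_t   file_offset,
                                  void     * user_buffer,
                                  uint32_t   size )
{
    // move bytes from the mapper pages to the user buffer,
    // loading missing pages from the file system as needed
    return mapper_move_user( XPTR( local_cxy , mapper ),
                             true,
                             file_offset,
                             user_buffer,
                             size );
}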

////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  )   page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
#endif

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
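
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : a hypothetical helper named
// example_load_to_kernel_buffer(), showing how a kernel client could load <size>
// bytes, starting at <file_offset>, from a (possibly remote) mapper into a kernel
// buffer located in the local cluster, using mapper_move_kernel() above.
// The local buffer is simply wrapped in an extended pointer.
/////////////////////////////////////////////////////////////////////////////////////
static error_t example_load_to_kernel_buffer( xptr_t     mapper_xp,
                                              uint32_t   file_offset,
                                              uint8_t  * kbuf,
                                              uint32_t   size )
{
    // to_buffer == true : move bytes from the mapper to the kernel buffer
    return mapper_move_kernel( mapper_xp,
                               true,
                               file_offset,
                               XPTR( local_cxy , kbuf ),
                               size );
}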

///////////////////////////////////////////////////
error_t mapper_remote_get_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t * p_value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )  return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // get the value from mapper
    *p_value = hal_remote_l32( base_xp + (local_id<<2) );

    return 0;

}  // end mapper_remote_get_32()

///////////////////////////////////////////////////
error_t mapper_remote_set_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t   value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // set value to mapper
    hal_remote_s32( (base_xp + (local_id << 2)) , value );

    // set the dirty flag
    ppm_page_do_dirty( page_xp );

    return 0;

}  // end mapper_remote_set_32()
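
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : a hypothetical helper named
// example_set_bits_in_word(), showing how a caller could read-modify-write one
// 32-bit word of a (possibly remote) mapper with the two accessors above.
// The word index is expressed in 32-bit words from the beginning of the file,
// and mapper_remote_set_32() marks the modified page dirty itself.
/////////////////////////////////////////////////////////////////////////////////////
static error_t example_set_bits_in_word( xptr_t    mapper_xp,
                                         uint32_t  word_id,
                                         uint32_t  mask )
{
    uint32_t value;

    // read the current value of the word from the mapper
    if( mapper_remote_get_32( mapper_xp , word_id , &value ) ) return -1;

    // write back the word with the requested bits set
    return mapper_remote_set_32( mapper_xp , word_id , value | mask );
}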