source: trunk/kernel/mm/mapper.c @ 635

Last change on this file since 635 was 635, checked in by alain, 5 years ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This was done to fix a deadlock that could occur on concurrent page faults.

This version 2.2 has been tested on a TSAR architecture (4 clusters /
2 cores per cluster), with both the "sort" and the "fft" applications.
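
As a rough illustration of this change, the sketch below (a hypothetical helper, not part of this file) shows how a thread can now allocate a physical page directly in a remote cluster by calling ppm_remote_alloc_pages(), the function used by the mapper code below, instead of sending an RPC to the owner cluster:

    // minimal sketch, assuming the PPM remote API used by mapper.c
    // (ppm_remote_alloc_pages / ppm_remote_free_pages);
    // the helper name and its error handling are illustrative only
    #include <hal_kernel_types.h>
    #include <ppm.h>
    #include <page.h>
    #include <printk.h>

    page_t * alloc_page_in_cluster( cxy_t target_cxy )
    {
        // allocate one small (order 0) physical page in the target cluster,
        // without any RPC to that cluster
        page_t * page = ppm_remote_alloc_pages( target_cxy , 0 );

        if( page == NULL )
        {
            printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n",
            __FUNCTION__ , target_cxy );
        }

        return page;
    }

    // the page can later be released the same way, from any cluster:
    // ppm_remote_free_pages( target_cxy , page );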

File size: 27.3 KB
RevLine 
[1]1/*
[606]2 * mapper.c - Kernel cache for FS files or directories implementation.
[1]3 *
4 * Authors   Mohamed Lamine Karaoui (2015)
[623]5 *           Alain Greiner (2016,2017,2018,2019)
[1]6 *
7 * Copyright (c)  UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
[23]28#include <hal_uspace.h>
[1]29#include <grdxt.h>
[614]30#include <string.h>
[1]31#include <rwlock.h>
32#include <printk.h>
[279]33#include <memcpy.h>
[1]34#include <thread.h>
35#include <core.h>
36#include <process.h>
37#include <kmem.h>
38#include <kcm.h>
[567]39#include <ppm.h>
[1]40#include <page.h>
41#include <cluster.h>
42#include <vfs.h>
43#include <mapper.h>
[614]44#include <dev_ioc.h>
[1]45
[567]46
[246]47//////////////////////////////////////////////
48mapper_t * mapper_create( vfs_fs_type_t type )
[1]49{
50    mapper_t * mapper;
51    kmem_req_t req;
52    error_t    error;
53
[635]54    // allocate memory for mapper descriptor
55    req.type  = KMEM_KCM;
56    req.order = bits_log2( sizeof(mapper_t) );
[1]57    req.flags = AF_KERNEL | AF_ZERO;
[635]58    mapper    = kmem_alloc( &req );
[1]59
60    if( mapper == NULL )
61    {
62        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
63        return NULL;
64    }
65
66    // initialize refcount & inode
[183]67    mapper->refcount = 0;
[1]68    mapper->inode    = NULL;
69
70    // initialize radix tree
[606]71    error = grdxt_init( &mapper->rt,
72                        CONFIG_MAPPER_GRDXT_W1,
73                        CONFIG_MAPPER_GRDXT_W2,
74                        CONFIG_MAPPER_GRDXT_W3 );
[1]75    if( error )
76    {
77        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
[635]78        req.type  = KMEM_KCM;
[1]79        req.ptr   = mapper;
80        kmem_free( &req );
81        return NULL;
82    }
83
[246]84    // initialize mapper type
85    mapper->type = type;
86
[1]87    // initialize mapper lock
[606]88    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
[1]89
90    // initialize waiting threads xlist (empty)
[183]91    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );
[1]92
93    // initialize vsegs xlist (empty)
[183]94    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );
[1]95
96    return mapper;
97
[204]98}  // end mapper_create()
99
[606]100////////////////////////////////////////
101void mapper_destroy( mapper_t * mapper )
[1]102{
103    page_t   * page;
104    uint32_t   found_index = 0;
105    uint32_t   start_index = 0;
106    kmem_req_t req;
107
[606]108    // scan radix tree
[1]109    do
110    {
111        // get page from radix tree
[606]112        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
[1]113
[606]114        // release registered pages to PPM
[18]115        if( page != NULL )
[1]116        {
117            // remove page from mapper and release to PPM
[635]118            mapper_remote_release_page( XPTR( local_cxy , mapper ) , page );
[1]119
120            // update start_index value for next page
121            start_index = found_index;
122        }
123    }
124    while( page != NULL );
125
[606]126    // release the memory allocated to radix tree itself
127    grdxt_destroy( &mapper->rt );
[1]128
129    // release memory for mapper descriptor
[635]130    req.type = KMEM_KCM;
[1]131    req.ptr  = mapper;
132    kmem_free( &req );
133
[204]134}  // end mapper_destroy()
135
[635]136////////////////////////////////////////////////////////
137error_t mapper_remote_handle_miss( xptr_t     mapper_xp,
138                                   uint32_t   page_id,
139                                   xptr_t   * page_xp_ptr )
140{
141    error_t    error;
142
143    thread_t * this = CURRENT_THREAD;
144
145    // get target mapper cluster and local pointer
146    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
147    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
148
149#if DEBUG_MAPPER_HANDLE_MISS
150uint32_t      cycle = (uint32_t)hal_get_cycles();
151char          name[CONFIG_VFS_MAX_NAME_LENGTH];
152vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
153if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
154{
155    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
156    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cluster %x / cycle %d",
157    __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle );
158    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR( mapper_cxy , &mapper_ptr->rt ), name );
159}
160if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
161{
162    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cluster %x / cycle %d",
163    __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle );
164    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR( mapper_cxy , &mapper_ptr->rt ), "FAT" );
165}
166#endif
167
168    // allocate one 4-Kbyte page in the remote mapper cluster
169    page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 );
170                           
171    if( page_ptr == NULL )
172    {
173        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
174        __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );
175        return -1;
176    }
177
178    // build extended pointer on new page descriptor
179    xptr_t page_xp = XPTR( mapper_cxy , page_ptr );
180
181    // initialize the page descriptor
182    page_remote_init( page_xp );
183
184    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1          );
185    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index )    , page_id    );
186    hal_remote_spt( XPTR( mapper_cxy , &page_ptr->mapper )   , mapper_ptr );
187    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->flags )    , PG_INIT    );
188
189    // insert page in mapper radix tree
190    error = grdxt_remote_insert( XPTR( mapper_cxy , &mapper_ptr->rt),
191                                 page_id,
192                                 page_ptr );
193
194    if( error )
195    {
196        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
197        __FUNCTION__ , this->process->pid, this->trdid );
198        ppm_remote_free_pages( mapper_cxy , page_ptr );
199        return -1;
200    }
201
202    // launch I/O operation to load page from IOC device to mapper
203    error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
204
205    if( error )
206    {
207        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
208        __FUNCTION__ , this->process->pid, this->trdid );
209        mapper_remote_release_page( mapper_xp , page_ptr );
210        return -1;
211    }
212
213    // return extended pointer on allocated page
214    *page_xp_ptr = page_xp;
215
216#if DEBUG_MAPPER_HANDLE_MISS
217cycle = (uint32_t)hal_get_cycles();
218if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
219{
220    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
221    __FUNCTION__, this->process->pid, this->trdid,
222    page_id, name, ppm_page2ppn( page_xp ), cycle );
223    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
224}
225if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
226{
227    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
228    __FUNCTION__, this->process->pid, this->trdid,
229    page_id, ppm_page2ppn( page_xp ), cycle );
230    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR( mapper_cxy , &mapper_ptr->rt ), "FAT" );
231}
232#endif
233
234    return 0;
235
236}  // end mapper_remote_handle_miss()
237
[606]238////////////////////////////////////////////////////
239xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
240                                uint32_t  page_id )
[1]241{
[183]242    error_t       error;
[606]243    mapper_t    * mapper_ptr;
244    cxy_t         mapper_cxy;
245    xptr_t        lock_xp;        // extended pointer on mapper lock
246    xptr_t        page_xp;        // extended pointer on searched page descriptor
247    xptr_t        rt_xp;          // extended pointer on radix tree in mapper
[1]248
[606]249    thread_t * this = CURRENT_THREAD;
250
251    // get mapper cluster and local pointer
252    mapper_ptr = GET_PTR( mapper_xp );
253    mapper_cxy = GET_CXY( mapper_xp );
254
[438]255#if DEBUG_MAPPER_GET_PAGE
[625]256vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
257uint32_t      cycle = (uint32_t)hal_get_cycles();
[606]258char          name[CONFIG_VFS_MAX_NAME_LENGTH];
[625]259if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )  // FAT mapper
260{
261    printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
262    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
263}
264if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )  // file mapper
265{
266    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
267    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
268    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
269}
[435]270#endif
[204]271
[581]272    // check thread can yield
273    thread_assert_can_yield( this , __FUNCTION__ );
274
[606]275    // build extended pointer on mapper lock and mapper rt
276    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
277    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
278
[1]279    // take mapper lock in READ_MODE
[606]280    remote_rwlock_rd_acquire( lock_xp );
[1]281
282    // search page in radix tree
[606]283    page_xp  = grdxt_remote_lookup( rt_xp , page_id );
[1]284
[606]285    // test mapper miss
[635]286    if( page_xp == XPTR_NULL )                  // miss => handle it
[1]287    {
288        // release the lock in READ_MODE and take it in WRITE_MODE
[606]289        remote_rwlock_rd_release( lock_xp );
290        remote_rwlock_wr_acquire( lock_xp );
[1]291
[606]292        // second lookup, because the page status can have been modified
[1]293        // by another thread while passing from READ_MODE to WRITE_MODE.
294        // From this point on, there are no concurrent accesses to the mapper.
[606]295        page_xp = grdxt_remote_lookup( rt_xp , page_id );
[1]296
[606]297        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
[1]298        {
[635]299            error = mapper_remote_handle_miss( mapper_xp,
300                                               page_id,
301                                               &page_xp );
302            if( error )
[610]303            {
[606]304                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
305                __FUNCTION__ , this->process->pid, this->trdid );
306                remote_rwlock_wr_release( lock_xp );
307                return XPTR_NULL;
[1]308            }
309        }
[635]310
311#if (DEBUG_MAPPER_GET_PAGE & 1)
312if( DEBUG_MAPPER_GET_PAGE < cycle )
313printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n",
314__FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
315#endif
[606]316       
317        // release mapper lock from WRITE_MODE
318        remote_rwlock_wr_release( lock_xp );
[1]319    }
[606]320    else                                              // hit
[1]321    {
[606]322        // release mapper lock from READ_MODE
323        remote_rwlock_rd_release( lock_xp );
[1]324    }
325
[438]326#if DEBUG_MAPPER_GET_PAGE
[435]327cycle = (uint32_t)hal_get_cycles();
[625]328if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
329{
330    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
331    __FUNCTION__, this->process->pid, this->trdid, page_id,
332    name, ppm_page2ppn(page_xp), cycle );
333}
334if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
335{
336    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
337    __FUNCTION__, this->process->pid, this->trdid, page_id,
338    ppm_page2ppn(page_xp), cycle );
339}
[435]340#endif
[204]341
[606]342    return page_xp;
[204]343
[606]344}  // end mapper_remote_get_page()
[204]345
[635]346////////////////////////////////////////////////////
347void mapper_remote_release_page( xptr_t   mapper_xp,
348                                 page_t * page )
[1]349{
[635]350    // get mapper cluster and local pointer
351    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
352    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
[1]353
[606]354    // build extended pointer on mapper lock
[635]355    xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );
[606]356
[1]357    // take mapper lock in WRITE_MODE
[635]358    remote_rwlock_wr_acquire( lock_xp );
[1]359
360    // remove physical page from radix tree
[635]361    grdxt_remote_remove( XPTR( mapper_cxy , &mapper_ptr->rt ) , page->index );
[1]362
363    // release mapper lock from WRITE_MODE
[635]364    remote_rwlock_wr_release( lock_xp );
[1]365
366    // release page to PPM
[635]367    ppm_remote_free_pages( mapper_cxy , page );
368                           
[204]369}  // end mapper_remote_release_page()
370
[610]371///////////////////////////////////////////////
372error_t mapper_move_user( xptr_t     mapper_xp,
[313]373                          bool_t     to_buffer,
374                          uint32_t   file_offset,
375                          void     * buffer,
376                          uint32_t   size )
[1]377{
[23]378    uint32_t   page_offset;    // first byte to move to/from a mapper page
[628]379    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
[606]380    uint32_t   page_id;        // current mapper page index
[23]381    uint32_t   done;           // number of moved bytes
[606]382    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
[330]383
[438]384#if DEBUG_MAPPER_MOVE_USER
[626]385uint32_t      cycle      = (uint32_t)hal_get_cycles();
386thread_t    * this       = CURRENT_THREAD;
387cxy_t         mapper_cxy = GET_CXY( mapper_xp );
388mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
389vfs_inode_t * inode_ptr  = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
390xptr_t        inode_xp   = XPTR( mapper_cxy , inode_ptr );
391char          name[CONFIG_VFS_MAX_NAME_LENGTH];
392vfs_inode_get_name( inode_xp , name );
[438]393if( DEBUG_MAPPER_MOVE_USER < cycle )
[626]394{
395    if( to_buffer )
396    printk("\n[%s] thread[%x,%x] : mapper(%s) -> buffer(%x) / bytes %d / cycle %d\n",
397    __FUNCTION__, this->process->pid, this->trdid, name, buffer, size, cycle );
398    else
399    printk("\n[%s] thread[%x,%x] : buffer(%x) -> mapper(%s) / bytes %d / cycle %d\n",
400    __FUNCTION__, this->process->pid, this->trdid, buffer, name, size, cycle );
401}
[435]402#endif
[1]403
[628]404    // compute indexes of first and last bytes in file
[23]405    uint32_t min_byte = file_offset;
[606]406    uint32_t max_byte = file_offset + size - 1;
[1]407
[23]408    // compute indexes of pages for first and last byte in mapper
409    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
410    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
[1]411
[606]412#if (DEBUG_MAPPER_MOVE_USER & 1)
413if( DEBUG_MAPPER_MOVE_USER < cycle )
[626]414printk("\n[%s] thread[%x,%x] : mapper(%x,%x) / first_page %d / last_page %d\n",
415__FUNCTION__, this->process->pid, this->trdid, mapper_cxy, mapper_ptr, first, last );
[606]416#endif
417
[23]418    done = 0;
[1]419
[23]420    // loop on pages in mapper
[606]421    for( page_id = first ; page_id <= last ; page_id++ )
[1]422    {
[183]423        // compute page_offset
[606]424        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
425        else                   page_offset = 0;
[1]426
[313]427        // compute number of bytes in page
[628]428        if      ( first   == last  ) page_bytes = size;
429        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
430        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
431        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;
[1]432
[438]433#if (DEBUG_MAPPER_MOVE_USER & 1)
434if( DEBUG_MAPPER_MOVE_USER < cycle )
[626]435printk("\n[%s] thread[%x,%x] : page_id %d / page_offset %d / bytes %d\n",
[628]436__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_bytes );
[435]437#endif
[265]438
[628]439        // get extended pointer on page descriptor in mapper
[606]440        page_xp = mapper_remote_get_page( mapper_xp , page_id ); 
[1]441
[606]442        if ( page_xp == XPTR_NULL ) return -1;
[1]443
[626]444        // compute cluster and pointers on page in mapper
445        xptr_t     map_xp  = ppm_page2base( page_xp );
446        uint8_t  * map_ptr = GET_PTR( map_xp );
447        cxy_t      map_cxy = GET_CXY( map_xp );
448
[610]449#if (DEBUG_MAPPER_MOVE_USER & 1)
450if( DEBUG_MAPPER_MOVE_USER < cycle )
[626]451printk("\n[%s] thread[%x,%x] : get buffer(%x,%x) in mapper\n",
452__FUNCTION__, this->process->pid, this->trdid, map_cxy, map_ptr );
[610]453#endif
[626]454        // compute pointer in user buffer
[606]455        uint8_t * buf_ptr = (uint8_t *)buffer + done;
[1]456
457        // move fragment
[330]458        if( to_buffer )
[1]459        {
[628]460            hal_copy_to_uspace( map_cxy , map_ptr + page_offset , buf_ptr , page_bytes ); 
[626]461
462#if DEBUG_MAPPER_MOVE_USER & 1
463if( DEBUG_MAPPER_MOVE_USER < cycle )
464printk("\n[%s] thread[%x,%x] moved %d bytes / mapper %s (%x,%x) -> user buffer(%x,%x)\n",
[628]465__FUNCTION__, this->process->pid, this->trdid, page_bytes,
466name, map_cxy, map_ptr + page_offset, local_cxy, buf_ptr );
[626]467#endif
468
[1]469        }
[330]470        else
[1]471        {
[606]472            ppm_page_do_dirty( page_xp ); 
[628]473            hal_copy_from_uspace( map_cxy , map_ptr + page_offset , buf_ptr , page_bytes ); 
[626]474
475#if DEBUG_MAPPER_MOVE_USER & 1
476if( DEBUG_MAPPER_MOVE_USER < cycle )
477printk("\n[%s] thread[%x,%x] moved %d bytes / user buffer(%x,%x) -> mapper %s (%x,%x)\n",
[628]478__FUNCTION__, this->process->pid, this->trdid, page_bytes,
479local_cxy, buf_ptr, name, map_cxy, map_ptr + page_offset );
[626]480mapper_display_page(  mapper_xp , page_id, 128 );
481#endif
482
[1]483        }
484
[628]485        done += page_bytes;
[1]486    }
487
[438]488#if DEBUG_MAPPER_MOVE_USER
[626]489cycle      = (uint32_t)hal_get_cycles();
[438]490if( DEBUG_MAPPER_MOVE_USER < cycle )
[626]491{
492    if( to_buffer )
493    printk("\n[%s] thread[%x,%x] completed mapper(%s) -> buffer(%x) / cycle %d\n",
494    __FUNCTION__, this->process->pid, this->trdid, name, buffer, cycle );
495    else
496    printk("\n[%s] thread[%x,%x] completed buffer(%x) -> mapper(%s) / cycle %d\n",
497    __FUNCTION__, this->process->pid, this->trdid, buffer, name, cycle );
498}
[435]499#endif
[204]500
[1]501    return 0;
502
[313]503}  // end mapper_move_user()
[204]504
[313]505////////////////////////////////////////////////
[606]506error_t mapper_move_kernel( xptr_t    mapper_xp,
507                            bool_t    to_buffer,
508                            uint32_t  file_offset,
509                            xptr_t    buffer_xp,
510                            uint32_t  size )
[313]511{
512    uint32_t   page_offset;    // first byte to move to/from a mapper page
[628]513    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
[606]514    uint32_t   page_id;        // current mapper page index
[313]515    uint32_t   done;           // number of moved bytes
[606]516    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
[313]517
518    uint8_t  * src_ptr;        // source buffer local pointer
519    cxy_t      src_cxy;        // source cluster
520    uint8_t  * dst_ptr;        // destination buffer local pointer
521    cxy_t      dst_cxy;        // destination cluster
[330]522
[406]523    // get buffer cluster and local pointer
524    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
[606]525    uint8_t * buffer_ptr = GET_PTR( buffer_xp );
[313]526
[606]527    // get mapper cluster
528    cxy_t     mapper_cxy = GET_CXY( mapper_xp );
529
[438]530#if DEBUG_MAPPER_MOVE_KERNEL
[625]531char          name[CONFIG_VFS_MAX_NAME_LENGTH];
532uint32_t      cycle  = (uint32_t)hal_get_cycles();
533thread_t    * this   = CURRENT_THREAD;
534mapper_t    * mapper = GET_PTR( mapper_xp );
535vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
536vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
[438]537if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
[625]538printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
539__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
[435]540#endif
[406]541
[313]542    // compute offsets of first and last bytes in file
543    uint32_t min_byte = file_offset;
544    uint32_t max_byte = file_offset + size -1;
545
546    // compute indexes for first and last pages in mapper
547    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
548    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
549
550    // compute source and destination clusters
551    if( to_buffer )
552    {
553        dst_cxy = buffer_cxy;
[606]554        src_cxy = mapper_cxy;
[313]555    }
556    else
557    {
558        src_cxy = buffer_cxy;
[606]559        dst_cxy = mapper_cxy;
[313]560    }
561
562    done = 0;
563
564    // loop on pages in mapper
[606]565    for( page_id = first ; page_id <= last ; page_id++ )
[313]566    {
567        // compute page_offset
[606]568        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
569        else                   page_offset = 0;
[313]570
571        // compute number of bytes to move in page
[628]572        if      ( first == last  )   page_bytes = size;
573        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
574        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
575        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;
[313]576
[606]577        // get extended pointer on page descriptor
578        page_xp = mapper_remote_get_page( mapper_xp , page_id );
[313]579
[606]580        if ( page_xp == XPTR_NULL ) return -1;
[313]581
[315]582        // get page base address
[606]583        xptr_t    base_xp  = ppm_page2base( page_xp );
[367]584        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );
[330]585
[313]586        // compute source and destination pointers
587        if( to_buffer )
588        {
[315]589            dst_ptr = buffer_ptr + done;
[367]590            src_ptr = base_ptr + page_offset;
[313]591        }
592        else
593        {
[315]594            src_ptr = buffer_ptr + done;
[367]595            dst_ptr = base_ptr + page_offset;
[313]596
[606]597            ppm_page_do_dirty( page_xp );
[313]598        }
599
[610]600#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
601if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
[625]602{
603    if( to_buffer )
604    printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
[628]605    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
[625]606    else
607    printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
[628]608    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
[625]609}
[610]610#endif
611
[313]612        // move fragment
[628]613        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_bytes );
[330]614
[628]615        done += page_bytes;
[313]616    }
617
[438]618#if DEBUG_MAPPER_MOVE_KERNEL
[625]619cycle  = (uint32_t)hal_get_cycles();
[438]620if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
[625]621printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
622__FUNCTION__, this->process->pid, this->trdid, cycle );
[435]623#endif
[313]624
625    return 0;
626
[406]627}  // end mapper_move_kernel()
[313]628
[606]629///////////////////////////////////////////////////
630error_t mapper_remote_get_32( xptr_t     mapper_xp,
[628]631                              uint32_t   page_id,
[606]632                              uint32_t   word_id,
[628]633                              uint32_t * value )
[606]634{
635    xptr_t     page_xp;      // extended pointer on searched page descriptor
636    xptr_t     base_xp;      // extended pointer on searched page base
637   
638    // get page containing the searched word
639    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
640
641    if( page_xp == XPTR_NULL )  return -1;
642   
643    // get page base
644    base_xp = ppm_page2base( page_xp );
645
646    // get the value from mapper
[628]647    *value = hal_remote_l32( base_xp + (word_id<<2) ); 
[606]648
649    return 0;
650
651}  // end mapper_remote_get_32()
652
653///////////////////////////////////////////////////
654error_t mapper_remote_set_32( xptr_t     mapper_xp,
[628]655                              uint32_t   page_id,
[606]656                              uint32_t   word_id,
657                              uint32_t   value )
658{
659    xptr_t     page_xp;      // extended pointer on searched page descriptor
660    xptr_t     base_xp;      // extended pointer on searched page base
661
662    // get page containing the searched word
663    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
664
665    if( page_xp == XPTR_NULL ) return -1;
666
667    // get page base
668    base_xp = ppm_page2base( page_xp );
669
670    // set value to mapper
[628]671    hal_remote_s32( (base_xp + (word_id << 2)) , value );
[606]672
[628]673    // set the dirty flag in page descriptor
[606]674    ppm_page_do_dirty( page_xp );
675
676    return 0;
677
678}  // end mapper_remote_set_32()
679
[623]680/////////////////////////////////////////
681error_t mapper_sync( mapper_t *  mapper )
682{
683    page_t   * page;                // local pointer on current page descriptor
684    xptr_t     page_xp;             // extended pointer on current page descriptor
685    grdxt_t  * rt;                  // pointer on radix_tree descriptor
686    uint32_t   start_key;           // start page index in mapper
687    uint32_t   found_key;           // current page index in mapper
688    error_t    error;
689
690#if DEBUG_MAPPER_SYNC
691thread_t * this  = CURRENT_THREAD;
692uint32_t   cycle = (uint32_t)hal_get_cycles();
693char       name[CONFIG_VFS_MAX_NAME_LENGTH];
694vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
695#endif
696
697    // get pointer on radix tree
[625]698    rt = &mapper->rt;
[623]699
700    // initialise loop variable
701    start_key = 0;
702
703    // scan radix-tree until last page found
704    while( 1 )
705    {
706        // get page descriptor from radix tree
707        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
708         
709        if( page == NULL ) break;
710
[625]711assert( (page->index == found_key ), "wrong page descriptor index" );
712assert( (page->order == 0),          "mapper page order must be 0" );
[623]713
714        // build extended pointer on page descriptor
715        page_xp = XPTR( local_cxy , page );
716
717        // synchronize page if dirty
718        if( (page->flags & PG_DIRTY) != 0 )
719        {
720
721#if DEBUG_MAPPER_SYNC
722if( cycle > DEBUG_MAPPER_SYNC )
[626]723printk("\n[%s] thread[%x,%x] synchronise page %d of <%s> to IOC device\n",
[623]724__FUNCTION__, this->process->pid, this->trdid, page->index, name );
725#endif
726            // copy page to file system
727            error = vfs_fs_move_page( page_xp , IOC_WRITE );
728
729            if( error )
730            {
731                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
732                __FUNCTION__, page->index );
733                return -1;
734            }
735
736            // remove page from PPM dirty list
737            ppm_page_undo_dirty( page_xp ); 
738        } 
739        else
740        {
741
742#if DEBUG_MAPPER_SYNC
743if( cycle > DEBUG_MAPPER_SYNC )
744printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
745__FUNCTION__, this->process->pid, this->trdid, page->index, name );
746#endif
747        }
748
749        // update loop variable
750        start_key = page->index + 1;
751    }  // end while
752
753    return 0;
754
755}  // end mapper_sync()
756
[611]757//////////////////////////////////////////////////
758error_t mapper_display_page( xptr_t     mapper_xp,
759                             uint32_t   page_id,
[614]760                             uint32_t   nbytes )
[611]761{
[614]762    xptr_t        page_xp;        // extended pointer on page descriptor
763    xptr_t        base_xp;        // extended pointer on page base
764    char          buffer[4096];   // local buffer
765    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
766    uint32_t      line;           // line index
767    uint32_t      word;           // word index
768    cxy_t         mapper_cxy;     // mapper cluster identifier
769    mapper_t    * mapper_ptr;     // mapper local pointer
770    vfs_inode_t * inode_ptr;      // inode local pointer
771 
772    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
[606]773
[611]774    if( nbytes > 4096)
775    {
776        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
777        __FUNCTION__, nbytes );
778        return -1;
779    }
780   
781    // get extended pointer on page descriptor
782    page_xp = mapper_remote_get_page( mapper_xp , page_id );
783
784    if( page_xp == XPTR_NULL)
785    {
786        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
787        __FUNCTION__, page_id );
788        return -1;
789    }
790
[614]791    // get cluster and local pointer
792    mapper_cxy = GET_CXY( mapper_xp );
793    mapper_ptr = GET_PTR( mapper_xp );
794
795    // get inode
796    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
797
798    // get inode name
799    if( inode_ptr == NULL ) strcpy( name , "fat" );
800    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
801   
[611]802    // get extended pointer on page base
803    base_xp = ppm_page2base( page_xp );
804   
805    // copy remote page to local buffer
806    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
807
808    // display 8 words per line
[614]809    tabi = (uint32_t *)buffer;
[626]810    printk("\n***** mapper <%s> / %d bytes in page %d (%x,%x)\n",
811    name, nbytes, page_id, GET_CXY(base_xp), GET_PTR(base_xp) );
[611]812    for( line = 0 ; line < (nbytes >> 5) ; line++ )
813    {
[625]814        printk("%X : ", line << 5 );
[614]815        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
[611]816        printk("\n");
817    }
818
819    return 0;
820
821}  // end mapper_display_page
822
823
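
As a usage illustration of the mapper API defined in this file, here is a hedged caller sketch (not part of mapper.c): it copies the first bytes of a file into a local kernel buffer through mapper_move_kernel(), which handles possible mapper misses internally via mapper_remote_get_page(). The function name, the buffer size, and the assumption that mapper_xp is a valid extended pointer built elsewhere by the VFS are illustrative only.

    // minimal sketch, assuming <mapper_xp> identifies an existing file mapper;
    // the includes follow those of mapper.c
    #include <hal_kernel_types.h>
    #include <cluster.h>
    #include <mapper.h>

    error_t read_first_bytes( xptr_t   mapper_xp,
                              uint32_t nbytes )
    {
        uint8_t buffer[256];                 // local kernel buffer

        if( nbytes > sizeof(buffer) ) return -1;

        // copy <nbytes> bytes, starting at file offset 0, from the mapper
        // to the local kernel buffer (to_buffer == true)
        return mapper_move_kernel( mapper_xp,
                                   true,                        // to_buffer
                                   0,                           // file_offset
                                   XPTR( local_cxy , buffer ),
                                   nbytes );
    }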