Changeset 606


Ignore:
Timestamp:
Dec 3, 2018, 12:20:18 PM (5 years ago)
Author:
alain
Message:

Improve the FAT32 file system to support cat, rm, cp commands.

Location:
trunk/kernel/mm
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/mapper.c

    r581 r606  
    11/*
    2  * mapper.c - Map memory, file or device in process virtual address space.
     2 * mapper.c - Kernel cache for FS files or directories implementation.
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
     
    5050    error_t    error;
    5151
    52     // allocate memory for associated mapper
     52    // allocate memory for mapper
    5353    req.type  = KMEM_MAPPER;
    5454    req.size  = sizeof(mapper_t);
     
    6767
    6868    // initialize radix tree
    69     error = grdxt_init( &mapper->radix,
    70                         CONFIG_VMM_GRDXT_W1,
    71                         CONFIG_VMM_GRDXT_W2,
    72                         CONFIG_VMM_GRDXT_W3 );
     69    error = grdxt_init( &mapper->rt,
     70                        CONFIG_MAPPER_GRDXT_W1,
     71                        CONFIG_MAPPER_GRDXT_W2,
     72                        CONFIG_MAPPER_GRDXT_W3 );
    7373
    7474    if( error )
     
    8585
    8686    // initialize mapper lock
    87     rwlock_init(  &mapper->lock , LOCK_MAPPER_STATE );
     87    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
    8888
    8989    // initialize waiting threads xlist (empty)
     
    9797}  // end mapper_create()
    9898
    99 ///////////////////////////////////////////
    100 error_t mapper_destroy( mapper_t * mapper )
     99////////////////////////////////////////
     100void mapper_destroy( mapper_t * mapper )
    101101{
    102102    page_t   * page;
     
    104104    uint32_t   start_index = 0;
    105105    kmem_req_t req;
    106     error_t    error;
    107 
    108     // scan radix three and release all registered pages to PPM
     106
     107    // scan radix tree
    109108    do
    110109    {
    111110        // get page from radix tree
    112         page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );
    113 
     111        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
     112
     113        // release registered pages to PPM
    114114        if( page != NULL )
    115115        {
    116116            // remove page from mapper and release to PPM
    117             error = mapper_release_page( mapper , page );
    118 
    119             if ( error ) return error;
     117            mapper_release_page( mapper , page );
    120118
    121119            // update start_key value for next page
     
    125123    while( page != NULL );
    126124
    127     // release the memory allocated to radix-tree itself
    128     grdxt_destroy( &mapper->radix );
     125    // release the memory allocated to radix tree itself
     126    grdxt_destroy( &mapper->rt );
    129127
    130128    // release memory for mapper descriptor
     
    133131    kmem_free( &req );
    134132
    135     return 0;
    136 
    137133}  // end mapper_destroy()
    138134
    139 ////////////////////////////////////////////
    140 page_t * mapper_get_page( mapper_t * mapper,
    141                           uint32_t   index )
    142 {
    143     kmem_req_t    req;
    144     page_t      * page;
     135////////////////////////////////////////////////////
     136xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
     137                                uint32_t  page_id )
     138{
    145139    error_t       error;
     140    mapper_t    * mapper_ptr;
     141    cxy_t         mapper_cxy;
     142    xptr_t        lock_xp;        // extended pointer on mapper lock
     143    xptr_t        page_xp;        // extended pointer on searched page descriptor
     144    xptr_t        rt_xp;          // extended pointer on radix tree in mapper
     145
     146    thread_t * this = CURRENT_THREAD;
     147
     148    // get mapper cluster and local pointer
     149    mapper_ptr = GET_PTR( mapper_xp );
     150    mapper_cxy = GET_CXY( mapper_xp );
    146151
    147152#if DEBUG_MAPPER_GET_PAGE
    148153uint32_t cycle = (uint32_t)hal_get_cycles();
     154char          name[CONFIG_VFS_MAX_NAME_LENGTH];
     155vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     156vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    149157if( DEBUG_MAPPER_GET_PAGE < cycle )
    150 printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
    151 __FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
    152 #endif
    153 
    154     thread_t * this = CURRENT_THREAD;
     158printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
     159__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
     160#endif
    155161
    156162    // check thread can yield
    157163    thread_assert_can_yield( this , __FUNCTION__ );
    158164
     165    // build extended pointer on mapper lock and mapper rt
     166    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
     167    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
     168
    159169    // take mapper lock in READ_MODE
    160     rwlock_rd_acquire( &mapper->lock );
     170    remote_rwlock_rd_acquire( lock_xp );
    161171
    162172    // search page in radix tree
    163     page = (page_t *)grdxt_lookup( &mapper->radix , index );
    164 
    165     // test if page available in mapper
    166     if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
    167     {
    168 
     173    page_xp  = grdxt_remote_lookup( rt_xp , page_id );
     174
     175    // test mapper miss
     176    if( page_xp == XPTR_NULL )                  // miss => try to handle it
     177    {
    169178        // release the lock in READ_MODE and take it in WRITE_MODE
    170         rwlock_rd_release( &mapper->lock );
    171         rwlock_wr_acquire( &mapper->lock );
    172 
    173         // second test on missing page because the page status can have been modified
     179        remote_rwlock_rd_release( lock_xp );
     180        remote_rwlock_wr_acquire( lock_xp );
     181
     182        // second test on missing page because the page status can be modified
    174183        // by another thread, when passing from READ_MODE to WRITE_MODE.
    175184        // from this point there is no concurrent accesses to mapper.
    176 
    177         page = grdxt_lookup( &mapper->radix , index );
    178 
    179         if ( page == NULL )   // missing page => create it and load it from file system
     185        page_xp = grdxt_remote_lookup( rt_xp , page_id );
     186
     187        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
    180188        {
    181189
    182190#if (DEBUG_MAPPER_GET_PAGE & 1)
    183191if( DEBUG_MAPPER_GET_PAGE < cycle )
    184 printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
    185 #endif
    186             // allocate one page from PPM
    187             req.type  = KMEM_PAGE;
    188             req.size  = 0;
    189             req.flags = AF_NONE;
    190             page = kmem_alloc( &req );
    191 
    192             if( page == NULL )
     192printk("\n[%s] missing page => load it from IOC device\n", __FUNCTION__ );
     193#endif
     194            if( mapper_cxy == local_cxy )   // mapper is local
    193195            {
    194                 printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
    195                        __FUNCTION__ , this->trdid , local_cxy );
    196                 rwlock_wr_release( &mapper->lock );
    197                 return NULL;
     196                 error = mapper_handle_miss( mapper_ptr,
     197                                             page_id,
     198                                             &page_xp );
     199            }
     200            else
     201            {
     202                 rpc_mapper_handle_miss_client( mapper_cxy,
     203                                                mapper_ptr,
     204                                                page_id,
     205                                                &page_xp,
     206                                                &error );
    198207            }
    199208
    200             // initialize the page descriptor
    201             page_init( page );
    202             page_set_flag( page , PG_INIT | PG_INLOAD );
    203             page_refcount_up( page );
    204             page->mapper = mapper;
    205             page->index  = index;
    206 
    207             // insert page in mapper radix tree
    208             error = grdxt_insert( &mapper->radix, index , page );
    209 
    210             // release mapper lock from WRITE_MODE
    211             rwlock_wr_release( &mapper->lock );
    212 
    213             if( error )
     209            if ( error )
    214210            {
    215                 printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
    216                        __FUNCTION__ , this->trdid );
    217                 mapper_release_page( mapper , page );
    218                 page_clear_flag( page , PG_ALL );
    219                 req.ptr  = page;
    220                 req.type = KMEM_PAGE;
    221                 kmem_free(&req);
    222                 return NULL;
    223             }
    224 
    225             // launch I/O operation to load page from file system
    226             error = vfs_mapper_move_page( page,
    227                                           true );   // to mapper
    228             if( error )
    229             {
    230                 printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
    231                        __FUNCTION__ , this->trdid );
    232                 mapper_release_page( mapper , page );
    233                 page_clear_flag( page , PG_ALL );
    234                 req.ptr  = page;
    235                 req.type = KMEM_PAGE;
    236                 kmem_free( &req );
    237                 return NULL;
    238             }
    239 
    240             // reset the page INLOAD flag to make the page available to all readers
    241             page_clear_flag( page , PG_INLOAD );
    242         }
    243         else if( page_is_flag( page , PG_INLOAD ) )   // page is loaded by another thread
    244         {
    245             // release mapper lock from WRITE_MODE
    246             rwlock_wr_release( &mapper->lock );
    247 
    248             // wait load completion
    249             while( page_is_flag( page , PG_INLOAD ) == false )
    250             {
    251                 // deschedule without blocking
    252                 sched_yield("waiting page loading");
     211                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
     212                __FUNCTION__ , this->process->pid, this->trdid );
     213                remote_rwlock_wr_release( lock_xp );
     214                return XPTR_NULL;
    253215            }
    254216        }
    255     }
    256     else                          // page available in mapper
    257     {
    258         rwlock_rd_release( &mapper->lock );
     217       
     218        // release mapper lock from WRITE_MODE
     219        remote_rwlock_wr_release( lock_xp );
     220    }
     221    else                                              // hit
     222    {
     223        // release mapper lock from READ_MODE
     224        remote_rwlock_rd_release( lock_xp );
    259225    }
    260226
     
    262228cycle = (uint32_t)hal_get_cycles();
    263229if( DEBUG_MAPPER_GET_PAGE < cycle )
    264 printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
    265 __FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
    266 #endif
    267 
    268     return page;
    269 
    270 }  // end mapper_get_page()
    271 
    272 ///////////////////////////////////////////////
    273 error_t mapper_release_page( mapper_t * mapper,
    274                              page_t   * page )
    275 {
    276     error_t error;
    277 
    278     // lauch IO operation to update page to file system
    279     error = vfs_mapper_move_page( page , false );    // from mapper
     230printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
     231__FUNCTION__, this->process->pid, this->trdid,
     232page_id, name, ppm_page2ppn( page_xp ), cycle );
     233#endif
     234
     235    return page_xp;
     236
     237}  // end mapper_remote_get_page()
     238
     239//////////////////////////////////////////////
     240error_t mapper_handle_miss( mapper_t * mapper,
     241                            uint32_t   page_id,
     242                            xptr_t   * page_xp )
     243{
     244    kmem_req_t   req;
     245    page_t     * page;
     246    error_t      error;
     247
     248    thread_t * this = CURRENT_THREAD;
     249
     250#if DEBUG_MAPPER_HANDLE_MISS
     251uint32_t cycle = (uint32_t)hal_get_cycles();
     252char          name[CONFIG_VFS_MAX_NAME_LENGTH];
     253vfs_inode_t * inode = mapper->inode;
     254vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
     255if( DEBUG_MAPPER_HANDLE_MISS < cycle )
     256printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
     257__FUNCTION__, page_id, name, cycle );
     258if( DEBUG_MAPPER_HANDLE_MISS & 1 )
     259grdxt_display( &mapper->rt , name );
     260#endif
     261
     262    // allocate one page from the mapper cluster
     263    req.type  = KMEM_PAGE;
     264    req.size  = 0;
     265    req.flags = AF_NONE;
     266    page = kmem_alloc( &req );
     267
     268    if( page == NULL )
     269    {
     270        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
     271        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
     272        return -1;
     273    }
     274
     275    // initialize the page descriptor
     276    page_init( page );
     277    page_set_flag( page , PG_INIT );
     278    page_refcount_up( page );
     279    page->mapper = mapper;
     280    page->index  = page_id;
     281
     282    // insert page in mapper radix tree
     283    error = grdxt_insert( &mapper->rt , page_id , page );
    280284
    281285    if( error )
    282286    {
    283         printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
    284         return EIO;
    285     }
     287        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
     288        __FUNCTION__ , this->process->pid, this->trdid );
     289        mapper_release_page( mapper , page );
     290        req.ptr  = page;
     291        req.type = KMEM_PAGE;
     292        kmem_free(&req);
     293        return -1;
     294    }
     295
     296    // launch I/O operation to load page from device to mapper
     297    error = vfs_fs_move_page( XPTR( local_cxy , page ) , true );
     298
     299    if( error )
     300    {
     301        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
     302        __FUNCTION__ , this->process->pid, this->trdid );
     303        mapper_release_page( mapper , page );
     304        req.ptr  = page;
     305        req.type = KMEM_PAGE;
     306        kmem_free( &req );
     307        return -1;
     308    }
     309
     310    // set extended pointer on allocated page
     311    *page_xp = XPTR( local_cxy , page );
     312
     313#if DEBUG_MAPPER_HANDLE_MISS
     314cycle = (uint32_t)hal_get_cycles();
     315if( DEBUG_MAPPER_HANDLE_MISS < cycle )
     316printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
     317__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
     318if( DEBUG_MAPPER_HANDLE_MISS & 1 )
     319grdxt_display( &mapper->rt , name );
     320#endif
     321
     322    return 0;
     323
     324}  // end mapper_handle_miss()
     325
     326////////////////////////////////////////////
     327void mapper_release_page( mapper_t * mapper,
     328                          page_t   * page )
     329{
     330    // build extended pointer on mapper lock
     331    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );
    286332
    287333    // take mapper lock in WRITE_MODE
    288     rwlock_wr_acquire( &mapper->lock );
     334    remote_rwlock_wr_acquire( mapper_lock_xp );
    289335
    290336    // remove physical page from radix tree
    291     grdxt_remove( &mapper->radix , page->index );
     337    grdxt_remove( &mapper->rt , page->index );
    292338
    293339    // release mapper lock from WRITE_MODE
    294     rwlock_wr_release( &mapper->lock );
     340    remote_rwlock_wr_release( mapper_lock_xp );
    295341
    296342    // release page to PPM
     
    300346    kmem_free( &req );
    301347
    302     return 0;
    303 
    304348}  // end mapper_release_page()
    305349
    306 ///////////////////////////////////////////////////
     350////////////////////////////////////////////
    307351error_t mapper_move_user( mapper_t * mapper,
    308352                          bool_t     to_buffer,
     
    311355                          uint32_t   size )
    312356{
     357    xptr_t     mapper_xp;      // extended pointer on local mapper
    313358    uint32_t   page_offset;    // first byte to move to/from a mapper page
    314359    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    315     uint32_t   index;          // current mapper page index
     360    uint32_t   page_id;        // current mapper page index
    316361    uint32_t   done;           // number of moved bytes
    317     page_t   * page;           // current mapper page descriptor
    318     uint8_t  * map_ptr;        // current mapper  address
    319     uint8_t  * buf_ptr;        // current buffer  address
     362    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
    320363
    321364#if DEBUG_MAPPER_MOVE_USER
    322 uint32_t cycle = (uint32_t)hal_get_cycles();
     365uint32_t   cycle = (uint32_t)hal_get_cycles();
     366thread_t * this  = CURRENT_THREAD;
    323367if( DEBUG_MAPPER_MOVE_USER < cycle )
    324 printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
    325 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
    326 #endif
     368printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
     369__FUNCTION__, this->process->pid, this->trdid,
     370to_buffer, buffer, size, file_offset, cycle );
     371#endif
     372
     373    // build extended pointer on mapper
     374    mapper_xp = XPTR( local_cxy , mapper );
    327375
    328376    // compute offsets of first and last bytes in file
    329377    uint32_t min_byte = file_offset;
    330     uint32_t max_byte = file_offset + size -1;
     378    uint32_t max_byte = file_offset + size - 1;
    331379
    332380    // compute indexes of pages for first and last byte in mapper
     
    334382    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
    335383
    336     done = 0;
    337 
    338     // loop on pages in mapper
    339     for( index = first ; index <= last ; index++ )
    340     {
    341         // compute page_offset
    342         if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
    343         else                 page_offset = 0;
    344 
    345         // compute number of bytes in page
    346         if      ( first == last  ) page_count = size;
    347         else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
    348         else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
    349         else                       page_count = CONFIG_PPM_PAGE_SIZE;
    350 
    351384#if (DEBUG_MAPPER_MOVE_USER & 1)
    352385if( DEBUG_MAPPER_MOVE_USER < cycle )
    353 printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
    354 __FUNCTION__ , index , page_offset , page_count );
    355 #endif
    356 
    357         // get page descriptor
    358         page = mapper_get_page( mapper , index );
    359 
    360         if ( page == NULL ) return EINVAL;
     386printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
     387#endif
     388
     389    done = 0;
     390
     391    // loop on pages in mapper
     392    for( page_id = first ; page_id <= last ; page_id++ )
     393    {
     394        // compute page_offset
     395        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
     396        else                   page_offset = 0;
     397
     398        // compute number of bytes in page
     399        if      ( first   == last  ) page_count = size;
     400        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
     401        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
     402        else                         page_count = CONFIG_PPM_PAGE_SIZE;
     403
     404#if (DEBUG_MAPPER_MOVE_USER & 1)
     405if( DEBUG_MAPPER_MOVE_USER < cycle )
     406printk("\n[%s] page_id = %d / page_offset = %d / page_count = %d\n",
     407__FUNCTION__ , page_id , page_offset , page_count );
     408#endif
     409
     410        // get extended pointer on page descriptor
     411        page_xp = mapper_remote_get_page( mapper_xp , page_id );
     412
     413        if ( page_xp == XPTR_NULL ) return -1;
    361414
    362415        // compute pointer in mapper
    363         xptr_t base_xp = ppm_page2base( XPTR( local_cxy, page ) );
    364         map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;
     416        xptr_t    base_xp = ppm_page2base( page_xp );
     417        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;
    365418
    366419        // compute pointer in buffer
    367         buf_ptr = (uint8_t *)buffer + done;
     420        uint8_t * buf_ptr = (uint8_t *)buffer + done;
    368421
    369422        // move fragment
    370423        if( to_buffer )
    371424        {
    372             hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
     425            hal_copy_to_uspace( buf_ptr , map_ptr , page_count ); 
    373426        }
    374427        else
    375428        {
    376             ppm_page_do_dirty( page );
    377             hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
     429            ppm_page_do_dirty( page_xp );
     430            hal_copy_from_uspace( map_ptr , buf_ptr , page_count ); 
    378431        }
    379432
     
    384437cycle = (uint32_t)hal_get_cycles();
    385438if( DEBUG_MAPPER_MOVE_USER < cycle )
    386 printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
    387 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
     439printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     440__FUNCTION__, this->process->pid, this->trdid, cycle );
    388441#endif
    389442
     
    393446
    394447////////////////////////////////////////////////
    395 error_t mapper_move_kernel( mapper_t  *  mapper,
    396                             bool_t       to_buffer,
    397                             uint32_t     file_offset,
    398                             xptr_t       buffer_xp,
    399                             uint32_t     size )
     448error_t mapper_move_kernel( xptr_t    mapper_xp,
     449                            bool_t    to_buffer,
     450                            uint32_t  file_offset,
     451                            xptr_t    buffer_xp,
     452                            uint32_t  size )
    400453{
    401454    uint32_t   page_offset;    // first byte to move to/from a mapper page
    402455    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    403     uint32_t   index;          // current mapper page index
     456    uint32_t   page_id;        // current mapper page index
    404457    uint32_t   done;           // number of moved bytes
    405     page_t   * page;           // current mapper page descriptor
     458    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
    406459
    407460    uint8_t  * src_ptr;        // source buffer local pointer
     
    412465    // get buffer cluster and local pointer
    413466    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    414     uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
     467    uint8_t * buffer_ptr = GET_PTR( buffer_xp );
     468
     469    // get mapper cluster
     470    cxy_t     mapper_cxy = GET_CXY( mapper_xp );
    415471
    416472#if DEBUG_MAPPER_MOVE_KERNEL
    417 uint32_t cycle = (uint32_t)hal_get_cycles();
     473uint32_t   cycle = (uint32_t)hal_get_cycles();
     474thread_t * this  = CURRENT_THREAD;
    418475if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    419 printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    420 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
     476printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
     477__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
    421478#endif
    422479
     
    431488#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
    432489if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    433 printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
     490printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
    434491#endif
    435492
     
    438495    {
    439496        dst_cxy = buffer_cxy;
    440         src_cxy = local_cxy;
     497        src_cxy = mapper_cxy;
    441498    }
    442499    else
    443500    {
    444501        src_cxy = buffer_cxy;
    445         dst_cxy = local_cxy;
     502        dst_cxy = mapper_cxy;
    446503    }
    447504
     
    449506
    450507    // loop on pages in mapper
    451     for( index = first ; index <= last ; index++ )
     508    for( page_id = first ; page_id <= last ; page_id++ )
    452509    {
    453510        // compute page_offset
    454         if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
    455         else                 page_offset = 0;
     511        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
     512        else                   page_offset = 0;
    456513
    457514        // compute number of bytes to move in page
    458         if      ( first == last  ) page_count = size;
    459         else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
    460         else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
    461         else                       page_count = CONFIG_PPM_PAGE_SIZE;
     515        if      ( first == last  )   page_count = size;
     516        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
     517        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
     518        else                         page_count = CONFIG_PPM_PAGE_SIZE;
    462519
    463520#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
    464521if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    465 printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
    466 __FUNCTION__ , index , page_offset , page_count );
    467 #endif
    468 
    469         // get page descriptor
    470         page = mapper_get_page( mapper , index );
    471 
    472         if ( page == NULL ) return EINVAL;
     522printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
     523__FUNCTION__ , page_id , page_offset , page_count );
     524#endif
     525
     526        // get extended pointer on page descriptor
     527        page_xp = mapper_remote_get_page( mapper_xp , page_id );
     528
     529        if ( page_xp == XPTR_NULL ) return -1;
    473530
    474531        // get page base address
    475         xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
     532        xptr_t    base_xp  = ppm_page2base( page_xp );
    476533        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );
    477534
     
    487544            dst_ptr = base_ptr + page_offset;
    488545
    489             ppm_page_do_dirty( page );
     546            ppm_page_do_dirty( page_xp );
    490547        }
    491548
     
    499556cycle = (uint32_t)hal_get_cycles();
    500557if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    501 printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    502 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
     558printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
     559__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
    503560#endif
    504561
     
    507564}  // end mapper_move_kernel()
    508565
     566///////////////////////////////////////////////////
     567error_t mapper_remote_get_32( xptr_t     mapper_xp,
     568                              uint32_t   word_id,
     569                              uint32_t * p_value )
     570{
     571    uint32_t   page_id;      // page index in file
     572    uint32_t   local_id;     // word index in page
     573    xptr_t     page_xp;      // extended pointer on searched page descriptor
     574    xptr_t     base_xp;      // extended pointer on searched page base
     575
     576   
     577    // get page index and local word index
     578    page_id  = word_id >> 10;
     579    local_id = word_id & 0x3FF;
     580
     581    // get page containing the searched word
     582    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
     583
     584    if( page_xp == XPTR_NULL )  return -1;
     585   
     586    // get page base
     587    base_xp = ppm_page2base( page_xp );
     588
     589    // get the value from mapper
     590    *p_value = hal_remote_l32( base_xp + (local_id<<2) );
     591
     592    return 0;
     593
     594}  // end mapper_remote_get_32()
     595
     596///////////////////////////////////////////////////
     597error_t mapper_remote_set_32( xptr_t     mapper_xp,
     598                              uint32_t   word_id,
     599                              uint32_t   value )
     600{
     601   
     602    uint32_t   page_id;      // page index in file
     603    uint32_t   local_id;     // word index in page
     604    xptr_t     page_xp;      // extended pointer on searched page descriptor
     605    xptr_t     base_xp;      // extended pointer on searched page base
     606
     607    // get page index and local word index
     608    page_id  = word_id >> 10;
     609    local_id = word_id & 0x3FF;
     610
     611    // get page containing the searched word
     612    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
     613
     614    if( page_xp == XPTR_NULL ) return -1;
     615
     616    // get page base
     617    base_xp = ppm_page2base( page_xp );
     618
     619    // set value to mapper
     620    hal_remote_s32( (base_xp + (local_id << 2)) , value );
     621
     622    // set the dirty flag
     623    ppm_page_do_dirty( page_xp );
     624
     625    return 0;
     626
     627}  // end mapper_remote_set_32()
     628
     629
  • trunk/kernel/mm/mapper.h

    r513 r606  
    11/*
    2  * mapper.h - Map memory, file or device in process virtual address space.
     2 * mapper.h - Kernel cache for FS files or directories definition.
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
     
    7272        struct vfs_inode_s * inode;           /*! owner inode                                     */
    7373    uint32_t             type;        /*! file system type                                */
    74         grdxt_t              radix;           /*! pages cache implemented as a radix tree         */
    75         rwlock_t             lock;        /*! several readers / only one writer               */
     74        grdxt_t              rt;              /*! embedded pages cache descriptor (radix tree)    */
     75        remote_rwlock_t      lock;        /*! several readers / only one writer               */
    7676        uint32_t                 refcount;    /*! several vsegs can refer the same file           */
    7777        xlist_entry_t        vsegs_root;  /*! root of list of vsegs refering this mapper      */
     
    109109
    110110/*******************************************************************************************
    111  * This function releases all physical pages allocated for the mapper.
    112  * It synchronizes all dirty pages (i.e. update the file on disk) if required.
    113  * The mapper descriptor and the radix tree themselves are released.
     111 * This function releases all physical memory allocated for a mapper.
     112 * Both the mapper descriptor and the radix tree are released.
     113 * It does NOT synchronize dirty pages. Use the vfs_sync_inode() function if required.
    114114 * It must be executed by a thread running in the cluster containing the mapper.
    115115 *******************************************************************************************
    116116 * @ mapper      : target mapper.
    117  * @ return 0 if success / return EIO if a dirty page cannot be updated on device.
    118  ******************************************************************************************/
    119 error_t mapper_destroy( mapper_t * mapper );
    120 
    121 /*******************************************************************************************
    122  * This function move data between a mapper and a - possibly distributed - user buffer.
    123  * It must be called by a thread running in the cluster containing the mapper.
    124  * It is called by the vfs_user_move() function to implement sys_read() and sys_write().
     117 ******************************************************************************************/
     118void mapper_destroy( mapper_t * mapper );
     119
     120/*******************************************************************************************
     121 * This function load from device a missing page identified by the <page_id> argument
     122 * into the mapper identified by the <mapper> local pointer.
     123 * It allocates a physical page from the local cluster, initialises it by accessing the device,
     124 * and register the page in the mapper radix tree.
     125 * It must be executed by a thread running in the cluster containing the mapper.
     126 * WARNING : the calling function mapper_remote_get_page() is supposed to take and release
     127 * the lock protecting the mapper in WRITE_MODE.
     128 *******************************************************************************************
     129 * @ mapper      : [in]  target mapper.
     130 * @ page_id : [in]  missing page index in file.
     131 * @ page_xp : [out] buffer for extended pointer on missing page descriptor.
     132 * @ return 0 if success / return -1 if a dirty page cannot be updated on device.
     133 ******************************************************************************************/
     134error_t mapper_handle_miss( mapper_t * mapper,
     135                            uint32_t   page_id,
     136                            xptr_t   * page_xp );
     137
     138/*******************************************************************************************
     139 * This function moves data between a local mapper and a distributed user buffer.
     140 * It must be called by a thread running in the cluster containing the mapper.
     141 * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls.
    125142 * If required, the data transfer is split in "fragments", where one fragment contains
    126143 * contiguous bytes in the same mapper page.
    127144 * It uses "hal_uspace" accesses to move a fragment to/from the user buffer.
    128145 * In case of write, the dirty bit is set for all pages written in the mapper.
    129  * The offset in the file descriptor is not modified by this function.
     146 * The mapper being an extendable cache, it is automatically extended when required
     147 * for both read and write accesses.
     148 * The "offset" field in the file descriptor, and the "size" field in inode descriptor
     149 * are not modified by this function.
    130150 *******************************************************************************************
    131151 * @ mapper       : local pointer on mapper.
     
    134154 * @ u_buf        : user space pointer on user buffer.
    135155 * @ size         : number of bytes to move.
    136  * returns O if success / returns EINVAL if error.
     156 * returns 0 if success / returns -1 if error.
    137157 ******************************************************************************************/
    138158error_t mapper_move_user( mapper_t * mapper,
     
    142162                          uint32_t   size );
    143163
    144 /*******************************************************************************************
    145  * This function move data between a mapper and a remote kernel buffer.
    146  * It must be called by a thread running in the cluster containing the mapper.
     164/********************************************************************************************
     165 * This function moves data between a remote mapper and a remote kernel buffer.
     166 * It can be called by a thread running in any cluster.
    147167 * If required, the data transfer is split in "fragments", where one fragment contains
    148168 * contiguous bytes in the same mapper page.
    149169 * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer.
    150170 * In case of write, the dirty bit is set for all pages written in the mapper.
    151  * The offset in the file descriptor is not modified by this function.
    152  *******************************************************************************************
    153  * @ mapper       : local pointer on mapper.
     171 *******************************************************************************************
     172 * @ mapper_xp    : extended pointer on mapper.
    154173 * @ to_buffer    : mapper -> buffer if true / buffer -> mapper if false.
    155174 * @ file_offset  : first byte to move in file.
    156175 * @ buffer_xp    : extended pointer on kernel buffer.
    157176 * @ size         : number of bytes to move.
    158  * returns O if success / returns EINVAL if error.
    159  ******************************************************************************************/
    160 error_t mapper_move_kernel( mapper_t * mapper,
     177 * returns 0 if success / returns -1 if error.
     178 ******************************************************************************************/
     179error_t mapper_move_kernel( xptr_t     mapper_xp,
    161180                            bool_t     to_buffer,
    162181                            uint32_t   file_offset,
     
    164183                            uint32_t   size );
    165184
    166 
    167 /*******************************************************************************************
    168  * This function removes a physical page from the mapper, update the FS if the page
    169  * is dirty, and releases the page to PPM. It is called by the mapper_destroy() function.
    170  * It must be executed by a thread running in the cluster containing the mapper.
    171  * It takes both the page lock and the mapper lock in WRITE_MODE to release the page.
     185/*******************************************************************************************
     186 * This function removes a physical page from the mapper, and releases
     187 * the page to the local PPM. It is called by the mapper_destroy() function.
     188 * It must be executed by a thread running in the cluster containing the mapper.
     189 * It takes the mapper lock in WRITE_MODE to update the mapper.
    172190 *******************************************************************************************
    173191 * @ mapper     : local pointer on the mapper.
    174192 * @ page       : pointer on page to remove.
    175  * @ return 0 if success / return EIO if a dirty page cannot be copied to FS.
    176  ******************************************************************************************/
    177 error_t mapper_release_page( mapper_t      * mapper,
    178                              struct page_s * page );
    179 
    180 /*******************************************************************************************
    181  * This function searches a physical page descriptor from its index in mapper.
    182  * It must be executed by a thread running in the cluster containing the mapper.
     193 ******************************************************************************************/
     194void mapper_release_page( mapper_t      * mapper,
     195                          struct page_s * page );
     196
     197/*******************************************************************************************
     198 * This function returns an extended pointer on a mapper page, identified by <page_id>,
     199 * index in the file. The - possibly remote - mapper is identified by the <mapper_xp>
     200 * argument.  It can be executed by a thread running in any cluster, as it uses remote
     201 * access primitives to scan the mapper.
     202 * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the
     203 * mapper_handle_miss() to load the missing page from device to mapper, using an RPC
     204 * when the mapper is remote.
     205 *******************************************************************************************
     206 * @ mapper_xp  : extended pointer on the mapper.
     207 * @ page_id    : page index in file
     208 * @ returns extended pointer on page base if success / return XPTR_NULL if error.
     209 ******************************************************************************************/
     210xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
     211                               uint32_t  page_id );
     212
     213/*******************************************************************************************
     214 * This function allows to read a single word in a mapper seen as an array of uint32_t.
     215 * It has been designed to support remote access to the FAT mapper of the FATFS.
     216 * It can be called by any thread running in any cluster.
    183217 * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
    184  * page from device to the mapper, and release the mapper lock.
    185  *******************************************************************************************
    186  * @ mapper     : local pointer on the mapper.
    187  * @ index      : page index in file
    188  * @ returns pointer on page descriptor if success / return NULL if error.
    189  ******************************************************************************************/
    190 struct page_s * mapper_get_page( mapper_t * mapper,
    191                                  uint32_t   index );
    192 
    193  
     218 * page from device to mapper, and release the mapper lock.
     219 *******************************************************************************************
     220 * @ mapper_xp  : [in]  extended pointer on the mapper.
     221 * @ word_id    : [in]  32 bits word index in file.
     222 * @ p_value    : [out] local pointer on destination buffer.
     223 * @ returns 0 if success / return -1 if error.
     224 ******************************************************************************************/
     225error_t mapper_remote_get_32( xptr_t     mapper_xp,
     226                              uint32_t   word_id,
     227                              uint32_t * p_value );
     228
     229/*******************************************************************************************
     230 * This function allows to write a single word to a mapper seen as an array of uint32_t.
     231 * It has been designed to support remote access to the FAT mapper of the FATFS.
     232 * It can be called by any thread running in any cluster.
     233 * In case of miss, it takes the mapper lock in WRITE_MODE, loads the missing
     234 * page from device to mapper, and releases the mapper lock.
     235 *******************************************************************************************
     236 * @ mapper_xp  : [in]  extended pointer on the mapper.
     237 * @ word_id    : [in]  32 bits word index in file.
     238 * @ value      : [in]  value to be written.
     239 * @ returns 0 if success / return -1 if error.
     240 ******************************************************************************************/
     241error_t mapper_remote_set_32( xptr_t     mapper_xp,
     242                              uint32_t   word_id,
     243                              uint32_t   value );
    194244
    195245#endif /* _MAPPER_H_ */
  • trunk/kernel/mm/page.h

    r567 r606  
    3737/*************************************************************************************
    3838 * This  defines the flags that can be attached to a physical page.
    39  * TODO : the PG_BUFFER and PG_IO_ERR flags semantic is not defined
     39 * TODO : the PG_BUFFER and PG_IO_ERR flags semantic is not defined [AG]
    4040 ************************************************************************************/
    4141
     
    4343#define PG_RESERVED         0x0002     // cannot be allocated by PPM
    4444#define PG_FREE             0x0004     // page can be allocated by PPM
    45 #define PG_INLOAD           0x0008     // on-going load from disk
    4645#define PG_IO_ERR           0x0010     // mapper signals access error    TODO ??? [AG]
    4746#define PG_BUFFER           0x0020     // used in blockio.c              TODO ??? [AG]
     
    4948#define PG_COW          0x0080     // page is copy-on-write
    5049
    51 #define PG_ALL          0xFFFF     // All flags
    52 
    5350/*************************************************************************************
    5451 * This structure defines a physical page descriptor.
    55  * The busylock is used to test/modify the forks counter.
    56  * NOTE: Size is 44 bytes for a 32 bits core...
    57  * TODO : the refcount use has to be clarified [AG]
     52 * - The remote_busylock is used to allows any remote thread to atomically
     53 *   test/modify the forks counter or the page flags.
     54 * - The list entry is used to register the page in a free list or in dirty list.
     55 * NOTE: Size is 48 bytes for a 32 bits core.
     56 * TODO : the refcount use is not defined [AG]
    5857 ************************************************************************************/
    5958
     
    6766        uint32_t          refcount;       /*! reference counter TODO ??? [AG]      (4)  */
    6867        uint32_t          forks;          /*! number of pending forks              (4)  */
    69         remote_busylock_t lock;           /*! protect all accesses to page         (12) */
     68        remote_busylock_t lock;           /*! protect forks or flags modifs        (16) */
    7069}
    7170page_t;
  • trunk/kernel/mm/ppm.c

    r585 r606  
    3939#include <mapper.h>
    4040#include <ppm.h>
     41#include <vfs.h>
    4142
    4243////////////////////////////////////////////////////////////////////////////////////////
     
    395396//////////////////////////////////////////////////////////////////////////////////////
    396397
    397 /////////////////////////////////////////
    398 bool_t ppm_page_do_dirty( page_t * page )
     398//////////////////////////////////////////
     399bool_t ppm_page_do_dirty( xptr_t page_xp )
    399400{
    400401        bool_t done = false;
    401402
     403    // get page cluster and local pointer
     404    page_t * page_ptr = GET_PTR( page_xp );
     405    cxy_t    page_cxy = GET_CXY( page_xp );
     406
     407    // get local pointer on PPM (same in all clusters)
    402408        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    403409
    404         // lock the PPM dirty_list
    405         queuelock_acquire( &ppm->dirty_lock );
    406 
    407         if( !page_is_flag( page , PG_DIRTY ) )
     410    // build extended pointers on page lock, page flags, and PPM dirty list lock
     411    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
     412    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
     413    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
     414           
     415        // lock the remote PPM dirty_list
     416        remote_queuelock_acquire( dirty_lock_xp );
     417
     418    // lock the remote page
     419    remote_busylock_acquire( page_lock_xp );
     420
     421    // get remote page flags
     422    uint32_t flags = hal_remote_l32( page_flags_xp );
     423
     424        if( (flags & PG_DIRTY) == 0 )
    408425        {
    409426                // set dirty flag in page descriptor
    410                 page_set_flag( page , PG_DIRTY );
    411 
    412                 // register page in PPM dirty list
    413                 list_add_first( &ppm->dirty_root , &page->list );
     427        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
     428
     429                // The PPM dirty list is a LOCAL list !!!
     430        // We must update 4 pointers to insert a new page in this list.
     431        // We can use the standard LIST API when the page is local,
     432        // but we cannot use the standard API if the page is remote...
     433
     434        if( page_cxy == local_cxy )         // locally update the PPM dirty list
     435        {
     436            list_add_first( &ppm->dirty_root , &page_ptr->list );
     437        }
     438        else                                // remotely update the PPM dirty list
     439        {
     440            // get local and remote pointers on "root" list entry
     441            list_entry_t * root    = &ppm->dirty_root;
     442            xptr_t         root_xp = XPTR( page_cxy , root );
     443
     444            // get local and remote pointers on "page" list entry
     445            list_entry_t * list    = &page_ptr->list;
     446            xptr_t         list_xp = XPTR( page_cxy , list );
     447
     448            // get local and remote pointers on first dirty page
     449            list_entry_t * dirt    = hal_remote_lpt( XPTR( page_cxy, &root->next ) );
     450            xptr_t         dirt_xp = XPTR( page_cxy , dirt );
     451
     452            // set root.next, list.next, list pred, curr.pred in remote cluster
     453            hal_remote_spt( root_xp                    , list );
     454            hal_remote_spt( list_xp                    , dirt );
     455            hal_remote_spt( list_xp + sizeof(intptr_t) , root );
     456            hal_remote_spt( dirt_xp + sizeof(intptr_t) , list );
     457        }
     458
    414459                done = true;
    415460        }
    416461
    417         // unlock the PPM dirty_list
    418         queuelock_release( &ppm->dirty_lock );
     462    // unlock the remote page
     463    remote_busylock_release( page_lock_xp );
     464
     465        // unlock the remote PPM dirty_list
     466        remote_queuelock_release( dirty_lock_xp );
    419467
    420468        return done;
    421 }
    422 
    423 ///////////////////////////////////////////
    424 bool_t ppm_page_undo_dirty( page_t * page )
     469
     470} // end ppm_page_do_dirty()
     471
     472////////////////////////////////////////////
     473bool_t ppm_page_undo_dirty( xptr_t page_xp )
    425474{
    426475        bool_t done = false;
    427476
     477    // get page cluster and local pointer
     478    page_t * page_ptr = GET_PTR( page_xp );
     479    cxy_t    page_cxy = GET_CXY( page_xp );
     480
     481    // get local pointer on PPM (same in all clusters)
    428482        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    429483
    430         // lock the dirty_list
    431         queuelock_acquire( &ppm->dirty_lock );
    432 
    433         if( page_is_flag( page , PG_DIRTY) )
    434         {
    435                 // clear dirty flag in page descriptor
    436                 page_clear_flag( page , PG_DIRTY );
    437 
    438                 // remove page from PPM dirty list
    439                 list_unlink( &page->list );
     484    // build extended pointers on page lock, page flags, and PPM dirty list lock
     485    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
     486    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
     487    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
     488           
     489        // lock the remote PPM dirty_list
     490        remote_queuelock_acquire( XPTR( page_cxy , &ppm->dirty_lock ) );
     491
     492    // lock the remote page
     493    remote_busylock_acquire( page_lock_xp );
     494
     495    // get remote page flags
     496    uint32_t flags = hal_remote_l32( page_flags_xp );
     497
     498        if( (flags & PG_DIRTY) )  // page is dirty
     499        {
     500                // reset dirty flag in page descriptor
     501        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
     502
     503                // The PPM dirty list is a LOCAL list !!!
     504        // We must update 4 pointers to remove a page from this list.
     505        // we can use the standard LIST API when the page is local,
     506        // but we cannot use the standard API if the page is remote...
     507
     508        if( page_cxy == local_cxy )         // locally update the PPM dirty list
     509        {
     510            list_unlink( &page_ptr->list );
     511        }
     512        else                                // remotely update the PPM dirty list
     513        {
     514            // get local and remote pointers on "page" list entry
     515            list_entry_t * list    = &page_ptr->list;
     516            xptr_t         list_xp = XPTR( page_cxy , list );
     517
     518            // get local and remote pointers on "next" page list entry
     519            list_entry_t * next    = hal_remote_lpt( list_xp );
     520            xptr_t         next_xp = XPTR( page_cxy , next );
     521
     522            // get local and remote pointers on "pred" page list entry
     523            list_entry_t * pred    = hal_remote_lpt( list_xp + sizeof(intptr_t) );
     524            xptr_t         pred_xp = XPTR( page_cxy , pred );
     525
     526            // set root.next, list.next, list pred, curr.pred in remote cluster
     527            hal_remote_spt( pred_xp                    , next );
     528            hal_remote_spt( list_xp                    , NULL );
     529            hal_remote_spt( list_xp + sizeof(intptr_t) , NULL );
     530            hal_remote_spt( next_xp + sizeof(intptr_t) , pred );
     531        }
     532
    440533                done = true;
    441534        }
    442535
    443         // unlock the dirty_list
    444         queuelock_release( &ppm->dirty_lock );
     536    // unlock the remote page
     537    remote_busylock_release( page_lock_xp );
     538
     539        // unlock the remote PPM dirty_list
     540        remote_queuelock_release( dirty_lock_xp );
    445541
    446542        return done;
    447 }
    448 
    449 ///////////////////////////////
    450 void ppm_sync_all_pages( void )
    451 {
    452         page_t   * page;
    453         ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     543
     544}  // end ppm_page_undo_dirty()
     545
     546/////////////////////////////////
     547void ppm_sync_dirty_pages( void )
     548{
     549        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     550
     551    // get local pointer on PPM dirty_root
     552    list_entry_t * dirty_root = &ppm->dirty_root;
     553
     554    // build extended pointer on PPM dirty_lock
     555    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
    454556
    455557        // get the PPM dirty_list lock
    456         queuelock_acquire( &ppm->dirty_lock );
     558        remote_queuelock_acquire( dirty_lock_xp );
    457559
    458560        while( !list_is_empty( &ppm->dirty_root ) )
    459561        {
    460                 page = LIST_FIRST( &ppm->dirty_root ,  page_t , list );
     562                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
     563        xptr_t   page_xp = XPTR( local_cxy , page );
     564
     565        // build extended pointer on page lock
     566        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
    461567
    462568                // get the page lock
    463                 remote_busylock_acquire( XPTR( local_cxy, &page->lock ) );
     569                remote_busylock_acquire( page_lock_xp );
    464570
    465571                // sync the page
    466                 vfs_mapper_move_page( page , false );  // from mapper
     572                vfs_fs_move_page( page_xp , false );  // from mapper to device
    467573
    468574                // release the page lock
    469                 remote_busylock_release( XPTR( local_cxy , &page->lock ) );
     575                remote_busylock_release( page_lock_xp );
    470576        }
    471577
    472578        // release the PPM dirty_list lock
    473         queuelock_release( &ppm->dirty_lock );
    474 }
    475 
     579        remote_queuelock_release( dirty_lock_xp );
     580
     581}  // end ppm_sync_dirty_pages()
     582
  • trunk/kernel/mm/ppm.h

    r567 r606  
    2929#include <list.h>
    3030#include <busylock.h>
    31 #include <queuelock.h>
     31#include <remote_queuelock.h>
    3232#include <boot_info.h>
    3333#include <page.h>
     
    3939 * contains an integer number of pages, defined by the <pages_nr> field in the
    4040 * boot_info structure. It is split in three parts:
     41 *
    4142 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
    4243 *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
     
    6061 * Another service is to register the dirty pages in a specific dirty_list, that is
    6162 * also rooted in the PPM, in order to be able to save all dirty pages on disk.
    62  * This dirty list is protected by a specific local queuelock.
     63 * This dirty list is protected by a specific remote_queuelock, because it can be
     64 * modified by a remote thread, but it is implemented as a local list, because it
     65 * contains only local pages.
    6366 ****************************************************************************************/
    6467
    6568typedef struct ppm_s
    6669{
    67         busylock_t     free_lock;               /*! lock protecting free_pages[] lists      */
    68         list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists      */
    69         uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages    */
    70         page_t       * pages_tbl;               /*! pointer on page descriptors array       */
    71         uint32_t       pages_nr;                /*! total number of small physical page     */
    72     queuelock_t    dirty_lock;              /*! lock protecting dirty pages list        */
    73     list_entry_t   dirty_root;              /*! root of dirty pages list                */
    74     void         * vaddr_base;              /*! pointer on local physical memory base   */
     70        busylock_t          free_lock;          /*! lock protecting free_pages[] lists      */
     71        list_entry_t        free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists */
     72        uint32_t            free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! free pages number   */
     73        page_t            * pages_tbl;          /*! pointer on page descriptors array       */
     74        uint32_t            pages_nr;           /*! total number of small physical page     */
     75    remote_queuelock_t  dirty_lock;         /*! lock protecting dirty pages list        */
     76    list_entry_t        dirty_root;         /*! root of dirty pages list                */
     77    void              * vaddr_base;         /*! pointer on local physical memory base   */
    7578}
    7679ppm_t;
     
    135138inline xptr_t ppm_base2page( xptr_t base_xp );
    136139
    137 
    138 
    139140/*****************************************************************************************
    140141 * Get extended pointer on page base from global PPN.
     
    153154inline ppn_t ppm_base2ppn( xptr_t base_xp );
    154155
    155 
    156 
    157156/*****************************************************************************************
    158157 * Get global PPN from extended pointer on page descriptor.
     
    172171
    173172
     173/*********** debug  functions  **********************************************************/
    174174
    175175/*****************************************************************************************
     
    190190
    191191/*****************************************************************************************
    192  * This function registers a physical page as dirty.
     192 * This function registers a page identified by the <page_xp> argument as dirty.
     193 * It can be called by a thread running in any cluster.
    193194 * - it takes the queuelock protecting the PPM dirty_list.
    194195 * - it test the PG_DIRTY flag in the page descriptor.
     
    197198 * - it releases the queuelock protecting the PPM dirty_list.
    198199 *****************************************************************************************
    199  * @ page     : pointer on page descriptor.
     200 * @ page_xp  : extended pointer on page descriptor.
    200201 * @ returns true if page was not dirty / returns false if page was dirty
    201202 ****************************************************************************************/
    202 bool_t ppm_page_do_dirty( page_t * page );
    203 
    204 /*****************************************************************************************
    205  * This function unregisters a physical page as dirty.
     203bool_t ppm_page_do_dirty( xptr_t page_xp );
     204
     205/*****************************************************************************************
     206 * This function unregisters a page identified by the <page_xp> argument as dirty.
     207 * It can be called by a thread running in any cluster.
    206208 * - it takes the queuelock protecting the PPM dirty_list.
    207209 * - it test the PG_DIRTY flag in the page descriptor.
     
    210212 * - it releases the queuelock protecting the PPM dirty_list.
    211213 *****************************************************************************************
    212  * @ page     : pointer on page descriptor.
     214 * @ page_xp  : extended pointer on page descriptor.
    213215 * @ returns true if page was dirty / returns false if page was not dirty
    214216 ****************************************************************************************/
    215 bool_t ppm_page_undo_dirty( page_t * page );
    216 
    217 /*****************************************************************************************
    218  * This function synchronizes (i.e. update the disk) all dirty pages in a cluster.
     217bool_t ppm_page_undo_dirty( xptr_t page_xp );
     218
     219/*****************************************************************************************
     220 * This function synchronizes (i.e. update the IOC device) all dirty pages in a cluster.
    219221 * - it takes the queuelock protecting the PPM dirty_list.
    220222 * - it scans the PPM dirty list, and for each page:
     
    226228 $ The PPM dirty_list is empty when the sync operation completes.
    227229 ****************************************************************************************/
    228 void ppm_sync_all_pages( void );
     230void ppm_sync_dirty_pages( void );
    229231
    230232#endif  /* _PPM_H_ */
  • trunk/kernel/mm/vmm.c

    r595 r606  
    15071507    error_t    error;
    15081508    xptr_t     page_xp;           // extended pointer on physical page descriptor
    1509     page_t   * page_ptr;          // local pointer on physical page descriptor
    1510     uint32_t   index;             // missing page index in vseg mapper
     1509    uint32_t   page_id;           // missing page index in vseg mapper
    15111510    uint32_t   type;              // vseg type;
    15121511
    15131512    type      = vseg->type;
    1514     index     = vpn - vseg->vpn_base;
     1513    page_id   = vpn - vseg->vpn_base;
    15151514
    15161515#if DEBUG_VMM_GET_ONE_PPN
     
    15181517thread_t * this  = CURRENT_THREAD;
    15191518if( DEBUG_VMM_GET_ONE_PPN < cycle )
    1520 printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / index  %d / cycle %d\n",
    1521 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), index, cycle );
     1519printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id  %d / cycle %d\n",
     1520__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
    15221521#endif
    15231522
     
    15311530"mapper not defined for a FILE vseg\n" );
    15321531       
    1533         // get mapper cluster and local pointer
    1534         cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1535         mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    1536 
    1537         // get page descriptor from mapper
    1538         if( mapper_cxy == local_cxy )             // mapper is local
    1539         {
    1540             page_ptr = mapper_get_page( mapper_ptr , index );
    1541         }
    1542         else                                      // mapper is remote
    1543         {
    1544             rpc_mapper_get_page_client( mapper_cxy , mapper_ptr , index , &page_ptr );
    1545         }
    1546 
    1547         if ( page_ptr == NULL ) return EINVAL;
    1548 
    1549         page_xp = XPTR( mapper_cxy , page_ptr );
     1532        // get extended pointer on page descriptor
     1533        page_xp = mapper_remote_get_page( mapper_xp , page_id );
     1534
     1535        if ( page_xp == XPTR_NULL ) return EINVAL;
    15501536    }
    15511537
     
    15691555"mapper not defined for a CODE or DATA vseg\n" );
    15701556       
    1571             // get mapper cluster and local pointer
    1572             cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1573             mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    1574 
    15751557            // compute missing page offset in vseg
    1576             uint32_t offset = index << CONFIG_PPM_PAGE_SHIFT;
     1558            uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;
    15771559
    15781560            // compute missing page offset in .elf file
     
    16151597__FUNCTION__, this->process->pid, this->trdid, vpn );
    16161598#endif
    1617                 if( mapper_cxy == local_cxy )
    1618                 {
    1619                     error = mapper_move_kernel( mapper_ptr,
    1620                                                 true,             // to_buffer
    1621                                                 elf_offset,
    1622                                                 base_xp,
    1623                                                 CONFIG_PPM_PAGE_SIZE );
    1624                 }
    1625                 else
    1626                 {
    1627                     rpc_mapper_move_buffer_client( mapper_cxy,
    1628                                                    mapper_ptr,
    1629                                                    true,         // to buffer
    1630                                                    false,        // kernel buffer
    1631                                                    elf_offset,
    1632                                                    base_xp,
    1633                                                    CONFIG_PPM_PAGE_SIZE,
    1634                                                    &error );
    1635                 }
     1599                error = mapper_move_kernel( mapper_xp,
     1600                                            true,             // to_buffer
     1601                                            elf_offset,
     1602                                            base_xp,
     1603                                            CONFIG_PPM_PAGE_SIZE );
    16361604                if( error ) return EINVAL;
    16371605            }
     
    16491617#endif
    16501618                // initialize mapper part
    1651                 if( mapper_cxy == local_cxy )
    1652                 {
    1653                     error = mapper_move_kernel( mapper_ptr,
    1654                                                 true,         // to buffer
    1655                                                 elf_offset,
    1656                                                 base_xp,
    1657                                                 file_size - offset );
    1658                 }
    1659                 else                               
    1660                 {
    1661                     rpc_mapper_move_buffer_client( mapper_cxy,
    1662                                                    mapper_ptr,
    1663                                                    true,         // to buffer
    1664                                                    false,        // kernel buffer
    1665                                                    elf_offset,
    1666                                                    base_xp,
    1667                                                    file_size - offset,
    1668                                                    &error );
    1669                 }
     1619                error = mapper_move_kernel( mapper_xp,
     1620                                            true,         // to buffer
     1621                                            elf_offset,
     1622                                            base_xp,
     1623                                            file_size - offset );
    16701624                if( error ) return EINVAL;
    16711625
Note: See TracChangeset for help on using the changeset viewer.