Changeset 614


Timestamp: Jan 15, 2019, 1:59:32 PM (5 years ago)
Author:    alain
Message:

1) introduce a dev_ioc_sync_write() function in the IOC API,
   to improve the DEVFS synchronous update.

2) fix a big bug in both the user_dir_create() and user_dir_destroy()
   functions: add an extended pointer on the reference client process
   to the functions' arguments.

Location:  trunk/kernel
Files:     23 edited
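
For reference, a minimal sketch of the two API changes summarized in the message, using the prototypes exactly as they appear in the diffs below (see dev_ioc.h and user_dir.h):

    // new synchronous write entry point in the IOC API (dev_ioc.h)
    error_t dev_ioc_sync_write( uint8_t  * buffer,    // block-aligned source buffer
                                uint32_t   lba,       // first block index on device
                                uint32_t   count );   // number of blocks to transfer

    // user_dir_create() / user_dir_destroy() now receive an extended pointer
    // on the reference client process descriptor (user_dir.h)
    user_dir_t * user_dir_create ( vfs_inode_t * inode , xptr_t ref_xp );
    void         user_dir_destroy( user_dir_t  * dir   , xptr_t ref_xp );
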

  • trunk/kernel/devices/dev_ioc.c

    r605 r614  
    9191// This static function is called by dev_ioc_read() & dev_ioc_write() functions.
    9292// It builds and registers the command in the calling thread descriptor.
    93 // Then, it registers the calling thead in IOCchdev waiting queue.
     93// Then, it registers the calling thread in IOC chdev waiting queue.
    9494// Finally it blocks on the THREAD_BLOCKED_IO condition and deschedule.
    9595////////////////////////////////////i/////////////////////////////////////////////
     
    108108    if( chdev_dir.iob )
    109109    {
    110         if ( cmd_type == IOC_READ ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
    111         else                        dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );
     110        if (cmd_type == IOC_READ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
     111        else                      dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );
    112112    }
    113113
     
    162162
    163163    return dev_ioc_access( IOC_READ , buffer , lba , count );
    164 
    165 #if DEBUG_DEV_IOC_RX
    166 cycle = (uint32_t)hal_get_cycles();
    167 if( DEBUG_DEV_IOC_RX < cycle )
    168 printk("\n[%s] thread[%x,%x] exit / lba  %x / buffer %x / cycle %d\n",
    169 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle );
    170 #endif
    171 
    172164}
    173165
     
    187179
    188180    return dev_ioc_access( IOC_WRITE , buffer , lba , count );
    189 
    190 #if DEBUG_DEV_IOC_TX
    191 cycle = (uint32_t)hal_get_cycles();
    192 if( DEBUG_DEV_IOC_TX < cycle )
    193 printk("\n[%s] thread[%x,%x] exit / lba  %x / buffer %x / cycle %d\n",
    194 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle );
    195 #endif
    196 
    197181}
     182
     183
     184
     185
     186
     187//////////////////////////////////////////////////////////////////////////////////
     188// This static function is called by dev_ioc_sync_read() & dev_ioc_sync_write().
     189// It builds and registers the command in the calling thread descriptor, and
     190// calls directly the blocking IOC driver command, that returns only when the
     191// IO operation is completed.
     192////////////////////////////////////i/////////////////////////////////////////////
     193error_t dev_ioc_sync_access( uint32_t   cmd_type,
     194                             uint8_t  * buffer,
     195                             uint32_t   lba,
     196                             uint32_t   count )
     197{
     198    // get pointer on calling thread
     199    thread_t * this = CURRENT_THREAD;
     200
     201    // software L2/L3 cache coherence for memory buffer
     202    if( chdev_dir.iob )
     203    {
     204        if (cmd_type == IOC_SYNC_READ) dev_mmc_inval( XPTR(local_cxy,buffer) , count<<9 );
     205        else                           dev_mmc_sync ( XPTR(local_cxy,buffer) , count<<9 );
     206    }
     207
     208    // get extended pointer on IOC[0] chdev
     209    xptr_t  ioc_xp = chdev_dir.ioc[0];
     210
     211// check ioc_xp
     212assert( (ioc_xp != XPTR_NULL) , "undefined IOC chdev descriptor" );
     213
     214    // register command in calling thread descriptor
     215    this->ioc_cmd.dev_xp    = ioc_xp;
     216    this->ioc_cmd.type      = cmd_type;
     217    this->ioc_cmd.buf_xp    = XPTR( local_cxy , buffer );
     218    this->ioc_cmd.lba       = lba;
     219    this->ioc_cmd.count     = count;
     220
     221    // get driver command function
     222    cxy_t       ioc_cxy = GET_CXY( ioc_xp );
     223    chdev_t   * ioc_ptr = GET_PTR( ioc_xp );
     224    dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) );
     225
     226    // get core local index for the core handling the IOC IRQ
     227    thread_t * server = (thread_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->server ) );
     228    core_t   * core   = (core_t *)hal_remote_lpt( XPTR( ioc_cxy , &server->core ) );
     229    lid_t      lid    = (lid_t)hal_remote_l32( XPTR( ioc_cxy , &core->lid ) );
     230
     231    // mask the IRQ
     232    dev_pic_disable_irq( lid , ioc_xp );
     233
     234    // call driver function
     235    cmd( XPTR( local_cxy , this ) );
     236
     237    // unmask the IRQ
     238    dev_pic_enable_irq( lid , ioc_xp );
     239
     240    // return I/O operation status from calling thread descriptor
     241    return this->ioc_cmd.error;
     242
     243}  // end ioc_sync_access()
    198244
    199245/////////////////////////////////////////////
     
    202248                           uint32_t   count )
    203249{
    204     // get pointer on calling thread
    205     thread_t * this = CURRENT_THREAD;
    206250
    207251#if DEBUG_DEV_IOC_RX
    208 uint32_t cycle = (uint32_t)hal_get_cycles();
     252thread_t * this  = CURRENT_THREAD;
     253uint32_t   cycle = (uint32_t)hal_get_cycles();
     254if( DEBUG_DEV_IOC_RX < cycle )
     255printk("\n[%s] thread[%x,%x] : lba  %x / buffer %x / cycle %d\n",
     256__FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle );
     257#endif
     258
     259    return dev_ioc_sync_access( IOC_SYNC_READ , buffer , lba , count );
     260}
     261
     262//////////////////////////////////////////////
     263error_t dev_ioc_sync_write( uint8_t  * buffer,
     264                            uint32_t   lba,
     265                            uint32_t   count )
     266{
     267
     268#if DEBUG_DEV_IOC_RX
     269thread_t * this  = CURRENT_THREAD;
     270uint32_t   cycle = (uint32_t)hal_get_cycles();
    209271if( DEBUG_DEV_IOC_RX < cycle )
    210272printk("\n[%s] thread[%x,%x] enters / lba  %x / buffer %x / cycle %d\n",
     
    212274#endif
    213275
    214     // software L2/L3 cache coherence for memory buffer
    215     if( chdev_dir.iob ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
    216 
    217     // get extended pointer on IOC[0] chdev
    218     xptr_t  ioc_xp = chdev_dir.ioc[0];
    219 
    220     assert( (ioc_xp != XPTR_NULL) , "undefined IOC chdev descriptor" );
    221 
    222     // register command in calling thread descriptor
    223     this->ioc_cmd.dev_xp    = ioc_xp;
    224     this->ioc_cmd.type      = IOC_SYNC_READ;
    225     this->ioc_cmd.buf_xp    = XPTR( local_cxy , buffer );
    226     this->ioc_cmd.lba       = lba;
    227     this->ioc_cmd.count     = count;
    228 
    229     // get driver command function
    230     cxy_t       ioc_cxy = GET_CXY( ioc_xp );
    231     chdev_t   * ioc_ptr = (chdev_t *)GET_PTR( ioc_xp );
    232     dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) );
    233 
    234     // get core local index for the core handling the IOC IRQ
    235     thread_t * server = (thread_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->server ) );
    236     core_t   * core   = (core_t *)hal_remote_lpt( XPTR( ioc_cxy , &server->core ) );
    237     lid_t      lid    = (lid_t)hal_remote_l32( XPTR( ioc_cxy , &core->lid ) );
    238 
    239     // mask the IRQ
    240     dev_pic_disable_irq( lid , ioc_xp );
    241 
    242     // call driver function
    243     cmd( XPTR( local_cxy , this ) );
    244 
    245     // unmask the IRQ
    246     dev_pic_enable_irq( lid , ioc_xp );
    247 
    248 #if DEBUG_DEV_IOC_RX
    249 cycle = (uint32_t)hal_get_cycles();
    250 if( DEBUG_DEV_IOC_RX < cycle )
    251 printk("\n[%s] thread[%x,%x] exit / lba  %x / buffer %x / cycle %d\n",
    252 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle );
    253 #endif
    254 
    255     // return I/O operation status from calling thread descriptor
    256     return this->ioc_cmd.error;
    257 
    258 }  // end ioc_sync_read()
    259 
     276    return dev_ioc_sync_access( IOC_SYNC_WRITE , buffer , lba , count );
     277}
     278
  • trunk/kernel/devices/dev_ioc.h

    r457 r614  
    3838 * magnetic hard disk or a SD card, that can store blocks of data in a linear array
    3939 * of sectors indexed by a simple lba (logic block address).
    40  * It supports three command types:
    41  * - READ      : move blocks from device to memory, with a descheduling policy.
    42  * - WRITE     : move blocks from memory to device, with a descheduling policy.
    43  * - SYNC_READ : move blocks from device to memory, with a busy waiting policy.
     40 * It supports four command types:
     41 * - READ       : move blocks from device to memory, with a descheduling policy.
     42 * - WRITE      : move blocks from memory to device, with a descheduling policy.
     43 * - SYNC_READ  : move blocks from device to memory, with a busy waiting policy.
     44 * - SYNC_WRITE : move blocks from memory to device, with a busy waiting policy.
    4445
    4546 * A READ or WRITE operation requires dynamic resource allocation. The calling thread
     
    6465 *    3) release the WTI mailbox to the client cluster WTI allocator.
    6566 *
    66  * The SYNC_READ operation is used by the kernel in the initialisation phase. It does
    67  * not uses the IOC device waiting queue and server thread, and does not use the IOC IRQ,
    68  * but implement a busy-waiting policy for the calling thread.
     67 * The SYNC_READ and SYNC_WRITE operations are used by the kernel in the initialisation
     68 * phase. These operations do not use the IOC device waiting queue, the server thread,
     69 * and the IOC IRQ, but implement a busy-waiting policy for the calling thread.
    6970 *****************************************************************************************/
    7071
     
    8586 *****************************************************************************************/
    8687
    87 enum ioc_impl_e
     88typedef enum
    8889{
    8990    IMPL_IOC_BDV =   0,     
     
    99100 *****************************************************************************************/
    100101
    101 enum
     102typedef enum
    102103{
    103104    IOC_READ       = 0,
    104105    IOC_WRITE      = 1,
    105106    IOC_SYNC_READ  = 2,
    106 };
     107    IOC_SYNC_WRITE = 3,
     108}
     109cmd_type_t;
    107110
    108111typedef struct ioc_command_s
     
    131134
    132135/******************************************************************************************
    133  * This blocking function try to tranfer one or several contiguous blocks of data
     136 * This blocking function moves one or several contiguous blocks of data
    134137 * from the block device to a local memory buffer. The corresponding request is actually
    135138 * registered in the device pending request queue, and the calling thread is descheduled,
     
    147150
    148151/******************************************************************************************
    149  * This blocking function try to tranfer one or several contiguous blocks of data
     152 * This blocking function moves one or several contiguous blocks of data
    150153 * from a local memory buffer to the block device. The corresponding request is actually
    151154 * registered in the device pending request queue, and the calling thread is descheduled,
     
    163166
    164167/******************************************************************************************
    165  * This blocking function try to tranfer one or several contiguous blocks of data
    166  * from the block device to a memory buffer.
     168 * This blocking function moves one or several contiguous blocks of data
     169 * from the block device to a local memory buffer.
    167170 * It does not use the IOC device waiting queue and server thread, and does not use
    168171 * the IOC IRQ, but calls directly the relevant IOC driver, implementing a busy-waiting
     
    179182                           uint32_t       count );
    180183
     184/******************************************************************************************
     185 * This blocking function moves one or several contiguous blocks of data
     186 * from a local memory buffer to the block device.
     187 * It does not use the IOC device waiting queue and server thread, and does not use
     188 * the IOC IRQ, but calls directly the relevant IOC driver, implementing a busy-waiting
     189 * policy for the calling thread.
     190 * It must be called in the client cluster.
     191 ******************************************************************************************
     192 * @ buffer    : local pointer on source buffer in memory (must be block aligned).
     193 * @ lba       : first block index on device.
     194 * @ count     : number of blocks to transfer.
     195 * @ returns 0 if success / returns EINVAL if error.
     196 *****************************************************************************************/
     197error_t dev_ioc_sync_write( uint8_t      * buffer,
     198                            uint32_t       lba,
     199                            uint32_t       count );
     200
    181201#endif  /* _DEV_IOC_H */
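
As a usage illustration (not part of this changeset), here is a minimal sketch of a caller flushing one 4-Kbytes page (8 blocks of 512 bytes) with the new busy-waiting primitive; the helper name is hypothetical:

    // hypothetical helper : synchronously write one page to the block device,
    // e.g. during kernel initialisation, when the descheduling-based
    // dev_ioc_write() cannot be used because the IOC server thread and the
    // IOC IRQ are not yet operational
    static error_t flush_page_sync( uint8_t * page_base,   // block-aligned buffer
                                    uint32_t  lba )        // first block index
    {
        error_t error = dev_ioc_sync_write( page_base , lba , 8 );

        if( error ) printk("\n[ERROR] in %s : cannot write page to IOC device\n",
                    __FUNCTION__ );
        return error;
    }
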
  • trunk/kernel/fs/devfs.c

    r612 r614  
    110110assert( (error == 0) , "cannot create <dev>\n" );
    111111
    112 #if DEBUG_DEVFS_INIT
     112#if DEBUG_DEVFS_GLOBAL_INIT
    113113uint32_t   cycle = (uint32_t)hal_get_cycles();
    114114thread_t * this  = CURRENT_THREAD;
    115 if( DEBUG_DEVFS_INIT < cycle )
     115if( DEBUG_DEVFS_GLOBAL_INIT < cycle )
    116116printk("\n[%s] thread[%x,%x] created <dev> inode / cycle %d\n",
    117117__FUNCTION__, this->process->pid, this->trdid, cycle );
     
    134134assert( (error == 0) , "cannot create <external>\n" );
    135135
    136 #if DEBUG_DEVFS_INIT
    137 cycle = (uint32_t)hal_get_cycles();
    138 if( DEBUG_DEVFS_INIT < cycle )
     136#if DEBUG_DEVFS_GLOBAL_INIT
     137cycle = (uint32_t)hal_get_cycles();
     138if( DEBUG_DEVFS_GLOBAL_INIT < cycle )
    139139printk("\n[%s] thread[%x,%x] created <external> inode / cycle %d\n",
    140140__FUNCTION__, this->process->pid, this->trdid, cycle );
     
    159159    error_t       error;
    160160
     161#if DEBUG_DEVFS_LOCAL_INIT
     162uint32_t   cycle = (uint32_t)hal_get_cycles();
     163thread_t * this  = CURRENT_THREAD;
     164if( DEBUG_DEVFS_LOCAL_INIT < cycle )
     165printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     166__FUNCTION__, this->process->pid, this->trdid, cycle );
     167#endif
     168
    161169    // create "internal" directory
    162170    snprintf( node_name , 16 , "internal_%x" , local_cxy );
     
    177185assert( (error == 0) , "cannot create <external>\n" );
    178186
    179 #if DEBUG_DEVFS_INIT
    180 uint32_t   cycle = (uint32_t)hal_get_cycles();
    181 thread_t * this  = CURRENT_THREAD;
    182 if( DEBUG_DEVFS_INIT < cycle )
     187#if DEBUG_DEVFS_LOCAL_INIT
     188cycle = (uint32_t)hal_get_cycles();
     189if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    183190printk("\n[%s] thread[%x,%x] created <%s> inode in cluster %x / cycle %d\n",
    184191__FUNCTION__, this->process->pid, this->trdid, node_name, local_cxy, cycle );
     
    209216        hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    210217       
    211 #if DEBUG_DEVFS_INIT
    212 cycle = (uint32_t)hal_get_cycles();
    213 if( DEBUG_DEVFS_INIT < cycle )
     218#if DEBUG_DEVFS_LOCAL_INIT
     219cycle = (uint32_t)hal_get_cycles();
     220if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    214221printk("\n[%s] thread[%x,%x] created <mmc> inode in cluster %x\n",
    215222__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     
    244251            hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    245252       
    246 #if DEBUG_DEVFS_INIT
    247 cycle = (uint32_t)hal_get_cycles();
    248 if( DEBUG_DEVFS_INIT < cycle )
     253#if DEBUG_DEVFS_LOCAL_INIT
     254cycle = (uint32_t)hal_get_cycles();
     255if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    249256printk("\n[%s] thread [%x,%x] created <dma[%d]> inode in cluster %x\n",
    250257__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
     
    277284            hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    278285       
    279 #if DEBUG_DEVFS_INIT
    280 cycle = (uint32_t)hal_get_cycles();
    281 if( DEBUG_DEVFS_INIT < cycle )
     286#if DEBUG_DEVFS_LOCAL_INIT
     287cycle = (uint32_t)hal_get_cycles();
     288if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    282289printk("\n[%s] thread[%x,%x] created <iob> inode in cluster %x\n",
    283290__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     
    310317            hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    311318       
    312 #if DEBUG_DEVFS_INIT
    313 cycle = (uint32_t)hal_get_cycles();
    314 if( DEBUG_DEVFS_INIT < cycle )
     319#if DEBUG_DEVFS_LOCAL_INIT
     320cycle = (uint32_t)hal_get_cycles();
     321if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    315322printk("\n[%s] thread[%x,%x] created <pic> inode in cluster %x\n",
    316323__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     
    345352                hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    346353       
    347 #if DEBUG_DEVFS_INIT
    348 cycle = (uint32_t)hal_get_cycles();
    349 if( DEBUG_DEVFS_INIT < cycle )
     354#if DEBUG_DEVFS_LOCAL_INIT
     355cycle = (uint32_t)hal_get_cycles();
     356if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    350357printk("\n[%s] thread[%x,%x] created <txt_rx[%d]> inode in cluster %x\n",
    351358__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
     
    381388                hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    382389       
    383 #if DEBUG_DEVFS_INIT
    384 cycle = (uint32_t)hal_get_cycles();
    385 if( DEBUG_DEVFS_INIT < cycle )
     390#if DEBUG_DEVFS_LOCAL_INIT
     391cycle = (uint32_t)hal_get_cycles();
     392if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    386393printk("\n[%s] thread[%x,%x] created <txt_tx[%d]> inode in cluster %x\n",
    387394__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
     
    417424                hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    418425       
    419 #if DEBUG_DEVFS_INIT
    420 cycle = (uint32_t)hal_get_cycles();
    421 if( DEBUG_DEVFS_INIT < cycle )
     426#if DEBUG_DEVFS_LOCAL_INIT
     427cycle = (uint32_t)hal_get_cycles();
     428if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    422429printk("\n[%s] thread[%x,%x] created <ioc[%d]> inode in cluster %x\n",
    423430__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
     
    453460                hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    454461       
    455 #if DEBUG_DEVFS_INIT
    456 cycle = (uint32_t)hal_get_cycles();
    457 if( DEBUG_DEVFS_INIT < cycle )
     462#if DEBUG_DEVFS_LOCAL_INIT
     463cycle = (uint32_t)hal_get_cycles();
     464if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    458465printk("\n[%s] thread[%x,%x] created <fbf[%d]> inode in cluster %x\n",
    459466__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
     
    489496                hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    490497 
    491 #if DEBUG_DEVFS_INIT
    492 cycle = (uint32_t)hal_get_cycles();
    493 if( DEBUG_DEVFS_INIT < cycle )
     498#if DEBUG_DEVFS_LOCAL_INIT
     499cycle = (uint32_t)hal_get_cycles();
     500if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    494501printk("\n[%s] thread[%x,%x] created <nic_rx[%d]> inode in cluster %x\n",
    495502__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
     
    525532                hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
    526533       
    527 #if DEBUG_DEVFS_INIT
    528 cycle = (uint32_t)hal_get_cycles();
    529 if( DEBUG_DEVFS_INIT < cycle )
     534#if DEBUG_DEVFS_LOCAL_INIT
     535cycle = (uint32_t)hal_get_cycles();
     536if( DEBUG_DEVFS_LOCAL_INIT < cycle )
    530537printk("\n[%s] thread[%x,%x] created <nic_tx[%d]> inode in cluster %x\n",
    531538__FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle );
  • trunk/kernel/fs/fatfs.c

    r612 r614  
    10641064        {
    10651065            // copy the modified page to IOC device
    1066             fatfs_move_page( page_xp , false );   
     1066            fatfs_move_page( page_xp , IOC_SYNC_WRITE );   
    10671067
    10681068            // get the next page in FAT mapper
     
    12171217
    12181218    // copy the modified page to the IOC device
    1219     fatfs_move_page( page_xp , false );   
     1219    fatfs_move_page( page_xp , IOC_SYNC_WRITE );   
    12201220
    12211221#if DEBUG_FATFS_ADD_DENTRY
    12221222cycle = (uint32_t)hal_get_cycles();
    12231223if( DEBUG_FATFS_ADD_DENTRY < cycle )
    1224 printk("\n[%s]  thread[%x,%x] exit / parent %s / child %s / cycle %d\n",
     1224printk("\n[%s]  thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n",
    12251225__FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );
    12261226#endif
     
    13131313
    13141314            // copy the modified page to the IOC device
    1315             fatfs_move_page( page_xp , false );   
     1315            fatfs_move_page( page_xp , IOC_SYNC_WRITE );   
    13161316
    13171317            // get extended pointer on page descriptor from parent directory mapper
     
    13451345
    13461346    // copy the modified page to the IOC device
    1347     fatfs_move_page( page_xp , false );   
     1347    fatfs_move_page( page_xp , IOC_SYNC_WRITE );   
    13481348   
    13491349
     
    13961396    xptr_t     base_xp;          // extended pointer on page base
    13971397    uint8_t  * base;             // local pointer on page base
    1398     uint32_t   attr;             // directory entry ATTR field
    1399     uint32_t   ord;              // directory entry ORD field
     1398    uint8_t    attr;             // directory entry ATTR field
     1399    uint8_t    ord;              // directory entry ORD field
    14001400    uint32_t   seq;              // sequence index
    14011401    uint32_t   lfn       = 0;    // LFN entries number
     
    14221422#if (DEBUG_FATFS_GET_DENTRY & 0x1)
    14231423if( DEBUG_FATFS_GET_DENTRY < cycle )
    1424 mapper_display_page( mapper_xp , page_id , 256 , parent_name );
     1424mapper_display_page( mapper_xp , page_id , 256 );
    14251425#endif
    14261426        // scan this page until end of directory, end of page, or name found
     
    14351435            }
    14361436            else if ( ord == FREE_ENTRY )             // free entry => skip
     1437            {
     1438                offset = offset + 32;
     1439            }
      1440            else if ( attr == 0x28 )                  // volume_id => skip
    14371441            {
    14381442                offset = offset + 32;
     
    15771581assert( (detailed == false), "detailed argument not supported/n");
    15781582
    1579     char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracter from each directory entry
     1583    char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracted from each dentry
    15801584
    15811585    char       lfn1[16];           // buffer for one partial cname
     
    15851589    xptr_t     base_xp;            // extended pointer on page base
    15861590    uint8_t  * base;               // local pointer on page base
    1587     uint32_t   attr;               // directory entry ATTR field
    1588     uint32_t   ord;                // directory entry ORD field
     1591    uint8_t    attr;               // directory entry ATTR field
     1592    uint8_t    ord;                // directory entry ORD field
    15891593    uint32_t   seq;                // sequence index
    15901594    uint32_t   lfn       = 0;      // LFN entries number
     
    16091613#if (DEBUG_FATFS_GET_USER_DIR & 0x1)
    16101614if( DEBUG_FATFS_GET_USER_DIR < cycle )
    1611 mapper_display_page( mapper_xp , page_id , 256 , inode_name );
     1615mapper_display_page( mapper_xp , page_id , 256 );
    16121616#endif
    16131617        // loop on NORMAL/LFN (32 bytes) directory entries in this page
     
    16251629            }
    16261630            else if ( ord == FREE_ENTRY )             // free entry => skip
     1631            {
     1632                offset = offset + 32;
     1633            }
      1634            else if ( attr == 0x28 )                  // volume_id => skip
    16271635            {
    16281636                offset = offset + 32;
     
    16931701if( DEBUG_FATFS_GET_USER_DIR < cycle )
    16941702printk("\n[%s]  thread[%x,%x] exit for inode <%s> / %d entries / cycle %d\n",
    1695 __FUNCTION__, this->process->pid, this->trdid, inode_name, entries, cycle );
     1703__FUNCTION__, this->process->pid, this->trdid, inode_name, dirent_id, cycle );
    16961704#endif
    16971705
     
    17561764
    17571765                // move page from mapper to device
    1758                 error = fatfs_move_page( page_xp , false );
     1766                error = fatfs_move_page( page_xp , IOC_WRITE );
    17591767
    17601768                if ( error )  return -1;
     
    18271835#endif
    18281836                // move page from mapper to device
    1829                 error = fatfs_move_page( page_xp , false );
     1837                error = fatfs_move_page( page_xp , IOC_SYNC_WRITE );
    18301838
    18311839                if ( error )  return -1;
     
    21322140}  // end fatfs_release_inode()
    21332141
    2134 /////////////////////////////////////////
    2135 error_t fatfs_move_page( xptr_t  page_xp,
    2136                          bool_t  to_mapper )
     2142////////////////////////////////////////////
     2143error_t fatfs_move_page( xptr_t     page_xp,
     2144                         cmd_type_t cmd_type )
    21372145{
    21382146    error_t       error;
     
    21722180 
    21732181        // access device
    2174         if( to_mapper ) error = dev_ioc_sync_read ( buffer , lba , 8 );
    2175         else            error = dev_ioc_write( buffer , lba , 8 );     
     2182        if     ( cmd_type == IOC_SYNC_READ  ) error = dev_ioc_sync_read ( buffer , lba , 8 );
     2183        else if( cmd_type == IOC_SYNC_WRITE ) error = dev_ioc_sync_write( buffer , lba , 8 );
     2184        else if( cmd_type == IOC_READ       ) error = dev_ioc_read      ( buffer , lba , 8 );
     2185        else if( cmd_type == IOC_WRITE      ) error = dev_ioc_write     ( buffer , lba , 8 );
     2186        else                                  error = -1;
    21762187
    21772188        if( error ) return EIO;
     
    21792190#if (DEBUG_FATFS_MOVE_PAGE & 0x1)
    21802191if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2181 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , "FAT" );
     2192mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id );
    21822193#endif
    21832194
     
    21862197if( DEBUG_FATFS_MOVE_PAGE < cycle )
    21872198{
    2188     if (to_mapper)
     2199    if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) )
    21892200         printk("\n[%s] thread[%x,%x] load page %d of FAT / cycle %d\n",
    21902201         __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
     
    22302241
    22312242        // access device
    2232         if( to_mapper ) error = dev_ioc_sync_read ( buffer , lba , 8 );
    2233         else            error = dev_ioc_write( buffer , lba , 8 );     
     2243        if     ( cmd_type == IOC_SYNC_READ  ) error = dev_ioc_sync_read ( buffer , lba , 8 );
     2244        else if( cmd_type == IOC_SYNC_WRITE ) error = dev_ioc_sync_write( buffer , lba , 8 );
     2245        else if( cmd_type == IOC_READ       ) error = dev_ioc_read      ( buffer , lba , 8 );
     2246        else if( cmd_type == IOC_WRITE      ) error = dev_ioc_write     ( buffer , lba , 8 );
     2247        else                                  error = -1;
    22342248
    22352249        if( error ) return EIO;
     
    22372251#if (DEBUG_FATFS_MOVE_PAGE & 0x1)
    22382252if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2239 char string[CONFIG_VFS_MAX_NAME_LENGTH];
    2240 vfs_inode_get_name( XPTR(page_cxy , inode_ptr) , string );
    2241 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , string );
     2253mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id );
    22422254#endif
    22432255
     
    22462258if(DEBUG_FATFS_MOVE_PAGE < cycle)
    22472259{
    2248     if(to_mapper)
     2260    if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) )
    22492261        printk("\n[%s] thread[%x,%x] load page %d of <%s> inode / cycle %d\n",
    22502262        __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
  • trunk/kernel/fs/fatfs.h

    r612 r614  
    2929#include <remote_queuelock.h>
    3030#include <vfs.h>
     31#include <dev_ioc.h>
    3132
    3233
     
    365366 * and copies from mapper to device each page marked as dirty.
    366367 * WARNING : The target <inode> cannot be a directory, because all modifications in a
    367  * directory * are synchronously done on the IOC device by the two fatfs_add_dentry()
     368 * directory are synchronously done on the IOC device by the two fatfs_add_dentry()
    368369 * and fatfs_remove_dentry() functions.
    369370 *****************************************************************************************
     
    451452 *****************************************************************************************
    452453 * @ page_xp   : extended pointer on page descriptor.
    453  * @ to_mapper : true for device->mapper / false for mapper->device
     454 * @ cmd_type  : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYNC_WRITE
    454455 * @ return 0 if success / return EIO if error during device access.
    455456 ****************************************************************************************/
    456 error_t fatfs_move_page( xptr_t  page_xp,
    457                          bool_t  to_mapper );
     457error_t fatfs_move_page( xptr_t      page_xp,
     458                         cmd_type_t  cmd_type );
    458459
    459460
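
To make the new <cmd_type> argument concrete, here is a hedged sketch of the mapper-to-device call sites shown in the fatfs.c fragments above; the helper name and the <in_kernel_init> flag are hypothetical:

    // hypothetical helper : copy a dirty mapper page to the IOC device,
    // selecting the waiting policy with the new <cmd_type> argument
    // (the r605 equivalent was : fatfs_move_page( page_xp , false ))
    static error_t flush_dirty_page( xptr_t  page_xp,
                                     bool_t  in_kernel_init )
    {
        if( in_kernel_init )   // busy-waiting policy : no server thread, no IOC IRQ
            return fatfs_move_page( page_xp , IOC_SYNC_WRITE );
        else                   // descheduling policy : normal operation
            return fatfs_move_page( page_xp , IOC_WRITE );
    }
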
  • trunk/kernel/fs/vfs.c

    r612 r614  
    424424uint32_t cycle = (uint32_t)hal_get_cycles();
    425425if( DEBUG_VFS_DENTRY_CREATE < cycle )
    426 printk("\n[%s] thread[%x,%x] enter for <%s> / parent_inode %x / cycle %d\n",
    427 __FUNCTION__, this->process->pid, this->trdid, name, parent, cycle );
     426printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
     427__FUNCTION__, this->process->pid, this->trdid, name, cycle );
    428428#endif
    429429
     
    19461946
    19471947    // display inode
    1948     nolock_printk("%s%s <%s> : inum %d / %d bytes / dirty %d / cxy %x (inode %x / mapper %x)\n",
    1949                   indent_str[indent], vfs_inode_type_str( inode_type ), name,
     1948    nolock_printk("%s<%s> : %s / inum %d / %d bytes / dirty %d / cxy %x / inode %x / mapper %x\n",
     1949                  indent_str[indent], name, vfs_inode_type_str( inode_type ),
    19501950                  inode_inum, inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr );
    19511951
     
    31913191//////////////////////////////////////////////////////////////////////////////////////////
    31923192
    3193 ///////////////////////////////////////////
    3194 error_t vfs_fs_move_page( xptr_t   page_xp,
    3195                           bool_t   to_mapper )
     3193//////////////////////////////////////////////
     3194error_t vfs_fs_move_page( xptr_t      page_xp,
     3195                          cmd_type_t  cmd_type )
    31963196{
    31973197    error_t error = 0;
     
    32133213    if( fs_type == FS_TYPE_FATFS )
    32143214    {
    3215         error = fatfs_move_page( page_xp , to_mapper );
     3215        error = fatfs_move_page( page_xp , cmd_type );
    32163216    }
    32173217    else if( fs_type == FS_TYPE_RAMFS )
  • trunk/kernel/fs/vfs.h

    r612 r614  
    4141#include <ramfs.h>
    4242#include <devfs.h>
     43#include <dev_ioc.h>
    4344
    4445/****  Forward declarations  ***/
     
    408409 * It must called by a local thread. Use the RPC_DENTRY_CREATE if client thread is remote.
    409410 ******************************************************************************************
    410  * @ fs_type    : file system type.
    411  * @ name       : directory entry file/dir name.
     411 * @ fs_type    : [in]  file system type.
     412 * @ name       : [in]  directory entry file/dir name.
    412413 * @ dentry_xp  : [out] buffer for extended pointer on created dentry.
    413414 * @ return 0 if success / return ENOMEM or EINVAL if error.
     
    421422 * allocated to the dentry descriptor.
    422423 * It must be executed by a thread running in the cluster containing the dentry.
    423  * Use the rpc_vfs_dentry_destroy_client() function if required.
     424 * Use the RPC_DENTRY_DESTROY if required.
    424425 ******************************************************************************************
    425  * @ dentry  : local pointer on dentry descriptor.
     426 * @ dentry  : [in] local pointer on dentry descriptor.
    426427 *****************************************************************************************/
    427428void vfs_dentry_destroy( vfs_dentry_t *  dentry ); 
     
    875876
    876877/******************************************************************************************
     878 * This function makes the I/O operation to move one page identified by the <page_xp>
     879 * argument to/from the IOC device from/to the mapper, as defined by <cmd_type>.
      880 * Depending on the file system type, it calls the proper FS-specific function.
     881 * It is used in case of MISS on the mapper, or when a dirty page in the mapper must
     882 * be updated in the File System.
     883 * The mapper pointer is obtained from the page descriptor.
     884 * It can be executed by any thread running in any cluster.
     885 * This function does NOT take any lock.
     886 ******************************************************************************************
     887 * @ page_xp   : extended pointer on page descriptor (for mapper and page_id).
     888 * @ cmd_type  : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYNC_WRITE
     889 * @ returns 0 if success / return -1 if device access failure.
     890 *****************************************************************************************/
     891error_t vfs_fs_move_page( xptr_t      page_xp,
     892                          cmd_type_t  cmd_type );
     893
     894/******************************************************************************************
    877895 * This function updates the mapper associated to a directory inode identified by the
    878896 * <parent> argument, to add a new entry identified by the <dentry> argument.
     
    10321050error_t vfs_fs_release_inode( xptr_t  inode_xp );
    10331051
    1034 /******************************************************************************************
    1035  * This function makes the I/O operation to move one page identified by the <page_xp>
    1036  * argument to/from the IOC device from/to the mapper, as defined by <to_mapper>.
    1037  * Depending on the file system type, it calls the proper, FS specific function.
    1038  * It is used in case of MISS on the mapper, or when a dirty page in the mapper must
    1039  * be updated in the File System.
    1040  * The mapper pointer is obtained from the page descriptor.
    1041  * It can be executed by any thread running in any cluster.
    1042  * This function does NOT take any lock.
    1043  ******************************************************************************************
    1044  * @ page_xp   : extended pointer on the page descriptor.
    1045  * @ to_mapper : transfer direction.
    1046  * @ returns 0 if success / return -1 if device access failure.
    1047  *****************************************************************************************/
    1048 error_t vfs_fs_move_page( xptr_t  page_xp,
    1049                           bool_t  to_mapper );
    1050 
    10511052
    10521053#endif  /* _VFS_H_ */
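
The new vfs_fs_move_page() comment above mentions the mapper MISS case; a minimal sketch of that path, under the assumption that <page_xp> is an extended pointer on a page descriptor already registered in a mapper (helper name hypothetical):

    // hypothetical helper : handle a mapper MISS by loading the missing page
    // from the File System, with a descheduling policy
    static error_t handle_mapper_miss( xptr_t page_xp )
    {
        error_t error = vfs_fs_move_page( page_xp , IOC_READ );

        if( error ) printk("\n[ERROR] in %s : cannot load page from device\n",
                    __FUNCTION__ );
        return error;
    }
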
  • trunk/kernel/kern/kernel_init.c

    r612 r614  
    11131113        vfs_root_inode_xp = XPTR_NULL;
    11141114
    1115         // File System must be FATFS in this implementation,
    1116         // but other File System can be introduced here
     1115        // Only FATFS is supported yet,
     1116        // other File System can be introduced here
    11171117        if( CONFIG_VFS_ROOT_IS_FATFS )
    11181118        {
     
    11721172        }
    11731173
     1174        // create the <.> and <..> dentries in VFS root directory
     1175        // the VFS root parent inode is the VFS root inode itself
     1176        vfs_add_special_dentries( vfs_root_inode_xp,
     1177                                  vfs_root_inode_xp );
     1178
    11741179        // register VFS root inode in process_zero descriptor of cluster 0
    11751180        process_zero.vfs_root_xp = vfs_root_inode_xp;
     
    12551260#if DEBUG_KERNEL_INIT
    12561261if( (core_lid ==  0) & (local_cxy == 1) )
    1257 printk("\n[%s] : exit barrier 4 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
     1262printk("\n[%s] : exit barrier 5 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
    12581263__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    12591264GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
  • trunk/kernel/kern/rpc.c

    r612 r614  
    5454    &rpc_undefined,                        // 2    unused slot
    5555    &rpc_process_make_fork_server,         // 3
    56     &rpc_undefined,                        // 4    unused slot
    57     &rpc_undefined,                        // 5    unused slot
     56    &rpc_user_dir_create_server,           // 4
     57    &rpc_user_dir_destroy_server,          // 5
    5858    &rpc_thread_user_create_server,        // 6
    5959    &rpc_thread_kernel_create_server,      // 7
     
    9090    "undefined",                 // 2
    9191    "PROCESS_MAKE_FORK",         // 3
    92     "undefined",                 // 4
    93     "undefined",                 // 5
     92    "USER_DIR_CREATE",           // 4
     93    "USER_DIR_DESTROY",          // 5
    9494    "THREAD_USER_CREATE",        // 6
    9595    "THREAD_KERNEL_CREATE",      // 7
     
    657657void rpc_user_dir_create_client( cxy_t          cxy,
    658658                                 vfs_inode_t *  inode,
     659                                 xptr_t         ref_xp,
    659660                                 user_dir_t  ** dir )
    660661{
     
    677678    // set input arguments in RPC descriptor
    678679    rpc.args[0] = (uint64_t)(intptr_t)inode;
     680    rpc.args[1] = (uint64_t)ref_xp;
    679681
    680682    // register RPC request in remote RPC fifo
     
    682684
    683685    // get output argument from RPC descriptor
    684     *dir = (user_dir_t *)(intptr_t)rpc.args[1];
     686    *dir = (user_dir_t *)(intptr_t)rpc.args[2];
    685687
    686688#if DEBUG_RPC_USER_DIR_CREATE
     
    704706
    705707    vfs_inode_t * inode;          // pointer on inode in server cluster
     708    xptr_t        ref_xp;         // extended pointer on reference user process
    706709    user_dir_t  * dir;            // pointer on user_dir structure in server cluster
    707710
     
    711714
    712715    // get input argument from RPC descriptor
    713     inode = (vfs_inode_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     716    inode  = (vfs_inode_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     717    ref_xp = (xptr_t)                 hal_remote_l64(XPTR(client_cxy , &desc->args[1]));
    714718
    715719    // call kernel function
    716     dir = user_dir_create( inode );
     720    dir = user_dir_create( inode , ref_xp );
    717721
    718722    // set output argument into RPC descriptor
    719     hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (intptr_t)dir );
     723    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (intptr_t)dir );
    720724
    721725#if DEBUG_RPC_USER_DIR_CREATE
     
    733737////////////////////////////////////////////////////
    734738void rpc_user_dir_destroy_client( cxy_t         cxy,
    735                                   user_dir_t  * dir )
     739                                  user_dir_t  * dir,
     740                                  xptr_t        ref_xp )
    736741{
    737742#if DEBUG_RPC_USER_DIR_DESTROY
     
    753758    // set input arguments in RPC descriptor
    754759    rpc.args[0] = (uint64_t)(intptr_t)dir;
     760    rpc.args[1] = (uint64_t)ref_xp;
    755761
    756762    // register RPC request in remote RPC fifo
     
    777783
    778784    user_dir_t * dir;            // pointer on user_dir structure in server cluster
     785    xptr_t       ref_xp;         // extended pointer on reference process
    779786
    780787    // get client cluster identifier and pointer on RPC descriptor
     
    783790
    784791    // get input argument from RPC descriptor
    785     dir = (user_dir_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     792    dir    = (user_dir_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     793    ref_xp = (xptr_t)                hal_remote_l64(XPTR(client_cxy , &desc->args[1]));
    786794
    787795    // call kernel function
    788     user_dir_destroy( dir );
     796    user_dir_destroy( dir , ref_xp );
    789797
    790798#if DEBUG_RPC_USER_DIR_DESTROY
  • trunk/kernel/kern/rpc.h

    r612 r614  
    229229 * [4] The RPC_USER_DIR_CREATE allows a client thread to create an user_dir_t
    230230 * structure and the associated array of dirents in a remote cluster containing
    231  * the target directory inode. It is called by the sys_opendir() function.
     231 * the target directory <inode>. It creates an ANON vseg in the user reference
     232 * process VMM identified by the <ref_xp>. This reference cluster can be
     233 * different from both the client and server clusters.
     234 * It is called by the sys_opendir() function.
    232235 ***********************************************************************************
    233236 * @ cxy        : server cluster identifier.
    234237 * @ inode      : [in]   local pointer on inode in server cluster.
     238 * @ ref_xp     : [in]   extended pointer on user reference process descriptor.
    235239 * @ dir        : [out]  local pointer on created user_dir structure.
    236240 **********************************************************************************/
    237241void rpc_user_dir_create_client( cxy_t                 cxy,
    238242                                 struct vfs_inode_s  * inode,
     243                                 xptr_t                ref_xp,
    239244                                 struct user_dir_s  ** dir );
    240245
     
    248253 * @ cxy        : server cluster identifier.
    249254 * @ dir        : [in]  local pointer on created user_dir structure.
     255 * @ ref_xp     : [in]   extended pointer on user reference process descriptor.
    250256 **********************************************************************************/
    251257void rpc_user_dir_destroy_client( cxy_t               cxy,
    252                                   struct user_dir_s * dir );
     258                                  struct user_dir_s * dir,
     259                                  xptr_t              ref_xp );
    253260
    254261void rpc_user_dir_destroy_server( xptr_t xp );
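
A hedged sketch of the client side of RPC_USER_DIR_CREATE with the new <ref_xp> argument, e.g. from sys_opendir(); the <inode_cxy>, <inode_ptr> and <ref_xp> variables are assumptions of this example:

    // select local call or RPC depending on the directory inode cluster
    user_dir_t * dir;

    if( inode_cxy == local_cxy )          // directory inode is local
    {
        dir = user_dir_create( inode_ptr , ref_xp );
    }
    else                                  // directory inode is remote
    {
        rpc_user_dir_create_client( inode_cxy , inode_ptr , ref_xp , &dir );
    }

    if( dir == NULL ) printk("\n[ERROR] cannot create user_dir structure\n");
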
  • trunk/kernel/kern/scheduler.c

    r610 r614  
    487487 
    488488#if (DEBUG_SCHED_YIELD & 0x1)
    489 if( sched->trace ) sched_display( lid );
     489if( sched->trace )
     490sched_display( lid );
    490491#endif
    491492
    492 // This assert should never be false, as this check must be
    493 // done before by any function that can possibly deschedule...
     493// This assert should never be false, as this check has been
     494// done before, by any function that can possibly deschedule...
    494495assert( (current->busylocks == 0),
    495496"unexpected descheduling of thread holding %d busylocks = %d\n", current->busylocks );
  • trunk/kernel/kernel_config.h

    r612 r614  
    33 *
    44 * Authors  Mohamed Lamine Karaoui (2015)
    5  *          Alain Greiner (2016,2017,2018)
     5 *          Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    6262#define DEBUG_DEV_PIC                     0
    6363
    64 #define DEBUG_DEVFS_INIT                  1
     64#define DEBUG_DEVFS_GLOBAL_INIT           0
     65#define DEBUG_DEVFS_LOCAL_INIT            0
    6566#define DEBUG_DEVFS_MOVE                  0
    6667
     
    7879#define DEBUG_FATFS_FREE_CLUSTERS         0
    7980#define DEBUG_FATFS_GET_CLUSTER           0
    80 #define DEBUG_FATFS_GET_DIRENT            1
    81 #define DEBUG_FATFS_GET_USER_DIR          1
     81#define DEBUG_FATFS_GET_DENTRY            0
     82#define DEBUG_FATFS_GET_USER_DIR          0
    8283#define DEBUG_FATFS_MOVE_PAGE             0
    8384#define DEBUG_FATFS_RELEASE_INODE         0
     
    212213#define DEBUG_THREAD_USER_EXEC            0
    213214
    214 #define DEBUG_USER_DIR                    1
     215#define DEBUG_USER_DIR                    0
    215216
    216217#define DEBUG_VFS_ADD_CHILD               0
    217 #define DEBUG_VFS_ADD_SPECIAL             1
     218#define DEBUG_VFS_ADD_SPECIAL             0
    218219#define DEBUG_VFS_CHDIR                   0
    219220#define DEBUG_VFS_CLOSE                   0
     
    229230#define DEBUG_VFS_NEW_CHILD_INIT          0
    230231#define DEBUG_VFS_OPEN                    0
     232#define DEBUG_VFS_OPENDIR                 0
    231233#define DEBUG_VFS_STAT                    0
    232234#define DEBUG_VFS_UNLINK                  0
     
    241243#define DEBUG_VMM_HANDLE_COW              0
    242244#define DEBUG_VMM_INIT                    0
     245#define DEBUG_VMM_MMAP_ALLOC              0
    243246#define DEBUG_VMM_PAGE_ALLOCATE           0
    244247#define DEBUG_VMM_SET_COW                 0
     
    356359#define CONFIG_VFS_ROOT_IS_EX2FS            0          // root FS is EX2FS if non zero
    357360
     361#define CONFIG_MAPPER_GRDXT_W1              7      // number of bits for RADIX_TREE_IX1
     362#define CONFIG_MAPPER_GRDXT_W2              7      // number of bits for RADIX_TREE_IX2
     363#define CONFIG_MAPPER_GRDXT_W3              6      // number of bits for RADIX_TREE_IX3
     364
    358365////////////////////////////////////////////////////////////////////////////////////////////
    359366//                                  DQDT       
     
    394401#define CONFIG_REMOTE_FIFO_MAX_ITERATIONS   1024
    395402#define CONFIG_RPC_THREADS_MAX              4      // max number of RPC threads per core
    396 
    397 ////////////////////////////////////////////////////////////////////////////////////////////
    398 //                                  MAPPER
    399 ////////////////////////////////////////////////////////////////////////////////////////////
    400 
    401 #define CONFIG_MAPPER_MAX_FRAGMENTS         10     // max number of fragments moved
    402 #define CONFIG_MAPPER_MIN                   CONFIG_VFS_INODE_MIN
    403 #define CONFIG_MAPPER_MAX                   CONFIG_VFS_INODE_MAX
    404 #define CONFIG_MAPPER_GRDXT_W1              7      // number of bits for RADIX_TREE_IX1
    405 #define CONFIG_MAPPER_GRDXT_W2              7      // number of bits for RADIX_TREE_IX2
    406 #define CONFIG_MAPPER_GRDXT_W3              6      // number of bits for RADIX_TREE_IX3
    407403
    408404////////////////////////////////////////////////////////////////////////////////////////////
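
The three CONFIG_MAPPER_GRDXT_W* parameters moved above define how a mapper page index is split over the three radix-tree levels (7 + 7 + 6 = 20 bits). A hedged sketch of this decomposition, assuming IX1 holds the most-significant bits; the helper name is hypothetical:

    // hypothetical helper : split a 20-bit page index into the three
    // radix-tree level indexes defined by the GRDXT widths
    static inline void grdxt_indexes( uint32_t   page_id,
                                      uint32_t * ix1,
                                      uint32_t * ix2,
                                      uint32_t * ix3 )
    {
        *ix1 = (page_id >> (CONFIG_MAPPER_GRDXT_W2 + CONFIG_MAPPER_GRDXT_W3)) &
               ((1 << CONFIG_MAPPER_GRDXT_W1) - 1);
        *ix2 = (page_id >> CONFIG_MAPPER_GRDXT_W3) &
               ((1 << CONFIG_MAPPER_GRDXT_W2) - 1);
        *ix3 =  page_id & ((1 << CONFIG_MAPPER_GRDXT_W3) - 1);
    }
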
  • trunk/kernel/libk/user_dir.c

    r613 r614  
    8080}  // end user_dir_from_ident()
    8181
    82 ///////////////////////////////////////////////////
    83 user_dir_t * user_dir_create( vfs_inode_t * inode )
     82//////////////////////////////////////////////////
     83user_dir_t * user_dir_create( vfs_inode_t * inode,
     84                              xptr_t        ref_xp )
    8485{
    8586    user_dir_t    * dir;               // local pointer on created user_dir_t
    8687    vseg_t        * vseg;              // local pointer on dirent array vseg
    8788    uint32_t        vseg_size;         // size of vseg in bytes
    88     process_t     * process;           // local pointer on calling process
    89     xptr_t          ref_xp;            // extended pointer on reference process
    9089    process_t     * ref_ptr;           // local pointer on reference process
    9190    cxy_t           ref_cxy;           // reference process cluster identifier
     91    pid_t           ref_pid;           // reference process PID
    9292    xptr_t          gpt_xp;            // extended pointer on reference process GPT
    9393    uint32_t        gpt_attributes;    // attributes for all mapped gpt entries
     
    109109    error_t         error;
    110110
    111     // get pointer on local process descriptor
    112     process = CURRENT_THREAD->process;
     111    // get cluster, local pointer, and pid of reference user process
     112    ref_cxy = GET_CXY( ref_xp );
     113    ref_ptr = GET_PTR( ref_xp );
     114    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
    113115
    114116#if DEBUG_USER_DIR
     
    116118thread_t * this = CURRENT_THREAD;
    117119if( cycle > DEBUG_USER_DIR )
    118 printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n",
    119 __FUNCTION__, process->pid, this->trdid, local_cxy, inode, cycle );
     120printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n",
     121__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle );
    120122#endif
    121123
     
    128130    // initialise temporary list of pages
    129131    list_root_init( &root );
    130 
    131     // get pointers on reference process
    132     ref_xp  = process->ref_xp;
    133     ref_cxy = GET_CXY( ref_xp );
    134     ref_ptr = GET_PTR( ref_xp );
    135132
    136133    // allocate memory for a local user_dir descriptor
     
    207204    } // end while
    208205       
     206#if DEBUG_USER_DIR
     207if( cycle > DEBUG_USER_DIR )
     208printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries\n",
     209__FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle );
     210#endif
     211
    209212    // compute required vseg size for a 64 bytes dirent
    210213    vseg_size = total_dirents << 6;
     
    213216    if( local_cxy == ref_cxy )
    214217    {
    215         vseg = vmm_create_vseg( process,
     218        vseg = vmm_create_vseg( ref_ptr,
    216219                                VSEG_TYPE_ANON,
    217220                                0,                      // vseg base (unused)
     
    220223                                0,                      // file_size (unused)
    221224                                XPTR_NULL,              // mapper (unused)
    222                                 ref_cxy );
     225                                local_cxy );
    223226    }
    224227    else
     
    232235                                    0,                     // file size (unused)
    233236                                    XPTR_NULL,             // mapper (unused)
    234                                     ref_cxy,
     237                                    local_cxy,
    235238                                    &vseg );
    236239    }
     240
    237241    if( vseg == NULL )
    238242    {
    239         printk("\n[ERROR] in %s : cannot create vseg for DIR in cluster %x\n",
     243        printk("\n[ERROR] in %s : cannot create vseg for user_dir in cluster %x\n",
    240244        __FUNCTION__, ref_cxy);
    241245        goto user_dir_create_failure;
    242246    }
    243247
    244 #if (DEBUG_USER_DIR & 1)
     248#if DEBUG_USER_DIR
    245249if( cycle > DEBUG_USER_DIR )
    246250printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n",
    247 __FUNCTION__, process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
     251__FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
    248252#endif
    249253
     
    289293            desc.lid       = CURRENT_THREAD->core->lid;
    290294            desc.blocking  = true;
    291             desc.args[0]   = process->pid;
     295            desc.args[0]   = ref_pid;
    292296            desc.args[1]   = vpn << CONFIG_PPM_PAGE_SHIFT;
    293297            rpc_vmm_delete_vseg_client( ref_cxy , &desc );
     
    299303        }
    300304
    301 #if (DEBUG_USER_DIR & 1)
     305#if DEBUG_USER_DIR
    302306if( cycle > DEBUG_USER_DIR )
    303307printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n",
    304 __FUNCTION__, process->pid, this->trdid, vpn + page_id, ppn );
     308__FUNCTION__, this->process->pid, this->trdid, vpn + page_id, ppn );
    305309#endif
    306310
     
    340344if( cycle > DEBUG_USER_DIR )
    341345printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n",
    342 __FUNCTION__, process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
     346__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
    343347#endif
    344348
     
    365369}  // end user_dir_create()
    366370
    367 /////////////////////////////////////////
    368 void user_dir_destroy( user_dir_t * dir )
     371////////////////////////////////////////
     372void user_dir_destroy( user_dir_t * dir,
     373                       xptr_t       ref_xp )
    369374{
    370     process_t    * process;    // local pointer on client process
    371     thread_t     * this;       // local pointer on client thread
     375    thread_t     * this;       // local pointer on calling thread
     376    process_t    * process;    // local pointer on calling process
    372377    cluster_t    * cluster;    // local pointer on local cluster
    373378    intptr_t       ident;      // user pointer on dirent array
    374     xptr_t         ref_xp;     // extended pointer on reference process
      379    pid_t          ref_pid;    // reference process PID
    375380    cxy_t          ref_cxy;    // reference process cluster identifier
    376381    process_t    * ref_ptr;    // local pointer on reference process
     
    379384    xptr_t         iter_xp;    // iteratot in xlist
    380385    reg_t          save_sr;    // for critical section
    381     pid_t          pid;        // process descriptor
    382386    cxy_t          owner_cxy;  // owner process cluster
    383387    lpid_t         lpid;       // process local index
    384388    rpc_desc_t     rpc;        // rpc descriptor
    385389     
    386     // get pointers on client process & thread
     390    // get pointers on calling process & thread
    387391    this    = CURRENT_THREAD;
    388392    process = this->process;
    389393    cluster = LOCAL_CLUSTER;
    390394
     395    // get cluster, local pointer, and PID of reference user process
     396    ref_cxy = GET_CXY( ref_xp );
     397    ref_ptr = GET_PTR( ref_xp );
     398    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
     399
    391400#if DEBUG_USER_DIR
    392401uint32_t cycle = (uint32_t)hal_get_cycles();
    393402if( cycle > DEBUG_USER_DIR )
    394 printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) / cycle %d\n",
    395 __FUNCTION__, process->pid, this->trdid, local_cxy, dir, cycle );
     403printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n",
     404__FUNCTION__, process->pid, this->trdid, local_cxy, dir, ref_pid, cycle );
    396405#endif
    397406
    398407    // get user pointer on dirent array
    399408    ident = dir->ident;
    400 
    401     // get pointers on reference process
    402     ref_xp  = process->ref_xp;
    403     ref_cxy = GET_CXY( ref_xp );
    404     ref_ptr = GET_PTR( ref_xp );
    405409
    406410    // build extended pointer on lock protecting open directories list
     
    424428
    425429    // get owner cluster identifier and process lpid
    426     pid       = process->pid;
    427     owner_cxy = CXY_FROM_PID( pid );
    428     lpid      = LPID_FROM_PID( pid );
     430    owner_cxy = CXY_FROM_PID( ref_pid );
     431    lpid      = LPID_FROM_PID( ref_pid );
    429432
    430433    // get root of list of copies and lock from owner cluster
     
    444447    rpc.thread    = this;
    445448    rpc.lid       = this->core->lid;
    446     rpc.args[0]   = process->pid;
     449    rpc.args[0]   = ref_pid;
    447450    rpc.args[1]   = ident;
    448451
  • trunk/kernel/libk/user_dir.h

    r613 r614  
    7070 * in the reference process descriptor.
    7171 *****************************************************************************************
    72  * @ ident    : DIR virtual address, used as identifier.
     72 * @ ident    : [in] DIR virtual address, used as identifier.
    7373 * @ returns extended pointer on user_dir_t if success / returns XPTR_NULL if not found.
    7474 ****************************************************************************************/
     
    7777/*****************************************************************************************
    7878 * This function allocates memory and initializes a user_dir_t structure in the cluster
    79  * containing the directory inode identified by the <inode> argument.
     79 * containing the directory inode identified by the <inode> argument, and maps the
     80 * user accessible dirent array in the reference user process VMM, identified by the
     81 * <ref_xp> argument.
    8082 * It must be executed by a thread running in the cluster containing the target inode.
    8183 * Use the RPC_USER_DIR_CREATE when the client thread is remote.
    8284 * It makes the following actions:
    83  * - the allocation of one user_dir_t descriptor in reference cluster.
     85 * - the allocation of one user_dir_t descriptor in the directory inode cluster.
    8486 * - the allocation of one or several physical pages in reference cluster to store
    8587 *   all directory entries in an array of 64 bytes dirent structures,
    8688 * - the initialisation of this array from information found in the Inode Tree.
    87  * - the creation of an user accessible vseg containing this dirent array, and the
    88  *   mapping of all relevant physical pages in this vseg.
     89 * - the creation of an ANON vseg containing this dirent array in reference process VMM,
     90 *   and the mapping of the relevant physical pages in this vseg.
    8991 * - the registration of the created user_dir_t structure in the xlist rooted
    9092 *   in the reference process,
    9193 * It returns a local pointer on the created user_dir_t structure.
    9294 *****************************************************************************************
    93  * @ inode    : local pointer on the directory inode.
     95 * @ inode    : [in] local pointer on the directory inode.
     96 * @ ref_xp   : [in] extended pointer on the reference user process descriptor.
    9497 * @ return local pointer on user_dir_t if success / return XPTR_NULL if failure.
    9598 ****************************************************************************************/
    96 user_dir_t * user_dir_create( struct vfs_inode_s * inode );
     99user_dir_t * user_dir_create( struct vfs_inode_s * inode,
     100                              xptr_t               ref_xp );
    97101
    98102/*****************************************************************************************
    99103 * This function removes a user_dir_t structure from the xlist of user_dir_t
    100  * structures rooted in the reference process descriptor, and release all memory
    101  * allocated for the user_dir_t struct in the directory inode cluster,
    102  * including the dirent array.
     104 * structures rooted in the reference process descriptor, releases all memory
     105 * allocated for the user_dir_t struct in the directory inode cluster, including
     106 * the dirent array, and deletes all ANON vseg copies in all process VMM copies,
     107 * using parallel RPCs.
    103108 * It must be executed by a thread running in the cluster containing the target inode.
    104109 * Use the RPC_USER_DIR_DESTROY when the client thread is remote.
    105110 *****************************************************************************************
    106  * @ dir  : local pointer on user_dir_t structure.
     111 * @ dir      : [in] local pointer on user_dir_t structure.
     112 * @ ref_xp   : [in] extended pointer on the reference user process descriptor.
    107113 ****************************************************************************************/
    108 void user_dir_destroy( struct user_dir_s * dir );
     114void user_dir_destroy( struct user_dir_s * dir,
     115                       xptr_t              ref_xp );
    109116
    110117
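
For callers, the updated prototypes mean that the reference process descriptor must now be passed explicitly. A minimal usage sketch, mirroring the sys_opendir.c and sys_closedir.c changes further down in this changeset (process->ref_xp is the extended pointer already held by the calling process descriptor):

    // opendir side : create the user_dir_t in the inode cluster and map
    // the dirent array in the VMM of the reference process
    user_dir_t * dir = user_dir_create( inode_ptr , process->ref_xp );

    // closedir side : destroy it and delete the ANON vseg in all VMM copies
    user_dir_destroy( dir_ptr , process->ref_xp );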
  • trunk/kernel/libk/xhtab.c

    r612 r614  
    4242// XHTAB_DENTRY_TYPE
    4343// This function computes the hash index from the key, that is the directory entry name.
     44// In this implementation, the index value is simply the ASCII code of the first
     45// character, to provide an approximate lexicographic order.
    4446///////////////////////////////////////////////////////////////////////////////////////////
    4547// @ key      : local pointer on name.
     
    4951{
    5052        char     * name  = key;
    51         uint32_t   index = 0;
     53
     54        return (name[0] % XHASHTAB_SIZE);
     55/*
     56    uint32_t   index = 0;
    5257        while( *name )
    5358    {
     
    5560    }
    5661        return index % XHASHTAB_SIZE;
     62*/
     63
    5764}
    5865
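
After this change the whole hash reduces to the ASCII code of the first character of the name, modulo the number of subsets; the function name below is only illustrative (the signature is not visible in this hunk), but the body is exactly the retained code:

    static uint32_t xhtab_dentry_index_from_key( void * key )
    {
        char * name = key;

        // one subset per initial character : approximate lexicographic order
        return (name[0] % XHASHTAB_SIZE);
    }

The companion xhtab.h change below raises XHASHTAB_SIZE from 8 to 128, so every 7-bit ASCII initial character maps to its own subset.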
  • trunk/kernel/libk/xhtab.h

    r611 r614  
    6161///////////////////////////////////////////////////////////////////////////////////////////
    6262
    63 #define XHASHTAB_SIZE    8   // number of subsets
     63#define XHASHTAB_SIZE    128   // number of subsets
    6464
    6565/******************************************************************************************
  • trunk/kernel/mm/mapper.c

    r611 r614  
    2828#include <hal_uspace.h>
    2929#include <grdxt.h>
     30#include <string.h>
    3031#include <rwlock.h>
    3132#include <printk.h>
     
    4142#include <vfs.h>
    4243#include <mapper.h>
     44#include <dev_ioc.h>
    4345
    4446
     
    302304
    303305    // launch I/O operation to load page from device to mapper
    304     error = vfs_fs_move_page( XPTR( local_cxy , page ) , true );
     306    error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ );
    305307
    306308    if( error )
     
    647649error_t mapper_display_page( xptr_t     mapper_xp,
    648650                             uint32_t   page_id,
    649                              uint32_t   nbytes,
    650                              char     * string )
    651 {
    652     xptr_t     page_xp;        // extended pointer on page descriptor
    653     xptr_t     base_xp;        // extended pointer on page base
    654     char       buffer[4096];   // local buffer
    655     uint32_t * tab;            // pointer on uint32_t to scan the buffer
    656     uint32_t   line;           // line index
    657     uint32_t   word;           // word index
     651                             uint32_t   nbytes )
     652{
     653    xptr_t        page_xp;        // extended pointer on page descriptor
     654    xptr_t        base_xp;        // extended pointer on page base
     655    char          buffer[4096];   // local buffer
     656    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
     657    char        * tabc;           // pointer on char to scan buffer
     658    uint32_t      line;           // line index
     659    uint32_t      word;           // word index
     660    uint32_t      n;              // char index
     661    cxy_t         mapper_cxy;     // mapper cluster identifier
     662    mapper_t    * mapper_ptr;     // mapper local pointer
     663    vfs_inode_t * inode_ptr;      // inode local pointer
     664 
     665    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
    658666
    659667    if( nbytes > 4096)
     
    674682    }
    675683
     684    // get cluster and local pointer
     685    mapper_cxy = GET_CXY( mapper_xp );
     686    mapper_ptr = GET_PTR( mapper_xp );
     687
     688    // get inode
     689    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     690
     691    // get inode name
     692    if( inode_ptr == NULL ) strcpy( name , "fat" );
     693    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
     694   
    676695    // get extended pointer on page base
    677696    base_xp = ppm_page2base( page_xp );
     
    681700
    682701    // display 8 words per line
    683     tab = (uint32_t *)buffer;
    684     printk("\n***** %s : first %d bytes of page %d *****\n", string, nbytes, page_id );
     702    tabi = (uint32_t *)buffer;
     703    tabc = (char *)buffer;
     704    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
    685705    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    686706    {
    687707        printk("%X : ", line );
    688         for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
     708        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
     709        printk(" | ");
     710        for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
    689711        printk("\n");
    690712    }
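
Reassembled from the hunks above, the new dump loop prints, for each 32-byte line, eight hexadecimal words followed by the corresponding ASCII characters, using two typed views (tabi / tabc) of the same local buffer:

    tabi = (uint32_t *)buffer;      // word view of the copied page
    tabc = (char *)buffer;          // byte view of the same data

    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
    for( line = 0 ; line < (nbytes >> 5) ; line++ )          // 32 bytes per displayed line
    {
        printk("%X : ", line );
        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
        printk(" | ");
        for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
        printk("\n");
    }

The <%s> header comes from the inode name, or from the string "fat" when the mapper has no attached inode (FAT mapper).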
  • trunk/kernel/mm/mapper.h

    r612 r614  
    3838
    3939/*******************************************************************************************
    40  * The mapper implements the kernel cache for a given VFS file or directory.
     40 * This mapper_t object implements the kernel cache for a given VFS file or directory.
    4141 * There is one mapper per file/dir. It is implemented as a three levels radix tree,
    4242 * entirely stored in the same cluster as the inode representing the file/dir.
     
    6363 *
    6464 * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c
    65  *        and mapper.h file should beerro trandfered to the vfs directory.
     65 *        and mapper.h file should be transferred to the vfs directory.
    6666 ******************************************************************************************/
    6767
     
    8585
    8686/*******************************************************************************************
    87  * This structure defines a "fragment". It is used to move data between the kernel mapper,
    88  * and an user buffer, that can be split in several distributed physical pages located
    89  * in different clusters. A fragment is a set of contiguous bytes in the file.
    90  * - It can be stored in one single physical page in the user buffer.
    91  * - It can spread two successive physical pages in the kernel mapper.
    92  ******************************************************************************************/
    93 
    94 typedef struct fragment_s
    95 {
    96     uint32_t    file_offset;         /*! offset of fragment in file (i.e. in mapper)      */
    97     uint32_t    size;                /*! number of bytes in fragment                      */
    98     cxy_t       buf_cxy;             /*! user buffer cluster identifier                   */
    99     void      * buf_ptr;             /*! local pointer on first byte in user buffer       */
    100 }
    101 fragment_t;
    102 
    103 /*******************************************************************************************
    10487 * This function allocates physical memory for a mapper descriptor, and initializes it
    10588 * (refcount <= 0) / inode <= NULL).
     
    158141 * returns 0 if success / returns -1 if error.
    159142 ******************************************************************************************/
    160 error_t mapper_move_user( xptr_t     mappe_xp,
     143error_t mapper_move_user( xptr_t     mapper_xp,
    161144                          bool_t     to_buffer,
    162145                          uint32_t   file_offset,
     
    258241 * @ page_id    : [in]  page index in file.
    259242 * @ nbytes     : [in]  value to be written.
    260  * @ string     : [in]  string printed in header.
    261243 * @ returns 0 if success / return -1 if error.
    262244 ******************************************************************************************/
    263245error_t mapper_display_page( xptr_t     mapper_xp,
    264246                             uint32_t   page_id,
    265                              uint32_t   nbytes,
    266                              char     * string );
     247                             uint32_t   nbytes );
    267248
    268249
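
With the <string> parameter removed, the header is now built internally from the inode attached to the mapper, so a caller only provides the mapper, the page index and the number of bytes. A minimal usage sketch, consistent with the sys_display.c change below (the error message text is illustrative):

    // dump the first nbytes bytes of page page_id of this mapper ;
    // the header line is derived from the inode name (or "fat")
    error = mapper_display_page( mapper_xp , page_id , nbytes );
    if( error ) printk("\n[ERROR] cannot display page %d\n", page_id );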
  • trunk/kernel/mm/vmm.c

    r611 r614  
    6464    intptr_t  base;
    6565    intptr_t  size;
     66    uint32_t  i;
    6667
    6768#if DEBUG_VMM_INIT
     
    6970uint32_t cycle = (uint32_t)hal_get_cycles();
    7071if( DEBUG_VMM_INIT )
    71 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
    72 __FUNCTION__ , this->process->pid, this->trdid, process->pid , cycle );
     72printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     73__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
    7374#endif
    7475
     
    183184    vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    184185    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
    185 
    186     uint32_t i;
    187186    for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
    188187
     
    195194cycle = (uint32_t)hal_get_cycles();
    196195if( DEBUG_VMM_INIT )
    197 printk("\n[%s] thread[%x,%x] exit / process %x / entry_point %x / cycle %d\n",
    198 __FUNCTION__, this->process->pid, this->trdid, process->pid, process->vmm.entry_point, cycle );
     196printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
     197__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
    199198#endif
    200199
     
    944943    vpn_t      free;
    945944
    946     // mmap vseg size must be power of 2
     945#if DEBUG_VMM_MMAP_ALLOC
     946thread_t * this = CURRENT_THREAD;
     947uint32_t cycle = (uint32_t)hal_get_cycles();
     948if( DEBUG_VMM_MMAP_ALLOC < cycle )
     949printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     950__FUNCTION__, this->process->pid, this->trdid, cycle );
     951#endif
     952
     953    // vseg size must be power of 2
    947954    // compute actual size and index in zombi_list array
    948955    size  = POW2_ROUNDUP( npages );
     
    952959    mmap_mgr_t * mgr = &vmm->mmap_mgr;
    953960
     961printk("\n@@@ in %s : size = %d / index = %d / first = %d / empty = %d\n",
     962__FUNCTION__, size, index, mgr->vpn_size, list_is_empty(&mgr->zombi_list[index]) );
     963
    954964    // get lock on mmap allocator
    955965    busylock_acquire( &mgr->lock );
     
    958968    if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
    959969    {
     970
     971printk("\n@@@ from mmap zone\n" );
     972
    960973        // check overflow
    961974        free = mgr->first_free_vpn;
    962         if( (free + size) > mgr->vpn_size ) return ENOMEM;
    963 
    964         // update STACK allocator
     975        if( (free + size) > mgr->vpn_size ) return -1;
     976
     977        // update MMAP allocator
    965978        mgr->first_free_vpn += size;
    966979
     
    970983    else                                             // from zombi_list
    971984    {
     985
     986printk("\n@@@ from zombi_list\n" );
     987
    972988        // get pointer on zombi vseg from zombi_list
    973989        vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
     
    982998    // release lock on mmap allocator
    983999    busylock_release( &mgr->lock );
     1000
     1001#if DEBUG_VMM_MMAP_ALLOC
     1002cycle = (uint32_t)hal_get_cycles();
     1003if( DEBUG_VMM_MMAP_ALLOC < cycle )
     1004printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
     1005__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
     1006#endif
    9841007
    9851008    // returns vpn_base, vpn_size
     
    10091032uint32_t   cycle = (uint32_t)hal_get_cycles();
    10101033if( DEBUG_VMM_CREATE_VSEG < cycle )
    1011 printk("\n[%s] thread[%x,%x] enter / %s / cxy %x / cycle %d\n",
    1012 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
     1034printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n",
     1035__FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle );
    10131036#endif
    10141037
     
    10921115    if( vseg != NULL )
    10931116    {
    1094         printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n"
    1095                "  overlap existing vseg [vpn_base = %x / vpn_size = %x]\n",
     1117        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n"
     1118               "  overlap existing vseg [vpn_base %x / vpn_size %x]\n",
    10961119        __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
    10971120        return NULL;
     
    11061129        return NULL;
    11071130        }
     1131
     1132#if DEBUG_VMM_CREATE_VSEG
     1133if( DEBUG_VMM_CREATE_VSEG < cycle )
     1134printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",
     1135__FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );
     1136#endif
    11081137
    11091138    // initialize vseg descriptor
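
Putting the vmm_mmap_alloc() hunks together (and leaving out the temporary @@@ traces), the allocation policy after this change is: round the request up to the next power of 2, reuse a released vseg of the same size class if one is available, otherwise take fresh pages from the MMAP zone. A condensed sketch, assuming the index into zombi_list[] is the log2 of the rounded size (its computation is not visible in the hunks above):

    size = POW2_ROUNDUP( npages );                       // actual size is a power of 2

    if( list_is_empty( &mgr->zombi_list[index] ) )       // allocate from the MMAP zone
    {
        free = mgr->first_free_vpn;
        if( (free + size) > mgr->vpn_size ) return -1;   // error code is now -1 (was ENOMEM)
        mgr->first_free_vpn += size;                     // update the MMAP allocator
    }
    else                                                 // recycle a zombi vseg of the same size
    {
        vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
    }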
  • trunk/kernel/mm/vmm.h

    r611 r614  
    138138 * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
    139139 * - For TSAR it maps all pages of the "kentry" vseg, which must be identity mapped.
    140  * Note:
     140 *****************************************************************************************
     141 * Implementation notes:
    141142 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
    142143 * - The "stack" vsegs are dynamically created by the thread_user_create() function.
  • trunk/kernel/syscalls/sys_closedir.c

    r612 r614  
    7474    if( dir_cxy == local_cxy )
    7575    {
    76         user_dir_destroy( dir_ptr );
     76        user_dir_destroy( dir_ptr,
     77                          process->ref_xp );
    7778    }
    7879    else
    7980    {
    8081        rpc_user_dir_destroy_client( dir_cxy,
    81                                      dir_ptr );
     82                                     dir_ptr,
     83                                     process->ref_xp );
    8284    }
    8385
  • trunk/kernel/syscalls/sys_display.c

    r612 r614  
    357357
    358358        // display mapper
    359         error = mapper_display_page( mapper_xp , page_id , nbytes , kbuf );
     359        error = mapper_display_page( mapper_xp , page_id , nbytes );
    360360
    361361        if( error )
  • trunk/kernel/syscalls/sys_opendir.c

    r612 r614  
    143143        }
    144144   
    145     // create a new user_dir_t structure in inode cluster
    146     // and get the user space pointer on dirent array
     145    // create a new user_dir_t structure in the target directory inode cluster,
     146    // map it in the reference user process VMM (in a new ANON vseg),
     147    // and get the local pointer on the created user_dir_t structure
    147148    if( inode_cxy == local_cxy )
    148149    {
    149         dir_ptr = user_dir_create( inode_ptr );
     150        dir_ptr = user_dir_create( inode_ptr,
     151                                   process->ref_xp );
    150152    }
    151153    else
     
    153155        rpc_user_dir_create_client( inode_cxy,
    154156                                    inode_ptr,
     157                                    process->ref_xp,
    155158                                    &dir_ptr );
    156159    }