Changeset 611


Ignore:
Timestamp:
Jan 9, 2019, 3:02:51 PM (5 years ago)
Author:
alain
Message:

Introduce significant modifs in VFS to support the <ls> command,
and the . and .. directory entries.

Location:
trunk
Files:
42 edited

Legend:

Unmodified
Added
Removed
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r610 r611  
    455455            else                                                // undefined coprocessor
    456456            {
    457                 printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
     457                printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
    458458                "   undefined coprocessor / epc %x\n",
    459                 __FUNCTION__, this->trdid, this->process->pid, excPC );
     459                __FUNCTION__, this->process->pid, this->trdid, excPC );
    460460
    461461                        error = EXCP_USER_ERROR;
     
    465465        case XCODE_OVR:    // Arithmetic Overflow : user fatal error
    466466        {
    467             printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
     467            printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
    468468            "   arithmetic overflow / epc %x\n",
    469             __FUNCTION__, this->trdid, this->process->pid, excPC );
     469            __FUNCTION__, this->process->pid, this->trdid, excPC );
    470470
    471471                    error = EXCP_USER_ERROR;
     
    474474        case XCODE_RI:     // Reserved Instruction : user fatal error
    475475        {
    476             printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
     476            printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
    477477            "   reserved instruction / epc %x\n",
    478             __FUNCTION__, this->trdid, this->process->pid, excPC );
     478            __FUNCTION__, this->process->pid, this->trdid, excPC );
    479479
    480480                    error = EXCP_USER_ERROR;
     
    483483        case XCODE_ADEL:   // user fatal error
    484484        {
    485             printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
     485            printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
    486486            "   illegal data load address / epc %x / bad_address %x\n",
    487             __FUNCTION__, this->trdid, this->process->pid, excPC, hal_get_bad_vaddr() );
     487            __FUNCTION__, this->process->pid, this->trdid, excPC, hal_get_bad_vaddr() );
    488488
    489489                    error = EXCP_USER_ERROR;
     
    492492        case XCODE_ADES:   //   user fatal error
    493493        {
    494             printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
     494            printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
    495495            "   illegal data store address / epc %x / bad_address %x\n",
    496             __FUNCTION__, this->trdid, this->process->pid, excPC, hal_get_bad_vaddr() );
     496            __FUNCTION__, this->process->pid, this->trdid, excPC, hal_get_bad_vaddr() );
    497497
    498498                    error = EXCP_USER_ERROR;
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r587 r611  
    137137uint32_t cycle = (uint32_t)hal_get_cycles();
    138138if( DEBUG_HAL_GPT_CREATE < cycle )
    139 printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
     139printk("\n[%s] : thread[%x,%x] enter / cycle %d\n",
    140140__FUNCTION__, this->process->pid, this->trdid, cycle );
    141141#endif
     
    167167cycle = (uint32_t)hal_get_cycles();
    168168if( DEBUG_HAL_GPT_CREATE < cycle )
    169 printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
     169printk("\n[%s] : thread[%x,%x] exit / cycle %d\n",
    170170__FUNCTION__, this->process->pid, this->trdid, cycle );
    171171#endif
     
    194194thread_t * this  = CURRENT_THREAD;
    195195if( DEBUG_HAL_GPT_DESTROY < cycle )
    196 printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
     196printk("\n[%s] : thread[%x,%x] enter / cycle %d\n",
    197197__FUNCTION__, this->process->pid, this->trdid, cycle );
    198198#endif
     
    267267cycle = (uint32_t)hal_get_cycles();
    268268if( DEBUG_HAL_GPT_DESTROY < cycle )
    269 printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
     269printk("\n[%s] : thread[%x,%x] exit / cycle %d\n",
    270270__FUNCTION__, this->process->pid, this->trdid, cycle );
    271271#endif
     
    368368uint32_t cycle = (uint32_t)hal_get_cycles();
    369369if( DEBUG_HAL_GPT_SET_PTE < cycle )
    370 printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
     370printk("\n[%s] : thread[%x,%x] enter / vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
    371371__FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
    372372#endif
     
    400400#if DEBUG_HAL_GPT_SET_PTE
    401401if( DEBUG_HAL_GPT_SET_PTE < cycle )
    402 printk("\n[DBG] %s : thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
     402printk("\n[%s] : thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
    403403__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
    404404#endif
     
    443443#if DEBUG_HAL_GPT_SET_PTE
    444444if( DEBUG_HAL_GPT_SET_PTE < cycle )
    445 printk("\n[DBG] %s : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
     445printk("\n[%s] : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
    446446__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
    447447#endif
     
    452452#if DEBUG_HAL_GPT_SET_PTE
    453453if( DEBUG_HAL_GPT_SET_PTE < cycle )
    454 printk("\n[DBG] %s : thread[%x,%x] get PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
     454printk("\n[%s] : thread[%x,%x] get PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
    455455__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
    456456#endif
     
    470470#if DEBUG_HAL_GPT_SET_PTE
    471471if( DEBUG_HAL_GPT_SET_PTE < cycle )
    472 printk("\n[DBG] %s : thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
     472printk("\n[%s] : thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
    473473__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix2, pt2_ptr, tsar_attr, ppn );
    474474#endif
     
    788788thread_t * this  = CURRENT_THREAD;
    789789if( DEBUG_HAL_GPT_COPY < cycle )
    790 printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
     790printk("\n[%s] : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
    791791__FUNCTION__, this->process->pid, this->trdid, vpn, src_cxy, local_cxy, cycle );
    792792#endif
     
    879879cycle = (uint32_t)hal_get_cycles;
    880880if( DEBUG_HAL_GPT_COPY < cycle )
    881 printk("\n[DBG] %s : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
     881printk("\n[%s] : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
    882882__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
    883883#endif
     
    896896cycle = (uint32_t)hal_get_cycles;
    897897if( DEBUG_HAL_GPT_COPY < cycle )
    898 printk("\n[DBG] %s : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
     898printk("\n[%s] : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
    899899__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
    900900#endif
  • trunk/kernel/Makefile

    r610 r611  
    122122              build/libk/remote_fifo.o      \
    123123              build/libk/remote_mutex.o     \
     124              build/libk/remote_dir.o       \
    124125              build/libk/remote_sem.o       \
    125126              build/libk/remote_condvar.o   \
  • trunk/kernel/fs/fatfs.c

    r610 r611  
    20112011
    20122012    // get pointer on local FATFS context
    2013     fatfs_ctx_t * fatfs_ctx      = fs_context[FS_TYPE_FATFS].extend;
     2013    fatfs_ctx_t * fatfs_ctx = fs_context[FS_TYPE_FATFS].extend;
    20142014
    20152015    // get page base address
     
    20342034#if (DEBUG_FATFS_MOVE_PAGE & 0x1)
    20352035if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2036 {
    2037     uint32_t * tab = (uint32_t *)buffer;
    2038     uint32_t line , word;
    2039     printk("\n***** %s : First 64 words of page %d in FAT mapper\n",
    2040     __FUNCTION__ , page_id );
    2041     for( line = 0 ; line < 8 ; line++ )
    2042     {
    2043         printk("%X : ", line );
    2044         for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
    2045         printk("\n");
    2046     }
    2047 }
     2036mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , "FAT" );
    20482037#endif
    20492038
     
    21032092#if (DEBUG_FATFS_MOVE_PAGE & 0x1)
    21042093if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2105 {
    2106     uint32_t * tab = (uint32_t *)buffer;
    2107     uint32_t line , word;
    2108     printk("\n***** %s : First 64 words of page %d in <%s> mapper\n",
    2109     __FUNCTION__, page_id, name );
    2110     for( line = 0 ; line < 8 ; line++ )
    2111     {
    2112         printk("%X : ", line );
    2113         for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
    2114         printk("\n");
    2115     }
    2116 }
     2094char string[CONFIG_VFS_MAX_NAME_LENGTH];
     2095vfs_inode_get_name( XPTR(page_cxy , inode_ptr) , string );
     2096mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , string );
    21172097#endif
    21182098
  • trunk/kernel/fs/fatfs.h

    r610 r611  
    238238
    239239
    240 
    241240//////////////////////////////////////////////////////////////////////////////////////////
    242241// Generic API: These functions are called by the kernel VFS,
     
    409408 *****************************************************************************************
    410409 * This function moves a page from/to the mapper to/from the FATFS file system on device.
    411  * The page must have been previously allocated and registered in the mapper, but the 
    412  * page - and the mapper - can be located in another cluster than the calling thread.
     410 * The page must have been previously allocated and registered in the mapper.  
     411 * The page - and the mapper - can be located in another cluster than the calling thread.
    413412 * The pointer on the mapper and the page index in file are found in the page descriptor.
    414  * It is used both for the regular file/directory mappers, and for the FAT mapper.
     413 * It is used for both for a regular file/directory mapper, and the FAT mapper.
    415414 * For the FAT mapper, it access the FATFS to get the location on IOC device.
    416415 * For a regular file, it access the FAT mapper to get the cluster index on IOC device.
  • trunk/kernel/fs/vfs.c

    r610 r611  
    2323 */
    2424
    25 
    2625#include <kernel_config.h>
    2726#include <hal_kernel_types.h>
     
    4847#include <syscalls.h>
    4948
    50 
    5149//////////////////////////////////////////////////////////////////////////////////////////
    5250//           Extern variables         
     
    136134        case INODE_TYPE_SOCK: return "SOCK";
    137135        case INODE_TYPE_DEV:  return "DEV ";
     136        case INODE_TYPE_BLK:  return "BLK ";
    138137        case INODE_TYPE_SYML: return "SYML";
    139138        default:              return "undefined";
     
    10091008    cxy_t          vfs_root_cxy;       // VFS root inode cluster identifier
    10101009    xptr_t         lock_xp;            // extended pointer on lock protecting Inode Tree
    1011     xptr_t         inode_xp;           // extended pointer on target inode
    1012     vfs_inode_t  * inode_ptr;          // local pointer on target inode
    1013     cxy_t          inode_cxy;          // target inode cluster identifier
     1010    xptr_t         inode_xp;           // extended pointer on new directory inode
     1011    vfs_inode_t  * inode_ptr;          // local pointer on new directory inode
     1012    cxy_t          inode_cxy;          // new directory inode cluster identifier
    10141013    xptr_t         dentry_xp;          // extended pointer on new dentry
    1015     vfs_dentry_t * dentry_ptr;         // target dentry local pointer
    1016     xptr_t         parent_xp;          // extended pointer on new parent inode
    1017     vfs_inode_t  * parent_ptr;         // local pointer on new parent inode 
    1018     cxy_t          parent_cxy;         // new parent inode cluster identifier
    1019     vfs_ctx_t    * parent_ctx_ptr;     // local pointer on target inode context
    1020     uint32_t       parent_fs_type;     // target inode file system type
     1014    vfs_dentry_t * dentry_ptr;         // new dentry local pointer
     1015    xptr_t         parent_xp;          // extended pointer on parent inode
     1016    vfs_inode_t  * parent_ptr;         // local pointer on parent inode 
     1017    cxy_t          parent_cxy;         // parent inode cluster identifier
     1018    vfs_ctx_t    * parent_ctx_ptr;     // local pointer on parent inode context
     1019    uint32_t       parent_fs_type;     // parent inode file system type
    10211020
    10221021    xptr_t         parents_root_xp;    // extended pointer on parents field in inode (root)
     
    11091108#endif
    11101109
    1111     // 3. create new directory inode in child cluster
     1110    // 3. create new directory inode
    11121111    // TODO : define attr / uid / gid
    11131112    uint32_t attr = 0;
     
    11181117    inode_cxy = cluster_random_select();
    11191118   
    1120     if( inode_cxy == local_cxy )      // child cluster is local
     1119    if( inode_cxy == local_cxy )      // target cluster is local
    11211120    {
    11221121        error = vfs_inode_create( parent_fs_type,
     
    11281127                                  &inode_xp );
    11291128    }
    1130     else                              // child cluster is remote
     1129    else                              // target cluster is remote
    11311130    {
    11321131        rpc_vfs_inode_create_client( inode_cxy,
     
    11431142    if( error )
    11441143    {
     1144        remote_rwlock_wr_release( lock_xp );
    11451145        printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",
    11461146               __FUNCTION__ , inode_cxy , path );
    1147  
    11481147        if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr );
    11491148        else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr );
     
    11811180#endif
    11821181
     1182    // 7. create the two special dentries <.> and <..> in new directory
     1183    // both the new directory mapper, and the Inode Tree are updated
     1184    error = vfs_add_special_dentries( inode_xp,
     1185                                      parent_xp );
     1186
     1187    if( error )
     1188    {
     1189        remote_rwlock_wr_release( lock_xp );
     1190        printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",
     1191               __FUNCTION__ , inode_cxy , path );
     1192        if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr );
     1193        else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr );
     1194        return -1;
     1195    }
     1196
    11831197    // release the lock protecting Inode Tree
    11841198    remote_rwlock_wr_release( lock_xp );
    11851199
    1186     // 5. update parent directory mapper
     1200    // 8. update parent directory mapper
    11871201    //    and synchronize the parent directory on IOC device
    11881202    if (parent_cxy == local_cxy)
     
    16251639}  // end vfs_unlink()
    16261640
    1627 ///////////////////////////////////////////
    1628 error_t vfs_stat( xptr_t     root_inode_xp,
    1629                   char     * path,
    1630                   stat_t   * st )
     1641////////////////////////////////////////////////
     1642error_t vfs_stat( xptr_t         root_inode_xp,
     1643                  char         * path,
     1644                  struct stat  * st )
    16311645{
    16321646    error_t       error;
     
    19361950                  inode_inum, inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr );
    19371951
    1938     // scan directory entries 
    1939     if( inode_type == INODE_TYPE_DIR )
     1952    // scan directory entries when current inode is a directory
     1953    // don't scan the the "." and ".." directories to break loops
     1954    if( (inode_type == INODE_TYPE_DIR) &&
     1955        (strcmp( name , "." ) != 0)    &&
     1956        (strcmp( name , ".." ) != 0) )
    19401957    {
    19411958        // get extended pointer on directory entries xhtab
     
    22342251        error = vfs_get_name_from_path( current , name , &next , &last );
    22352252
    2236         // VFS root case
     2253        // handle VFS root case
    22372254        if ( error )
    22382255        {
     
    22582275                               name,
    22592276                               &child_xp );
     2277
     2278        // get child inode local pointer and cluster
     2279        child_ptr  = GET_PTR( child_xp );
     2280        child_cxy  = GET_CXY( child_xp );
    22602281
    22612282        // analyse found & last, depending on lookup_mode
     
    23022323                else      child_type = INODE_TYPE_FILE;
    23032324 
    2304                 // insert (speculatively) a new child dentry/inode in inode tree
     2325                // insert a new child dentry/inode couple in inode tree
    23052326                error = vfs_add_child_in_parent( child_cxy,
    23062327                                                 child_type,
     
    23262347#endif
    23272348                // scan parent mapper to find the missing dentry, and complete
    2328                 // the initialisation of dentry and child inode desciptors
     2349                // the initialisation of dentry and child inode descriptors
    23292350                if( parent_cxy == local_cxy )
    2330 
    23312351                {
    23322352                    error = vfs_fs_child_init( parent_ptr,
     
    24242444if( DEBUG_VFS_LOOKUP < cycle )
    24252445printk("\n[%s] thread[%x,%x] found <%s> in Inode Tree / inode (%x,%x)\n",
    2426 __FUNCTION__, process->pid, this->trdid, name, GET_CXY(child_xp), GET_PTR(child_xp) );
    2427 #endif
    2428             // get child inode local pointer and cluster
    2429             child_ptr  = GET_PTR( child_xp );
    2430             child_cxy  = GET_CXY( child_xp );
    2431 
     2446__FUNCTION__, process->pid, this->trdid, name, child_cxy, child_ptr );
     2447#endif
    24322448            // check the excl flag
    24332449            if( last && create && excl )
     
    25842600
    25852601}  // end vfs_new_child_init()
     2602
     2603///////////////////////////////////////////////////
     2604error_t vfs_add_special_dentries( xptr_t  child_xp,
     2605                                  xptr_t  parent_xp )
     2606{
     2607    error_t         error;
     2608    vfs_inode_t   * child_ptr;         // local pointer on child inode directory
     2609    cxy_t           child_cxy;         // child inode directory cluster identifier
     2610    vfs_inode_t   * parent_ptr;        // local pointer on parent inode directory
     2611    cxy_t           parent_cxy;        // parent inode directory cluster identifier
     2612    vfs_ctx_t     * ctx_ptr;           // local pointer on child inode FS context
     2613    vfs_fs_type_t   fs_type;           // FS type of child inode
     2614    xptr_t          dentry_xp;         // extended pointer on dentry (used for . and ..)
     2615    vfs_dentry_t  * dentry_ptr;        // local pointer on dentry (used for . and ..)
     2616
     2617    xptr_t          parents_root_xp;   // extended pointer on inode "parents" field
     2618    xptr_t          parents_entry_xp;  // extended pointer on dentry "parents" field
     2619    xptr_t          children_xhtab_xp; // extended pointer on inode "children" field
     2620    xptr_t          children_entry_xp; // extended pointer on dentry "children" field
     2621
     2622#if DEBUG_VFS_ADD_SPECIAL
     2623uint32_t   cycle = (uint32_t)hal_get_cycles();
     2624thread_t * this  = CURRENT_THREAD;
     2625char child_name[CONFIG_VFS_MAX_NAME_LENGTH];
     2626char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
     2627vfs_inode_get_name( child_xp  , child_name );
     2628vfs_inode_get_name( parent_xp , parent_name );
     2629if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2630printk("\n[%s] thread[%x,%x] enter / child <%s> / parent <%s> / cycle %d\n",
     2631__FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );
     2632#endif
     2633
     2634    // get new directory cluster and local pointer
     2635    child_cxy  = GET_CXY( child_xp );
     2636    child_ptr  = GET_PTR( child_xp );
     2637
     2638    // get parent directory cluster and local pointer
     2639    parent_cxy = GET_CXY( parent_xp );
     2640    parent_ptr = GET_PTR( parent_xp );
     2641
     2642    // get child inode FS type
     2643    ctx_ptr    = hal_remote_lpt( XPTR( child_cxy , &child_ptr->ctx ) );
     2644    fs_type    = hal_remote_l32( XPTR( child_cxy , &ctx_ptr->type ) );
     2645
     2646    //////////////////////////// create <.>
     2647    if( child_cxy == local_cxy )     
     2648    {
     2649        error = vfs_dentry_create( fs_type,
     2650                                   ".",
     2651                                   &dentry_xp );
     2652    }
     2653    else
     2654    {
     2655        rpc_vfs_dentry_create_client( child_cxy,
     2656                                      fs_type,
     2657                                      ".",
     2658                                      &dentry_xp,
     2659                                      &error );
     2660    }
     2661    if( error )
     2662    {
     2663        printk("\n[ERROR] in %s : cannot create dentry <.> in cluster %x\n",
     2664        __FUNCTION__ , child_cxy );
     2665        return -1;
     2666    }
     2667
     2668    // get <.> dentry local pointer
     2669    dentry_ptr = GET_PTR( dentry_xp );
     2670
     2671#if(DEBUG_VFS_ADD_SPECIAL & 1)
     2672if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2673printk("\n[%s] thread[%x,%x] created dentry <.> (%x,%x)\n",
     2674__FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr );
     2675#endif
     2676
     2677    // register <.> dentry in child inode xhtab of children
     2678    children_xhtab_xp = XPTR( child_cxy , &child_ptr->children );
     2679    children_entry_xp = XPTR( child_cxy , &dentry_ptr->children );
     2680    error = xhtab_insert( children_xhtab_xp , "." , children_entry_xp );
     2681    if( error )
     2682    {
     2683        printk("\n[ERROR] in %s : cannot register dentry <.> in xhtab\n",
     2684        __FUNCTION__ );
     2685        return -1;
     2686    }
     2687
     2688    // register <.> dentry in child_inode xlist of parents TODO faut-il ?
     2689    parents_root_xp  = XPTR( child_cxy , &child_ptr->parents );
     2690    parents_entry_xp = XPTR( child_cxy , &dentry_ptr->parents );
     2691    xlist_add_first( parents_root_xp , parents_entry_xp );
     2692    hal_remote_atomic_add( XPTR( child_cxy , &child_ptr->links ) , 1 );
     2693
     2694    // update "parent" and "child_xp" fields in <.> dentry
     2695    hal_remote_s64( XPTR( child_cxy , &dentry_ptr->child_xp ) , child_xp );
     2696    hal_remote_spt( XPTR( child_cxy , &dentry_ptr->parent ) , child_ptr );
     2697
     2698#if(DEBUG_VFS_ADD_SPECIAL & 1)
     2699if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2700printk("\n[%s] thread[%x,%x] linked dentry <.> to parent and child inodes\n",
     2701__FUNCTION__, this->process->pid, this->trdid );
     2702#endif
     2703
     2704    // introduce <.> dentry into child directory mapper
     2705    if( child_cxy == local_cxy )
     2706    {
     2707        error = vfs_fs_add_dentry( child_ptr,
     2708                                   dentry_ptr );
     2709    }
     2710    else
     2711    {
     2712        rpc_vfs_fs_add_dentry_client( child_cxy,
     2713                                      child_ptr,
     2714                                      dentry_ptr,
     2715                                      &error );
     2716    }
     2717    if( error )
     2718    {
     2719        printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n",
     2720        __FUNCTION__ );
     2721        return -1;
     2722    }
     2723
     2724#if(DEBUG_VFS_ADD_SPECIAL & 1)
     2725if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2726printk("\n[%s] thread[%x,%x] registered dentry <.> in child mapper\n",
     2727__FUNCTION__, this->process->pid, this->trdid );
     2728#endif
     2729
     2730    ///////////////////////////// create <..> dentry
     2731    if( child_cxy == local_cxy )     
     2732    {
     2733        error = vfs_dentry_create( fs_type,
     2734                                   "..",
     2735                                   &dentry_xp );
     2736    }
     2737    else
     2738    {
     2739        rpc_vfs_dentry_create_client( child_cxy,
     2740                                      fs_type,
     2741                                      "..",
     2742                                      &dentry_xp,
     2743                                      &error );
     2744    }
     2745    if( error )
     2746    {
     2747        printk("\n[ERROR] in %s : cannot create dentry <..> in cluster %x\n",
     2748        __FUNCTION__ , child_cxy );
     2749        return -1;
     2750    }
     2751
     2752    // get <..> dentry local pointer
     2753    dentry_ptr = GET_PTR( dentry_xp );
     2754
     2755#if(DEBUG_VFS_ADD_SPECIAL & 1)
     2756if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2757printk("\n[%s] thread[%x,%x] created dentry <..> (%x,%x)\n",
     2758__FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr );
     2759#endif
     2760
     2761    // register <..> dentry in child_inode xhtab of children
     2762    children_xhtab_xp = XPTR( child_cxy , &child_ptr->children );
     2763    children_entry_xp = XPTR( child_cxy , &dentry_ptr->children );
     2764    error = xhtab_insert( children_xhtab_xp , ".." , children_entry_xp );
     2765    if( error )
     2766    {
     2767        printk("\n[ERROR] in %s : cannot register dentry <..> in xhtab\n",
     2768        __FUNCTION__ );
     2769        return -1;
     2770    }
     2771
     2772    // register <..> dentry in parent_inode xlist of parents TODO faut-il ?
     2773    parents_root_xp  = XPTR( parent_cxy , &parent_ptr->parents );
     2774    parents_entry_xp = XPTR( child_cxy  , &dentry_ptr->parents );
     2775    xlist_add_first( parents_root_xp , parents_entry_xp );
     2776    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->links ) , 1 );
     2777
     2778    // update "parent" and "child_xp" fields in <..> dentry
     2779    hal_remote_s64( XPTR( child_cxy , &dentry_ptr->child_xp ) , parent_xp );
     2780    hal_remote_spt( XPTR( child_cxy , &dentry_ptr->parent ) , child_ptr );
     2781
     2782#if(DEBUG_VFS_ADD_SPECIAL & 1)
     2783if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2784printk("\n[%s] thread[%x,%x] linked dentry <..> to parent and child inodes\n",
     2785__FUNCTION__, this->process->pid, this->trdid );
     2786#endif
     2787
     2788    // introduce <..> dentry into child directory mapper
     2789    if( child_cxy == local_cxy )
     2790    {
     2791        error = vfs_fs_add_dentry( child_ptr,
     2792                                   dentry_ptr );
     2793    }
     2794    else
     2795    {
     2796        rpc_vfs_fs_add_dentry_client( child_cxy,
     2797                                      child_ptr,
     2798                                      dentry_ptr,
     2799                                      &error );
     2800    }
     2801    if( error )
     2802    {
     2803        printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n",
     2804        __FUNCTION__ );
     2805        return -1;
     2806    }
     2807
     2808#if(DEBUG_VFS_ADD_SPECIAL & 1)
     2809if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2810printk("\n[%s] thread[%x,%x] registered dentry <..> in child mapper\n",
     2811__FUNCTION__, this->process->pid, this->trdid );
     2812#endif
     2813
     2814#if DEBUG_VFS_ADD_SPECIAL
     2815cycle = (uint32_t)hal_get_cycles();
     2816if( DEBUG_VFS_ADD_SPECIAL < cycle )
     2817printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     2818__FUNCTION__, this->process->pid, this->trdid, (uint32_t)hal_get_cycles() );
     2819#endif
     2820
     2821    return 0;
     2822
     2823}  // end vfs_add_special_dentries()
    25862824
    25872825//////////////////////////////////////////
     
    28453083
    28463084#if(DEBUG_VFS_ADD_CHILD & 1)
    2847 if( local_cxy == 1 )
    2848 // if( DEBUG_VFS_ADD_CHILD < cycle )
     3085if( DEBUG_VFS_ADD_CHILD < cycle )
    28493086printk("\n[%s] thread[%x,%x] / dentry (%x,%x) registered in child inode (%x,%x)\n",
    28503087__FUNCTION__, this->process->pid, this->trdid,
     
    28523089#endif
    28533090
    2854     // 4. register new_dentry in parent_inode xhtab of children
     3091    // register new_dentry in parent_inode xhtab of children
    28553092    children_xhtab_xp = XPTR( parent_cxy , &parent_inode_ptr->children );
    28563093    children_entry_xp = XPTR( parent_cxy , &new_dentry_ptr->children );
     
    28643101#endif
    28653102
    2866     // 5. update "parent" and "child_xp" fields in new_dentry
     3103    // update "parent" and "child_xp" fields in new_dentry
    28673104    hal_remote_s64( XPTR( parent_cxy , &new_dentry_ptr->child_xp ) , new_inode_xp );
    28683105    hal_remote_spt( XPTR( parent_cxy , &new_dentry_ptr->parent ) , parent_inode_ptr );
  • trunk/kernel/fs/vfs.h

    r610 r611  
    4545
    4646struct vfs_inode_s;
    47 struct vfs_dentry_t;
    48 struct vfs_ctx_t;
    49 struct vfs_file_ref_s;
     47struct vfs_dentry_s;
     48struct vfs_ctx_s;
    5049struct vfs_file_s;
    51 
    52 struct vfs_inode_op_s;
    53 struct vfs_dentry_op_s;
    54 struct vfs_file_op_s;
    55 struct vfs_ctx_op_s;
    56 
    57 struct vfs_lookup_cmd_s;
    58 struct vfs_lookup_rsp_s;
    5950
    6051struct mapper_s;
     
    6354struct vseg_s;
    6455struct page_s;
    65 
    6656
    6757/******************************************************************************************
     
    133123 *****************************************************************************************/
    134124
    135 /* this enum define the VFS inode types values */
    136 /* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file */
     125/* this enum define the VFS inode types values                                           */
     126/* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file       */
     127/*           and with types in <shared_dirent.h> file.                                   */
    137128
    138129typedef enum   
     
    144135    INODE_TYPE_SOCK  =     4,           /*! POSIX socket                                 */
    145136    INODE_TYPE_DEV   =     5,           /*! character device                             */
    146     INODE_TYPE_SYML  =     6,           /*! symbolic link                                */
     137    INODE_TYPE_BLK   =     6,           /*! block device                                 */
     138    INODE_TYPE_SYML  =     7,           /*! symbolic link                                */
    147139}
    148140vfs_inode_type_t;
     
    184176#define VFS_ISUID          0x0004000
    185177#define VFS_ISGID          0x0002000
    186 define VFS_ISVTX           0x0001000
     178#define VFS_ISVTX          0x0001000
    187179
    188180#define VFS_IRWXU      0x0000700
     
    316308 * This function allocates memory from local cluster for an inode descriptor and the
    317309 * associated mapper. It initialises these descriptors from arguments values.
    318  * If the client thread is not running in the cluster containing this inode,
    319  * it must use the rpc_vfs_inode_create_client() function.
     310 * It must be called by a local thread. Use the RPC_INODE_CREATE if client thread is remote.
    320311 ******************************************************************************************
    321312 * @ fs_type    : file system type.
     
    407398
    408399
     400
    409401/******************************************************************************************
    410402 *        These low-level functions access / modify a VFS dentry descriptor
     
    414406 * This function allocates memory from local cluster for a dentry descriptor,
    415407 * initialises it from  arguments values, and returns the extended pointer on dentry.
    416  * If the client thread is not running in the target cluster for this inode,
    417  * it must use the rpc_dentry_create_client() function.
     408 * It must be called by a local thread. Use the RPC_DENTRY_CREATE if client thread is remote.
    418409 ******************************************************************************************
    419410 * @ fs_type    : file system type.
     
    548539 *
    549540 * [Implementation]
    550  * As there are cross-references between the inode and the associated dentry, this
    551  * function implement a three steps scenario :
     541 * As there are cross-references between inode and dentry, this function implements
     542 * a three steps scenario :
    552543 * 1) The dentry descriptor is created in the cluster containing the existing <parent_xp>
    553  *    inode, and is only partially initialized : "fs_type", "name", "parent_xp" fields.
     544 *    inode, and partially initialized, using the RPC_VFS_CREATE DENTRY if required.
    554545 * 2) The inode and its associated mapper are created in cluster identified by <child_cxy>,
    555  *    and initialised. The new inode and the parent inode can have different FS types.
    556  * 3) The "child_xp" field in dentry (pointing on the created inode) is updated,
    557  *    and the refcount is incremented for both the inode and the dentry.
     546 *    and partially initialised, using the RPC_VFS_CREATE_INODE if required.
     547 *    The new inode and the parent inode can have different FS types.
     548 * 3) The pointers between the parent inode, the new dentry, and the child inode
     549 *    are updated, using remote accesses.
    558550 ******************************************************************************************
    559551 * @ child_inode_cxy  : [in]  target cluster for child inode.
     
    612604
    613605/******************************************************************************************
     606 * This function is called by the vfs_mkdir() function to create the two special dentries
     607 * <.> and <..> in a new directory identified by the <child_xp> argument. The parent
     608 * directory inode is defined by the <parent_xp> argument.
     609 * The two dentries are introduced in the Inode Tree. They are also introduced in the
     610 * child directory mapper, and the IOC device is updated.
     611 ******************************************************************************************
     612 * @ child_xp    : extended pointer on new directory inode.
     613 * @ parent_xp   : extended pointer on parent directory inode.
     614 * @ return 0 if success / -1 if failure.
     615 *****************************************************************************************/
     616error_t vfs_add_special_dentries( xptr_t  child_xp,
     617                                  xptr_t  parent_xp );
     618
     619/******************************************************************************************
    614620 * This recursive function displays a complete inode/dentry sub-tree.
    615621 * Any inode can be selected as the sub-tree root.
    616  * TODO this function is not protected against a concurrent inode/dentry removal...
     622 * WARNING : this function is not protected against a concurrent inode/dentry removal...
    617623 ******************************************************************************************
    618624 * @ inode_xp   : extended pointer on sub-tree root inode.
     
    809815
    810816/******************************************************************************************
    811  * This function returns, in the structure pointed by the <k_dirent> kernel pointer,
    812  * various infos on the directory entry currently pointed by the <file_xp> file descriptor.
    813  * TODO not implemented yet...
    814  ******************************************************************************************
    815  * @ file_xp    : extended pointer on the file descriptor of the searched directory .
    816  * @ k_dirent   : local pointer on the dirent structure in kernel space.
    817  * @ returns 0 if success / -1 if error.
    818  *****************************************************************************************/
    819 error_t vfs_readdir( xptr_t          file_xp,
    820                      struct dirent * k_dirent );
    821 
    822 /******************************************************************************************
    823817 * This function  creates a new directory as defined by the <root_xp> & <path> arguments.
    824818 * TODO not implemented yet...
     
    880874 * The directory inode descriptor and the dentry descriptor are in the same cluster.
    881875 * Depending on the file system type, it calls the proper, FS specific function.
    882  * It ulso pdates the dentry descriptor and/or the inode descriptor extensions
     876 * It also updates the dentry descriptor and/or the inode descriptor extensions
    883877 * as required by the specific file system type.
    884878 * Finally, it synchronously updates the parent directory on IOC device.
    885879 *
    886880 * It must be executed by a thread running in the cluster containing the parent directory.
    887  * It can be the RPC_VFS_VS_ADD_DENTRY. This function does NOT take any lock.
     881 * It can be the RPC_VFS_FS_ADD_DENTRY. This function does NOT take any lock.
    888882 ******************************************************************************************
    889883 * @ parent  : local pointer on parent (directory) inode.
  • trunk/kernel/kern/cluster.h

    r583 r611  
    264264/******************************************************************************************
    265265 * This function returns a pointer on the local process descriptor from the PID.
    266  * It uses the RPC
    267  * to create a local process descriptor copy if it does not exist yet.
    268266 ******************************************************************************************
    269267 * @ pid     : searched process identifier.
  • trunk/kernel/kern/kernel_init.c

    r610 r611  
    167167    "PROCESS_FDARRAY",       // 27
    168168    "FATFS_FREE",            // 28
    169 
    170     "PROCESS_THTBL",         // 29
    171 
    172     "MAPPER_STATE",          // 30
    173     "VFS_SIZE",              // 31
    174     "VFS_FILE",              // 32
    175     "VMM_VSL",               // 33
    176     "VMM_GPT",               // 34
    177     "VFS_MAIN",              // 35
     169    "PROCESS_DIR",           // 29
     170
     171    "PROCESS_THTBL",         // 30
     172
     173    "MAPPER_STATE",          // 31
     174    "VFS_SIZE",              // 32
     175    "VFS_FILE",              // 33
     176    "VMM_VSL",               // 34
     177    "VMM_GPT",               // 35
     178    "VFS_MAIN",              // 36
    178179};       
    179180
  • trunk/kernel/kern/process.c

    r610 r611  
    274274    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
    275275
    276     // reset semaphore / mutex / barrier / condvar list roots
     276    // reset semaphore / mutex / barrier / condvar list roots and lock
    277277    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    278278    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
     
    280280    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    281281    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
     282
     283    // reset open directories root and lock
     284    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
     285    remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );
    282286
    283287    // register new process in the local cluster manager pref_tbl[]
     
    546550    thread_block( client_xp , THREAD_BLOCKED_RPC );
    547551
    548     // take the lock protecting process copies
    549     remote_queuelock_acquire( lock_xp );
    550 
    551552    // initialize shared RPC descriptor
    552553    rpc.responses = 0;
     
    555556    rpc.thread    = client;
    556557    rpc.lid       = client->core->lid;
    557     rpc.args[0]   = type;
    558     rpc.args[1]   = pid;
     558    rpc.args[0]   = pid;
     559    rpc.args[1]   = type;
     560
     561    // take the lock protecting process copies
     562    remote_queuelock_acquire( lock_xp );
    559563
    560564    // scan list of process copies
    561     // to send RPCs to remote copies
    562565    XLIST_FOREACH( root_xp , iter_xp )
    563566    {
  • trunk/kernel/kern/process.h

    r610 r611  
    6060 ********************************************************************************************/
    6161
    62 typedef enum process_sigactions
     62typedef enum
    6363{
    6464    BLOCK_ALL_THREADS    = 0x11,
    6565    UNBLOCK_ALL_THREADS  = 0x22,
    6666    DELETE_ALL_THREADS   = 0x33,
    67 } process_sigactions_t;
     67}
     68process_sigactions_t;
    6869
    6970/*********************************************************************************************
     
    145146
    146147        struct thread_s  * th_tbl[CONFIG_THREADS_MAX_PER_CLUSTER];       /*! local threads       */
     148
    147149        uint32_t           th_nr;            /*! number of threads in this cluster               */
    148150    rwlock_t           th_lock;          /*! lock protecting th_tbl[]                        */
    149151
    150     xlist_entry_t      sem_root;         /*! root of the user definedsemaphore list          */
     152    xlist_entry_t      sem_root;         /*! root of the user defined semaphore list         */
    151153    xlist_entry_t      mutex_root;       /*! root of the user defined mutex list             */
    152154    xlist_entry_t      barrier_root;     /*! root of the user defined barrier list           */
    153155    xlist_entry_t      condvar_root;     /*! root of the user defined condvar list           */
    154156    remote_queuelock_t sync_lock;        /*! lock protecting user defined synchro lists      */
     157
     158    xlist_entry_t      dir_root;         /*! root of the user defined DIR list               */
     159    remote_queuelock_t dir_lock;         /*! lock protecting user defined DIR list           */
    155160
    156161    uint32_t           term_state;       /*! termination status (flags & exit status)        */
  • trunk/kernel/kern/rpc.c

    r610 r611  
    7777    &rpc_undefined,                        // 24   unused slot
    7878    &rpc_mapper_handle_miss_server,        // 25
    79     &rpc_undefined,                        // 26   unused slot
     79    &rpc_vmm_delete_vseg_server,           // 26
    8080    &rpc_vmm_create_vseg_server,           // 27
    8181    &rpc_vmm_set_cow_server,               // 28
     
    113113    "undefined",                 // 24
    114114    "MAPPER_HANDLE_MISS",        // 25
    115     "undefined",                 // 26
     115    "VMM_DELETE_VSEG",           // 26
    116116    "VMM_CREATE_VSEG",           // 27
    117117    "VMM_SET_COW",               // 28
     
    283283    bool_t          blocking;           // blocking RPC when true
    284284        remote_fifo_t * rpc_fifo;           // local pointer on RPC fifo
     285    uint32_t        count;              // current number of expected responses
    285286 
    286287    // makes RPC thread not preemptable
     
    302303uint32_t cycle = (uint32_t)hal_get_cycles();
    303304if( DEBUG_RPC_SERVER_GENERIC < cycle )
    304 printk("\n[%s] RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n",
    305 __FUNCTION__, server_ptr->trdid, server_core_lid, cycle );
     305printk("\n[%s] RPC thread[%x,%x] on core[%d] takes RPC_FIFO ownership / cycle %d\n",
     306__FUNCTION__, server_ptr->process->pid, server_ptr->trdid, server_core_lid, cycle );
    306307#endif
    307308                // try to consume one RPC request 
     
    326327uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) );
    327328if( DEBUG_RPC_SERVER_GENERIC < cycle )
    328 printk("\n[%s] RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n",
    329 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle );
     329printk("\n[%s] RPC thread[%x,%x] got rpc %s / client_cxy %x / items %d / cycle %d\n",
     330__FUNCTION__, server_ptr->process->pid, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle );
    330331#endif
    331332                // register client thread in RPC thread descriptor
     
    338339cycle = (uint32_t)hal_get_cycles();
    339340if( DEBUG_RPC_SERVER_GENERIC < cycle )
    340 printk("\n[%s] RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n",
    341 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle );
    342 #endif
    343                 // decrement response counter in RPC descriptor if blocking RPC
    344                 if( blocking )
     341printk("\n[%s] RPC thread[%x,%x] completes rpc %s / client_cxy %x / cycle %d\n",
     342__FUNCTION__, server_ptr->process->pid, server_ptr->trdid, rpc_str[index], desc_cxy, cycle );
     343#endif
     344                // decrement expected responses counter in RPC descriptor
     345                count = hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
     346
     347                // decrement response counter in RPC descriptor if last response
     348                if( count == 1 )
    345349                {
    346                     // decrement responses counter in RPC descriptor
    347                     hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
    348 
    349350                    // get client thread pointer and client core lid from RPC descriptor
    350351                    client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     
    359360cycle = (uint32_t)hal_get_cycles();
    360361if( DEBUG_RPC_SERVER_GENERIC < cycle )
    361 printk("\n[%s] RPC thread %x unblocked client thread %x / cycle %d\n",
    362 __FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle );
     362printk("\n[%s] RPC thread[%x,%x] unblocked client thread[%x,%x] / cycle %d\n",
     363__FUNCTION__, server_ptr->process->pid, server_ptr->trdid,
     364client_ptr->process->pid, client_ptr->trdid, cycle );
    363365#endif
    364366                    // send IPI to client core
    365367                    dev_pic_send_ipi( desc_cxy , client_core_lid );
    366 
    367                 }  // end if blocking RPC
     368                }
    368369            }  // end RPC handling if fifo non empty
    369370        }  // end if RPC_fIFO ownership successfully taken and released
     
    376377uint32_t cycle = (uint32_t)hal_get_cycles();
    377378if( DEBUG_RPC_SERVER_GENERIC < cycle )
    378 printk("\n[%s] RPC thread %x suicides / cycle %d\n",
    379 __FUNCTION__, server_ptr->trdid, cycle );
     379printk("\n[%s] RPC thread[%x,%x] suicides / cycle %d\n",
     380__FUNCTION__, server_ptr->process->pid, server_ptr->trdid, cycle );
    380381#endif
    381382            // update RPC threads counter
     
    395396uint32_t cycle = (uint32_t)hal_get_cycles();
    396397if( DEBUG_RPC_SERVER_GENERIC < cycle )
    397 printk("\n[%s] RPC thread %x block IDLE & deschedules / cycle %d\n",
    398 __FUNCTION__, server_ptr->trdid, cycle );
     398printk("\n[%s] RPC thread[%x,%x] block IDLE & deschedules / cycle %d\n",
     399__FUNCTION__, server_ptr->process->pid, server_ptr->trdid, cycle );
    399400#endif
    400401            // RPC thread blocks on IDLE
     
    425426#endif
    426427
    427     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     428    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    428429
    429430    // initialise RPC descriptor header
     
    498499#endif
    499500
    500     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     501    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    501502
    502503    // initialise RPC descriptor header
     
    576577#endif
    577578
    578     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     579    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    579580
    580581    // initialise RPC descriptor header
     
    677678#endif
    678679
    679     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     680    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    680681
    681682    // initialise RPC descriptor header
     
    784785#endif
    785786
    786     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     787    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    787788
    788789    // initialise RPC descriptor header
     
    862863
    863864/////////////////////////////////////////////////////////////////////////////////////////
    864 // [9] Marshaling functions attached to RPC_PROCESS_SIGACTION (multicast / non blocking)
     865// [9]   Marshaling functions attached to RPC_PROCESS_SIGACTION (non blocking)
    865866/////////////////////////////////////////////////////////////////////////////////////////
    866867
     
    869870                                   rpc_desc_t * rpc )
    870871{
    871 
    872872#if DEBUG_RPC_PROCESS_SIGACTION
    873 uint32_t  cycle  = (uint32_t)hal_get_cycles();
    874 uint32_t  action = rpc->args[0];
    875 pid_t     pid    = rpc->args[1];
     873uint32_t  cycle = (uint32_t)hal_get_cycles();
    876874thread_t * this = CURRENT_THREAD;
    877875if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    878 printk("\n[%s] thread[%x,%x] enter to request %s of process %x in cluster %x / cycle %d\n",
    879 __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle );
    880 #endif
    881 
    882     // check some RPC arguments
    883     assert( (rpc->blocking == false) , "must be non-blocking\n");
    884     assert( (rpc->index == RPC_PROCESS_SIGACTION ) , "bad RPC index\n" );
     876printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     877__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     878#endif
     879
     880// check RPC "index" and "blocking" arguments
     881assert( (rpc->blocking == false) , "must be non-blocking\n");
     882assert( (rpc->index == RPC_PROCESS_SIGACTION ) , "bad RPC index\n" );
    885883
    886884    // register RPC request in remote RPC fifo and return
     
    890888cycle = (uint32_t)hal_get_cycles();
    891889if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    892 printk("\n[%s] thread[%x,%x] requested %s of process %x in cluster %x / cycle %d\n",
    893 __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle );
    894 #endif
    895 
     890printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     891__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     892#endif
    896893}  // end rpc_process_sigaction_client()
    897894
     
    899896void rpc_process_sigaction_server( xptr_t xp )
    900897{
    901     pid_t        pid;             // target process identifier
    902     process_t  * process;         // pointer on local target process descriptor
    903     uint32_t     action;          // sigaction index
    904     thread_t   * client_ptr;      // pointer on client thread in client cluster
    905     xptr_t       client_xp;       // extended pointer client thread
    906     cxy_t        client_cxy;      // client cluster identifier
    907     rpc_desc_t * rpc;             // pointer on rpc descriptor in client cluster
    908     xptr_t       count_xp;        // extended pointer on responses counter
    909     uint32_t     count_value;     // responses counter value
    910     lid_t        client_lid;      // client core local index
     898#if DEBUG_RPC_PROCESS_SIGACTION
     899uint32_t cycle = (uint32_t)hal_get_cycles();
     900thread_t * this = CURRENT_THREAD;
     901if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
     902printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     903__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     904#endif
    911905
    912906    // get client cluster identifier and pointer on RPC descriptor
    913     client_cxy = GET_CXY( xp );
    914     rpc        = GET_PTR( xp );
     907    cxy_t        client_cxy = GET_CXY( xp );
     908    rpc_desc_t * desc       = GET_PTR( xp );
    915909
    916910    // get arguments from RPC descriptor
    917     action   = (uint32_t)hal_remote_l64( XPTR(client_cxy , &rpc->args[0]) );
    918     pid      = (pid_t)   hal_remote_l64( XPTR(client_cxy , &rpc->args[1]) );
    919 
    920 #if DEBUG_RPC_PROCESS_SIGACTION
    921 uint32_t cycle = (uint32_t)hal_get_cycles();
    922 thread_t * this = CURRENT_THREAD;
    923 if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    924 printk("\n[%s] thread[%x,%x] enter to %s process %x in cluster %x / cycle %d\n",
    925 __FUNCTION__, this->process->pid, this->trdid,
    926 process_action_str( action ), pid, local_cxy, cycle );
    927 #endif
     911    pid_t    pid    = (pid_t)   hal_remote_l64( XPTR(client_cxy , &desc->args[0]) );
     912    uint32_t action = (uint32_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) );
    928913
    929914    // get client thread pointers
    930     client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
    931     client_xp  = XPTR( client_cxy , client_ptr );
     915    thread_t * client_ptr = hal_remote_lpt( XPTR( client_cxy , &desc->thread ) );
     916    xptr_t     client_xp  = XPTR( client_cxy , client_ptr );
    932917
    933918    // get local process descriptor
    934     process = cluster_get_local_process_from_pid( pid );
     919    process_t * process = cluster_get_local_process_from_pid( pid );
    935920
    936921    // call relevant kernel function
     
    939924    else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
    940925
    941     // build extended pointer on response counter in RPC
    942     count_xp = XPTR( client_cxy , &rpc->responses );
    943 
    944     // decrement the responses counter in RPC descriptor,
    945     count_value = hal_remote_atomic_add( count_xp , -1 );
    946 
    947     // unblock the client thread only if it is the last response.
    948     if( count_value == 1 )
    949     {
    950         // get client core lid
    951         client_lid    = (lid_t)     hal_remote_l32 ( XPTR( client_cxy , &rpc->lid    ) );
    952 
    953         // unblock client thread
    954         thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    955 
    956         // send an IPI to client core
    957         // dev_pic_send_ipi( client_cxy , client_lid );
    958     }
    959 
    960926#if DEBUG_RPC_PROCESS_SIGACTION
    961927cycle = (uint32_t)hal_get_cycles();
    962928if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    963 printk("\n[%s] thread[%x,%x] exit after %s process %x in cluster %x / cycle %d\n",
    964 __FUNCTION__, this->process->pid, this->trdid,
    965 process_action_str( action ), pid, local_cxy, cycle );
    966 #endif
    967 
     929printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     930__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     931#endif
    968932} // end rpc_process_sigaction_server()
    969933
     
    991955#endif
    992956
    993     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     957    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    994958
    995959    // initialise RPC descriptor header
     
    10911055#endif
    10921056
    1093     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1057    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    10941058
    10951059    // initialise RPC descriptor header
     
    11631127#endif
    11641128
    1165     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1129    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    11661130
    11671131    // initialise RPC descriptor header
     
    12511215#endif
    12521216
    1253     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1217    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    12541218
    12551219    // initialise RPC descriptor header
     
    13241288#endif
    13251289
    1326     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1290    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    13271291
    13281292    // initialise RPC descriptor header
     
    14081372#endif
    14091373
    1410     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1374    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    14111375
    14121376    // initialise RPC descriptor header
     
    14801444#endif
    14811445
    1482     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1446    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    14831447
    14841448    // initialise RPC descriptor header
     
    15691533#endif
    15701534
    1571     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1535    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    15721536
    15731537    // initialise RPC descriptor header
     
    16491613#endif
    16501614
    1651     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1615    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    16521616
    16531617    // initialise RPC descriptor header
     
    17291693#endif
    17301694
    1731     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1695    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    17321696
    17331697    // initialise RPC descriptor header
     
    18081772#endif
    18091773
    1810     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1774    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    18111775
    18121776    // initialise RPC descriptor header
     
    18961860#endif
    18971861
    1898     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1862    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    18991863
    19001864    // initialise RPC descriptor header
     
    19751939#endif
    19761940
    1977     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     1941    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    19781942
    19791943    // initialise RPC descriptor header
     
    20532017#endif
    20542018
    2055     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     2019    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    20562020
    20572021    // initialise RPC descriptor header
     
    21252089{
    21262090#if DEBUG_RPC_MAPPER_HANDLE_MISS
     2091thread_t * this = CURRENT_THREAD;
    21272092uint32_t cycle = (uint32_t)hal_get_cycles();
    21282093if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
     
    21312096#endif
    21322097
    2133     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     2098    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    21342099
    21352100    // initialise RPC descriptor header
     
    21622127{
    21632128#if DEBUG_RPC_MAPPER_HANDLE_MISS
     2129thread_t * this = CURRENT_THREAD;
    21642130uint32_t cycle = (uint32_t)hal_get_cycles();
    21652131if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
     
    21992165
    22002166/////////////////////////////////////////////////////////////////////////////////////////
    2201 // [26]         undefined slot
    2202 /////////////////////////////////////////////////////////////////////////////////////////
     2167// [26]  Marshaling functions attached to RPC_VMM_DELETE_VSEG (parallel / non blocking)
     2168/////////////////////////////////////////////////////////////////////////////////////////
     2169
     2170//////////////////////////////////////////////////
     2171void rpc_vmm_delete_vseg_client( cxy_t        cxy,
     2172                                 rpc_desc_t * rpc )
     2173{
     2174#if DEBUG_RPC_VMM_DELETE_VSEG
     2175thread_t * this  = CURRENT_THREAD;
     2176uint32_t   cycle = (uint32_t)hal_get_cycles();
     2177if( cycle > DEBUG_RPC_VMM_DELETE_VSEG )
     2178printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2179__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     2180#endif
     2181
     2182// check RPC "index" and "blocking" arguments
     2183assert( (rpc->blocking == false) , "must be non-blocking\n");
     2184assert( (rpc->index == RPC_VMM_DELETE_VSEG ) , "bad RPC index\n" );
     2185
     2186    // register RPC request in remote RPC fifo
     2187    rpc_send( cxy , rpc );
     2188
     2189#if DEBUG_RPC_VMM_DELETE_VSEG
     2190cycle = (uint32_t)hal_get_cycles();
     2191if( cycle > DEBUG_RPC_VMM_DELETE_VSEG )
     2192printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2193__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     2194#endif
     2195}
     2196
     2197////////////////////////////////////////////
     2198void rpc_vmm_delete_vseg_server( xptr_t xp )
     2199{
     2200#if DEBUG_RPC_VMM_DELETE_VSEG
     2201uint32_t cycle = (uint32_t)hal_get_cycles();
     2202thread_t * this = CURRENT_THREAD;
     2203if( DEBUG_RPC_VMM_DELETE_VSEG < cycle )
     2204printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2205__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     2206#endif
     2207
     2208    // get client cluster identifier and pointer on RPC descriptor
     2209    cxy_t        client_cxy = GET_CXY( xp );
     2210    rpc_desc_t * desc       = GET_PTR( xp );
     2211
     2212    // get arguments from RPC descriptor
     2213    pid_t    pid   = (pid_t)   hal_remote_l64( XPTR(client_cxy , &desc->args[0]) );
     2214    intptr_t vaddr = (intptr_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) );
     2215
     2216    // call relevant kernel function
     2217    vmm_delete_vseg( pid , vaddr );
     2218
     2219#if DEBUG_RPC_VMM_DELETE_VSEG
     2220cycle = (uint32_t)hal_get_cycles();
     2221if( DEBUG_RPC_VMM_DELETE_VSEG < cycle )
     2222printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2223__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     2224#endif
     2225}
    22032226
    22042227/////////////////////////////////////////////////////////////////////////////////////////
     
    22182241                                 struct vseg_s   ** vseg )
    22192242{
    2220     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     2243#if DEBUG_RPC_VMM_CREATE_VSEG
     2244thread_t * this = CURRENT_THREAD;
     2245uint32_t cycle = (uint32_t)hal_get_cycles();
     2246if( cycle > DEBUG_RPC_VMM_CREATE_VSEG )
     2247printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2248__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2249#endif
     2250
     2251    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    22212252
    22222253    // initialise RPC descriptor header
     
    22422273    *vseg = (vseg_t *)(intptr_t)rpc.args[8];
    22432274
     2275#if DEBUG_RPC_VMM_CREATE_VSEG
     2276cycle = (uint32_t)hal_get_cycles();
     2277if( cycle > DEBUG_RPC_VMM_CREATE_VSEG )
     2278printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2279__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2280#endif
    22442281}
    22452282
     
    22472284void rpc_vmm_create_vseg_server( xptr_t xp )
    22482285{
     2286#if DEBUG_RPC_VMM_CREATE_VSEG
     2287thread_t * this = CURRENT_THREAD;
     2288uint32_t cycle = (uint32_t)hal_get_cycles();
     2289if( cycle > DEBUG_RPC_VMM_CREATE_VSEG )
     2290printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2291__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2292#endif
     2293
    22492294    // get client cluster identifier and pointer on RPC descriptor
    22502295    cxy_t        cxy  = GET_CXY( xp );
     
    22742319    hal_remote_s64( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg );
    22752320
     2321#if DEBUG_RPC_VMM_CREATE_VSEG
     2322cycle = (uint32_t)hal_get_cycles();
     2323if( cycle > DEBUG_RPC_VMM_CREATE_VSEG )
     2324printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2325__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2326#endif
    22762327}
    22772328
     
    22842335                             process_t * process )
    22852336{
    2286     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     2337    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    22872338
    22882339    // initialise RPC descriptor header
     
    23262377                             bool_t      detailed )
    23272378{
    2328     assert( (cxy != local_cxy) , "target cluster is not remote\n");
     2379    assert( (cxy != local_cxy) , "server cluster is not remote\n");
    23292380
    23302381    // initialise RPC descriptor header
  • trunk/kernel/kern/rpc.h

    r610 r611  
    5454/***********************************************************************************
    5555 * This enum defines all RPC indexes.
    56  * It must be consistent with the rpc_server[] array defined in in the rpc.c file.
     56 * It must be consistent with the rpc_server[] arrays defined in in the rpc.c file.
    5757 **********************************************************************************/
    5858
     
    6868    RPC_THREAD_KERNEL_CREATE      = 7,
    6969    RPC_UNDEFINED_8               = 8,
    70     RPC_PROCESS_SIGACTION         = 9,
     70    RPC_PROCESS_SIGACTION         = 9,       // non blocking
    7171
    7272    RPC_VFS_INODE_CREATE          = 10,
     
    8787    RPC_UNDEFINED_24              = 24,
    8888    RPC_MAPPER_HANDLE_MISS        = 25,
    89     RPC_UNDEFINED_26              = 26,
     89    RPC_VMM_DELETE_VSEG           = 26,      // non blocking
    9090    RPC_VMM_CREATE_VSEG           = 27,
    9191    RPC_VMM_SET_COW               = 28,
     
    281281
    282282/***********************************************************************************
    283  * [9] The RPC_PROCESS_SIGACTION allows a thread running in any cluster
    284  * to request a cluster identified by the <cxy> argument (local or remote)
    285  * to execute a given sigaction for a given cluster. The <action_type> and
    286  * the <pid> arguments are defined in the shared RPC descriptor, that must be
    287  * initialised by the client thread.
     283 * [9] The non blocking RPC_PROCESS_SIGACTION allows any client thread running in
     284 * any cluster to send parallel RPC requests to one or several servers (that can be
     285 * local or remote), to execute a given sigaction, defined by the <action_type>
     286 * argument[1], for a given process identified by the <pid> argument[0].
    288287 *
    289  * WARNING : It is implemented as a NON BLOCKING multicast RPC, that can be sent
    290  * in parallel to all process copies. The various RPC server threads atomically
    291  * decrement the <response> field in the shared RPC descriptor.
    292  * The last server thread unblock the client thread that blocked (after sending
    293  * all RPC requests) in the process_sigaction() function.
     288 * WARNING : It is implemented as a NON BLOCKING RPC, that can be sent in parallel
     289 * to several servers. The RPC descriptor, containing the <action_type> and <pid>
     290 * arguments, as well as the RPC <index>, <blocked>, and <response> fields, must
     291 * be allocated and initialised by the calling function itself.
     292 * Each RPC server thread atomically decrements the <response> field in this
     293 * shared RPC descriptor. The last server thread unblock the client thread,
     294 * that blocked only after sending all parallel RPC requests to all servers.
    294295 ***********************************************************************************
    295296 * @ cxy     : server cluster identifier.
    296  * @ rpc     : pointer on ishared RPC descriptor initialized by the client thread.
     297 * @ rpc     : pointer on shared RPC descriptor initialized by the client thread.
    297298 **********************************************************************************/
    298299void rpc_process_sigaction_client( cxy_t               cxy,
     
    550551 * On the server side, this RPC call the mapper_handle_miss() function and return
    551552 * an extended pointer on the allocated page descriptor and an error status.
     553 ***********************************************************************************
    552554 * @ cxy         : server cluster identifier.
    553555 * @ mapper      : [in]  local pointer on mapper.
     
    566568
    567569/***********************************************************************************
    568  * [26] undefined slot
    569  **********************************************************************************/
     570 * [26] The non blocking RPC_VMM_DELETE_VSEG allows any client thread running in
     571 * any cluster to send parallel RPC requests to one or several clusters (that can be
     572 * local or remote), to delete from a given VMM, identified by the <pid> argument[0]
     573 * a given vseg, identified by the <vaddr> argument[1].
     574 *
     575 * WARNING : It is implemented as a NON BLOCKING RPC, that can be sent in parallel
     576 * to several servers. The RPC descriptor, containing the <pid> and <vaddr>
     577 * arguments, as well as the RPC <index>, <blocked>, and <response> fields, must
     578 * be allocated and initialised by the calling function itself.
     579 * Each RPC server thread atomically decrements the the <response> field in this
     580 * shared RPC descriptor. The last server thread unblock the client thread,
     581 * that blocked only after sending all paralle RPC requests to all servers.
     582 ***********************************************************************************
     583 * @ cxy         : server cluster identifier.
     584 * @ rpc     : pointer on shared RPC descriptor initialized by the client thread.
     585 **********************************************************************************/
     586void rpc_vmm_delete_vseg_client( cxy_t               cxy,
     587                                 struct rpc_desc_s * rpc );
     588 
     589void rpc_vmm_delete_vseg_server( xptr_t xp );
    570590
    571591/***********************************************************************************
  • trunk/kernel/kern/thread.c

    r593 r611  
    326326    {
    327327            printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
    328         vmm_remove_vseg( vseg );
     328        vmm_delete_vseg( process->pid , vseg->min );
    329329        return ENOMEM;
    330330    }
     
    348348    {
    349349            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
    350         vmm_remove_vseg( vseg );
     350        vmm_delete_vseg( process->pid , vseg->min );
    351351        thread_release( thread );
    352352        return EINVAL;
     
    369369    {
    370370            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
    371         vmm_remove_vseg( vseg );
     371        vmm_delete_vseg( process->pid , vseg->min );
    372372        thread_release( thread );
    373373        return ENOMEM;
     
    379379    {
    380380            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
    381         vmm_remove_vseg( vseg );
     381        vmm_delete_vseg( process->pid , vseg->min );
    382382        thread_release( thread );
    383383        return ENOMEM;
     
    538538
    539539    // register STACK vseg in local child VSL
    540     vmm_vseg_attach( &child_process->vmm , vseg );
     540    vmm_attach_vseg_to_vsl( &child_process->vmm , vseg );
    541541
    542542#if (DEBUG_THREAD_USER_FORK & 1)
     
    560560        if( error )
    561561        {
    562             vmm_vseg_detach( &child_process->vmm , vseg );
    563             vseg_free( vseg );
     562            vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );
    564563            thread_release( child_ptr );
    565564            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
  • trunk/kernel/kernel_config.h

    r610 r611  
    128128#define DEBUG_QUEUELOCK_TYPE              0    // lock type (0 is undefined)
    129129
     130#define DEBUG_REMOTE_DIR                  0
     131
    130132#define DEBUG_RPC_CLIENT_GENERIC          0
    131133#define DEBUG_RPC_SERVER_GENERIC          0
     
    148150#define DEBUG_RPC_VFS_FILE_DESTROY        0
    149151#define DEBUG_RPC_VFS_DEVICE_GET_DENTRY   0
     152#define DEBUG_RPC_VMM_CREATE_VSEG         0
    150153#define DEBUG_RPC_VMM_GET_PTE             0
    151154#define DEBUG_RPC_VMM_GET_VSEG            0
     155#define DEBUG_RPC_VMM_UNMAP_VSEG          0
    152156
    153157#define DEBUG_RWLOCK_TYPE                 0    // lock type (0 is undefined)
     
    163167#define DEBUG_SYS_BARRIER                 0
    164168#define DEBUG_SYS_CLOSE                   0
     169#define DEBUG_SYS_CLOSEDIR                0
    165170#define DEBUG_SYS_CONDVAR                 0
    166171#define DEBUG_SYS_DISPLAY                 0
     
    176181#define DEBUG_SYS_KILL                    0
    177182#define DEBUG_SYS_OPEN                    0
    178 #define DEBUG_SYS_MKDIR                   2
     183#define DEBUG_SYS_OPENDIR                 0
     184#define DEBUG_SYS_MKDIR                   0
    179185#define DEBUG_SYS_MMAP                    0
    180186#define DEBUG_SYS_MUNMAP                  0
    181187#define DEBUG_SYS_MUTEX                   0
    182188#define DEBUG_SYS_READ                    0
     189#define DEBUG_SYS_READDIR                 0
    183190#define DEBUG_SYS_SEM                     0
    184191#define DEBUG_SYS_STAT                    0
     
    207214
    208215#define DEBUG_VFS_ADD_CHILD               0
     216#define DEBUG_VFS_ADD_SPECIAL             1
     217#define DEBUG_VFS_CHDIR                   0
    209218#define DEBUG_VFS_CLOSE                   0
    210 #define DEBUG_VFS_CHDIR                   0
    211219#define DEBUG_VFS_DENTRY_CREATE           0
    212220#define DEBUG_VFS_FILE_CREATE             0
     
    215223#define DEBUG_VFS_INODE_LOAD_ALL          0
    216224#define DEBUG_VFS_LINK                    0
    217 #define DEBUG_VFS_LOOKUP                  1
     225#define DEBUG_VFS_LOOKUP                  0
    218226#define DEBUG_VFS_LSEEK                   0
    219 #define DEBUG_VFS_MKDIR                   1
     227#define DEBUG_VFS_MKDIR                   0
    220228#define DEBUG_VFS_NEW_CHILD_INIT          0
    221229#define DEBUG_VFS_OPEN                    0
     
    224232
    225233#define DEBUG_VMM_CREATE_VSEG             0
     234#define DEBUG_VMM_DELETE_VSEG             0
    226235#define DEBUG_VMM_DESTROY                 0
    227236#define DEBUG_VMM_FORK_COPY               0
     
    233242#define DEBUG_VMM_PAGE_ALLOCATE           0
    234243#define DEBUG_VMM_SET_COW                 0
    235 #define DEBUG_VMM_UNMAP_VSEG              0
    236244#define DEBUG_VMM_UPDATE_PTE              0
    237245
     
    276284#define LOCK_PROCESS_FDARRAY  27   // remote (Q)  protect array of open files in owner process
    277285#define LOCK_FATFS_FREE       28   // remote (Q)  protect the FATFS context (free clusters)
    278 
    279 #define LOCK_PROCESS_THTBL    29   // local  (RW) protect local array of threads in a process
    280 
    281 #define LOCK_MAPPER_STATE     30   // remote (RW) protect mapper state
    282 #define LOCK_VFS_SIZE         31   // remote (RW) protect inode state and associated mapper
    283 #define LOCK_VFS_FILE         32   // remote (RW) protect file descriptor state
    284 #define LOCK_VMM_VSL          33   // remote (RW) protect VSL (local list of vsegs)
    285 #define LOCK_VMM_GPT          34   // remote (RW) protect GPT (local page table)
    286 #define LOCK_VFS_MAIN         35   // remote (RW) protect vfs traversal (in root inode)
     286#define LOCK_PROCESS_DIR      29   // remote (Q)  protect xlist of open directories in process
     287
     288#define LOCK_PROCESS_THTBL    30   // local  (RW) protect local array of threads in a process
     289
     290#define LOCK_MAPPER_STATE     31   // remote (RW) protect mapper state
     291#define LOCK_VFS_SIZE         32   // remote (RW) protect inode state and associated mapper
     292#define LOCK_VFS_FILE         33   // remote (RW) protect file descriptor state
     293#define LOCK_VMM_VSL          34   // remote (RW) protect VSL (local list of vsegs)
     294#define LOCK_VMM_GPT          35   // remote (RW) protect GPT (local page table)
     295#define LOCK_VFS_MAIN         36   // remote (RW) protect vfs traversal (in root inode)
    287296
    288297
     
    338347
    339348#define CONFIG_VFS_MAX_INODES               128        // max number of inodes per cluster
    340 #define CONFIG_VFS_MAX_NAME_LENGTH          32         // dentry name max length (bytes)
     349#define CONFIG_VFS_MAX_NAME_LENGTH          56         // dentry name max length (bytes)
    341350#define CONFIG_VFS_MAX_PATH_LENGTH          256        // pathname max length (bytes)
    342351#define CONFIG_VFS_FREE_CLUSTERS_MIN        32         // min number of free clusters
    343 
     352#define CONFIG_VFS_MAX_DENTRIES             63         // max number of dentries in one dir
    344353
    345354#define CONFIG_VFS_ROOT_IS_FATFS            1          // root FS is FATFS if non zero
     
    397406
    398407////////////////////////////////////////////////////////////////////////////////////////////
    399 //                USER SPACE SEGMENTATION / all values are number of pages
     408//                USER SPACE SEGMENTATION / all values are numbers of pages
    400409////////////////////////////////////////////////////////////////////////////////////////////
    401410
  • trunk/kernel/libk/remote_mutex.c

    r581 r611  
    137137#if DEBUG_MUTEX
    138138thread_t * this = CURRENT_THREAD;
    139 if( (uint32_t)hal_get_cycles() > DEBUG_QUEUELOCK )
     139if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
    140140printk("\n[DBG] %s : thread %x in %x process / mutex(%x,%x)\n",
    141141__FUNCTION__, this->trdid, this->process->pid, local_cxy, mutex_ptr );
  • trunk/kernel/libk/remote_mutex.h

    r581 r611  
    3838 * This user type is implemented as an unsigned long, but the value is not used by the
    3939 * kernel. ALMOS-MKH uses only the mutex virtual address as an identifier.
    40  * For each user mutex, ALMOS-MKH creates a kernel "remote_mutex_t" structure,
    41  * dynamically allocated in the reference cluster by the remote_mutex_create() function,
    42  * and destroyed by the remote_mutex_destroy() function, using RPC if the calling thread
     40 * For each user mutex, ALMOS-MKH creates a kernel "remote_mutex_t" structure, allocated
     41 * in the user process reference cluster by the remote_mutex_create() function, and
     42 * destroyed by the remote_mutex_destroy() function, using RPC if the calling thread
    4343 * is not running in the reference cluster.
    4444 *
  • trunk/kernel/libk/string.h

    r457 r611  
    4949/********************************************************************************************
    5050 * This function compares lexicographically the strind s1 and s2.
    51  * characters are considered unsigned.
     51 * Characters are considered unsigned.
    5252 * I does not compare characters after the first NUL character.
    5353 ********************************************************************************************
     
    6161/********************************************************************************************
    6262 * This function compares lexicographically the strind s1 and s2.
    63  * I does not compare than <n> characters and stops after the first NUL character.
     63 * I does not compare more than <n> characters and stops after the first NUL character.
    6464 ********************************************************************************************
    6565 * @ s1   : pointer on string.
  • trunk/kernel/libk/xhtab.h

    r610 r611  
    3232
    3333///////////////////////////////////////////////////////////////////////////////////////////
    34 // This file define a generic, embedded, remotely accessible hash table.
     34// This file define a generic, embedded, remotely accessible, hash table.
    3535//
    3636// It can be accessed by any thread, running in any cluster.
     
    3939// For this purpose the set of all registered items is split in several subsets.
    4040// Each subset is organised as an embedded double linked xlists.
    41 // - an item is uniquely identified by a <key>, that is a single uint32_t value.
    42 // - From the <key> value, the hash table uses an item type specific xhtab_index()
     41// - an item is uniquely identified by a <key>, that is a item specific pointer,
     42//   that can be a - for example - a char* defining the item "name".
     43// - From the <key> value, the hash table uses an item type specific index_from_key()
    4344//   function, to compute an <index> value, defining a subset of registered items.
    4445// - to discriminate between items that have the same <index>, the hash table makes
    45 //   an associative search on the key in subset.
     46//   an associative search on the key in subset, using the item type specific
     47//   item_match_key() function.
    4648// - Each registered item is a structure, that must contain an embedded xlist_entry,
    4749//   that is part of the xlist implementing the subset.
    4850//
    4951// For all registered items, a total order is defined by the increasing index values,
    50 // and for each index value, by the position in the partial xlist.
     52// and for each index value, by the position in the xlist implementing a subset.
    5153// This order is used by the two functions xhtab_get_first() and xhtab_get_next(), that
    5254// are used to scan all registered items. The two "current_index" and "current_xlist_xp"
  • trunk/kernel/mm/kmem.c

    r577 r611  
    4040#include <fatfs.h>
    4141#include <ramfs.h>
     42#include <remote_dir.h>
    4243#include <remote_sem.h>
    4344#include <remote_barrier.h>
     
    100101    else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
    101102    else if( type == KMEM_MUTEX )         return sizeof( remote_mutex_t );
     103    else if( type == KMEM_DIR )           return sizeof( remote_dir_t );
     104
    102105        else if( type == KMEM_512_BYTES )     return 512;
    103106
     
    128131    else if( type == KMEM_CONDVAR )       return "KMEM_CONDVAR";
    129132    else if( type == KMEM_MUTEX )         return "KMEM_MUTEX";
     133    else if( type == KMEM_DIR )           return "KMEM_DIR";
     134
    130135        else if( type == KMEM_512_BYTES )     return "KMEM_512_BYTES";
    131136
     
    144149
    145150#if DEBUG_KMEM
     151thread_t * this = CURRENT_THREAD;
    146152uint32_t cycle = (uint32_t)hal_get_cycles();
    147153if( DEBUG_KMEM < cycle )
    148 printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
    149 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     154printk("\n[%s] thread[%x,%x] enter / KCM type %s missing in cluster %x / cycle %d\n",
     155__FUNCTION__, this->process->pid, this->trdid, kmem_type_str( type ), local_cxy, cycle );
    150156#endif
    151157
     
    174180cycle = (uint32_t)hal_get_cycles();
    175181if( DEBUG_KMEM < cycle )
    176 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    177 __FUNCTION__, CURRENT_THREAD, cycle );
     182printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     183__FUNCTION__, this->process->pid, this->trdid, cycle );
    178184#endif
    179185
     
    198204
    199205#if DEBUG_KMEM
     206thread_t * this = CURRENT_THREAD;
    200207uint32_t cycle = (uint32_t)hal_get_cycles();
    201208if( DEBUG_KMEM < cycle )
    202 printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
    203 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     209printk("\n[%s] thread [%x,%x] enter / %s / size %d / cluster %x / cycle %d\n",
     210__FUNCTION__, this->process->pid, this->trdid,
     211kmem_type_str( type ), size, local_cxy, cycle );
    204212#endif
    205213
     
    222230cycle = (uint32_t)hal_get_cycles();
    223231if( DEBUG_KMEM < cycle )
    224 printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
    225 __FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
     232printk("\n[%s] thread[%x,%x] exit / %d page(s) allocated / ppn %x / cycle %d\n",
     233__FUNCTION__, this->process->pid, this->trdid,
     2341<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
    226235#endif
    227236
     
    244253cycle = (uint32_t)hal_get_cycles();
    245254if( DEBUG_KMEM < cycle )
    246 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    247 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );
     255printk("\n[%s] thread[%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n",
     256__FUNCTION__, this->process->pid, this->trdid,
     257kmem_type_str( type ), (intptr_t)ptr, size, cycle );
    248258#endif
    249259
     
    286296cycle = (uint32_t)hal_get_cycles();
    287297if( DEBUG_KMEM < cycle )
    288 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    289 __FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
     298printk("\n[%s] thread [%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n",
     299__FUNCTION__, this->process->pid, this->trdid, kmem_type_str(type), (intptr_t)ptr,
    290300kmem_type_size(type), cycle );
    291301#endif
  • trunk/kernel/mm/kmem.h

    r567 r611  
    3636enum
    3737{
    38   KMEM_PAGE             = 0,   /*! reserved for PPM allocator                       */
    39   KMEM_GENERIC          = 1,   /*! reserved for KHM allocator                       */
    40   KMEM_KCM              = 2,   /*! kcm_t                                            */
    41   KMEM_VSEG             = 3,   /*! vseg_t                                           */
    42   KMEM_DEVICE           = 4,   /*! device_t                                         */
    43   KMEM_MAPPER           = 5,   /*! mapper_t                                         */
    44   KMEM_PROCESS          = 6,   /*! process_t                                        */
    45   KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                                */
    46   KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                                */
    47   KMEM_BARRIER          = 9,   /*! remote_barrier_t                                 */
     38    KMEM_PAGE             = 0,   /*! reserved for PPM allocator                     */
     39    KMEM_GENERIC          = 1,   /*! reserved for KHM allocator                     */
     40    KMEM_KCM              = 2,   /*! kcm_t                                          */
     41    KMEM_VSEG             = 3,   /*! vseg_t                                         */
     42    KMEM_DEVICE           = 4,   /*! device_t                                       */
     43    KMEM_MAPPER           = 5,   /*! mapper_t                                       */
     44    KMEM_PROCESS          = 6,   /*! process_t                                      */
     45    KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                              */
     46    KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                              */
     47    KMEM_BARRIER          = 9,   /*! remote_barrier_t                               */
    4848
    49   KMEM_DEVFS_CTX        = 10,  /*! fatfs_inode_t                                    */
    50   KMEM_FATFS_CTX        = 11,  /*! fatfs_ctx_t                                      */
    51   KMEM_VFS_CTX          = 12,  /*! vfs_context_t                                    */
    52   KMEM_VFS_INODE        = 13,  /*! vfs_inode_t                                      */
    53   KMEM_VFS_DENTRY       = 14,  /*! vfs_dentry_t                                     */
    54   KMEM_VFS_FILE         = 15,  /*! vfs_file_t                                       */
    55   KMEM_SEM              = 16,  /*! remote_sem_t                                     */
    56   KMEM_CONDVAR          = 17,  /*! remote_condvar_t                                 */
    57   KMEM_MUTEX            = 18,  /*! remote_mutex_t                                   */
    58   KMEM_512_BYTES        = 19,  /*! 512 bytes aligned                                */
     49    KMEM_DEVFS_CTX        = 10,  /*! fatfs_inode_t                                  */
     50    KMEM_FATFS_CTX        = 11,  /*! fatfs_ctx_t                                    */
     51    KMEM_VFS_CTX          = 12,  /*! vfs_context_t                                  */
     52    KMEM_VFS_INODE        = 13,  /*! vfs_inode_t                                    */
     53    KMEM_VFS_DENTRY       = 14,  /*! vfs_dentry_t                                   */
     54    KMEM_VFS_FILE         = 15,  /*! vfs_file_t                                     */
     55    KMEM_SEM              = 16,  /*! remote_sem_t                                   */
     56    KMEM_CONDVAR          = 17,  /*! remote_condvar_t                               */
     57    KMEM_MUTEX            = 18,  /*! remote_mutex_t                                 */
     58    KMEM_DIR              = 19,  /*! remote_dir_t                                   */
    5959
    60   KMEM_TYPES_NR         = 21,
     60    KMEM_512_BYTES        = 20,  /*! 512 bytes aligned                              */
     61
     62    KMEM_TYPES_NR         = 21,
    6163};
    6264
  • trunk/kernel/mm/mapper.c

    r610 r611  
    644644}  // end mapper_remote_set_32()
    645645
    646 
     646//////////////////////////////////////////////////
     647error_t mapper_display_page( xptr_t     mapper_xp,
     648                             uint32_t   page_id,
     649                             uint32_t   nbytes,
     650                             char     * string )
     651{
     652    xptr_t     page_xp;        // extended pointer on page descriptor
     653    xptr_t     base_xp;        // extended pointer on page base
     654    char       buffer[4096];   // local buffer
     655    uint32_t * tab;            // pointer on uint32_t to scan the buffer
     656    uint32_t   line;           // line index
     657    uint32_t   word;           // word index
     658
     659    if( nbytes > 4096)
     660    {
     661        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
     662        __FUNCTION__, nbytes );
     663        return -1;
     664    }
     665   
     666    // get extended pointer on page descriptor
     667    page_xp = mapper_remote_get_page( mapper_xp , page_id );
     668
     669    if( page_xp == XPTR_NULL)
     670    {
     671        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
     672        __FUNCTION__, page_id );
     673        return -1;
     674    }
     675
     676    // get extended pointer on page base
     677    base_xp = ppm_page2base( page_xp );
     678   
     679    // copy remote page to local buffer
     680    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
     681
     682    // display 8 words per line
     683    tab = (uint32_t *)buffer;
     684    printk("\n***** %s : first %d bytes of page %d *****\n", string, nbytes, page_id );
     685    for( line = 0 ; line < (nbytes >> 5) ; line++ )
     686    {
     687        printk("%X : ", line );
     688        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
     689        printk("\n");
     690    }
     691
     692    return 0;
     693
     694}  // end mapper_display_page
     695
     696
  • trunk/kernel/mm/mapper.h

    r610 r611  
    11/*
    2  * mapper.h - Kernel cache for FS files or directories definition.
     2 * mapper.h - Kernel cache for VFS files/directories definition.
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
     
    195195
    196196/*******************************************************************************************
    197  * This function returns an extended pointer on a mapper page, identified by <page_id>,
    198  * index in the file. The - possibly remote - mapper is identified by the <mapper_xp>
    199  * argument.  It can be executed by a thread running in any cluster, as it uses remote
     197 * This function returns an extended pointer on a page descriptor.
     198 * The - possibly remote - mapper is identified by the <mapper_xp> argument.
     199 * The page is identified by <page_id> argument (page index in the file).
     200 * It can be executed by a thread running in any cluster, as it uses remote
    200201 * access primitives to scan the mapper.
    201202 * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the
     
    205206 * @ mapper_xp  : extended pointer on the mapper.
    206207 * @ page_id    : page index in file
    207  * @ returns extended pointer on page base if success / return XPTR_NULL if error.
     208 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error.
    208209 ******************************************************************************************/
    209210xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
     
    212213/*******************************************************************************************
    213214 * This function allows to read a single word in a mapper seen as and array of uint32_t.
    214  * It has bee designed to support remote access tho the FAT mapper of the FATFS.
     215 * It has been designed to support remote access to the FAT mapper of the FATFS.
    215216 * It can be called by any thread running in any cluster.
    216217 * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
     
    218219 *******************************************************************************************
    219220 * @ mapper_xp  : [in]  extended pointer on the mapper.
    220  * @ index          : [in]  32 bits word index in file.
     221 * @ word_id    : [in]  32 bits word index in file.
    221222 * @ p_value    : [out] local pointer on destination buffer.
    222223 * @ returns 0 if success / return -1 if error.
     
    234235 *******************************************************************************************
    235236 * @ mapper_xp  : [in]  extended pointer on the mapper.
    236  * @ index          : [in]  32 bits word index in file.
    237  * @ p_value    : [in]  value to be written.
     237 * @ word_id    : [in]  32 bits word index in file.
     238 * @ value      : [in]  value to be written.
    238239 * @ returns 0 if success / return -1 if error.
    239240 ******************************************************************************************/
     
    242243                              uint32_t   value );
    243244
     245/*******************************************************************************************
     246 * This debug function displays the content of a given page of a given mapper.
     247 * - the mapper is identified by the <mapper_xp> argument.
     248 * - the page is identified by the <page_id> argument.
     249 * - the number of bytes to display in page is defined by the <nbytes> argument.
     250 * The format is eigth (32 bits) words per line in hexadecimal.
     251 * It can be called by any thread running in any cluster.
     252 * In case of miss in mapper, it load the missing page from device to mapper.
     253 *******************************************************************************************
     254 * @ mapper_xp  : [in]  extended pointer on the mapper.
     255 * @ page_id    : [in]  page index in file.
     256 * @ nbytes     : [in]  number of bytes to display in the page.
     257 * @ string     : [in]  string printed in header.
     258 * @ returns 0 if success / return -1 if error.
     259 ******************************************************************************************/
     260error_t mapper_display_page( xptr_t     mapper_xp,
     261                             uint32_t   page_id,
     262                             uint32_t   nbytes,
     263                             char     * string );
     264
     265
    244266#endif /* _MAPPER_H_ */
  • trunk/kernel/mm/ppm.c

    r610 r611  
    210210
    211211#if DEBUG_PPM_ALLOC_PAGES
     212thread_t * this = CURRENT_THREAD;
    212213uint32_t cycle = (uint32_t)hal_get_cycles();
    213214if( DEBUG_PPM_ALLOC_PAGES < cycle )
    214 printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / cycle %d\n",
    215 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
     215printk("\n[%s] thread[%x,%x] enter for %d page(s) / cycle %d\n",
     216__FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle );
    216217#endif
    217218
    218219#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
    219220if( DEBUG_PPM_ALLOC_PAGES < cycle )
    220 ppm_print();
     221ppm_print("enter ppm_alloc_pages");
    221222#endif
    222223
    223224        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    224225
    225         assert( (order < CONFIG_PPM_MAX_ORDER) ,
    226     "illegal order argument = %x\n" , order );
     226// check order
     227assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
    227228
    228229        page_t * block = NULL; 
     
    250251cycle = (uint32_t)hal_get_cycles();
    251252if( DEBUG_PPM_ALLOC_PAGES < cycle )
    252 printk("\n[DBG] in %s : thread %x in process %x cannot allocate %d page(s) / cycle %d\n",
    253 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
     253printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) / cycle %d\n",
     254__FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle );
    254255#endif
    255256
     
    289290cycle = (uint32_t)hal_get_cycles();
    290291if( DEBUG_PPM_ALLOC_PAGES < cycle )
    291 printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn = %x / cycle %d\n",
    292 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     292printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x / cycle %d\n",
     293__FUNCTION__, this->process->pid, this->trdid,
    2932941<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     295#endif
     296
     297#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
     298if( DEBUG_PPM_ALLOC_PAGES < cycle )
     299ppm_print("exit ppm_alloc_pages");
    294300#endif
    295301
     
    307313uint32_t cycle = (uint32_t)hal_get_cycles();
    308314if( DEBUG_PPM_FREE_PAGES < cycle )
    309 printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / ppn %x / cycle %d\n",
    310 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     315printk("\n[%s] thread[%x,%x] enter for %d page(s) / ppn %x / cycle %d\n",
     316__FUNCTION__, this->process->pid, this->trdid,
    3113171<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    312318#endif
     
    314320#if(DEBUG_PPM_FREE_PAGES & 0x1)
    315321if( DEBUG_PPM_FREE_PAGES < cycle )
    316 ppm_print();
     322ppm_print("enter ppm_free_pages");
    317323#endif
    318324
     
    331337cycle = (uint32_t)hal_get_cycles();
    332338if( DEBUG_PPM_FREE_PAGES < cycle )
    333 printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn %x / cycle %d\n",
    334 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     339printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn %x / cycle %d\n",
     340__FUNCTION__, this->process->pid, this->trdid,
    3353411<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    336342#endif
    337343
     344#if(DEBUG_PPM_FREE_PAGES & 0x1)
     345if( DEBUG_PPM_FREE_PAGES < cycle )
     346ppm_print("exit ppm_free_pages");
     347#endif
     348
    338349}  // end ppm_free_pages()
    339350
    340 //////////////////////
    341 void ppm_print( void )
     351///////////////////////////////
     352void ppm_print( char * string )
    342353{
    343354        uint32_t       order;
     
    350361        busylock_acquire( &ppm->free_lock );
    351362
    352         printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );
     363        printk("\n***  PPM in cluster %x / %s / %d pages ***\n",
     364    local_cxy , string, ppm->pages_nr );
    353365
    354366        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
     
    413425    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
    414426           
    415 // printk("\n@@@ %s : before dirty_list lock aquire\n", __FUNCTION__ );
    416 
    417427        // lock the remote PPM dirty_list
    418428        remote_queuelock_acquire( dirty_lock_xp );
    419429
    420 // printk("\n@@@ %s : after dirty_list lock aquire\n", __FUNCTION__ );
    421 
    422430    // lock the remote page
    423431    remote_busylock_acquire( page_lock_xp );
    424 
    425 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );
    426432
    427433    // get remote page flags
     
    466472        }
    467473
    468 // printk("\n@@@ %s : before page lock release\n", __FUNCTION__ );
    469 
    470474    // unlock the remote page
    471475    remote_busylock_release( page_lock_xp );
    472476
    473 // printk("\n@@@ %s : after page lock release\n", __FUNCTION__ );
    474 
    475477        // unlock the remote PPM dirty_list
    476478        remote_queuelock_release( dirty_lock_xp );
    477 
    478 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );
    479479
    480480        return done;
  • trunk/kernel/mm/ppm.h

    r610 r611  
    8383 * This is the low-level physical pages allocation function.
    8484 * It allocates N contiguous physical pages. N is a power of 2.
    85  * In normal use, you don't need to call it directly, as the recommended way to get
     85 * In normal use, it should not be called directly, as the recommended way to get
    8686 * physical pages is to call the generic allocator defined in kmem.h.
    8787 *****************************************************************************************
    8888 * @ order        : ln2( number of 4 Kbytes pages)
    8989 * @ returns a pointer on the page descriptor if success / NULL otherwise
    90  **************************************************************************************à))**/
     90 ****************************************************************************************/
    9191page_t * ppm_alloc_pages( uint32_t order );
    9292
     
    174174/*****************************************************************************************
    175175 * This function prints the PPM allocator status in the calling thread cluster.
    176  ****************************************************************************************/
    177 void ppm_print( void );
     176 *****************************************************************************************
     177 * string   : character string printed in header
     178 ****************************************************************************************/
     179void ppm_print( char * string );
    178180
    179181/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r610 r611  
    11/*
    2  * vmm.c - virtual memory manager related operations interface.
     2 * vmm.c - virtual memory manager related operations definition.
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
     
    254254}  // vmm_display()
    255255
    256 ///////////////////////////////////
    257 void vmm_vseg_attach( vmm_t  * vmm,
    258                       vseg_t * vseg )
     256//////////////////////////////////////////
     257void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
     258                             vseg_t * vseg )
    259259{
    260260    // build extended pointer on rwlock protecting VSL
     
    275275}
    276276
    277 ///////////////////////////////////
    278 void vmm_vseg_detach( vmm_t  * vmm,
    279                       vseg_t * vseg )
     277////////////////////////////////////////////
     278void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
     279                               vseg_t * vseg )
    280280{
     281    // get vseg type
     282    uint32_t type = vseg->type;
     283
    281284    // build extended pointer on rwlock protecting VSL
    282285    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     
    288291    vseg->vmm = NULL;
    289292
    290     // remove vseg from vmm list
     293    // remove vseg from VSL
    291294    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    292295
    293296    // release rwlock in write mode
    294297    remote_rwlock_wr_release( lock_xp );
    295 }
     298
     299    // release the stack slot to VMM stack allocator if STACK type
     300    if( type == VSEG_TYPE_STACK )
     301    {
     302        // get pointer on stack allocator
     303        stack_mgr_t * mgr = &vmm->stack_mgr;
     304
     305        // compute slot index
     306        uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
     307
     308        // update stacks_bitmap
     309        busylock_acquire( &mgr->lock );
     310        bitmap_clear( &mgr->bitmap , index );
     311        busylock_release( &mgr->lock );
     312    }
     313
     314    // release the vseg to VMM mmap allocator if MMAP type
     315    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
     316    {
     317        // get pointer on mmap allocator
     318        mmap_mgr_t * mgr = &vmm->mmap_mgr;
     319
     320        // compute zombi_list index
     321        uint32_t index = bits_log2( vseg->vpn_size );
     322
     323        // update zombi_list
     324        busylock_acquire( &mgr->lock );
     325        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
     326        busylock_release( &mgr->lock );
     327    }
     328
     329    // release physical memory allocated for vseg descriptor if no MMAP type
     330    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
     331    {
     332        vseg_free( vseg );
     333    }
     334
     335}  // end vmm_detach_vseg_from_vsl()
    296336
    297337////////////////////////////////////////////////
     
    616656
    617657            // register child vseg in child VSL
    618             vmm_vseg_attach( child_vmm , child_vseg );
     658            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
    619659
    620660#if DEBUG_VMM_FORK_COPY
     
    759799    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    760800
    761     // remove all user vsegs registered in VSL
     801    // scan the VSL to delete all registered vsegs
     802    // (don't use a FOREACH for item deletion in xlist)
    762803        while( !xlist_is_empty( root_xp ) )
    763804        {
     
    766807        vseg    = GET_PTR( vseg_xp );
    767808
    768         // unmap and release physical pages
    769         vmm_unmap_vseg( process , vseg );
    770 
    771         // remove vseg from VSL
    772                 vmm_vseg_detach( vmm , vseg );
    773 
    774         // release memory allocated to vseg descriptor
    775         vseg_free( vseg );
     809        // delete vseg and release physical pages
     810        vmm_delete_vseg( process->pid , vseg->min );
    776811
    777812#if( DEBUG_VMM_DESTROY & 1 )
    778813if( DEBUG_VMM_DESTROY < cycle )
    779 printk("\n[%s] %s vseg released / vpn_base %x / vpn_size %d\n",
     814printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
    780815__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    781816#endif
     
    796831__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    797832#endif
    798                     vmm_vseg_detach( vmm , vseg );
     833            // clean vseg descriptor
     834            vseg->vmm = NULL;
     835
     836            // remove vseg from  xlist
     837            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     838
     839                    // release vseg descriptor
    799840            vseg_free( vseg );
    800841
     
    10791120
    10801121    // attach vseg to VSL
    1081         vmm_vseg_attach( vmm , vseg );
     1122        vmm_attach_vseg_to_vsl( vmm , vseg );
    10821123
    10831124#if DEBUG_VMM_CREATE_VSEG
     
    10921133}  // vmm_create_vseg()
    10931134
    1094 /////////////////////////////////////
    1095 void vmm_remove_vseg( vseg_t * vseg )
     1135///////////////////////////////////
     1136void vmm_delete_vseg( pid_t    pid,
     1137                      intptr_t vaddr )
    10961138{
    1097     // get pointers on calling process and VMM
    1098     thread_t   * this    = CURRENT_THREAD;
    1099     vmm_t      * vmm     = &this->process->vmm;
    1100     uint32_t     type    = vseg->type;
    1101 
    1102     // detach vseg from VSL
    1103         vmm_vseg_detach( vmm , vseg );
    1104 
    1105     // release the stack slot to VMM stack allocator if STACK type
    1106     if( type == VSEG_TYPE_STACK )
    1107     {
    1108         // get pointer on stack allocator
    1109         stack_mgr_t * mgr = &vmm->stack_mgr;
    1110 
    1111         // compute slot index
    1112         uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
    1113 
    1114         // update stacks_bitmap
    1115         busylock_acquire( &mgr->lock );
    1116         bitmap_clear( &mgr->bitmap , index );
    1117         busylock_release( &mgr->lock );
    1118     }
    1119 
    1120     // release the vseg to VMM mmap allocator if MMAP type
    1121     if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
    1122     {
    1123         // get pointer on mmap allocator
    1124         mmap_mgr_t * mgr = &vmm->mmap_mgr;
    1125 
    1126         // compute zombi_list index
    1127         uint32_t index = bits_log2( vseg->vpn_size );
    1128 
    1129         // update zombi_list
    1130         busylock_acquire( &mgr->lock );
    1131         list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
    1132         busylock_release( &mgr->lock );
    1133     }
    1134 
    1135     // release physical memory allocated for vseg descriptor if no MMAP type
    1136     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
    1137     {
    1138         vseg_free( vseg );
    1139     }
    1140 }  // end vmm_remove_vseg()
    1141 
    1142 /////////////////////////////////////////
    1143 void vmm_unmap_vseg( process_t * process,
    1144                      vseg_t    * vseg )
    1145 {
     1139    process_t * process;    // local pointer on local process
     1140    vmm_t     * vmm;        // local pointer on local process VMM
     1141    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
     1142    gpt_t     * gpt;        // local pointer on local process GPT
    11461143    vpn_t       vpn;        // VPN of current PTE
    11471144    vpn_t       vpn_min;    // VPN of first PTE
     
    11571154    uint32_t    forks;      // actual number of pendinf forks
    11581155
    1159 #if DEBUG_VMM_UNMAP_VSEG
     1156#if DEBUG_VMM_DELETE_VSEG
    11601157uint32_t   cycle = (uint32_t)hal_get_cycles();
    11611158thread_t * this  = CURRENT_THREAD;
    1162 if( DEBUG_VMM_UNMAP_VSEG < cycle )
    1163 printk("\n[%s] thread[%x,%x] enter / process %x / vseg %s / base %x / cycle %d\n",
    1164 __FUNCTION__, this->process->pid, this->trdid, process->pid,
    1165 vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
    1166 #endif
    1167 
    1168     // get pointer on local GPT
    1169     gpt_t     * gpt = &process->vmm.gpt;
    1170 
    1171     // loop on pages in vseg
     1159if( DEBUG_VMM_DELETE_VSEG < cycle )
     1160printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
     1161__FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
     1162#endif
     1163
     1164    // get local pointer on local process descriptor
     1165    process = cluster_get_local_process_from_pid( pid );
     1166
     1167    if( process == NULL ) return;
     1168
     1169    // get pointers on local process VMM and GPT
     1170    vmm = &process->vmm;
     1171    gpt = &process->vmm.gpt;
     1172
     1173    // get local pointer on vseg containing vaddr
     1174    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
     1175
     1176    if( vseg == NULL ) return;
     1177
     1178    // loop to invalidate all vseg PTEs in GPT
    11721179    vpn_min = vseg->vpn_base;
    11731180    vpn_max = vpn_min + vseg->vpn_size;
     
    11801187        {
    11811188
    1182 #if( DEBUG_VMM_UNMAP_VSEG & 1 )
    1183 if( DEBUG_VMM_UNMAP_VSEG < cycle )
    1184 printk("- vpn %x / ppn %x\n" , vpn , ppn );
     1189#if( DEBUG_VMM_DELETE_VSEG & 1 )
     1190if( DEBUG_VMM_DELETE_VSEG < cycle )
     1191printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
    11851192#endif
    11861193
     
    12251232                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
    12261233                    }
     1234
     1235#if( DEBUG_VMM_DELETE_VSEG & 1 )
     1236if( DEBUG_VMM_DELETE_VSEG < cycle )
     1237printk("- release ppn %x\n", ppn );
     1238#endif
    12271239                }
    12281240            }
     
    12301242    }
    12311243
    1232 #if DEBUG_VMM_UNMAP_VSEG
     1244    // remove vseg from VSL and release vseg descriptor (if not MMAP)
     1245    vmm_detach_vseg_from_vsl( vmm , vseg );
     1246
     1247#if DEBUG_VMM_DELETE_VSEG
    12331248cycle = (uint32_t)hal_get_cycles();
    1234 if( DEBUG_VMM_UNMAP_VSEG < cycle )
     1249if( DEBUG_VMM_DELETE_VSEG < cycle )
    12351250printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
    1236 __FUNCTION__, this->process->pid, this->trdid, process->pid,
    1237 vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
    1238 #endif
    1239 
    1240 }  // end vmm_unmap_vseg()
    1241 
    1242 //////////////////////////////////////////////////////////////////////////////////////////
    1243 // This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(),
    1244 // and vmm_resize_vseg() functions.  It scan the local VSL to find the unique vseg
    1245 // containing a given virtual address.
    1246 //////////////////////////////////////////////////////////////////////////////////////////
    1247 // @ vmm     : pointer on the process VMM.
    1248 // @ vaddr   : virtual address.
    1249 // @ return vseg pointer if success / return NULL if not found.
    1250 //////////////////////////////////////////////////////////////////////////////////////////
    1251 static vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
    1252                                      intptr_t   vaddr )
     1251__FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
     1252#endif
     1253
     1254}  // end vmm_delete_vseg()
     1255
     1256/////////////////////////////////////////////
     1257vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
     1258                              intptr_t   vaddr )
    12531259{
    12541260    xptr_t   iter_xp;
     
    13101316        remote_rwlock_wr_acquire( lock_xp );
    13111317
    1312         if( (vseg->min > addr_min) || (vseg->max < addr_max) )   // region not included in vseg
    1313     {
    1314         error = EINVAL;
    1315     }
    1316         else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be removed
    1317     {
    1318         vmm_remove_vseg( vseg );
     1318        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
     1319    {
     1320        error = -1;
     1321    }
     1322        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
     1323    {
     1324        vmm_delete_vseg( process->pid , vseg->min );
    13191325        error = 0;
    13201326    }
    1321         else if( vseg->min == addr_min )                         // vseg must be resized
     1327        else if( vseg->min == addr_min )                               // vseg must be resized
    13221328    {
    13231329        // update vseg base address
     
    13311337        error = 0;
    13321338    }
    1333         else if( vseg->max == addr_max )                          // vseg must be resized
     1339        else if( vseg->max == addr_max )                              // vseg must be resized
    13341340    {
    13351341        // update vseg max address
     
    13431349        error = 0;
    13441350    }
    1345     else                                                      // vseg cut in three regions
     1351    else                                                          // vseg cut in three regions
    13461352    {
    13471353        // resize existing vseg
     
    14151421        vseg_init_from_ref( vseg , vseg_xp );
    14161422
    1417         // register local vseg in local VMM
    1418         vmm_vseg_attach( vmm , vseg );
     1423        // register local vseg in local VSL
     1424        vmm_attach_vseg_to_vsl( vmm , vseg );
    14191425    }   
    14201426
  • trunk/kernel/mm/vmm.h

    r610 r611  
    3838
    3939struct process_s;
     40struct vseg_s;
    4041
    4142/*********************************************************************************************
    4243 * This structure defines the STACK allocator used by the VMM to dynamically handle
    43  * a STACK vseg requested or released by an user process.
    44  * This allocator handles a fixed size array of fixed size slots in the STACK zone.
     44 * vseg allocation or release requests for an user thread.
     45 * This allocator handles a fixed size array of fixed size slots in STACK zone of user space.
    4546 * The stack size and the number of slots are defined by the CONFIG_VMM_STACK_SIZE, and
    4647 * CONFIG_VMM_STACK_BASE parameters.
    47  * Each slot can contain one user stack vseg. The first page in the slot is not allocated
    48  * to detect stack overflow.
     48 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not
     49 * mapped to detect stack overflow.
    4950 * The slot index can be computed form the slot base address, and reversely.
    5051 * All allocation / release operations are registered in the stack_bitmap, that completely
    51  * define the STACK zone state.
     52 * define the STACK zone status.
    5253 ********************************************************************************************/
    5354
     
    159160
    160161/*********************************************************************************************
    161  * This function adds a vseg descriptor in the VSL of a given VMM,
    162  * and updates the vmm field in the vseg descriptor.
    163  * It takes the lock protecting VSL.
    164  *********************************************************************************************
    165  * @ vmm       : pointer on the VMM
    166  * @ vseg      : pointer on the vseg descriptor
    167  ********************************************************************************************/
    168 void vmm_vseg_attach( struct vmm_s  * vmm,
    169                       vseg_t        * vseg );
    170 
    171 /*********************************************************************************************
    172  * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
    173  * and updates the vmm field in the vseg descriptor. No memory is released.
    174  * It takes the lock protecting VSL.
    175  *********************************************************************************************
    176  * @ vmm       : pointer on the VMM
    177  * @ vseg      : pointer on the vseg descriptor
    178  ********************************************************************************************/
    179 void vmm_vseg_detach( struct vmm_s  * vmm,
    180                       vseg_t        * vseg );
    181 
    182 /*********************************************************************************************
    183162 * This function is called by the process_make_fork() function. It partially copies
    184163 * the content of a remote parent process VMM to the local child process VMM:
     
    235214
    236215/*********************************************************************************************
    237  * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the
    238  * <process> and <vseg> arguments. It can be used for any type of vseg.
    239  * If this function is executed in the reference cluster, it handles for each referenced
    240  * physical pages the pending forks counter :
    241  * - if counter is non-zero, it decrements it.
    242  * - if counter is zero, it releases the physical page to local kmem allocator.
    243  *********************************************************************************************
    244  * @ process  : pointer on process descriptor.
    245  * @ vseg     : pointer on the vseg to be unmapped.
    246  ********************************************************************************************/
    247 void vmm_unmap_vseg( struct process_s * process,
    248                      vseg_t           * vseg );
    249 
    250 /*********************************************************************************************
    251216 * This function deletes, in the local cluster, all vsegs registered in the VSL
    252217 * of the process identified by the <process> argument. For each vseg:
     
    254219 * - it removes the vseg from the local VSL.
    255220 * - it releases the memory allocated to the local vseg descriptors.
    256  * Finally, it releases the memory allocated to the GPT itself.
     221 * - it releases the memory allocated to the GPT itself.
    257222 *********************************************************************************************
    258223 * @ process   : pointer on process descriptor.
     
    304269
    305270/*********************************************************************************************
    306  * This function removes a vseg identified by it's pointer from the VMM of the calling process.
    307  * - If the vseg has not the STACK or MMAP type, it is removed from the vsegs list,
    308  *   and the physical memory allocated to vseg descriptor is released to KMEM.
    309  * - If the vseg has the STACK type, it is removed from the vsegs list, the physical memory
    310  *   allocated to vseg descriptor is released to KMEM, and the stack slot is returned to the
    311  *   VMM STACK allocator.
    312  * - If the vseg has the MMAP type, it is removed from the vsegs list and is registered
    313  *   in the zombi_list of the VMM MMAP allocator for future reuse. The physical memory
    314  *   allocated to vseg descriptor is NOT released to KMEM.
    315  *********************************************************************************************
    316  * @ vseg      : pointer on vseg to be removed.
    317  ********************************************************************************************/
    318 void vmm_remove_vseg( vseg_t * vseg );
     271 * This function removes from the local VMM of a process descriptor identified by the <pid>
     272 * argument a local vseg identified by its base address <vaddr> in user space.
     273 * It can be used for any type of vseg, but must be called by a local thread.
     274 * Use the RPC_VMM_DELETE_VSEG if the client thread is not local.
     275 * It does nothing if the process is not registered in the local cluster.
     276 * It does nothing if the vseg is not registered in the local process VSL.
     277 * - It removes from the local GPT all registered PTEs. If it is executed in the reference
     278 *   cluster, it releases the referenced physical pages, to the relevant kmem allocator,
     279 *   depending on vseg type and the pending forks counter.
     280 * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP.
     281 *********************************************************************************************
     282 * @ pid      : process identifier.
     283 * @ vaddr    : vseg base address in user space.
     284 ********************************************************************************************/
     285void vmm_delete_vseg( pid_t    pid,
     286                      intptr_t vaddr );
     287
     288/*********************************************************************************************
      289 * This function inserts a new <vseg> descriptor in the VSL identified by the <vmm> argument,
      290 * and updates the vmm field in the vseg descriptor.
     291 * It takes the lock protecting VSL.
     292 *********************************************************************************************
     293 * @ vmm       : local pointer on local VMM.
     294 * @ vseg      : local pointer on local vseg descriptor.
     295 ********************************************************************************************/
     296void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
     297                             vseg_t * vseg );
     298
     299/*********************************************************************************************
     300 * This function removes a vseg identified by the <vseg> argument from the local VSL
      301 * identified by the <vmm> argument and releases the memory allocated to vseg descriptor,
     302 * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE).
     303 * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL,
     304 *   and vseg descriptor is released.
     305 * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released,
     306 *   and the stack slot is returned to the local VMM_STACK allocator.
     307 * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list
     308 *   of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released.
     309 *********************************************************************************************
     310 * @ vmm       : local pointer on local VMM.
     311 * @ vseg      : local pointer on local vseg to be removed.
     312 ********************************************************************************************/
     313void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
     314                               vseg_t * vseg );
    319315
    320316/*********************************************************************************************
     
    338334
    339335/*********************************************************************************************
      336 * This low-level function scans the local VSL in <vmm> to find the unique vseg containing
     337 * a given virtual address <vaddr>.
     338 * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions.
     339 *********************************************************************************************
     340 * @ vmm     : pointer on the process VMM.
     341 * @ vaddr   : virtual address.
     342 * @ return vseg pointer if success / return NULL if not found.
     343 ********************************************************************************************/
     344struct vseg_s * vmm_vseg_from_vaddr( vmm_t    * vmm,
     345                                     intptr_t   vaddr );
     346
     347/*********************************************************************************************
    340348 * This function checks that a given virtual address is contained in a registered vseg.
    341349 * It can be called by any thread running in any cluster:
     
    344352 *   register it in local VMM and returns the local vseg pointer, if success.
    345353 * - it returns an user error if the vseg is missing in the reference VMM, or if there is
    346  *   not enough memory for a new vseg descriptor in cluster containing the calling thread.
     354 *   not enough memory for a new vseg descriptor in the calling thread cluster.
    347355 *********************************************************************************************
    348356 * @ process   : [in] pointer on process descriptor
     
    350358 * @ vseg      : [out] local pointer on local vseg
    351359 * @ returns 0 if success / returns -1 if user error (out of segment).
    352  *********************************************************************************************/
     360 ********************************************************************************************/
    353361error_t vmm_get_vseg( struct process_s  * process,
    354362                      intptr_t            vaddr,
  • trunk/kernel/mm/vseg.h

    r595 r611  
    7171typedef struct vseg_s
    7272{
    73     xlist_entry_t     xlist;        /*! all vsegs in same VSL (or same zombi list)        */
     73    xlist_entry_t     xlist;        /*! all vsegs in same VSL                             */
    7474    list_entry_t      zlist;        /*! all vsegs in same zombi list                      */
    7575    struct vmm_s    * vmm;          /*! pointer on associated VM manager                  */
  • trunk/kernel/syscalls/shared_include/shared_almos.h

    r580 r611  
    5252    DISPLAY_DQDT              = 7,
    5353    DISPLAY_BUSYLOCKS         = 8,
     54    DISPLAY_MAPPER            = 9,
    5455}
    5556display_type_t;
  • trunk/kernel/syscalls/shared_include/shared_dirent.h

    r445 r611  
    2626
    2727/*******************************************************************************************
    28  * These two structure defines the informations returned to user by the opendir()
    29  * function, used by the readdir() function, and released by the closedir() function.
    30  * - "DIR" describes the complete directory.
    31  * - "dirent" describes one directory entry.
     28 * This enum defines the possible types for a dirent inode in a dirent structure.
     29 *
     30 * WARNING : these types must be kept consistent with inode types in <vfs.h> file.
     31 *           and with types in <shared_stat.h> file.
    3232 ******************************************************************************************/
    3333
    34 #define DIRENT_NAME_MAX_LENGTH  56
    35 #define DIRENT_MAX_NUMBER       63
     34typedef enum
     35{
     36    DT_REG     = 0,                     /*! regular file                                  */
     37    DT_DIR     = 1,                     /*! directory                                     */
     38    DT_FIFO    = 2,                     /*! named pipe (FIFO)                             */
     39    DT_PIPE    = 3,                     /*! anonymous pipe                                */
     40    DT_SOCK    = 4,                     /*! socket                                        */
     41    DT_CHR     = 5,                     /*! character device                              */
     42    DT_BLK     = 6,                     /*! block device                                  */
     43    DT_LNK     = 7,                     /*! symbolic link                                 */
     44    DT_UNKNOWN = 8,                     /*! undetermined type                             */
     45}
     46dirent_type_t;
     47
     48/*******************************************************************************************
     49 * This defines the actual ALMOS-MKH implementation of the DIR user type.
     50 ******************************************************************************************/
     51
     52typedef unsigned int   DIR;
     53
     54/*******************************************************************************************
      55 * This structure defines the information returned to the user by the readdir() syscall.
     56 *
     57 * WARNING: sizeof(dirent) must be 64 bytes.
     58 ******************************************************************************************/
    3659
    3760struct dirent
    3861{
    39     unsigned int   inum;                                /*! inode identifier              */
    40     unsigned int   type;                                /*! inode type                    */
    41     char           name[DIRENT_NAME_MAX_LENGTH];        /*! directory entry name          */
     62    int           d_ino;                                  /*! inode identifier            */
     63    int           d_type;                                 /*! inode type                  */
     64    char          d_name[48];                             /*! dentry name                 */
     65    char          padding[64 - 48 - (2*sizeof(int))];
    4266};
    4367
    44 typedef struct user_directory
    45 {
    46     struct dirent   entry[DIRENT_MAX_NUMBER];
    47     unsigned int    current;
    48 }
    49 DIR;
    50 
    5168#endif
  • trunk/kernel/syscalls/shared_include/shared_stat.h

    r594 r611  
    3030 *****************************************************************************************/
    3131
    32 typedef struct stat
     32struct stat
    3333{
    3434        unsigned int    st_dev;     /*! ID of device containing file                         */
     
    4242        unsigned int    st_blksize; /*! blocksize for file system I/O                        */
    4343        unsigned int    st_blocks;  /*! number of allocated blocks                           */
    44 }
    45 stat_t;
     44};
    4645
    4746/******************************************************************************************
     
    5251 *
    5352 * WARNING : these macros must be kept consistent with inode types in <vfs.h> file.
     53 *           and with types in <dirent.h> file.
    5454 *****************************************************************************************/
    5555
     
    6060#define  S_ISSOCK(x)  ((((x)>>16) & 0xF) == 4)    /*! it is a socket                     */
    6161#define  S_ISCHR(x)   ((((x)>>16) & 0xF) == 5)    /*! it is a character device           */
    62 #define  S_ISLNK(x)   ((((x)>>16) & 0xF) == 6)    /*! it is a symbolic link              */
     62#define  S_ISBLK(x)   ((((x)>>16) & 0xF) == 6)    /*! it is a block device               */
     63#define  S_ISLNK(x)   ((((x)>>16) & 0xF) == 7)    /*! it is a symbolic link              */
    6364
    6465#endif /* _STAT_H_ */
  • trunk/kernel/syscalls/sys_closedir.c

    r473 r611  
    11/*
    2  * sys_closedir.c - Close an open directory.
     2 * sys_closedir.c - Close an open VFS directory.
    33 *
    4  * Author    Alain Greiner  (2016, 2017)
     4 * Author    Alain Greiner  (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2222 */
    2323
     24#include <kernel_config.h>
    2425#include <hal_kernel_types.h>
    2526#include <vfs.h>
     
    2728#include <thread.h>
    2829#include <process.h>
     30#include <remote_dir.h>
    2931#include <errno.h>
    3032#include <syscalls.h>
     
    3436int sys_closedir ( DIR * dirp )
    3537{
    36     printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__, dirp );
    37     return -1;
     38    xptr_t         dir_xp;       // extended pointer on remote_dir_t structure
     39
     40        thread_t  * this    = CURRENT_THREAD;  // client thread
     41        process_t * process = this->process;   // client process
     42
     43#if (DEBUG_SYS_CLOSEDIR || CONFIG_INSTRUMENTATION_SYSCALLS)
     44uint64_t     tm_start = hal_get_cycles();
     45#endif
     46
     47#if DEBUG_SYS_CLOSEDIR
     48if( DEBUG_SYS_CLOSEDIR < tm_start )
     49printk("\n[%s] thread[%x,%x] enter for DIR <%x> / cycle %d\n",
     50__FUNCTION__, process->pid, this->trdid, dirp, (uint32_t)tm_start );
     51#endif
     52 
     53    // get extended pointer on kernel remote_dir_t structure from dirp
     54    dir_xp  = remote_dir_from_ident( (intptr_t)dirp );
     55
     56    if( dir_xp == XPTR_NULL )
     57        {
     58
     59#if DEBUG_SYSCALLS_ERROR
     60printk("\n[ERROR] in %s / thread[%x,%x] : DIR pointer %x not registered\n",
     61__FUNCTION__ , process->pid , this->trdid, dirp );
     62#endif
     63                this->errno = EINVAL;
     64                return -1;
     65        }       
     66
     67    // delete kernel remote_dir_t structure
     68    remote_dir_destroy( dir_xp );
     69
     70    hal_fence();
     71
     72#if (DEBUG_SYS_CLOSEDIR || CONFIG_INSTRUMENTATION_SYSCALLS)
     73uint64_t     tm_end = hal_get_cycles();
     74#endif
     75
     76#if DEBUG_SYS_CLOSEDIR
     77if( DEBUG_SYS_CLOSEDIR < tm_end )
     78printk("\n[%s] thread[%x,%x] exit for DIR <%x> / cycle %d\n",
     79__FUNCTION__, process->pid, this->trdid, dirp, (uint32_t)tm_end );
     80#endif
     81 
     82#if CONFIG_INSTRUMENTATION_SYSCALLS
     83hal_atomic_add( &syscalls_cumul_cost[SYS_CLOSEDIR] , tm_end - tm_start );
     84hal_atomic_add( &syscalls_occurences[SYS_CLOSEDIR] , 1 );
     85#endif
     86
     87    return 0;
     88
    3889}  // end sys_closedir()
  • trunk/kernel/syscalls/sys_display.c

    r594 r611  
    3131#include <string.h>
    3232#include <shared_syscalls.h>
     33#include <vfs.h>
     34#include <mapper.h>
    3335
    3436#include <syscalls.h>
     
    5658int sys_display( reg_t  type,
    5759                 reg_t  arg0,
    58                  reg_t  arg1 )
     60                 reg_t  arg1,
     61                 reg_t  arg2 )
    5962{
    6063
     
    278281        thread_display_busylocks( thread_xp );
    279282    }
     283    /////////////////////////////////
     284    else if( type == DISPLAY_MAPPER )
     285    {
     286        xptr_t        root_inode_xp;
     287        xptr_t        inode_xp;
     288        cxy_t         inode_cxy;
     289        vfs_inode_t * inode_ptr;
     290        xptr_t        mapper_xp;
     291        mapper_t    * mapper_ptr;
     292
     293        char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
     294
     295        char     * path    = (char *)arg0;
     296        uint32_t   page_id = (uint32_t)arg1;
     297        uint32_t   nbytes  = (uint32_t)arg2;
     298
     299        // check pathname length
     300        if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH )
     301        {
     302
     303#if DEBUG_SYSCALLS_ERROR
     304printk("\n[ERROR] in %s for MAPPER : pathname too long\n",
     305 __FUNCTION__ );
     306#endif
     307            this->errno = ENFILE;
     308            return -1;
     309        }
     310
     311        // copy pathname in kernel space
     312        hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH );
     313
     314        // compute root inode for pathname
     315        if( kbuf[0] == '/' )                        // absolute path
     316        {
     317            // use extended pointer on VFS root inode
     318            root_inode_xp = process->vfs_root_xp;
     319        }
     320        else                                        // relative path
     321        {
     322            // get cluster and local pointer on reference process
     323            xptr_t      ref_xp  = process->ref_xp;
     324            process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     325            cxy_t       ref_cxy = GET_CXY( ref_xp );
     326
     327            // use extended pointer on CWD inode
     328            root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) );
     329        }
     330
     331        // get extended pointer on target inode
     332        error = vfs_lookup( root_inode_xp,
     333                            kbuf,
     334                            0,
     335                            &inode_xp,
     336                            NULL );
     337        if( error )
     338            {
     339
     340#if DEBUG_SYSCALLS_ERROR
     341printk("\n[ERROR] in %s for MAPPER : cannot found inode <%s>\n",
     342__FUNCTION__ , kbuf );
     343#endif
     344                    this->errno = ENFILE;
     345                    return -1;
     346            }
     347   
     348        // get target inode cluster and local pointer
     349        inode_cxy = GET_CXY( inode_xp );
     350        inode_ptr = GET_PTR( inode_xp );
     351
     352        // get extended pointer on target mapper
     353        mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
     354        mapper_xp  = XPTR( inode_cxy , mapper_ptr );
     355
     356        // display mapper
     357        error = mapper_display_page( mapper_xp , page_id , nbytes , kbuf );
     358
     359        if( error )
     360            {
     361
     362#if DEBUG_SYSCALLS_ERROR
     363printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n",
     364__FUNCTION__ , page_id );
     365#endif
     366                    this->errno = ENFILE;
     367                    return -1;
     368            }
     369    }
    280370    ////
    281371    else
  • trunk/kernel/syscalls/sys_mmap.c

    r594 r611  
    119119    // test mmap type : can be FILE / ANON / REMOTE
    120120
    121     if( (map_anon == false) && (map_remote == false) )   // FILE
     121    /////////////////////////////////////////////////////////// MAP_FILE
     122    if( (map_anon == false) && (map_remote == false) )   
    122123    {
    123124
     
    217218        vseg_cxy  = file_cxy;
    218219    }
    219     else if ( map_anon )                                 // MAP_ANON
     220    ///////////////////////////////////////////////////////// MAP_ANON
     221    else if ( map_anon )                                 
    220222    {
    221223        mapper_xp = XPTR_NULL;
     
    230232
    231233    }
    232     else                                                 // MAP_REMOTE
     234    /////////////////////////////////////////////////////// MAP_REMOTE
     235    else                                                 
    233236    {
    234237        mapper_xp = XPTR_NULL;
  • trunk/kernel/syscalls/sys_opendir.c

    r610 r611  
    11/*
    2  * sys_opendir.c - open a directory.
     2 * sys_opendir.c - Open a VFS directory.
    33 *
    44 * Author        Alain Greiner (2016,2017,2018)
     
    2222 */
    2323
     24#include <kernel_config.h>
    2425#include <hal_kernel_types.h>
    2526#include <hal_uspace.h>
    2627#include <thread.h>
    2728#include <process.h>
     29#include <remote_dir.h>
    2830#include <printk.h>
    2931#include <errno.h>
     32#include <vseg.h>
    3033#include <vfs.h>
    3134#include <syscalls.h>
     
    3639                  DIR  ** dirp )
    3740{
    38     error_t       error;
    39     vseg_t      * vseg;                   // for user space checking
    40     xptr_t        root_inode_xp;          // extended pointer on path root inode
    41 
     41    error_t        error;
     42    xptr_t         root_inode_xp;          // extended pointer on path root inode
     43    xptr_t         inode_xp;               // extended pointer on directory inode
     44    vfs_inode_t  * inode_ptr;              // local pointer on directory inode
     45    cxy_t          inode_cxy;              // directory inode cluster
     46    uint32_t       inode_type;             // to check directory inode type
     47    xptr_t         dir_xp;                 // extended pointer on remote_dir_t
     48    remote_dir_t * dir_ptr;                // local pointer on remote_dir_t
     49    cxy_t          dir_cxy;                // remote_dir_t cluster identifier
     50    vseg_t       * vseg;                   // for user space checking
     51    intptr_t       ident;                  // dirent array pointer in user space                 
    4252    char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
    4353       
    44         thread_t  * this    = CURRENT_THREAD;
    45         process_t * process = this->process;
     54        thread_t     * this    = CURRENT_THREAD;  // client thread
     55        process_t    * process = this->process;   // client process
    4656
    4757#if (DEBUG_SYS_OPENDIR || CONFIG_INSTRUMENTATION_SYSCALLS)
     
    8595#endif
    8696
    87     // compute root inode for path
     97    // compute root inode for pathname
    8898    if( kbuf[0] == '/' )                        // absolute path
    8999    {
     
    102112    }
    103113
    104 /*
    105     // call the relevant VFS function ???
    106     error = vfs_opendir( root_inode_xp,
    107                          kbuf );
     114    // get extended pointer on directory inode
     115    error = vfs_lookup( root_inode_xp,
     116                        kbuf,
     117                        0,
     118                        &inode_xp,
     119                        NULL );
    108120    if( error )
    109121        {
    110122
    111123#if DEBUG_SYSCALLS_ERROR
    112 printk("\n[ERROR] in %s / thread[%x,%x] : cannot open directory <%s>\n",
    113 __FUNCTION__ , process->pid , this->trdid , pathname );
     124printk("\n[ERROR] in %s / thread[%x,%x] : cannot found directory <%s>\n",
     125__FUNCTION__ , process->pid , this->trdid , kbuf );
    114126#endif
    115127                this->errno = ENFILE;
     
    117129        }
    118130   
    119     // copy to user space ???
    120 */
     131    // check inode type
     132    inode_ptr  = GET_PTR( inode_xp );
     133    inode_cxy  = GET_CXY( inode_xp );
     134    inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) );
     135
     136    if( inode_type != INODE_TYPE_DIR )
     137        {
     138
     139#if DEBUG_SYSCALLS_ERROR
     140printk("\n[ERROR] in %s / thread[%x,%x] : cannot found directory <%s>\n",
     141__FUNCTION__ , process->pid , this->trdid , kbuf );
     142#endif
     143                this->errno = ENFILE;
     144                return -1;
     145        }
     146   
     147    // allocate, initialize, and register a new remote_dir_t structure
     148    // in the calling process reference cluster
     149    dir_xp  = remote_dir_create( inode_xp );
     150    dir_ptr = GET_PTR( dir_xp );
     151    dir_cxy = GET_CXY( dir_xp );
     152
     153    if( dir_xp == XPTR_NULL )
     154        {
     155
     156#if DEBUG_SYSCALLS_ERROR
     157printk("\n[ERROR] in %s / thread[%x,%x] : cannot create remote_dir for <%s>\n",
     158__FUNCTION__ , process->pid , this->trdid , kbuf );
     159#endif
     160                this->errno = ENFILE;
     161                return -1;
     162        }
     163   
     164    // get ident from remote_dir structure
     165    ident = (intptr_t)hal_remote_lpt( XPTR( dir_cxy , &dir_ptr->ident ) );
     166
     167    // set ident value in user buffer
     168    hal_copy_to_uspace( dirp , &ident , sizeof(intptr_t) );
    121169
    122170    hal_fence();
  • trunk/kernel/syscalls/sys_readdir.c

    r473 r611  
    11/*
    2  * sys_readdir.c - Read one entry from an open directory.
     2 * sys_readdir.c - Copy one entry from an open VFS directory to an user buffer.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3030#include <vfs.h>
    3131#include <process.h>
     32#include <remote_dir.h>
    3233#include <syscalls.h>
    3334#include <shared_syscalls.h>
     
    3536///////////////////////////////////////
    3637int sys_readdir( DIR            * dirp,
    37                  struct dirent ** dentp )
     38                 struct dirent ** buffer )
    3839{
    39     printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__, dirp, dentp );
    40     return -1;
     40    error_t         error;
     41    vseg_t        * vseg;               // for user space checking of buffer
     42    xptr_t          dir_xp;             // extended pointer on remote_dir_t structure
     43    remote_dir_t  * dir_ptr;            // local pointer on remote_dir_t structure
     44    cxy_t           dir_cxy;            // remote_dir_t stucture cluster identifier
     45    struct dirent * direntp;            // dirent pointer in user space 
     46    uint32_t        entries;            // total number of dirent entries
     47    uint32_t        current;            // current dirent index
     48
     49        thread_t  * this    = CURRENT_THREAD;  // client thread
     50        process_t * process = this->process;   // client process
     51
     52#if (DEBUG_SYS_READDIR || CONFIG_INSTRUMENTATION_SYSCALLS)
     53uint64_t     tm_start = hal_get_cycles();
     54#endif
     55
     56#if DEBUG_SYS_READDIR
     57if( DEBUG_SYS_READDIR < tm_start )
     58printk("\n[%s] thread[%x,%x] enter / dirp %x / cycle %d\n",
     59__FUNCTION__, process->pid, this->trdid, dirp, (uint32_t)tm_start );
     60#endif
     61 
     62    // check buffer in user space
     63    error = vmm_get_vseg( process , (intptr_t)buffer, &vseg );
     64
     65        if( error )
     66        {
     67
     68#if DEBUG_SYSCALLS_ERROR
     69printk("\n[ERROR] in %s / thread[%x,%x] : user buffer %x unmapped\n",
     70__FUNCTION__ , process->pid , this->trdid, buffer );
     71vmm_display( process , false );
     72#endif
     73                this->errno = EINVAL;
     74                return -1;
     75        }       
     76
     77    // get pointers on remote_dir_t structure from dirp
     78    dir_xp  = remote_dir_from_ident( (intptr_t)dirp );
     79    dir_ptr = GET_PTR( dir_xp );
     80    dir_cxy = GET_CXY( dir_xp );
     81
     82    if( dir_xp == XPTR_NULL )
     83        {
     84
     85#if DEBUG_SYSCALLS_ERROR
     86printk("\n[ERROR] in %s / thread[%x,%x] : dirp %x not registered\n",
     87__FUNCTION__ , process->pid , this->trdid, dirp );
     88#endif
     89                this->errno = EBADF;
     90                return -1;
     91        }       
     92
     93    // get "current" and "entries_nr" values from remote_dir_t structure
     94    current = hal_remote_l32( XPTR( dir_cxy , &dir_ptr->current ) );
     95    entries = hal_remote_l32( XPTR( dir_cxy , &dir_ptr->entries ) );
     96
     97    // check "current" index
     98    if( current >= entries )
     99    {
     100        this->errno = 0;
     101        return -1;
     102    }
     103
     104    // compute dirent pointer in user space
     105    direntp = (struct dirent *)dirp + current;
     106
     107#if (DEBUG_SYS_READDIR & 1)
     108if( DEBUG_SYS_READDIR < tm_start )
     109printk("\n[%s] entries = %d / current = %d / direntp = %x\n",
     110__FUNCTION__, entries, current, direntp );
     111#endif
     112
     113    // copy dirent pointer to user buffer
     114    hal_copy_to_uspace( buffer, &direntp , sizeof(void *) );
     115
     116    // update current index in "remote_dir_t" structure
     117    hal_remote_atomic_add( XPTR( dir_cxy , &dir_ptr->current ) , 1 );
     118
     119    hal_fence();
     120
     121#if (DEBUG_SYS_READDIR || CONFIG_INSTRUMENTATION_SYSCALLS)
     122uint64_t     tm_end = hal_get_cycles();
     123#endif
     124
     125#if DEBUG_SYS_READDIR
     126if( DEBUG_SYS_READDIR < tm_end )
     127printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     128__FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end );
     129#endif
     130 
     131#if CONFIG_INSTRUMENTATION_SYSCALLS
     132hal_atomic_add( &syscalls_cumul_cost[SYS_READDIR] , tm_end - tm_start );
     133hal_atomic_add( &syscalls_occurences[SYS_READDIR] , 1 );
     134#endif
     135
     136        return 0;
     137
    41138}  // end sys_readdir()
  • trunk/kernel/syscalls/syscalls.h

    r610 r611  
    328328/******************************************************************************************
    329329 * [23] This function open a directory, that must exist in the file system, returning
    330  * a DIR pointer on the directory in user space.
    331  ******************************************************************************************
    332  * @ pathname   : pathname (can be relative or absolute).
     330 * a DIR pointer on the dirent array in user space.
     331 ******************************************************************************************
     332 * @ pathname   : [in]  pathname (can be relative or absolute).
    333333 * @ dirp       : [out] buffer for pointer on user directory (DIR).
    334334 * @ return 0 if success / returns -1 if failure.
     
    341341 * next directory entry in the directory identified by the <dirp> argument.
    342342 ******************************************************************************************
    343  * @ dirp     : user pointer identifying the searched directory.
    344  * @ dentp    : [out] buffer for pointer on user direntory entry (dirent).
     343 * @ dirp     : [in]  user pointer on dirent array identifying the open directory.
     344 * @ buffer   : [out] pointer on user buffer for a pointer on dirent in user space.
    345345 * @ return O if success / returns -1 if failure.
    346346 *****************************************************************************************/
    347347int sys_readdir( DIR            * dirp,
    348                  struct dirent ** dentp );
     348                 struct dirent ** buffer );
    349349
    350350/******************************************************************************************
     
    352352 * all structures associated with the <dirp> pointer.
    353353 ******************************************************************************************
    354  * @ dirp     : user pointer identifying the directory.
     354 * @ dirp     : [in] user pointer on dirent array identifying the open directory.
    355355 * @ return 0 if success / returns -1 if failure.
    356356 *****************************************************************************************/
     
    575575 * [43] This debug function displays on the kernel terminal TXT0 an user defined string,
    576576 * or the current state of a kernel structure, identified by the <type> argument.
    577  * The <arg0> and <arg1> arguments depends on the structure type:
     577 * The <arg0>, <arg1>, and <arg2> arguments depends on the structure type:
    578578 * - DISPLAY_STRING          : an user defined string
    579579 * - DISPLAY_VMM             : VSL and GPT for a process identified by <pid>.
     
    583583 * - DISPLAY_VFS             : all files registered in the VFS cache.
    584584 * - DISPLAY_CHDEV           : all registered channel devices.
    585  * - DISPLAY_DQDT            : all DQDT nodes.
     585 * - DISPLAY_DQDT            : all DQDT nodes curren values.
     586 * - DISPLAY_BUSYLOCKS       : all busylocks taken by one thread.
     587 * - DISPLAY_MAPPER          : one page of a given mapper.
    586588 ******************************************************************************************
    587589 * type      : [in] type of display
    588590 * arg0      : [in] type dependant argument.
    589591 * arg1      : [in] type dependant argument.
     592 * arg2      : [in] type dependant argument.
    590593 * @ return 0 if success / return -1 if illegal arguments
    591594 *****************************************************************************************/
    592595int sys_display( reg_t  type,
    593596                 reg_t  arg0,
    594                  reg_t  arg1 );
     597                 reg_t  arg1,
     598                 reg_t  arg2 );
    595599
    596600/******************************************************************************************
  • trunk/libs/libalmosmkh/almosmkh.c

    r597 r611  
    276276    return hal_user_syscall( SYS_DISPLAY,
    277277                             DISPLAY_DQDT, 0, 0, 0 );
     278}
     279
     280///////////////////////////////////////
     281int display_mapper( char        * path,
     282                    unsigned int  page_id,
     283                    unsigned int  nbytes)
     284{
     285    return hal_user_syscall( SYS_DISPLAY,
     286                             DISPLAY_MAPPER,
     287                             (reg_t)path,
     288                             (reg_t)page_id,
     289                             (reg_t)nbytes );
    278290}
    279291
  • trunk/libs/libalmosmkh/almosmkh.h

    r610 r611  
    213213int display_dqdt( void );
    214214
     215/***************************************************************************************
     216 * This debug syscall displays on the kernel terminal TXT0 the content of a given
     217 * page of a given VFS mapper.
     218 * It can be called by any thread running in any cluster.
     219 ***************************************************************************************
     220 * @ path      : pathname identifying the file/directory in VFS.
     221 * @ page_id   : page index in file.
     222 * @ nbytes    : number of bytes to display.
     223 * @ return 0 if success / return -1 if file or page not found.
     224 **************************************************************************************/
     225int display_mapper( char        * path,
     226                    unsigned int  page_id,
     227                    unsigned int  nbytes);
     228
    215229/*****************************************************************************************
    216230* This debug syscall is used to activate / deactivate the context switches trace
  • trunk/libs/mini-libc/dirent.h

    r449 r611  
    2525#define _DIRENT_H_
    2626
     27#include <shared_dirent.h>
     28
    2729/*****************************************************************************************
    28  * This file defines the user level, directory entry related library.
     30 * This file defines the user level, directory entries related library.
    2931 * All these functions make a system call to access the kernel VFS.
    30  * The user/kernel shared structures and mnemonics are defined in
    31  * the <syscalls/shared_include/shared_dirent.h> file.
    3232 ****************************************************************************************/
    33 
    34 #include <shared_dirent.h>
    3533
    3634/*****************************************************************************************
  • trunk/params-hard.mk

    r610 r611  
    33ARCH      = /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
    44X_SIZE    = 1
    5 Y_SIZE    = 2
     5Y_SIZE    = 1
    66NB_PROCS  = 1
    77NB_TTYS   = 2
  • trunk/user/ksh/ksh.c

    r610 r611  
    4343#include <signal.h>
    4444#include <unistd.h>
     45#include <dirent.h>
    4546#include <almosmkh.h>
    4647#include <semaphore.h>
     
    5455#define MAX_ARGS           (32)     // max number of arguments in a command
    5556
    56 #define MAIN_DEBUG          0
    57 
    58 #define CMD_CAT_DEBUG       0
    59 #define CMD_CP_DEBUG        0
    60 #define CMD_LOAD_DEBUG      0
     57#define DEBUG_MAIN          0
     58
     59#define DEBUG_CMD_CAT       0
     60#define DEBUG_CMD_CP        0
     61#define DEBUG_CMD_LOAD      0
     62#define DEBUG_CMD_LS        0
    6163
    6264//////////////////////////////////////////////////////////////////////////////////////////
     
    105107{
    106108        char         * path;
    107     stat_t         st;      // stat structure
     109    struct stat    st;
    108110    int            fd;
    109111    int            size;
     
    116118        size = 0;
    117119                printf("  usage: cat pathname\n");
    118         goto exit;
     120            goto cmd_cat_exit;
    119121    }
    120122
     
    127129        buf  = NULL;
    128130        size = 0;
    129             printf("  error: cannot open %s\n", path);
    130             goto exit;
    131     }
    132 
    133 #if CMD_CAT_DEBUG
     131            printf("  error: cannot open file <%s>\n", path);
     132            goto cmd_cat_exit;
     133    }
     134
     135#if DEBUG_CMD_CAT
    134136long long unsigned cycle;
    135137get_cycle( &cycle );
     
    143145        buf  = NULL;
    144146        size = 0;
    145             printf("  error: cannot stat %s\n", path);
    146             goto exit;
     147            printf("  error: cannot stat <%s>\n", path);
     148            goto cmd_cat_exit;
    147149    }
    148150
     
    151153        buf  = NULL;
    152154        size = 0;
    153             printf("  error: %s is a directory\n", path);
    154             goto exit;
     155            printf("  error: <%s> is a directory\n", path);
     156            goto cmd_cat_exit;
    155157    }
    156158
     
    158160    size = st.st_size;
    159161
    160 #if CMD_CAT_DEBUG
     162#if DEBUG_CMD_CAT
    161163get_cycle( &cycle );
    162164printf("\n[%s] get size %d / cycle %d\n",
     
    169171    if ( buf == NULL )
    170172    {
    171             printf("  error: cannot map %s\n", path );
    172             goto exit;
    173     }
    174 
    175 #if CMD_CAT_DEBUG
     173            printf("  error: cannot map file <%s>\n", path );
     174            goto cmd_cat_exit;
     175    }
     176
     177#if DEBUG_CMD_CAT
    176178get_cycle( &cycle );
    177179printf("\n[%s] map file %d to buffer %x / cycle %d\n",
     
    188190    return;
    189191
    190 exit:
     192cmd_cat_exit:
    191193
    192194        if (buf != NULL) munmap(buf, size);
     
    226228static void cmd_cp(int argc, char **argv)
    227229{
    228         int    src_fd;
    229     int    dst_fd;
    230         char * srcpath;
    231     char * dstpath;
    232         int    size;          // source file size
    233         int    bytes;         // number of transfered bytes
    234         char   buf[4096];
    235         stat_t st;
     230        int          src_fd;
     231    int          dst_fd;
     232        char       * srcpath;
     233    char       * dstpath;
     234        int          size;          // source file size
     235        int          bytes;         // number of transferred bytes
     236        char         buf[4096];
     237        struct stat st;
    236238
    237239        if (argc != 3)
     
    240242        dst_fd = -1;
    241243                printf("  usage: cp src_pathname dst_pathname\n");
    242         goto exit;
     244        goto cmd_cp_exit;
    243245        }
    244246
     
    252254    {
    253255        dst_fd = -1;
    254             printf("  error: cannot open %s\n", srcpath );
    255             goto exit;
    256     }
    257 
    258 #if CMD_CP_DEBUG
     256            printf("  error: cannot open <%s>\n", srcpath );
     257            goto cmd_cp_exit;
     258    }
     259
     260#if DEBUG_CMD_CP
    259261long long unsigned cycle;
    260262get_cycle( &cycle );
     
    267269    {
    268270        dst_fd = -1;
    269             printf("  error: cannot stat %s\n", srcpath);
    270             goto exit;
    271     }
    272 
    273 #if CMD_CP_DEBUG
     271            printf("  error: cannot stat <%s>\n", srcpath);
     272            goto cmd_cp_exit;
     273    }
     274
     275#if DEBUG_CMD_CP
    274276get_cycle( &cycle );
    275277printf("\n[%s] stats file <%s> done / cycle %d\n",
     
    280282    {
    281283        dst_fd = -1;
    282                 printf("  error: %s is a directory\n", srcpath);
    283                 goto exit;
     284                printf("  error: <%s> is a directory\n", srcpath);
     285                goto cmd_cp_exit;
    284286        }
    285287
     
    292294        if ( dst_fd < 0 )
    293295    {
    294                 printf("  error: cannot open %s\n", dstpath );
    295                 goto exit;
    296         }
    297 
    298 #if CMD_CP_DEBUG
     296                printf("  error: cannot open <%s>\n", dstpath );
     297                goto cmd_cp_exit;
     298        }
     299
     300#if DEBUG_CMD_CP
    299301get_cycle( &cycle );
    300302printf("\n[%s] open file <%s> done / cycle %d\n",
     
    304306        if ( stat( dstpath , &st ) )
    305307    {
    306                 printf("  error: cannot stat %s\n", dstpath );
    307                 goto exit;
    308         }
    309 
    310 #if CMD_CP_DEBUG
     308                printf("  error: cannot stat <%s>\n", dstpath );
     309                goto cmd_cp_exit;
     310        }
     311
     312#if DEBUG_CMD_CP
    311313get_cycle( &cycle );
    312314printf("\n[%s] stats file <%s> done / cycle %d\n",
     
    316318        if ( S_ISDIR(st.st_mode ) )
    317319    {
    318                 printf("  error: %s is a directory\n", dstpath );
    319                 goto exit;
     320                printf("  error: <%s> is a directory\n", dstpath );
     321                goto cmd_cp_exit;
    320322        }
    321323
     
    329331                if ( read( src_fd , buf , len ) != len )
    330332        {
    331                         printf("  error: cannot read from file %s\n", srcpath);
    332                         goto exit;
     333                        printf("  error: cannot read from file <%s>\n", srcpath);
     334                        goto cmd_cp_exit;
    333335                }
    334336
    335 #if CMD_CP_DEBUG
     337#if DEBUG_CMD_CP
    336338get_cycle( &cycle );
    337339printf("\n[%s] %d bytes read from <%s> / cycle %d\n",
     
    342344                if ( write( dst_fd , buf , len ) != len )
    343345        {
    344                         printf("  error: cannot write to file %s\n", dstpath);
    345                         goto exit;
     346                        printf("  error: cannot write to file <%s>\n", dstpath);
     347                        goto cmd_cp_exit;
    346348                }
    347349
    348 #if CMD_CP_DEBUG
     350#if DEBUG_CMD_CP
    349351get_cycle( &cycle );
    350352printf("\n[%s] %d bytes written to <%s> / cycle %d\n",
     
    355357        }
    356358
    357 exit:
     359cmd_cp_exit:
    358360
    359361        if (src_fd >= 0) close(src_fd);
     
    370372    if( argc < 2 )
    371373    {
    372         printf("  usage: display  vmm      cxy  pid   \n"
    373                "         display  sched    cxy  lid   \n"             
    374                "         display  process  cxy        \n"             
    375                "         display  txt      txtid      \n"             
    376                "         display  vfs                 \n"             
    377                "         display  chdev               \n"             
    378                "         display  dqdt                \n"             
    379                "         display  locks    pid  trdid \n");
     374        printf("  usage: display  vmm      cxy    pid\n"
     375               "         display  sched    cxy    lid\n"             
     376               "         display  process  cxy\n"             
     377               "         display  txt      txtid\n"             
     378               "         display  vfs\n"             
     379               "         display  chdev\n"             
     380               "         display  dqdt\n"             
     381               "         display  locks    pid    trdid\n"
     382               "         display  mapper   path   page_id  nbytes\n");
    380383    }
    381384    ////////////////////////////////////
     
    500503            {
    501504                printf("  error: illegal arguments pid = %x / trdid = %x\n", pid, trdid );
     505            }
     506        }
     507    }
     508    ///////////////////////////////////////////
     509    else if( strcmp( argv[1] , "mapper" ) == 0 )
     510    {
     511        if( argc != 5 )
     512        {
     513                    printf("  usage: display mapper path page_id nbytes\n");
     514            }
     515        else
     516        {
     517                unsigned int page_id   = atoi(argv[3]);
     518            unsigned int nbytes    = atoi(argv[4]);
     519
     520            if( display_mapper( argv[2] , page_id, nbytes ) )
     521            {
     522                printf("  error: cannot display page %d of mapper %s\n", page_id, argv[2] );
    502523            }
    503524        }
     
    643664        ksh_pid = getpid();
    644665
    645 #if CMD_LOAD_DEBUG
     666#if DEBUG_CMD_LOAD
    646667long long unsigned cycle;
    647668get_cycle( &cycle );
    648 printf("\n[KSH] %s : ksh_pid %x / path %s / bg %d / place %d (%x) / cycle %d\n",
     669printf("\n[ksh] %s : ksh_pid %x / path %s / bg %d / place %d (%x) / cycle %d\n",
    649670__FUNCTION__, ksh_pid, argv[1], background, placement, cxy, (int)cycle );
    650671#endif
     
    663684        {
    664685
    665 #if CMD_LOAD_DEBUG
     686#if DEBUG_CMD_LOAD
    666687get_cycle( &cycle );
    667 printf("\n[KSH] %s : child_pid %x after fork, before exec / cycle %d\n",
     688printf("\n[ksh] %s : child_pid %x after fork, before exec / cycle %d\n",
    668689__FUNCTION__ , getpid(), (int)cycle );
    669690#endif
     
    672693            ret_exec = execve( pathname , NULL , NULL );
    673694
    674 #if CMD_LOAD_DEBUG
     695#if DEBUG_CMD_LOAD
    675696get_cycle( &cycle );
    676 printf("\n[KSH] %s : child_pid %x after exec / ret_exec %d / cycle %d\n",
     697printf("\n[ksh] %s : child_pid %x after exec / ret_exec %d / cycle %d\n",
    677698__FUNCTION__ , getpid(), ret_exec, (int)cycle );
    678699#endif
     
    688709        {
    689710
    690 #if CMD_LOAD_DEBUG
     711#if DEBUG_CMD_LOAD
    691712get_cycle( &cycle );
    692 printf("\n[KSH] %s : ksh_pid %x after fork / ret_fork %x / cycle %d\n",
     713printf("\n[ksh] %s : ksh_pid %x after fork / ret_fork %x / cycle %d\n",
    693714__FUNCTION__, getpid(), ret_fork, (int)cycle );
    694715#endif
     
    733754static void cmd_ls( int argc , char **argv )
    734755{
    735         char  * path;
    736 
    737 //  struct dirent * file;
    738 //  DIR *dir;
    739 
    740         if (argc > 2 )
     756        char           * pathname;
     757    struct dirent  * entry;
     758    DIR            * dir;
     759
     760        if (argc != 2 )
    741761    {
    742762                printf("  usage: ls [path]\n");
     
    744764    else
    745765    {
    746         if ( argc == 1 ) path = ".";
    747         else             path = argv[1];
    748 
    749         printf("  error: not implemented yet\n");
    750 /*
    751         dir = opendir( path );
    752         while ((file = readdir(dir)) != NULL)
    753         {
    754                 printf(" %s\n", file->d_name);
    755         }
    756         closedir(dir);
    757 */
    758     }
     766
     767// handle case with no argument
     768// TODO if ( argc == 1 ) path = ".";
     769
     770        // get target directory path
     771        pathname = argv[1];
     772
     773        // open target directory
     774            dir = opendir( pathname );
     775
     776#if DEBUG_CMD_LS
     777printf("\n[ksh] %s : directory <%s> open / DIR %x\n",
     778__FUNCTION__, pathname , dir );
     779#endif
     780
     781        if( dir == NULL)
     782            {
     783                    printf("  error: directory <%s> not found\n", pathname );
     784            goto cmd_ls_exit;
     785            }
     786
     787        // loop on directory entries   
     788        while ( (entry = readdir(dir)) != NULL )
     789            {
     790                    printf(" - %s\n", entry->d_name);
     791            }
     792
     793        // close target directory
     794            closedir( dir );
     795
     796#if DEBUG_CMD_LS
     797printf("\n[ksh] %s : directory <%s> closed\n",
     798__FUNCTION__, pathname );
     799#endif
     800
     801    }
     802
     803cmd_ls_exit:
    759804
    760805    // release semaphore to get next command
     
    887932        if ( unlink( pathname ) )
    888933        {
    889                     printf("  error: unable to remove %s\n", pathname );
     934                    printf("  error: unable to remove <%s>\n", pathname );
    890935            }
    891936    }
     
    10481093
    10491094
    1050 // To launch one command without interactive mode
     1095/* To launch one command without interactive mode
    10511096   
    10521097if( sem_wait( &semaphore ) )
     
    10571102else
    10581103{
    1059     printf("\n[ksh] cp /home/Makefile /home/bloup\n");
     1104    printf("\n[ksh] ls bin/user\n");
    10601105}
    10611106
    1062 strcpy( buf , "cp /home/Makefile /home/bloup" );
     1107strcpy( buf , "ls bin/user" );
    10631108parse( buf );
    10641109
    1065 //
     1110*/
    10661111
    10671112        enum fsm_states
     
    12701315    get_core( &cxy , &lid );
    12711316
    1272 #if MAIN_DEBUG
     1317#if DEBUG_MAIN
    12731318printf("\n[ksh] main started on core[%x,%d]\n", cxy , lid );
    12741319#endif
     
    12811326    }
    12821327
    1283 #if MAIN_DEBUG
     1328#if DEBUG_MAIN
    12841329printf("\n[ksh] main initialized semaphore\n" );
    12851330#endif
     
    12941339                    &interactive,   // entry function
    12951340                    NULL );
    1296 #if MAIN_DEBUG
     1341#if DEBUG_MAIN
    12971342printf("\n[ksh] main launched interactive thread => wait children termination\n" );
    12981343#endif
     
    13071352        child_pid = wait( &status );
    13081353
    1309 #if MAIN_DEBUG
     1354#if DEBUG_MAIN
    13101355if( WIFEXITED  (status) ) printf("\n[ksh] child process %x exit\n"   , child_pid );
    13111356if( WIFSIGNALED(status) ) printf("\n[ksh] child process %x killed\n" , child_pid );
Note: See TracChangeset for help on using the changeset viewer.