Changeset 610 for trunk/kernel/mm


Timestamp: Dec 27, 2018, 7:38:58 PM (5 years ago)
Author:    alain
Message:   Fix several bugs in VFS to support the following ksh commands:
           cp, mv, rm, mkdir, cd, pwd
Location:  trunk/kernel/mm
Files:     6 edited

Legend:

  (no prefix)  unchanged context
  +            line added in the new revision
  -            line removed from the old revision
  • trunk/kernel/mm/mapper.c

    r606 → r610

    @@ -188 +188 @@
             {
     
    +            if( mapper_cxy == local_cxy )   // mapper is local
    +            {
    +
     #if (DEBUG_MAPPER_GET_PAGE & 1)
     if( DEBUG_MAPPER_GET_PAGE < cycle )
    -printk("\n[%s] missing page => load it from IOC device\n", __FUNCTION__ );
    -#endif
    -            if( mapper_cxy == local_cxy )   // mapper is local
    -            {
    +printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
    +#endif
                      error = mapper_handle_miss( mapper_ptr,
                                                  page_id,

    @@ -200 +201 @@
                 else
                 {
    +
    +#if (DEBUG_MAPPER_GET_PAGE & 1)
    +if( DEBUG_MAPPER_GET_PAGE < cycle )
    +printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
    +#endif
                     rpc_mapper_handle_miss_client( mapper_cxy,
                                                    mapper_ptr,

    @@ -253 +259 @@
     vfs_inode_t * inode = mapper->inode;
     vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    -if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    -printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
    +// if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    +// if( (page_id == 1) && (cycle > 10000000) )
    +printk("\n[%s] enter for page %d in <%s> / cycle %d",
     __FUNCTION__, page_id, name, cycle );
     if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    -grdxt_display( &mapper->rt , name );
    -#endif
    -
    -    // allocate one page from the mapper cluster
    +grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
    +#endif
    +
    +    // allocate one page from the local cluster
         req.type  = KMEM_PAGE;
         req.size  = 0;

    @@ -313 +320 @@
     #if DEBUG_MAPPER_HANDLE_MISS
     cycle = (uint32_t)hal_get_cycles();
    -if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    -printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
    +// if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    +// if( (page_id == 1) && (cycle > 10000000) )
    +printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
     __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
     if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    -grdxt_display( &mapper->rt , name );
    +grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
     #endif
     

    @@ -348 +356 @@
     }  // end mapper_release_page()
     
    -////////////////////////////////////////////
    -error_t mapper_move_user( mapper_t * mapper,
    +///////////////////////////////////////////////
    +error_t mapper_move_user( xptr_t     mapper_xp,
                               bool_t     to_buffer,
                               uint32_t   file_offset,

    @@ -355 +363 @@
                               uint32_t   size )
     {
    -    xptr_t     mapper_xp;      // extended pointer on local mapper
         uint32_t   page_offset;    // first byte to move to/from a mapper page
         uint32_t   page_count;     // number of bytes to move to/from a mapper page

    @@ -371 +378 @@
     #endif
     
    -    // build extended pointer on mapper
    -    mapper_xp = XPTR( local_cxy , mapper );
    -
         // compute offsets of first and last bytes in file
         uint32_t min_byte = file_offset;

    @@ -384 +388 @@
     #if (DEBUG_MAPPER_MOVE_USER & 1)
     if( DEBUG_MAPPER_MOVE_USER < cycle )
    -printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
    +printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
    +__FUNCTION__, this->process->pid, this->trdid, first, last );
     #endif
     

    @@ -404 +409 @@
     #if (DEBUG_MAPPER_MOVE_USER & 1)
     if( DEBUG_MAPPER_MOVE_USER < cycle )
    -printk("\n[%s] page_id = %d / page_offset = %d / page_count = %d\n",
    -__FUNCTION__ , page_id , page_offset , page_count );
    +printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
    +__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
     #endif
     

    @@ -412 +417 @@
     
             if ( page_xp == XPTR_NULL ) return -1;
    +
    +#if (DEBUG_MAPPER_MOVE_USER & 1)
    +if( DEBUG_MAPPER_MOVE_USER < cycle )
    +printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
    +__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
    +#endif
     
             // compute pointer in mapper

    @@ -547 +558 @@
             }
     
    +#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
    +if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    +printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
    +__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
    +#endif
    +
             // move fragment
             hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );
  • trunk/kernel/mm/mapper.h

    r606 → r610

    @@ -45 +45 @@
      * - The leaves are pointers on physical page descriptors, dynamically allocated
      *   in the local cluster.
    - * - In a given cluster, a mapper is a "private" structure: a thread accessing the mapper
    - *   must be running in the cluster containing it (can be a local thread or a RPC thread).
    - * - The mapper is protected by a blocking "rwlock", to support several simultaneous
    - *   readers, and only one writer. This lock implement a busy waiting policy.
    - * - The mapper_get_page() function that return a page descriptor pointer from a page
    - *   index in file is in charge of handling the miss on the mapper cache.
    + * - The mapper is protected by a "remote_rwlock", to support several simultaneous
    + *   "readers", and only one "writer".
    + * - A "reader" thread, calling the mapper_remote_get_page() function to get a page
    + *   descriptor pointer from the page index in file, can be remote (running in any cluster).
    + * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss,
    + *   must be local (running in the mapper cluster).
      * - The vfs_mapper_move_page() function accesses the file system to handle a mapper miss,
      *   or update a dirty page on device.
    - * - The vfs_mapper_load_all() functions is used to load all pages of a given file
    - *   or directory into the mapper.
    + * - The vfs_mapper_load_all() function is used to load all pages of a directory
    + *   into the mapper (prefetch).
      * - the mapper_move_user() function is used to move data to or from a user buffer.
      *   This user space buffer can be physically distributed in several clusters.
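    The mapper.c hunks above implement exactly this reader/writer split: a page miss is
    handled by a direct call when the calling thread runs in the mapper cluster, and through
    an RPC to the owner cluster otherwise. A minimal sketch of that dispatch follows; the
    trailing arguments of both calls are truncated in the diff and are therefore assumptions:

    /* Sketch of the local-vs-RPC miss dispatch used in the mapper access path.
     * mapper_xp, page_id, page_xp and error are assumed to be declared by the
     * caller; the argument lists are completed by assumption, since the diff
     * truncates them after the first two arguments. */
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );   // cluster of the mapper
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );   // local pointer on mapper

    if( mapper_cxy == local_cxy )        // mapper is local : direct call
    {
        error = mapper_handle_miss( mapper_ptr,
                                    page_id,
                                    &page_xp );     // assumed third argument
    }
    else                                 // mapper is remote : RPC to owner cluster
    {
        rpc_mapper_handle_miss_client( mapper_cxy,
                                       mapper_ptr,
                                       page_id,     // assumed trailing arguments
                                       &page_xp,
                                       &error );
    }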
     
    @@ -137 +137 @@
     
     /*******************************************************************************************
    - * This function move data between a local mapper, and a distributed user buffer.
    - * It must be called by a thread running in cluster containing the mapper.
    + * This function moves data between a remote mapper, identified by the <mapper_xp> argument,
    + * and a distributed user buffer. It can be called by a thread running in any cluster.
      * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls.
      * If required, the data transfer is split in "fragments", where one fragment contains

    @@ -144 +144 @@
      * It uses "hal_uspace" accesses to move a fragment to/from the user buffer.
      * In case of write, the dirty bit is set for all pages written in the mapper.
    - * The mapper being an extendable cache, it is automatically extended when required
    - * for both read and write accesses.
    + * The mapper being an extendable cache, it is automatically extended when required.
      * The "offset" field in the file descriptor, and the "size" field in inode descriptor
      * are not modified by this function.
      *******************************************************************************************
    - * @ mapper       : local pointer on mapper.
    + * @ mapper_xp    : extended pointer on mapper.
      * @ to_buffer    : mapper -> buffer if true / buffer -> mapper if false.
      * @ file_offset  : first byte to move in file.

    @@ -156 +155 @@
      * returns 0 if success / returns -1 if error.
      ******************************************************************************************/
    -error_t mapper_move_user( mapper_t * mapper,
    +error_t mapper_move_user( xptr_t     mapper_xp,
                               bool_t     to_buffer,
                               uint32_t   file_offset,
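    Given the new prototype, a read-side call (mapper to user buffer) would look like the
    sketch below. The buffer argument sits in a parameter position elided by this excerpt,
    and the way the mapper xptr is built from the file descriptor is an assumption:

    /* Hypothetical read path, as used by vfs_user_move() for sys_read().
     * file_cxy, file_ptr and the mapper field are illustrative names, and
     * <user_buffer> occupies the parameter position elided above. */
    xptr_t mapper_xp = XPTR( file_cxy , file_ptr->mapper );   // assumed field

    error_t error = mapper_move_user( mapper_xp,      // extended pointer on mapper
                                      true,           // to_buffer : mapper -> buffer
                                      file_offset,    // first byte to move in file
                                      user_buffer,    // distributed user buffer (assumed arg)
                                      count );        // number of bytes
    if( error ) return -1;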
  • trunk/kernel/mm/ppm.c

    r606 → r610

    @@ -413 +413 @@
         xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
     
    +// printk("\n@@@ %s : before dirty_list lock acquire\n", __FUNCTION__ );
    +
             // lock the remote PPM dirty_list
             remote_queuelock_acquire( dirty_lock_xp );
     
    +// printk("\n@@@ %s : after dirty_list lock acquire\n", __FUNCTION__ );
    +
         // lock the remote page
         remote_busylock_acquire( page_lock_xp );
    +
    +// printk("\n@@@ %s : after page lock acquire\n", __FUNCTION__ );
     
         // get remote page flags

    @@ -460 +466 @@
             }
     
    +// printk("\n@@@ %s : before page lock release\n", __FUNCTION__ );
    +
         // unlock the remote page
         remote_busylock_release( page_lock_xp );
     
    +// printk("\n@@@ %s : after page lock release\n", __FUNCTION__ );
    +
             // unlock the remote PPM dirty_list
             remote_queuelock_release( dirty_lock_xp );
    +
    +// printk("\n@@@ %s : after dirty_list lock release\n", __FUNCTION__ );
     
             return done;
  • trunk/kernel/mm/ppm.h

    r606 → r610

    @@ -62 +62 @@
      * also rooted in the PPM, in order to be able to save all dirty pages on disk.
      * This dirty list is protected by a specific remote_queuelock, because it can be
    - * modified by a remote thread, but it is implemented as a local list, because it
    - * contains only local pages.
    + * modified by a remote thread, but it contains only local pages.
      ****************************************************************************************/
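    For orientation, the anchoring this comment describes looks roughly like the sketch
    below. Only the dirty_lock field name is confirmed by the ppm.c hunk above; the other
    fields are assumptions for illustration:

    /* Rough sketch of the per-cluster dirty-list anchoring in the PPM.
     * dirty_lock appears in the ppm.c diff; dirty_root is assumed. */
    typedef struct ppm_s
    {
        list_entry_t        dirty_root;   // root of the local dirty pages list (assumed)
        remote_queuelock_t  dirty_lock;   // taken by local or remote threads
        /* ... other PPM fields (free page lists, base addresses, etc.) ... */
    }
    ppm_t;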
    6766
     
    193192 * It can be called by a thread running in any cluster.
    194193 * - it takes the queuelock protecting the PPM dirty_list.
     194 * - it takes the busylock protecting the page flags.
    195195 * - it test the PG_DIRTY flag in the page descriptor.
    196196 *   . if page already dirty => do nothing
    197197 *   . it page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
     198 * - it releases the busylock protcting the page flags.
    198199 * - it releases the queuelock protecting the PPM dirty_list.
    199200 *****************************************************************************************
     
    @@ -207 +208 @@
      * It can be called by a thread running in any cluster.
      * - it takes the queuelock protecting the PPM dirty_list.
    + * - it takes the busylock protecting the page flags.
      * - it tests the PG_DIRTY flag in the page descriptor.
      *   . if page not dirty => do nothing
      *   . if page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
    + * - it releases the busylock protecting the page flags.
      * - it releases the queuelock protecting the PPM dirty_list.
      *****************************************************************************************
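    Both comment blocks describe the same nested-lock discipline that the ppm.c hunk
    instruments: the dirty-list queuelock is taken first, the page busylock second, and
    they are released in reverse order. A compact sketch of the "set dirty" path, where
    the function name, the page lock field and the flag/list helpers are assumptions:

    /* Sketch of the nested-lock discipline described above. The real code
     * lives in ppm.c and may differ in detail. */
    bool_t ppm_page_do_dirty_sketch( ppm_t  * ppm,        // PPM of page cluster (assumed arg)
                                     cxy_t    page_cxy,   // cluster of the page
                                     page_t * page )      // local pointer on page
    {
        bool_t done = false;

        xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );  // as in the ppm.c hunk
        xptr_t page_lock_xp  = XPTR( page_cxy , &page->lock );       // assumed field name

        remote_queuelock_acquire( dirty_lock_xp );   // lock the remote PPM dirty_list
        remote_busylock_acquire( page_lock_xp );     // lock the remote page flags

        // test PG_DIRTY; if the page was clean, set the flag, register the
        // page in the dirty list, and report success (helpers omitted here)
        // done = ...;

        remote_busylock_release( page_lock_xp );     // unlock the remote page
        remote_queuelock_release( dirty_lock_xp );   // unlock the dirty_list

        return done;
    }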
  • trunk/kernel/mm/vmm.c

    r606 → r610

    @@ -1444 +1444 @@
     #endif
     
    -    // compute target cluster
         page_t     * page_ptr;
         cxy_t        page_cxy;

    @@ -1611 +1610 @@
     #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    -printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n",
    +printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
     "      %d bytes from mapper / %d bytes from BSS\n",
     __FUNCTION__, this->process->pid, this->trdid, vpn,

    @@ -1674 +1673 @@
                               (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
                               &vseg );
    -
         if( error )
         {

    @@ -1933 +1931 @@
     #endif
     
    +    // access local GPT to get GPT_COW flag
    +    bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );
    +
    +    if( cow == false ) return EXCP_USER_ERROR;
    +
         // get local vseg
         error = vmm_get_vseg( process,
                               (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
                               &vseg );
    -
         if( error )
         {

    @@ -1950 +1952 @@
         ref_ptr = GET_PTR( process->ref_xp );
     
    -    // build relevant extended pointers on  GPT and  GPT lock
    +    // build extended pointers on the relevant GPT and GPT lock
         // - access local GPT for a private vseg
         // - access reference GPT for a public vseg
  • trunk/kernel/mm/vmm.h

    r595 → r610

    @@ -158 +158 @@
                       bool_t             mapping );
     
    -/*******************************************************************************************
    +/*********************************************************************************************
      * This function adds a vseg descriptor in the VSL of a given VMM,
      * and updates the vmm field in the vseg descriptor.
      * It takes the lock protecting VSL.
    - *******************************************************************************************
    + *********************************************************************************************
      * @ vmm       : pointer on the VMM
      * @ vseg      : pointer on the vseg descriptor
    - ******************************************************************************************/
    + ********************************************************************************************/
     void vmm_vseg_attach( struct vmm_s  * vmm,
                           vseg_t        * vseg );
     
    -/*******************************************************************************************
    +/*********************************************************************************************
      * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
      * and updates the vmm field in the vseg descriptor. No memory is released.
      * It takes the lock protecting VSL.
    - *******************************************************************************************
    + *********************************************************************************************
      * @ vmm       : pointer on the VMM
      * @ vseg      : pointer on the vseg descriptor
    - ******************************************************************************************/
    + ********************************************************************************************/
     void vmm_vseg_detach( struct vmm_s  * vmm,
                           vseg_t        * vseg );
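    Since both functions take the VSL lock themselves, a caller only pairs them around the
    lifetime of a vseg in the VSL. A minimal usage sketch, where vseg_alloc()/vseg_init()
    stand for whatever allocates and initializes the vseg descriptor and are assumptions:

    /* Sketch of typical use of the two functions documented above. */
    void vsl_attach_detach_sketch( struct vmm_s * vmm )
    {
        vseg_t * vseg = vseg_alloc();                     // hypothetical allocator
        vseg_init( vseg /* , type, base, size, ... */ );  // hypothetical init

        vmm_vseg_attach( vmm , vseg );   // registers vseg in the VSL (takes the VSL lock)

        /* ... the vseg is now visible to all threads using this VMM ... */

        vmm_vseg_detach( vmm , vseg );   // removes vseg from the VSL (takes the VSL lock);
                                         // no memory is released by this call
    }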
     
    @@ -326 +326 @@
      * (d) if the removed region cut the vseg in three parts, it is modified, and a new
      *     vseg is created with same type.
    - * FIXME [AG] this function must be called by a thread running in the reference cluster,
    - * and the VMM must be updated in all process descriptors copies.
    + * FIXME [AG] this function should be called by a thread running in the reference cluster,
    + *       and the VMM should be updated in all process descriptors copies.
      *********************************************************************************************
      * @ process   : pointer on process descriptor
     
    @@ -357 +357 @@
     /*********************************************************************************************
      * This function is called by the generic exception handler in case of page-fault event,
    - * detected for a given <vpn> in a given <process> in any cluster.
    + * detected for a given <vpn>. The <process> argument is used to access the relevant VMM.
      * It checks the missing VPN and returns a user error if it is not in a registered vseg.
      * For a legal VPN, there are actually 3 cases:

    @@ -370 +370 @@
      *    on vseg type, and updates directly (without RPC) the local GPT and the reference GPT.
      *    Other GPT copies will be updated on demand.
    - * In the three cases, concurrent accesses to the GPT are handled, thanks to the
    + * Concurrent accesses to the GPT are handled, thanks to the
      * remote_rwlock protecting each GPT copy.
      *********************************************************************************************
    - * @ process   : pointer on local process descriptor copy.
    - * @ vpn       : VPN of the missing PTE.
    + * @ process  : local pointer on local process.
    + * @ vpn      : VPN of the missing PTE.
      * @ returns EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC after analysis
      ********************************************************************************************/
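    The excerpt truncates the prototype itself, but the documented return codes imply a
    dispatch like the following in the generic exception handler. The function name and
    the handler's reactions are assumptions of this sketch:

    /* Sketch of how the exception handler can consume the return codes
     * documented above; vmm_handle_page_fault is an assumed name. */
    error_t excp = vmm_handle_page_fault( process , vpn );

    if( excp == EXCP_NON_FATAL )         // PTE now mapped : the faulting access can be retried
    {
        // simply return from the exception
    }
    else if( excp == EXCP_USER_ERROR )   // VPN not in a registered vseg : user error
    {
        // deliver a fatal signal to the user process (API not shown in this excerpt)
    }
    else                                 // EXCP_KERNEL_PANIC : unrecoverable kernel error
    {
        // stop the kernel with a diagnostic
    }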
     
    @@ -381 +381 @@
     
     /*********************************************************************************************
    - * This function is called by the generic exception handler in case of copy-on-write event,
    - * detected for a given <vpn> in a given <process> in any cluster.
    + * This function is called by the generic exception handler in case of WRITE violation event,
    + * detected for a given <vpn>. The <process> argument is used to access the relevant VMM.
      * It returns a kernel panic if VPN is not in a registered vseg or is not mapped.
      * For a legal mapped vseg there are two cases:

    @@ -399 +399 @@
      *    Finally it calls the vmm_global_update_pte() function to reset the COW flag and set
      *    the WRITE flag in all the GPT copies, using an RPC if the reference cluster is remote.
    - * In both cases, concurrent accesses to the GPT are handled, thanks to the
    - * remote_rwlock protecting each GPT copy.
    + * In both cases, concurrent accesses to the GPT are protected by the remote_rwlock
    + * attached to the GPT copy in VMM.
      *********************************************************************************************
      * @ process   : pointer on local process descriptor copy.
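    The vmm.c hunk above shows the early-exit this comment implies: a write fault on a page
    whose GPT entry is not marked copy-on-write is a genuine user error. A minimal sketch of
    that guard, reusing the lines added in r610; the trailing steps are paraphrased from the
    comment text rather than shown code:

    /* Entry guard of the WRITE-violation handler, as added in vmm.c by this
     * changeset: only a PTE whose GPT_COW flag is set is a legitimate
     * copy-on-write event; anything else is reported as a user error. */
    bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );

    if( cow == false ) return EXCP_USER_ERROR;

    /* ...then get the vseg, access the relevant GPT (local GPT for a private
     * vseg / reference GPT for a public vseg) under its remote_rwlock, copy
     * the page if required, and call vmm_global_update_pte() to reset the
     * COW flag and set the WRITE flag in all GPT copies. */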