Changeset 611 for trunk/kernel/mm


Ignore:
Timestamp:
Jan 9, 2019, 3:02:51 PM (5 years ago)
Author:
alain
Message:

Introduce significant modifications in VFS to support the <ls> command,
and the . and .. directories entries.

Location:
trunk/kernel/mm
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kmem.c

    r577 r611  
    4040#include <fatfs.h>
    4141#include <ramfs.h>
     42#include <remote_dir.h>
    4243#include <remote_sem.h>
    4344#include <remote_barrier.h>
     
    100101    else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
    101102    else if( type == KMEM_MUTEX )         return sizeof( remote_mutex_t );
     103    else if( type == KMEM_DIR )           return sizeof( remote_dir_t );
     104
    102105        else if( type == KMEM_512_BYTES )     return 512;
    103106
     
    128131    else if( type == KMEM_CONDVAR )       return "KMEM_CONDVAR";
    129132    else if( type == KMEM_MUTEX )         return "KMEM_MUTEX";
     133    else if( type == KMEM_DIR )           return "KMEM_DIR";
     134
    130135        else if( type == KMEM_512_BYTES )     return "KMEM_512_BYTES";
    131136
     
    144149
    145150#if DEBUG_KMEM
     151thread_t * this = CURRENT_THREAD;
    146152uint32_t cycle = (uint32_t)hal_get_cycles();
    147153if( DEBUG_KMEM < cycle )
    148 printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
    149 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     154printk("\n[%s] thread[%x,%x] enter / KCM type %s missing in cluster %x / cycle %d\n",
     155__FUNCTION__, this->process->pid, this->trdid, kmem_type_str( type ), local_cxy, cycle );
    150156#endif
    151157
     
    174180cycle = (uint32_t)hal_get_cycles();
    175181if( DEBUG_KMEM < cycle )
    176 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    177 __FUNCTION__, CURRENT_THREAD, cycle );
     182printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     183__FUNCTION__, this->process->pid, this->trdid, cycle );
    178184#endif
    179185
     
    198204
    199205#if DEBUG_KMEM
     206thread_t * this = CURRENT_THREAD;
    200207uint32_t cycle = (uint32_t)hal_get_cycles();
    201208if( DEBUG_KMEM < cycle )
    202 printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
    203 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     209printk("\n[%s] thread [%x,%x] enter / %s / size %d / cluster %x / cycle %d\n",
     210__FUNCTION__, this->process->pid, this->trdid,
     211kmem_type_str( type ), size, local_cxy, cycle );
    204212#endif
    205213
     
    222230cycle = (uint32_t)hal_get_cycles();
    223231if( DEBUG_KMEM < cycle )
    224 printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
    225 __FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
     232printk("\n[%s] thread[%x,%x] exit / %d page(s) allocated / ppn %x / cycle %d\n",
     233__FUNCTION__, this->process->pid, this->trdid,
     2341<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
    226235#endif
    227236
     
    244253cycle = (uint32_t)hal_get_cycles();
    245254if( DEBUG_KMEM < cycle )
    246 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    247 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );
     255printk("\n[%s] thread[%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n",
     256__FUNCTION__, this->process->pid, this->trdid,
     257kmem_type_str( type ), (intptr_t)ptr, size, cycle );
    248258#endif
    249259
     
    286296cycle = (uint32_t)hal_get_cycles();
    287297if( DEBUG_KMEM < cycle )
    288 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    289 __FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
     298printk("\n[%s] thread [%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n",
     299__FUNCTION__, this->process->pid, this->trdid, kmem_type_str(type), (intptr_t)ptr,
    290300kmem_type_size(type), cycle );
    291301#endif
  • trunk/kernel/mm/kmem.h

    r567 r611  
    3636enum
    3737{
    38   KMEM_PAGE             = 0,   /*! reserved for PPM allocator                       */
    39   KMEM_GENERIC          = 1,   /*! reserved for KHM allocator                       */
    40   KMEM_KCM              = 2,   /*! kcm_t                                            */
    41   KMEM_VSEG             = 3,   /*! vseg_t                                           */
    42   KMEM_DEVICE           = 4,   /*! device_t                                         */
    43   KMEM_MAPPER           = 5,   /*! mapper_t                                         */
    44   KMEM_PROCESS          = 6,   /*! process_t                                        */
    45   KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                                */
    46   KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                                */
    47   KMEM_BARRIER          = 9,   /*! remote_barrier_t                                 */
     38    KMEM_PAGE             = 0,   /*! reserved for PPM allocator                     */
     39    KMEM_GENERIC          = 1,   /*! reserved for KHM allocator                     */
     40    KMEM_KCM              = 2,   /*! kcm_t                                          */
     41    KMEM_VSEG             = 3,   /*! vseg_t                                         */
     42    KMEM_DEVICE           = 4,   /*! device_t                                       */
     43    KMEM_MAPPER           = 5,   /*! mapper_t                                       */
     44    KMEM_PROCESS          = 6,   /*! process_t                                      */
     45    KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                              */
     46    KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                              */
     47    KMEM_BARRIER          = 9,   /*! remote_barrier_t                               */
    4848
    49   KMEM_DEVFS_CTX        = 10,  /*! fatfs_inode_t                                    */
    50   KMEM_FATFS_CTX        = 11,  /*! fatfs_ctx_t                                      */
    51   KMEM_VFS_CTX          = 12,  /*! vfs_context_t                                    */
    52   KMEM_VFS_INODE        = 13,  /*! vfs_inode_t                                      */
    53   KMEM_VFS_DENTRY       = 14,  /*! vfs_dentry_t                                     */
    54   KMEM_VFS_FILE         = 15,  /*! vfs_file_t                                       */
    55   KMEM_SEM              = 16,  /*! remote_sem_t                                     */
    56   KMEM_CONDVAR          = 17,  /*! remote_condvar_t                                 */
    57   KMEM_MUTEX            = 18,  /*! remote_mutex_t                                   */
    58   KMEM_512_BYTES        = 19,  /*! 512 bytes aligned                                */
     49    KMEM_DEVFS_CTX        = 10,  /*! fatfs_inode_t                                  */
     50    KMEM_FATFS_CTX        = 11,  /*! fatfs_ctx_t                                    */
     51    KMEM_VFS_CTX          = 12,  /*! vfs_context_t                                  */
     52    KMEM_VFS_INODE        = 13,  /*! vfs_inode_t                                    */
     53    KMEM_VFS_DENTRY       = 14,  /*! vfs_dentry_t                                   */
     54    KMEM_VFS_FILE         = 15,  /*! vfs_file_t                                     */
     55    KMEM_SEM              = 16,  /*! remote_sem_t                                   */
     56    KMEM_CONDVAR          = 17,  /*! remote_condvar_t                               */
     57    KMEM_MUTEX            = 18,  /*! remote_mutex_t                                 */
     58    KMEM_DIR              = 19,  /*! remote_dir_t                                   */
    5959
    60   KMEM_TYPES_NR         = 21,
     60    KMEM_512_BYTES        = 20,  /*! 512 bytes aligned                              */
     61
     62    KMEM_TYPES_NR         = 21,
    6163};
    6264
  • trunk/kernel/mm/mapper.c

    r610 r611  
    644644}  // end mapper_remote_set_32()
    645645
    646 
     646//////////////////////////////////////////////////
     647error_t mapper_display_page( xptr_t     mapper_xp,
     648                             uint32_t   page_id,
     649                             uint32_t   nbytes,
     650                             char     * string )
     651{
     652    xptr_t     page_xp;        // extended pointer on page descriptor
     653    xptr_t     base_xp;        // extended pointer on page base
     654    char       buffer[4096];   // local buffer
     655    uint32_t * tab;            // pointer on uint32_t to scan the buffer
     656    uint32_t   line;           // line index
     657    uint32_t   word;           // word index
     658
     659    if( nbytes > 4096)
     660    {
     661        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
     662        __FUNCTION__, nbytes );
     663        return -1;
     664    }
     665   
     666    // get extended pointer on page descriptor
     667    page_xp = mapper_remote_get_page( mapper_xp , page_id );
     668
     669    if( page_xp == XPTR_NULL)
     670    {
     671        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
     672        __FUNCTION__, page_id );
     673        return -1;
     674    }
     675
     676    // get extended pointer on page base
     677    base_xp = ppm_page2base( page_xp );
     678   
     679    // copy remote page to local buffer
     680    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
     681
     682    // display 8 words per line
     683    tab = (uint32_t *)buffer;
     684    printk("\n***** %s : first %d bytes of page %d *****\n", string, nbytes, page_id );
     685    for( line = 0 ; line < (nbytes >> 5) ; line++ )
     686    {
     687        printk("%X : ", line );
     688        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
     689        printk("\n");
     690    }
     691
     692    return 0;
     693
     694}  // end mapper_display_page
     695
     696
  • trunk/kernel/mm/mapper.h

    r610 r611  
    11/*
    2  * mapper.h - Kernel cache for FS files or directories definition.
     2 * mapper.h - Kernel cache for VFS files/directories definition.
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
     
    195195
    196196/*******************************************************************************************
    197  * This function returns an extended pointer on a mapper page, identified by <page_id>,
    198  * index in the file. The - possibly remote - mapper is identified by the <mapper_xp>
    199  * argument.  It can be executed by a thread running in any cluster, as it uses remote
     197 * This function returns an extended pointer on a page descriptor.
     198 * The - possibly remote - mapper is identified by the <mapper_xp> argument.
     199 * The page is identified by <page_id> argument (page index in the file).
     200 * It can be executed by a thread running in any cluster, as it uses remote
    200201 * access primitives to scan the mapper.
    201202 * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the
     
    205206 * @ mapper_xp  : extended pointer on the mapper.
    206207 * @ page_id    : page index in file
    207  * @ returns extended pointer on page base if success / return XPTR_NULL if error.
     208 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error.
    208209 ******************************************************************************************/
    209210xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
     
    212213/*******************************************************************************************
    213214 * This function allows to read a single word in a mapper seen as an array of uint32_t.
    214  * It has bee designed to support remote access tho the FAT mapper of the FATFS.
     215 * It has been designed to support remote access to the FAT mapper of the FATFS.
    215216 * It can be called by any thread running in any cluster.
    216217 * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
     
    218219 *******************************************************************************************
    219220 * @ mapper_xp  : [in]  extended pointer on the mapper.
    220  * @ index          : [in]  32 bits word index in file.
     221 * @ word_id    : [in]  32 bits word index in file.
    221222 * @ p_value    : [out] local pointer on destination buffer.
    222223 * @ returns 0 if success / return -1 if error.
     
    234235 *******************************************************************************************
    235236 * @ mapper_xp  : [in]  extended pointer on the mapper.
    236  * @ index          : [in]  32 bits word index in file.
    237  * @ p_value    : [in]  value to be written.
     237 * @ word_id    : [in]  32 bits word index in file.
     238 * @ value      : [in]  value to be written.
    238239 * @ returns 0 if success / return -1 if error.
    239240 ******************************************************************************************/
     
    242243                              uint32_t   value );
    243244
     245/*******************************************************************************************
     246 * This debug function displays the content of a given page of a given mapper.
     247 * - the mapper is identified by the <mapper_xp> argument.
     248 * - the page is identified by the <page_id> argument.
     249 * - the number of bytes to display in page is defined by the <nbytes> argument.
     250 * The format is eight (32 bits) words per line in hexadecimal.
     251 * It can be called by any thread running in any cluster.
     252 * In case of miss in mapper, it loads the missing page from device to mapper.
     253 *******************************************************************************************
     254 * @ mapper_xp  : [in]  extended pointer on the mapper.
     255 * @ page_id    : [in]  page index in file.
     256 * @ nbytes     : [in]  number of bytes to display (max 4096).
     257 * @ string     : [in]  string printed in header.
     258 * @ returns 0 if success / return -1 if error.
     259 ******************************************************************************************/
     260error_t mapper_display_page( xptr_t     mapper_xp,
     261                             uint32_t   page_id,
     262                             uint32_t   nbytes,
     263                             char     * string );
     264
     265
    244266#endif /* _MAPPER_H_ */
  • trunk/kernel/mm/ppm.c

    r610 r611  
    210210
    211211#if DEBUG_PPM_ALLOC_PAGES
     212thread_t * this = CURRENT_THREAD;
    212213uint32_t cycle = (uint32_t)hal_get_cycles();
    213214if( DEBUG_PPM_ALLOC_PAGES < cycle )
    214 printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / cycle %d\n",
    215 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
     215printk("\n[%s] thread[%x,%x] enter for %d page(s) / cycle %d\n",
     216__FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle );
    216217#endif
    217218
    218219#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
    219220if( DEBUG_PPM_ALLOC_PAGES < cycle )
    220 ppm_print();
     221ppm_print("enter ppm_alloc_pages");
    221222#endif
    222223
    223224        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    224225
    225         assert( (order < CONFIG_PPM_MAX_ORDER) ,
    226     "illegal order argument = %x\n" , order );
     226// check order
     227assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
    227228
    228229        page_t * block = NULL; 
     
    250251cycle = (uint32_t)hal_get_cycles();
    251252if( DEBUG_PPM_ALLOC_PAGES < cycle )
    252 printk("\n[DBG] in %s : thread %x in process %x cannot allocate %d page(s) / cycle %d\n",
    253 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
     253printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) / cycle %d\n",
     254__FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle );
    254255#endif
    255256
     
    289290cycle = (uint32_t)hal_get_cycles();
    290291if( DEBUG_PPM_ALLOC_PAGES < cycle )
    291 printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn = %x / cycle %d\n",
    292 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     292printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x / cycle %d\n",
     293__FUNCTION__, this->process->pid, this->trdid,
    2932941<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     295#endif
     296
     297#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
     298if( DEBUG_PPM_ALLOC_PAGES < cycle )
     299ppm_print("exit ppm_alloc_pages");
    294300#endif
    295301
     
    307313uint32_t cycle = (uint32_t)hal_get_cycles();
    308314if( DEBUG_PPM_FREE_PAGES < cycle )
    309 printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / ppn %x / cycle %d\n",
    310 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     315printk("\n[%s] thread[%x,%x] enter for %d page(s) / ppn %x / cycle %d\n",
     316__FUNCTION__, this->process->pid, this->trdid,
    3113171<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    312318#endif
     
    314320#if(DEBUG_PPM_FREE_PAGES & 0x1)
    315321if( DEBUG_PPM_FREE_PAGES < cycle )
    316 ppm_print();
     322ppm_print("enter ppm_free_pages");
    317323#endif
    318324
     
    331337cycle = (uint32_t)hal_get_cycles();
    332338if( DEBUG_PPM_FREE_PAGES < cycle )
    333 printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn %x / cycle %d\n",
    334 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     339printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn %x / cycle %d\n",
     340__FUNCTION__, this->process->pid, this->trdid,
    3353411<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    336342#endif
    337343
     344#if(DEBUG_PPM_FREE_PAGES & 0x1)
     345if( DEBUG_PPM_FREE_PAGES < cycle )
     346ppm_print("exit ppm_free_pages");
     347#endif
     348
    338349}  // end ppm_free_pages()
    339350
    340 //////////////////////
    341 void ppm_print( void )
     351///////////////////////////////
     352void ppm_print( char * string )
    342353{
    343354        uint32_t       order;
     
    350361        busylock_acquire( &ppm->free_lock );
    351362
    352         printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );
     363        printk("\n***  PPM in cluster %x / %s / %d pages ***\n",
     364    local_cxy , string, ppm->pages_nr );
    353365
    354366        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
     
    413425    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
    414426           
    415 // printk("\n@@@ %s : before dirty_list lock aquire\n", __FUNCTION__ );
    416 
    417427        // lock the remote PPM dirty_list
    418428        remote_queuelock_acquire( dirty_lock_xp );
    419429
    420 // printk("\n@@@ %s : after dirty_list lock aquire\n", __FUNCTION__ );
    421 
    422430    // lock the remote page
    423431    remote_busylock_acquire( page_lock_xp );
    424 
    425 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );
    426432
    427433    // get remote page flags
     
    466472        }
    467473
    468 // printk("\n@@@ %s : before page lock release\n", __FUNCTION__ );
    469 
    470474    // unlock the remote page
    471475    remote_busylock_release( page_lock_xp );
    472476
    473 // printk("\n@@@ %s : after page lock release\n", __FUNCTION__ );
    474 
    475477        // unlock the remote PPM dirty_list
    476478        remote_queuelock_release( dirty_lock_xp );
    477 
    478 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );
    479479
    480480        return done;
  • trunk/kernel/mm/ppm.h

    r610 r611  
    8383 * This is the low-level physical pages allocation function.
    8484 * It allocates N contiguous physical pages. N is a power of 2.
    85  * In normal use, you don't need to call it directly, as the recommended way to get
     85 * In normal use, it should not be called directly, as the recommended way to get
    8686 * physical pages is to call the generic allocator defined in kmem.h.
    8787 *****************************************************************************************
    8888 * @ order        : ln2( number of 4 Kbytes pages)
    8989 * @ returns a pointer on the page descriptor if success / NULL otherwise
    90  **************************************************************************************à))**/
     90 ****************************************************************************************/
    9191page_t * ppm_alloc_pages( uint32_t order );
    9292
     
    174174/*****************************************************************************************
    175175 * This function prints the PPM allocator status in the calling thread cluster.
    176  ****************************************************************************************/
    177 void ppm_print( void );
     176 *****************************************************************************************
     177 * string   : character string printed in header
     178 ****************************************************************************************/
     179void ppm_print( char * string );
    178180
    179181/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r610 r611  
    11/*
    2  * vmm.c - virtual memory manager related operations interface.
     2 * vmm.c - virtual memory manager related operations definition.
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
     
    254254}  // vmm_display()
    255255
    256 ///////////////////////////////////
    257 void vmm_vseg_attach( vmm_t  * vmm,
    258                       vseg_t * vseg )
     256//////////////////////////////////////////
     257void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
     258                             vseg_t * vseg )
    259259{
    260260    // build extended pointer on rwlock protecting VSL
     
    275275}
    276276
    277 ///////////////////////////////////
    278 void vmm_vseg_detach( vmm_t  * vmm,
    279                       vseg_t * vseg )
     277////////////////////////////////////////////
     278void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
     279                               vseg_t * vseg )
    280280{
     281    // get vseg type
     282    uint32_t type = vseg->type;
     283
    281284    // build extended pointer on rwlock protecting VSL
    282285    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     
    288291    vseg->vmm = NULL;
    289292
    290     // remove vseg from vmm list
     293    // remove vseg from VSL
    291294    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    292295
    293296    // release rwlock in write mode
    294297    remote_rwlock_wr_release( lock_xp );
    295 }
     298
     299    // release the stack slot to VMM stack allocator if STACK type
     300    if( type == VSEG_TYPE_STACK )
     301    {
     302        // get pointer on stack allocator
     303        stack_mgr_t * mgr = &vmm->stack_mgr;
     304
     305        // compute slot index
     306        uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
     307
     308        // update stacks_bitmap
     309        busylock_acquire( &mgr->lock );
     310        bitmap_clear( &mgr->bitmap , index );
     311        busylock_release( &mgr->lock );
     312    }
     313
     314    // release the vseg to VMM mmap allocator if MMAP type
     315    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
     316    {
     317        // get pointer on mmap allocator
     318        mmap_mgr_t * mgr = &vmm->mmap_mgr;
     319
     320        // compute zombi_list index
     321        uint32_t index = bits_log2( vseg->vpn_size );
     322
     323        // update zombi_list
     324        busylock_acquire( &mgr->lock );
     325        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
     326        busylock_release( &mgr->lock );
     327    }
     328
     329    // release physical memory allocated for vseg descriptor if no MMAP type
     330    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
     331    {
     332        vseg_free( vseg );
     333    }
     334
     335}  // end vmm_detach_vseg_from_vsl()
    296336
    297337////////////////////////////////////////////////
     
    616656
    617657            // register child vseg in child VSL
    618             vmm_vseg_attach( child_vmm , child_vseg );
     658            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
    619659
    620660#if DEBUG_VMM_FORK_COPY
     
    759799    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    760800
    761     // remove all user vsegs registered in VSL
     801    // scan the VSL to delete all registered vsegs
     802    // (don't use a FOREACH for item deletion in xlist)
    762803        while( !xlist_is_empty( root_xp ) )
    763804        {
     
    766807        vseg    = GET_PTR( vseg_xp );
    767808
    768         // unmap and release physical pages
    769         vmm_unmap_vseg( process , vseg );
    770 
    771         // remove vseg from VSL
    772                 vmm_vseg_detach( vmm , vseg );
    773 
    774         // release memory allocated to vseg descriptor
    775         vseg_free( vseg );
     809        // delete vseg and release physical pages
     810        vmm_delete_vseg( process->pid , vseg->min );
    776811
    777812#if( DEBUG_VMM_DESTROY & 1 )
    778813if( DEBUG_VMM_DESTROY < cycle )
    779 printk("\n[%s] %s vseg released / vpn_base %x / vpn_size %d\n",
     814printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
    780815__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    781816#endif
     
    796831__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    797832#endif
    798                     vmm_vseg_detach( vmm , vseg );
     833            // clean vseg descriptor
     834            vseg->vmm = NULL;
     835
     836            // remove vseg from  xlist
     837            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     838
     839                    // release vseg descriptor
    799840            vseg_free( vseg );
    800841
     
    10791120
    10801121    // attach vseg to VSL
    1081         vmm_vseg_attach( vmm , vseg );
     1122        vmm_attach_vseg_to_vsl( vmm , vseg );
    10821123
    10831124#if DEBUG_VMM_CREATE_VSEG
     
    10921133}  // vmm_create_vseg()
    10931134
    1094 /////////////////////////////////////
    1095 void vmm_remove_vseg( vseg_t * vseg )
     1135///////////////////////////////////
     1136void vmm_delete_vseg( pid_t    pid,
     1137                      intptr_t vaddr )
    10961138{
    1097     // get pointers on calling process and VMM
    1098     thread_t   * this    = CURRENT_THREAD;
    1099     vmm_t      * vmm     = &this->process->vmm;
    1100     uint32_t     type    = vseg->type;
    1101 
    1102     // detach vseg from VSL
    1103         vmm_vseg_detach( vmm , vseg );
    1104 
    1105     // release the stack slot to VMM stack allocator if STACK type
    1106     if( type == VSEG_TYPE_STACK )
    1107     {
    1108         // get pointer on stack allocator
    1109         stack_mgr_t * mgr = &vmm->stack_mgr;
    1110 
    1111         // compute slot index
    1112         uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
    1113 
    1114         // update stacks_bitmap
    1115         busylock_acquire( &mgr->lock );
    1116         bitmap_clear( &mgr->bitmap , index );
    1117         busylock_release( &mgr->lock );
    1118     }
    1119 
    1120     // release the vseg to VMM mmap allocator if MMAP type
    1121     if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
    1122     {
    1123         // get pointer on mmap allocator
    1124         mmap_mgr_t * mgr = &vmm->mmap_mgr;
    1125 
    1126         // compute zombi_list index
    1127         uint32_t index = bits_log2( vseg->vpn_size );
    1128 
    1129         // update zombi_list
    1130         busylock_acquire( &mgr->lock );
    1131         list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
    1132         busylock_release( &mgr->lock );
    1133     }
    1134 
    1135     // release physical memory allocated for vseg descriptor if no MMAP type
    1136     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
    1137     {
    1138         vseg_free( vseg );
    1139     }
    1140 }  // end vmm_remove_vseg()
    1141 
    1142 /////////////////////////////////////////
    1143 void vmm_unmap_vseg( process_t * process,
    1144                      vseg_t    * vseg )
    1145 {
     1139    process_t * process;    // local pointer on local process
     1140    vmm_t     * vmm;        // local pointer on local process VMM
     1141    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
     1142    gpt_t     * gpt;        // local pointer on local process GPT
    11461143    vpn_t       vpn;        // VPN of current PTE
    11471144    vpn_t       vpn_min;    // VPN of first PTE
     
    11571154    uint32_t    forks;      // actual number of pendinf forks
    11581155
    1159 #if DEBUG_VMM_UNMAP_VSEG
     1156#if DEBUG_VMM_DELETE_VSEG
    11601157uint32_t   cycle = (uint32_t)hal_get_cycles();
    11611158thread_t * this  = CURRENT_THREAD;
    1162 if( DEBUG_VMM_UNMAP_VSEG < cycle )
    1163 printk("\n[%s] thread[%x,%x] enter / process %x / vseg %s / base %x / cycle %d\n",
    1164 __FUNCTION__, this->process->pid, this->trdid, process->pid,
    1165 vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
    1166 #endif
    1167 
    1168     // get pointer on local GPT
    1169     gpt_t     * gpt = &process->vmm.gpt;
    1170 
    1171     // loop on pages in vseg
     1159if( DEBUG_VMM_DELETE_VSEG < cycle )
     1160printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
     1161__FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
     1162#endif
     1163
     1164    // get local pointer on local process descriptor
     1165    process = cluster_get_local_process_from_pid( pid );
     1166
     1167    if( process == NULL ) return;
     1168
     1169    // get pointers on local process VMM an GPT
     1170    vmm = &process->vmm;
     1171    gpt = &process->vmm.gpt;
     1172
     1173    // get local pointer on vseg containing vaddr
     1174    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
     1175
     1176    if( vseg == NULL ) return;
     1177
     1178    // loop to invalidate all vseg PTEs in GPT
    11721179    vpn_min = vseg->vpn_base;
    11731180    vpn_max = vpn_min + vseg->vpn_size;
     
    11801187        {
    11811188
    1182 #if( DEBUG_VMM_UNMAP_VSEG & 1 )
    1183 if( DEBUG_VMM_UNMAP_VSEG < cycle )
    1184 printk("- vpn %x / ppn %x\n" , vpn , ppn );
     1189#if( DEBUG_VMM_DELETE_VSEG & 1 )
     1190if( DEBUG_VMM_DELETE_VSEG < cycle )
     1191printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
    11851192#endif
    11861193
     
    12251232                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
    12261233                    }
     1234
     1235#if( DEBUG_VMM_DELETE_VSEG & 1 )
     1236if( DEBUG_VMM_DELETE_VSEG < cycle )
     1237printk("- release ppn %x\n", ppn );
     1238#endif
    12271239                }
    12281240            }
     
    12301242    }
    12311243
    1232 #if DEBUG_VMM_UNMAP_VSEG
     1244    // remove vseg from VSL and release vseg descriptor (if not MMAP)
     1245    vmm_detach_vseg_from_vsl( vmm , vseg );
     1246
     1247#if DEBUG_VMM_DELETE_VSEG
    12331248cycle = (uint32_t)hal_get_cycles();
    1234 if( DEBUG_VMM_UNMAP_VSEG < cycle )
     1249if( DEBUG_VMM_DELETE_VSEG < cycle )
    12351250printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
    1236 __FUNCTION__, this->process->pid, this->trdid, process->pid,
    1237 vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
    1238 #endif
    1239 
    1240 }  // end vmm_unmap_vseg()
    1241 
    1242 //////////////////////////////////////////////////////////////////////////////////////////
    1243 // This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(),
    1244 // and vmm_resize_vseg() functions.  It scan the local VSL to find the unique vseg
    1245 // containing a given virtual address.
    1246 //////////////////////////////////////////////////////////////////////////////////////////
    1247 // @ vmm     : pointer on the process VMM.
    1248 // @ vaddr   : virtual address.
    1249 // @ return vseg pointer if success / return NULL if not found.
    1250 //////////////////////////////////////////////////////////////////////////////////////////
    1251 static vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
    1252                                      intptr_t   vaddr )
     1251__FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
     1252#endif
     1253
     1254}  // end vmm_delete_vseg()
     1255
     1256/////////////////////////////////////////////
     1257vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
     1258                              intptr_t   vaddr )
    12531259{
    12541260    xptr_t   iter_xp;
     
    13101316        remote_rwlock_wr_acquire( lock_xp );
    13111317
    1312         if( (vseg->min > addr_min) || (vseg->max < addr_max) )   // region not included in vseg
    1313     {
    1314         error = EINVAL;
    1315     }
    1316         else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be removed
    1317     {
    1318         vmm_remove_vseg( vseg );
     1318        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
     1319    {
     1320        error = -1;
     1321    }
     1322        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
     1323    {
     1324        vmm_delete_vseg( process->pid , vseg->min );
    13191325        error = 0;
    13201326    }
    1321         else if( vseg->min == addr_min )                         // vseg must be resized
     1327        else if( vseg->min == addr_min )                               // vseg must be resized
    13221328    {
    13231329        // update vseg base address
     
    13311337        error = 0;
    13321338    }
    1333         else if( vseg->max == addr_max )                          // vseg must be resized
     1339        else if( vseg->max == addr_max )                              // vseg must be resized
    13341340    {
    13351341        // update vseg max address
     
    13431349        error = 0;
    13441350    }
    1345     else                                                      // vseg cut in three regions
     1351    else                                                          // vseg cut in three regions
    13461352    {
    13471353        // resize existing vseg
     
    14151421        vseg_init_from_ref( vseg , vseg_xp );
    14161422
    1417         // register local vseg in local VMM
    1418         vmm_vseg_attach( vmm , vseg );
     1423        // register local vseg in local VSL
     1424        vmm_attach_vseg_to_vsl( vmm , vseg );
    14191425    }   
    14201426
  • trunk/kernel/mm/vmm.h

    r610 r611  
    3838
    3939struct process_s;
     40struct vseg_s;
    4041
    4142/*********************************************************************************************
    4243 * This structure defines the STACK allocator used by the VMM to dynamically handle
    43  * a STACK vseg requested or released by an user process.
    44  * This allocator handles a fixed size array of fixed size slots in the STACK zone.
     44 * vseg allocation or release requests for an user thread.
     45 * This allocator handles a fixed size array of fixed size slots in STACK zone of user space.
    4546 * The stack size and the number of slots are defined by the CONFIG_VMM_STACK_SIZE, and
    4647 * CONFIG_VMM_STACK_BASE parameters.
    47  * Each slot can contain one user stack vseg. The first page in the slot is not allocated
    48  * to detect stack overflow.
     48 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not
     49 * mapped to detect stack overflow.
    4950 * The slot index can be computed form the slot base address, and reversely.
    5051 * All allocation / release operations are registered in the stack_bitmap, that completely
    51  * define the STACK zone state.
     52 * define the STACK zone status.
    5253 ********************************************************************************************/
    5354
     
    159160
    160161/*********************************************************************************************
    161  * This function adds a vseg descriptor in the VSL of a given VMM,
    162  * and updates the vmm field in the vseg descriptor.
    163  * It takes the lock protecting VSL.
    164  *********************************************************************************************
    165  * @ vmm       : pointer on the VMM
    166  * @ vseg      : pointer on the vseg descriptor
    167  ********************************************************************************************/
    168 void vmm_vseg_attach( struct vmm_s  * vmm,
    169                       vseg_t        * vseg );
    170 
    171 /*********************************************************************************************
    172  * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
    173  * and updates the vmm field in the vseg descriptor. No memory is released.
    174  * It takes the lock protecting VSL.
    175  *********************************************************************************************
    176  * @ vmm       : pointer on the VMM
    177  * @ vseg      : pointer on the vseg descriptor
    178  ********************************************************************************************/
    179 void vmm_vseg_detach( struct vmm_s  * vmm,
    180                       vseg_t        * vseg );
    181 
    182 /*********************************************************************************************
    183162 * This function is called by the process_make_fork() function. It partially copies
    184163 * the content of a remote parent process VMM to the local child process VMM:
     
    235214
    236215/*********************************************************************************************
    237  * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the
    238  * <process> and <vseg> arguments. It can be used for any type of vseg.
    239  * If this function is executed in the reference cluster, it handles for each referenced
    240  * physical pages the pending forks counter :
    241  * - if counter is non-zero, it decrements it.
    242  * - if counter is zero, it releases the physical page to local kmem allocator.
    243  *********************************************************************************************
    244  * @ process  : pointer on process descriptor.
    245  * @ vseg     : pointer on the vseg to be unmapped.
    246  ********************************************************************************************/
    247 void vmm_unmap_vseg( struct process_s * process,
    248                      vseg_t           * vseg );
    249 
    250 /*********************************************************************************************
    251216 * This function deletes, in the local cluster, all vsegs registered in the VSL
    252217 * of the process identified by the <process> argument. For each vseg:
     
    254219 * - it removes the vseg from the local VSL.
    255220 * - it releases the memory allocated to the local vseg descriptors.
    256  * Finally, it releases the memory allocated to the GPT itself.
     221 * - it releases the memory allocated to the GPT itself.
    257222 *********************************************************************************************
    258223 * @ process   : pointer on process descriptor.
     
    304269
    305270/*********************************************************************************************
    306  * This function removes a vseg identified by it's pointer from the VMM of the calling process.
    307  * - If the vseg has not the STACK or MMAP type, it is removed from the vsegs list,
    308  *   and the physical memory allocated to vseg descriptor is released to KMEM.
    309  * - If the vseg has the STACK type, it is removed from the vsegs list, the physical memory
    310  *   allocated to vseg descriptor is released to KMEM, and the stack slot is returned to the
    311  *   VMM STACK allocator.
    312  * - If the vseg has the MMAP type, it is removed from the vsegs list and is registered
    313  *   in the zombi_list of the VMM MMAP allocator for future reuse. The physical memory
    314  *   allocated to vseg descriptor is NOT released to KMEM.
    315  *********************************************************************************************
    316  * @ vseg      : pointer on vseg to be removed.
    317  ********************************************************************************************/
    318 void vmm_remove_vseg( vseg_t * vseg );
     271 * This function removes from the local VMM of a process descriptor identified by the <pid>
     272 * argument a local vseg identified by its base address <vaddr> in user space.
     273 * It can be used for any type of vseg, but must be called by a local thread.
     274 * Use the RPC_VMM_DELETE_VSEG if the client thread is not local.
     275 * It does nothing if the process is not registered in the local cluster.
     276 * It does nothing if the vseg is not registered in the local process VSL.
     277 * - It removes from the local GPT all registered PTEs. If it is executed in the reference
     278 *   cluster, it releases the referenced physical pages, to the relevant kmem allocator,
     279 *   depending on vseg type and the pending forks counter.
     280 * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP.
     281 *********************************************************************************************
     282 * @ process  : process identifier.
     283 * @ vaddr    : vseg base address in user space.
     284 ********************************************************************************************/
     285void vmm_delete_vseg( pid_t    pid,
     286                      intptr_t vaddr );
     287
     288/*********************************************************************************************
     289 * This function insert a new <vseg> descriptor in the VSL identifed by the <vmm> argument.
     290 * and updates the vmm field in the vseg descriptor.
     291 * It takes the lock protecting VSL.
     292 *********************************************************************************************
     293 * @ vmm       : local pointer on local VMM.
     294 * @ vseg      : local pointer on local vseg descriptor.
     295 ********************************************************************************************/
     296void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
     297                             vseg_t * vseg );
     298
     299/*********************************************************************************************
     300 * This function removes a vseg identified by the <vseg> argument from the local VSL
     301 * identified by the <vmm> argument and release the memory allocated to vseg descriptor,
     302 * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE).
     303 * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL,
     304 *   and vseg descriptor is released.
     305 * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released,
     306 *   and the stack slot is returned to the local VMM_STACK allocator.
     307 * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list
     308 *   of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released.
     309 *********************************************************************************************
     310 * @ vmm       : local pointer on local VMM.
     311 * @ vseg      : local pointer on local vseg to be removed.
     312 ********************************************************************************************/
     313void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
     314                               vseg_t * vseg );
    319315
    320316/*********************************************************************************************
     
    338334
    339335/*********************************************************************************************
     336 * This low-level function scan the local VSL in <vmm> to find the unique vseg containing
     337 * a given virtual address <vaddr>.
     338 * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions.
     339 *********************************************************************************************
     340 * @ vmm     : pointer on the process VMM.
     341 * @ vaddr   : virtual address.
     342 * @ return vseg pointer if success / return NULL if not found.
     343 ********************************************************************************************/
     344struct vseg_s * vmm_vseg_from_vaddr( vmm_t    * vmm,
     345                                     intptr_t   vaddr );
     346
     347/*********************************************************************************************
    340348 * This function checks that a given virtual address is contained in a registered vseg.
    341349 * It can be called by any thread running in any cluster:
     
    344352 *   register it in local VMM and returns the local vseg pointer, if success.
    345353 * - it returns an user error if the vseg is missing in the reference VMM, or if there is
    346  *   not enough memory for a new vseg descriptor in cluster containing the calling thread.
     354 *   not enough memory for a new vseg descriptor in the calling thread cluster.
    347355 *********************************************************************************************
    348356 * @ process   : [in] pointer on process descriptor
     
    350358 * @ vseg      : [out] local pointer on local vseg
    351359 * @ returns 0 if success / returns -1 if user error (out of segment).
    352  *********************************************************************************************/
     360 ********************************************************************************************/
    353361error_t vmm_get_vseg( struct process_s  * process,
    354362                      intptr_t            vaddr,
  • trunk/kernel/mm/vseg.h

    r595 r611  
    7171typedef struct vseg_s
    7272{
    73     xlist_entry_t     xlist;        /*! all vsegs in same VSL (or same zombi list)        */
     73    xlist_entry_t     xlist;        /*! all vsegs in same VSL                             */
    7474    list_entry_t      zlist;        /*! all vsegs in same zombi list                      */
    7575    struct vmm_s    * vmm;          /*! pointer on associated VM manager                  */
Note: See TracChangeset for help on using the changeset viewer.