Changeset 651 for trunk/kernel/mm


Timestamp:
Nov 14, 2019, 11:50:09 AM (4 years ago)
Author:
alain
Message:

1) Improve the VMM MMAP allocator: implement the "buddy" algorithm
to allocate only aligned blocks.
2) Fix a bug in the pthread_join() / pthread_exit() mechanism.

Location:
trunk/kernel/mm
Files:
6 edited

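The "buddy" policy mentioned in the message keeps every MMAP vseg a power-of-two number of pages, aligned on its own size, so a block's buddy is found by flipping a single bit of its base page number. Below is a minimal stand-alone C sketch of that arithmetic; round_pow2(), order_of() and buddy_base() are illustrative stand-ins for the kernel's POW2_ROUNDUP and bits_log2 helpers, not part of the changeset.

    #include <stdint.h>

    // round a requested page count up to the next power of two
    static uint32_t round_pow2( uint32_t npages )
    {
        uint32_t size = 1;
        while( size < npages ) size <<= 1;
        return size;
    }

    // order = log2( size ) for a power-of-two size
    static uint32_t order_of( uint32_t size )
    {
        uint32_t order = 0;
        while( (1u << order) < size ) order++;
        return order;
    }

    // buddy of an aligned block : flip the bit selected by the order
    static uint32_t buddy_base( uint32_t vpn_base , uint32_t order )
    {
        return vpn_base ^ (1u << order);
    }

For instance a 5-page request is rounded to 8 pages (order 3); a free block based at vpn 0x40000 with order 3 has its buddy at vpn 0x40008, and the two can be merged back into a single order-4 block only when both are free.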
  • trunk/kernel/mm/mapper.c

    r637 r651  
    442442        if ( page_xp == XPTR_NULL ) return -1;
    443443
    444         // compute extended pointer in kernel mapper
     444        // compute extended pointer on kernel mapper
    445445        xptr_t     map_xp  = ppm_page2base( page_xp ) + page_offset;
    446446
     
    448448if( DEBUG_MAPPER_MOVE_USER < cycle )
    449449printk("\n[%s] thread[%x,%x] : get buffer(%x,%x) in mapper\n",
    450 __FUNCTION__, this->process->pid, this->trdid, map_cxy, map_ptr );
     450__FUNCTION__, this->process->pid, this->trdid, GET_CXY(map_xp), GET_PTR(map_xp) );
    451451#endif
    452452        // compute pointer in user buffer
  • trunk/kernel/mm/ppm.c

    r637 r651  
    296296                current_size >>= 1;
    297297
    298         // update order fiels in new free block
     298        // update order fields in new free block
    299299                current_block = found_block + current_size;
    300300                current_block->order = current_order;
  • trunk/kernel/mm/vmm.c

    r641 r651  
    5555
    5656////////////////////////////////////////////////////////////////////////////////////////////
     57// This static function is called by the vmm_user_init() function.
      58// It initialises the STACK allocator embedded in the VMM (bitmap, vpn_base, and lock),
      59// after checking that the STACK zone is large enough for CONFIG_THREADS_MAX_PER_CLUSTER stacks.
     60////////////////////////////////////////////////////////////////////////////////////////////
     61static void vmm_stack_init( vmm_t * vmm )
     62{
     63
     64// check STACK zone
     65assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
     66(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n");
     67
     68    // get pointer on STACK allocator
     69    stack_mgr_t * mgr = &vmm->stack_mgr;
     70
     71    mgr->bitmap   = 0;
     72    mgr->vpn_base = CONFIG_VMM_STACK_BASE;
     73    busylock_init( &mgr->lock , LOCK_VMM_STACK );
     74
     75}
     76
     77////////////////////////////////////////////////////////////////////////////////////////////
    5778// This static function is called by the vmm_create_vseg() function, and implements
    58 // the VMM STACK specific allocator.
     79// the VMM STACK specific allocator. Depending on the local thread index <ltid>,
     80// it checks availability of the corresponding slot in the process STACKS region,
     81// allocates a vseg descriptor, and initializes the "vpn_base" and "vpn_size" fields.
    5982////////////////////////////////////////////////////////////////////////////////////////////
    6083// @ vmm      : [in]  pointer on VMM.
    6184// @ ltid     : [in]  requested slot == local user thread identifier.
    62 // @ vpn_base : [out] first allocated page
    63 // @ vpn_size : [out] number of allocated pages
    6485////////////////////////////////////////////////////////////////////////////////////////////
    65 static void vmm_stack_alloc( vmm_t  * vmm,
    66                              ltid_t   ltid,
    67                              vpn_t  * vpn_base,
    68                              vpn_t  * vpn_size )
     86static vseg_t * vmm_stack_alloc( vmm_t  * vmm,
     87                                 ltid_t   ltid )
    6988{
    7089
     
    7695    stack_mgr_t * mgr = &vmm->stack_mgr;
    7796
    78     // get lock on stack allocator
     97    // get lock protecting stack allocator
    7998    busylock_acquire( &mgr->lock );
    8099
     
    83102"slot index %d already allocated", ltid );
    84103
     104    // allocate a vseg descriptor
     105    vseg_t * vseg = vseg_alloc();
     106
     107    if( vseg == NULL )
     108        {
     109        // release lock protecting free lists
     110        busylock_release( &mgr->lock );
     111
     112        printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
     113        __FUNCTION__ , local_cxy );
     114
     115        return NULL;
     116    }
     117
    85118    // update bitmap
    86119    bitmap_set( &mgr->bitmap , ltid );
     
    89122    busylock_release( &mgr->lock );
    90123
    91     // returns vpn_base, vpn_size (first page non allocated)
    92     *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
    93     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
     124    // set "vpn_base" & "vpn_size" fields (first page non allocated)
     125    vseg->vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1;
     126    vseg->vpn_size = CONFIG_VMM_STACK_SIZE - 1;
     127
     128    return vseg;
    94129
    95130} // end vmm_stack_alloc()
     
    98133// This static function is called by the vmm_remove_vseg() function, and implements
    99134// the VMM STACK specific deallocator.
     135// It updates the bitmap to release the corresponding slot in the process STACKS region,
     136// and releases memory allocated to vseg descriptor.
    100137////////////////////////////////////////////////////////////////////////////////////////////
    101138// @ vmm      : [in] pointer on VMM.
     
    128165    busylock_release( &mgr->lock );
    129166
     167    // release memory allocated to vseg descriptor
     168    vseg_free( vseg );
     169
    130170}  // end vmm_stack_free()
     171
     172
     173
     174////////////////////////////////////////////////////////////////////////////////////////////
      175// This function displays the current state of the VMM MMAP allocator of the process VMM
     176// identified by the <vmm> argument.
     177////////////////////////////////////////////////////////////////////////////////////////////
     178void vmm_mmap_display( vmm_t * vmm )
     179{
     180    uint32_t  order;
     181    xptr_t    root_xp;
     182    xptr_t    iter_xp;
     183
     184    // get pointer on process
     185    process_t * process = (process_t *)(((char*)vmm) - OFFSETOF( process_t , vmm ));
     186
     187    // get process PID
     188    pid_t pid = process->pid;
     189
     190    // get pointer on VMM MMAP allocator
     191    mmap_mgr_t * mgr = &vmm->mmap_mgr;
     192
     193    // display header
     194    printk("***** VMM MMAP allocator / process %x *****\n", pid );
     195
     196    // scan the array of free lists of vsegs
     197    for( order = 0 ; order <= CONFIG_VMM_HEAP_MAX_ORDER ; order++ )
     198    {
     199        root_xp = XPTR( local_cxy , &mgr->free_list_root[order] );
     200
     201        if( !xlist_is_empty( root_xp ) )
     202        {
     203            printk(" - %d (%x pages) : ", order , 1<<order );
     204
     205            XLIST_FOREACH( root_xp , iter_xp )
     206            {
     207                xptr_t   vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     208                vseg_t * vseg    = GET_PTR( vseg_xp );
     209
     210                printk("%x | ", vseg->vpn_base );
     211            }
     212
     213            printk("\n");
     214        }
     215    }
     216}  // end vmm_mmap_display()
     217
     218////////////////////////////////////////////////////////////////////////////////////////////
     219// This static function is called by the vmm_user_init() function.
     220// It initialises the free lists of vsegs used by the VMM MMAP allocator.
     221// TODO this function is only valid for 32 bits cores, and makes three assumptions:
     222// HEAP_BASE == 1 Gbytes / HEAP_SIZE == 2 Gbytes / MMAP_MAX_SIZE == 1 Gbytes
     223////////////////////////////////////////////////////////////////////////////////////////////
     224void vmm_mmap_init( vmm_t * vmm )
     225{
     226
     227// check HEAP base and size
     228assert( (CONFIG_VMM_HEAP_BASE == 0x40000) & (CONFIG_VMM_STACK_BASE == 0xc0000),
     229"CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" );
     230
     231// check  MMAP vseg max order
     232assert( (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" );
     233
     234    // get pointer on MMAP allocator
     235    mmap_mgr_t * mgr = &vmm->mmap_mgr;
     236
     237    // initialize HEAP base and size
     238    mgr->vpn_base        = CONFIG_VMM_HEAP_BASE;
     239    mgr->vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
     240
     241    // initialize lock
     242    busylock_init( &mgr->lock , LOCK_VMM_MMAP );
     243
     244    // initialize free lists
     245    uint32_t   i;
     246    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
     247    {
     248        xlist_root_init( XPTR( local_cxy , &mgr->free_list_root[i] ) );
     249    }
     250
     251    // allocate and register first 1 Gbytes vseg
     252    vseg_t * vseg0 = vseg_alloc();
     253
     254assert( (vseg0 != NULL) , "cannot allocate vseg" );
     255
     256    vseg0->vpn_base = CONFIG_VMM_HEAP_BASE;
     257    vseg0->vpn_size = CONFIG_VMM_HEAP_BASE;
     258
     259    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
     260                     XPTR( local_cxy , &vseg0->xlist ) );
     261
     262    // allocate and register second 1 Gbytes vseg
     263    vseg_t * vseg1 = vseg_alloc();
     264
     265assert( (vseg1 != NULL) , "cannot allocate vseg" );
     266
     267    vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1;
     268    vseg1->vpn_size = CONFIG_VMM_HEAP_BASE;
     269
     270    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
     271                     XPTR( local_cxy , &vseg1->xlist ) );
     272
     273#if DEBUG_VMM_MMAP
     274thread_t * this = CURRENT_THREAD;
     275uint32_t cycle = (uint32_t)hal_get_cycles();
     276printk("\n[%s] thread[%x,%x] / cycle %d\n",
     277__FUNCTION__, this->process->pid, this->trdid, cycle );
     278vmm_mmap_display( vmm );
     279#endif
     280
     281}  // end vmm_mmap_init()
    131282
    132283////////////////////////////////////////////////////////////////////////////////////////////
    133284// This static function is called by the vmm_create_vseg() function, and implements
    134 // the VMM MMAP specific allocator.
     285// the VMM MMAP specific allocator.  Depending on the requested number of pages <npages>,
      286// it gets a free vseg from the relevant free_list, and initializes the "vpn_base" and
     287// "vpn_size" fields.
    135288////////////////////////////////////////////////////////////////////////////////////////////
    136289// @ vmm      : [in] pointer on VMM.
    137290// @ npages   : [in] requested number of pages.
    138 // @ vpn_base : [out] first allocated page.
    139 // @ vpn_size : [out] actual number of allocated pages.
     291// @ returns local pointer on vseg if success / returns NULL if failure.
    140292////////////////////////////////////////////////////////////////////////////////////////////
    141 static error_t vmm_mmap_alloc( vmm_t * vmm,
    142                                vpn_t   npages,
    143                                vpn_t * vpn_base,
    144                                vpn_t * vpn_size )
     293static vseg_t * vmm_mmap_alloc( vmm_t * vmm,
     294                                vpn_t   npages )
    145295{
    146     uint32_t   order;
    147     xptr_t     vseg_xp;
    148     vseg_t   * vseg;
    149     vpn_t      base;
    150     vpn_t      size;
    151     vpn_t      free;
    152 
    153 #if DEBUG_VMM_MMAP_ALLOC
     296
     297#if DEBUG_VMM_MMAP
    154298thread_t * this = CURRENT_THREAD;
    155299uint32_t cycle = (uint32_t)hal_get_cycles();
    156 if( DEBUG_VMM_MMAP_ALLOC < cycle )
    157 printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    158 __FUNCTION__, this->process->pid, this->trdid, cycle );
     300if( DEBUG_VMM_MMAP < cycle )
     301printk("\n[%s] thread[%x,%x] for %x pages / cycle %d\n",
     302__FUNCTION__, this->process->pid, this->trdid, npages, cycle );
    159303#endif
    160304
    161305    // number of allocated pages must be power of 2
    162306    // compute actual size and order
    163     size = POW2_ROUNDUP( npages );
    164     order = bits_log2( size );
     307    vpn_t    required_vpn_size = POW2_ROUNDUP( npages );
     308    uint32_t required_order    = bits_log2( required_vpn_size );
    165309
    166310    // get mmap allocator pointer
    167311    mmap_mgr_t * mgr = &vmm->mmap_mgr;
    168312
    169     // build extended pointer on root of zombi_list[order]
    170     xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
    171 
    172     // take lock protecting zombi_lists
     313    // take lock protecting free lists in MMAP allocator
    173314    busylock_acquire( &mgr->lock );
    174315
    175     // get vseg from zombi_list or from mmap zone
    176     if( xlist_is_empty( root_xp ) )                   // from mmap zone
    177     {
    178         // check overflow
    179         free = mgr->first_free_vpn;
    180         if( (free + size) > mgr->vpn_size ) return -1;
    181 
    182         // update MMAP allocator
    183         mgr->first_free_vpn += size;
    184 
    185         // compute base
    186         base = free;
    187     }
    188     else                                              // from zombi_list
    189     {
    190         // get pointer on zombi vseg from zombi_list
    191         vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
    192         vseg    = GET_PTR( vseg_xp );
    193 
    194         // remove vseg from free-list
    195         xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    196 
    197         // compute base
    198         base = vseg->vpn_base;
    199     }
    200 
    201     // release lock
    202     busylock_release( &mgr->lock );
    203 
    204 #if DEBUG_VMM_MMAP_ALLOC
    205 cycle = (uint32_t)hal_get_cycles();
    206 if( DEBUG_VMM_DESTROY < cycle )
    207 printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
    208 __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
    209 #endif
    210 
    211     // returns vpn_base, vpn_size
    212     *vpn_base = base;
    213     *vpn_size = size;
    214     return 0;
     316    // initialises the while loop variables
     317    uint32_t   current_order = required_order;
     318    vseg_t   * current_vseg  = NULL;
     319
      320    // search a free vseg equal to or larger than the requested size
     321        while( current_order <= CONFIG_VMM_HEAP_MAX_ORDER )
     322        {
     323        // build extended pointer on free_pages_root[current_order]
     324        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[current_order] );
     325
     326                if( !xlist_is_empty( root_xp ) )
     327                {
     328            // get extended pointer on first vseg in this free_list
     329                        xptr_t current_vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
     330            current_vseg = GET_PTR( current_vseg_xp );
     331
     332            // build extended pointer on xlist field in vseg descriptor
     333            xptr_t list_entry_xp = XPTR( local_cxy , &current_vseg->xlist );
     334
     335            // remove this vseg from the free_list
     336                        xlist_unlink( list_entry_xp );
     337
     338                        break; 
     339                }
     340
     341        // increment loop index
     342        current_order++;
     343
     344    }  // end while loop
     345
     346    if( current_vseg == NULL )  // return failure
     347    {
     348        // release lock protecting free lists
     349        busylock_release( &mgr->lock );
     350
      351        printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
     352        __FUNCTION__, npages , local_cxy );
     353
     354        return NULL;
     355    }
     356
      357        // recursively split the found vseg into smaller vsegs
     358    // if required, and update the free-lists accordingly
     359        while( current_order > required_order )
     360        {
     361        // get found vseg base and size
     362        vpn_t  vpn_base = current_vseg->vpn_base;
     363        vpn_t  vpn_size = current_vseg->vpn_size;
     364       
     365        // allocate a new vseg for the upper half of current vseg
     366            vseg_t * new_vseg = vseg_alloc();
     367
     368            if( new_vseg == NULL )
     369        {
     370                // release lock protecting free lists
     371            busylock_release( &mgr->lock );
     372
     373            printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
     374            __FUNCTION__ , local_cxy );
     375
     376            return NULL;
     377            }
     378
     379        // initialise new vseg (upper half of found vseg)
     380        new_vseg->vmm      = vmm;
     381        new_vseg->vpn_base = vpn_base + (vpn_size >> 1);
     382        new_vseg->vpn_size = vpn_size >> 1;
     383
     384        // insert new vseg in relevant free_list
     385                xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[current_order-1] ),
     386                         XPTR( local_cxy , &new_vseg->xlist ) );
     387
     388        // update found vseg
     389        current_vseg->vpn_size = vpn_size>>1;
     390
     391        // update order
     392                current_order --;
     393        }
     394
     395        // release lock protecting free lists
     396        busylock_release( &mgr->lock );
     397
     398#if DEBUG_VMM_MMAP
     399vmm_mmap_display( vmm );
     400#endif
     401
     402    return current_vseg;
    215403
    216404}  // end vmm_mmap_alloc()
     
    219407// This static function implements the VMM MMAP specific deallocator.
    220408// It is called by the vmm_remove_vseg() function.
     409// It releases the vseg to the relevant free_list, after trying (recursively) to
     410// merge it to the buddy vseg.
    221411////////////////////////////////////////////////////////////////////////////////////////////
    222412// @ vmm      : [in] pointer on VMM.
     
    226416                           vseg_t * vseg )
    227417{
    228     // get pointer on mmap allocator
     418
     419#if DEBUG_VMM_MMAP
     420thread_t * this = CURRENT_THREAD;
     421uint32_t cycle = (uint32_t)hal_get_cycles();
     422if( DEBUG_VMM_MMAP < cycle )
     423printk("\n[%s] thread[%x,%x] for vpn_base %x / vpn_size %x / cycle %d\n",
     424__FUNCTION__, this->process->pid, this->trdid, vseg->vpn_base, vseg->vpn_size, cycle );
     425#endif
     426
     427    vseg_t * buddy_vseg;
     428
     429    // get mmap allocator pointer
    229430    mmap_mgr_t * mgr = &vmm->mmap_mgr;
    230431
    231     // compute zombi_list order
    232     uint32_t order = bits_log2( vseg->vpn_size );
    233 
    234     // take lock protecting zombi lists
     432    // take lock protecting free lists
    235433    busylock_acquire( &mgr->lock );
    236434
    237     // update relevant zombi_list
    238     xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
    239                      XPTR( local_cxy , &vseg->xlist ) );
     435    // initialise loop variables
     436    // released_vseg is the currently released vseg
     437    vseg_t * released_vseg     = vseg;
     438    uint32_t released_order    = bits_log2( vseg->vpn_size );
     439
      440        // iteratively merge the released vseg with its buddy vseg
      441        // register the released vseg and exit when the buddy is not found
     442    while( released_order <= CONFIG_VMM_HEAP_MAX_ORDER )
     443    {
     444        // compute buddy_vseg vpn_base
     445                vpn_t buddy_vpn_base = released_vseg->vpn_base ^ (1 << released_order);
     446       
     447        // build extended pointer on free_pages_root[current_order]
     448        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[released_order] );
     449
     450        // scan this free list to find the buddy vseg
     451        xptr_t   iter_xp;
     452        buddy_vseg = NULL;
     453        XLIST_FOREACH( root_xp , iter_xp )
     454        {
     455            xptr_t   current_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     456            vseg_t * current_vseg    = GET_PTR( current_vseg_xp );
     457
     458            if( current_vseg->vpn_base == buddy_vpn_base )
     459            {
     460                buddy_vseg = current_vseg;
     461                break;
     462            }
     463        }
     464       
     465        if( buddy_vseg != NULL )     // buddy found => merge released & buddy
     466        {
     467            // update released vseg fields
     468            released_vseg->vpn_size = buddy_vseg->vpn_size<<1;
     469            if( released_vseg->vpn_base > buddy_vseg->vpn_base)
     470                released_vseg->vpn_base = buddy_vseg->vpn_base;
     471
     472            // remove buddy vseg from free_list
     473            xlist_unlink( XPTR( local_cxy , &buddy_vseg->xlist ) );
     474
     475            // release memory allocated to buddy descriptor
     476            vseg_free( buddy_vseg );
     477        }
     478        else                         // buddy not found => register & exit
     479        {
     480            // register released vseg in free list
     481            xlist_add_first( root_xp , XPTR( local_cxy , &released_vseg->xlist ) );
     482
     483            // exit while loop
     484            break;
     485        }
     486
     487        // increment released_order
     488        released_order++;
     489    }
    240490
    241491    // release lock
    242492    busylock_release( &mgr->lock );
    243493
    244 }  // end of vmm_mmap_free()
     494#if DEBUG_VMM_MMAP
     495vmm_mmap_display( vmm );
     496#endif
     497
     498}  // end vmm_mmap_free()
    245499
    246500////////////////////////////////////////////////////////////////////////////////////////////
     
    288542error_t vmm_user_init( process_t * process )
    289543{
    290     uint32_t  i;
    291544
    292545#if DEBUG_VMM_USER_INIT
     
    306559         "UTILS zone too small\n" );
    307560
    308 // check STACK zone
    309 assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
    310 (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    311 "STACK zone too small\n");
    312 
    313     // initialize the lock protecting the VSL
     561    // initialize lock protecting the VSL
    314562        remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    315563
     564
     565    // initialize STACK allocator
     566    vmm_stack_init( vmm );
     567
     568    // initialize MMAP allocator
     569    vmm_mmap_init( vmm );
     570
     571    // initialize instrumentation counters
     572        vmm->false_pgfault_nr    = 0;
     573        vmm->local_pgfault_nr    = 0;
     574        vmm->global_pgfault_nr   = 0;
     575        vmm->false_pgfault_cost  = 0;
     576        vmm->local_pgfault_cost  = 0;
     577        vmm->global_pgfault_cost = 0;
    316578
    317579/*
     
    356618    vmm->envs_vpn_base = base;
    357619*/
    358     // initialize STACK allocator
    359     vmm->stack_mgr.bitmap   = 0;
    360     vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
    361     busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );
    362 
    363     // initialize MMAP allocator
    364     vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
    365     vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    366     vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    367     busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
    368     for( i = 0 ; i < 32 ; i++ )
    369     {
    370         xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
    371     }
    372 
    373     // initialize instrumentation counters
    374         vmm->false_pgfault_nr    = 0;
    375         vmm->local_pgfault_nr    = 0;
    376         vmm->global_pgfault_nr   = 0;
    377         vmm->false_pgfault_cost  = 0;
    378         vmm->local_pgfault_cost  = 0;
    379         vmm->global_pgfault_cost = 0;
    380 
    381620    hal_fence();
    382621
     
    11581397    remote_queuelock_release( parent_lock_xp );
    11591398
      1399/* deprecated [AG] : this is already done by the vmm_user_init() function
     1400
    11601401    // initialize the child VMM STACK allocator
    1161     child_vmm->stack_mgr.bitmap   = 0;
    1162     child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
     1402    vmm_stack_init( child_vmm );
    11631403
    11641404    // initialize the child VMM MMAP allocator
    1165     uint32_t i;
    1166     child_vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
    1167     child_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    1168     child_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    1169     for( i = 0 ; i < 32 ; i++ )
    1170     {
    1171         xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
    1172     }
     1405    vmm_mmap_init( child_vmm );
    11731406
    11741407    // initialize instrumentation counters
     
    11791412        child_vmm->local_pgfault_cost  = 0;
    11801413        child_vmm->global_pgfault_cost = 0;
    1181 
     1414*/
    11821415    // copy base addresses from parent VMM to child VMM
    11831416    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
     
    12601493    remote_queuelock_release( vsl_lock_xp );
    12611494
    1262     // remove all registered MMAP vsegs
    1263     // from zombi_lists in MMAP allocator
     1495    // remove all registered MMAP vsegs from free_lists in MMAP allocator
    12641496    uint32_t i;
    1265     for( i = 0 ; i<32 ; i++ )
    1266     {
    1267         // build extended pointer on zombi_list[i]
    1268         xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
     1497    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
     1498    {
     1499        // build extended pointer on free list root
     1500        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.free_list_root[i] );
    12691501 
    12701502        // scan zombi_list[i]
     
    13431575                          cxy_t         cxy )
    13441576{
    1345     vseg_t     * vseg;          // created vseg pointer
    1346     vpn_t        vpn_base;      // first page index
    1347     vpn_t        vpn_size;      // number of pages covered by vseg
    1348         error_t      error;
     1577    vseg_t     * vseg;          // pointer on allocated vseg descriptor
    13491578
    13501579#if DEBUG_VMM_CREATE_VSEG
     
    13641593        vmm_t * vmm    = &process->vmm;
    13651594
    1366     // compute base, size, vpn_base, vpn_size, depending on vseg type
    1367     // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs
    1368 
     1595    // allocate a vseg descriptor and initialize it, depending on type
     1596    // we use specific allocators for "stack" and "mmap" types
     1597
     1598    /////////////////////////////
    13691599    if( type == VSEG_TYPE_STACK )
    13701600    {
    1371         // get vpn_base and vpn_size from STACK allocator
    1372         vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );
    1373 
    1374         // compute vseg base and size from vpn_base and vpn_size
    1375         base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    1376         size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
    1377     }
    1378     else if( type == VSEG_TYPE_FILE )
    1379     {
    1380         // compute page index (in mapper) for first byte
    1381         vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
    1382 
    1383         // compute page index (in mapper) for last byte
    1384         vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1385 
    1386         // compute offset in first page
    1387         uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
    1388 
    1389         // compute number of pages required in virtual space
    1390         vpn_t    npages      = vpn_max - vpn_min + 1;
    1391 
    1392         // get vpn_base and vpn_size from MMAP allocator
    1393         error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
    1394         if( error )
     1601        // get vseg from STACK allocator
     1602        vseg = vmm_stack_alloc( vmm , base );    // base == ltid
     1603       
     1604        if( vseg == NULL )
    13951605        {
    1396             printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
    1397                    __FUNCTION__ , process->pid , local_cxy );
     1606            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1607            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
    13981608            return NULL;
    13991609        }
    14001610
    1401         // set the vseg base (not always aligned for FILE)
    1402         base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
    1403     }
    1404     else if( (type == VSEG_TYPE_ANON) ||
    1405              (type == VSEG_TYPE_REMOTE) )
     1611        // initialize vseg
     1612        vseg->type = type;
     1613        vseg->vmm  = vmm;
     1614        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
     1615        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
     1616        vseg->cxy  = cxy;
     1617
     1618        vseg_init_flags( vseg , type );
     1619    }
     1620    /////////////////////////////////
     1621    else if( type == VSEG_TYPE_FILE )
     1622    {
     1623        // compute page index (in mapper) for first and last byte
     1624        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
     1625        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1626
     1627        // compute offset in first page and number of pages
     1628        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
     1629        vpn_t    npages      = vpn_max - vpn_min + 1;
     1630
     1631        // get vseg from MMAP allocator
     1632        vseg = vmm_mmap_alloc( vmm , npages );
     1633
     1634        if( vseg == NULL )
     1635        {
     1636            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1637            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1638            return NULL;
     1639        }
     1640
     1641        // initialize vseg
     1642        vseg->type        = type;
     1643        vseg->vmm         = vmm;
     1644        vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
     1645        vseg->max         = vseg->min + size;
     1646        vseg->file_offset = file_offset;
     1647        vseg->file_size   = file_size;
     1648        vseg->mapper_xp   = mapper_xp;
     1649        vseg->cxy         = cxy;
     1650
     1651        vseg_init_flags( vseg , type );
     1652    }
     1653    /////////////////////////////////////////////////////////////////
     1654    else if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) )
    14061655    {
    14071656        // compute number of required pages in virtual space
     
    14091658        if( size & CONFIG_PPM_PAGE_MASK) npages++;
    14101659       
    1411         // get vpn_base and vpn_size from MMAP allocator
    1412         error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
    1413         if( error )
     1660        // allocate vseg from MMAP allocator
     1661        vseg = vmm_mmap_alloc( vmm , npages );
     1662
     1663        if( vseg == NULL )
    14141664        {
    1415             printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
    1416                    __FUNCTION__ , process->pid , local_cxy );
     1665            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1666            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
    14171667            return NULL;
    14181668        }
    14191669
    1420         // set vseg base (always aligned for ANON or REMOTE)
    1421         base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    1422     }
     1670        // initialize vseg
     1671        vseg->type = type;
     1672        vseg->vmm  = vmm;
     1673        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
     1674        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
     1675        vseg->cxy  = cxy;
     1676
     1677        vseg_init_flags( vseg , type );
     1678    }
     1679    /////////////////////////////////////////////////////////////////
    14231680    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    14241681    {
     
    14261683        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
    14271684
    1428         vpn_base = vpn_min;
    1429             vpn_size = vpn_max - vpn_min + 1;
     1685        // allocate vseg descriptor
     1686            vseg = vseg_alloc();
     1687
     1688            if( vseg == NULL )
     1689            {
     1690            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1691            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1692            return NULL;
     1693            }
     1694        // initialize vseg
     1695        vseg->type        = type;
     1696        vseg->vmm         = vmm;
     1697        vseg->min         = base;
     1698        vseg->max         = base + size;
     1699        vseg->vpn_base    = base >> CONFIG_PPM_PAGE_SHIFT;
     1700        vseg->vpn_size    = vpn_max - vpn_min + 1;
     1701        vseg->file_offset = file_offset;
     1702        vseg->file_size   = file_size;
     1703        vseg->mapper_xp   = mapper_xp;
     1704        vseg->cxy         = cxy;
     1705
     1706        vseg_init_flags( vseg , type );
    14301707    }
    14311708
    14321709    // check collisions
    1433     vseg = vmm_check_conflict( process , vpn_base , vpn_size );
    1434 
    1435     if( vseg != NULL )
    1436     {
    1437         printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n"
    1438                "  overlap existing vseg [vpn_base %x / vpn_size %x]\n",
    1439         __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
     1710    vseg_t * existing_vseg = vmm_check_conflict( process , vseg->vpn_base , vseg->vpn_size );
     1711
     1712    if( existing_vseg != NULL )
     1713    {
     1714        printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
     1715               "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
     1716        __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size,
     1717        vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
     1718        vseg_free( vseg );
    14401719        return NULL;
    14411720    }
    1442 
    1443     // allocate physical memory for vseg descriptor
    1444         vseg = vseg_alloc();
    1445         if( vseg == NULL )
    1446         {
    1447             printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",
    1448         __FUNCTION__ , process->pid );
    1449         return NULL;
    1450         }
    1451 
    1452 #if (DEBUG_VMM_CREATE_VSEG & 1)
    1453 if( DEBUG_VMM_CREATE_VSEG < cycle )
    1454 printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",
    1455 __FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );
    1456 #endif
    1457 
    1458     // initialize vseg descriptor
    1459         vseg_init( vseg,
    1460                type,
    1461                base,
    1462                size,
    1463                vpn_base,
    1464                vpn_size,
    1465                file_offset,
    1466                file_size,
    1467                mapper_xp,
    1468                cxy );
    14691721
    14701722    // build extended pointer on VSL lock
     
    14801732    remote_queuelock_release( lock_xp );
    14811733
    1482 #if DEBUG_VMM_CREATE_VSEG
     1734#if DEBUG_VMM_CREATE_VSEG 
    14831735cycle = (uint32_t)hal_get_cycles();
    1484 // if( DEBUG_VMM_CREATE_VSEG < cycle )
    1485 if( type == VSEG_TYPE_REMOTE )
    1486 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n",
    1487 __FUNCTION__, this->process->pid, this->trdid,
    1488 process->pid, vseg_type_str(type), base, cxy, cycle );
     1736if( DEBUG_VMM_CREATE_VSEG < cycle )
     1737printk("\n[%s] thread[%x,%x] exit / %s / vpn_base %x / vpn_size %x / cycle %d\n",
     1738__FUNCTION__, this->process->pid, this->trdid,
     1739vseg_type_str(type), vseg->vpn_base, vseg->vpn_size, cycle );
    14891740#endif
    14901741
     
    16541905        // release slot to local stack allocator
    16551906        vmm_stack_free( vmm , vseg );
    1656 
    1657         // release vseg descriptor to local kmem
    1658         vseg_free( vseg );
    16591907    }
    16601908    else if( (vseg_type == VSEG_TYPE_ANON) ||
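As a reading aid for the vmm_mmap_alloc() / vmm_mmap_free() hunks above, the following self-contained user-space model reproduces the same split and merge logic over virtual page numbers. It is only a sketch: plain malloc()'d blocks and singly linked lists stand in for the kernel's vseg descriptors and xlist primitives, and MAX_ORDER simply mirrors CONFIG_VMM_HEAP_MAX_ORDER.

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_ORDER  18                              // mirrors CONFIG_VMM_HEAP_MAX_ORDER

    // minimal stand-in for a free vseg : only vpn_base / vpn_size matter here
    typedef struct block_s
    {
        uint32_t         vpn_base;
        uint32_t         vpn_size;
        struct block_s * next;
    }
    block_t;

    static block_t * free_list[MAX_ORDER + 1];         // one free list per order

    // allocation : take a block from the smallest non-empty list, split down to <order>
    static block_t * buddy_alloc( uint32_t order )
    {
        uint32_t cur = order;
        while( (cur <= MAX_ORDER) && (free_list[cur] == NULL) ) cur++;
        if( cur > MAX_ORDER ) return NULL;             // no free block large enough

        block_t * blk  = free_list[cur];
        free_list[cur] = blk->next;

        while( cur > order )                           // split : keep the lower half,
        {                                              // register the upper half
            block_t * upper = malloc( sizeof(block_t) );
            if( upper == NULL )                        // descriptor allocation failed :
            {                                          // put the block back and give up
                blk->next      = free_list[cur];
                free_list[cur] = blk;
                return NULL;
            }
            cur--;
            blk->vpn_size  >>= 1;
            upper->vpn_size  = blk->vpn_size;
            upper->vpn_base  = blk->vpn_base + blk->vpn_size;
            upper->next      = free_list[cur];
            free_list[cur]   = upper;
        }
        return blk;
    }

    // release : merge with the buddy (vpn_base ^ (1 << order)) as long as it is free
    static void buddy_free( block_t * blk , uint32_t order )
    {
        while( order < MAX_ORDER )
        {
            uint32_t   buddy_vpn = blk->vpn_base ^ (1u << order);
            block_t ** prev      = &free_list[order];

            // scan the free list of this order to find the buddy
            while( (*prev != NULL) && ((*prev)->vpn_base != buddy_vpn) ) prev = &(*prev)->next;
            if( *prev == NULL ) break;                 // buddy not free : stop merging

            block_t * buddy = *prev;                   // unlink and absorb the buddy
            *prev = buddy->next;
            if( buddy->vpn_base < blk->vpn_base ) blk->vpn_base = buddy->vpn_base;
            blk->vpn_size <<= 1;
            free( buddy );
            order++;
        }
        blk->next        = free_list[order];
        free_list[order] = blk;
    }

The kernel version differs mainly in bookkeeping: vsegs come from vseg_alloc() / vseg_free(), the per-order free lists are xlist roots protected by a busylock, and failures are reported with printk() before returning NULL.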
  • trunk/kernel/mm/vmm.h

    r640 r651  
    6464
    6565/*********************************************************************************************
    66  * This structure defines the MMAP allocator used by the VMM to dynamically handle 
    67  * MMAP vsegs requested or released by an user process.
    68  * This allocator should be only used in the reference cluster.
    69  * - allocation policy : all allocated vsegs occupy an integer number of pages that is
    70  *   power of 2, and are aligned on a page boundary. The requested number of pages is
    71  *   rounded if required. The first_free_vpn variable defines completely the MMAP zone state.
    72  *   It is never decremented, as the released vsegs are simply registered in a zombi_list.
    73  *   The relevant zombi_list is checked first for each allocation request.
    74  * - release policy : a released MMAP vseg is registered in an array of zombi_lists.
    75  *   This array is indexed by ln(number of pages), and each entry contains the root of
    76  *   a local list of zombi vsegs that have the same size. The physical memory allocated
    77  *   for a zombi vseg descriptor is not released, to use the "list" field.
    78  *   This physical memory allocated for MMAP vseg descriptors is actually released
    79  *   when the VMM is destroyed.
     66 * This structure defines the MMAP allocator used by the VMM to dynamically handle MMAP vsegs
      67 * requested or released by a user process. It must only be used in the reference cluster.
     68 * - allocation policy :
     69 *   This allocator implements the buddy algorithm. All allocated vsegs occupy an integer
      70 *   number of pages that is a power of 2, and are aligned (vpn_base is a multiple of vpn_size).
     71 *   The requested number of pages is rounded if required. The global allocator state is
     72 *   completely defined by the free_pages_root[] array indexed by the vseg order.
     73 *   These free lists are local, but are implemented as xlist because we use the existing
     74 *   vseg.xlist to register a free vseg in its free list.
     75 * - release policy :
     76 *   A released vseg is recursively merged with the "buddy" vseg when it is free, in
     77 *   order to build the largest possible aligned free vsegs. The resulting vseg.vpn_size
     78 *   field is updated.
     79 * Implementation note:
      80 * The only significant (and documented) fields in the vsegs registered in the MMAP allocator
     81 * free lists are "xlist", "vpn_base", and "vpn_size".
    8082 ********************************************************************************************/
    8183
     
    8587    vpn_t          vpn_base;           /*! first page of MMAP zone                          */
    8688    vpn_t          vpn_size;           /*! number of pages in MMAP zone                     */
    87     vpn_t          first_free_vpn;     /*! first free page in MMAP zone                     */
    88     xlist_entry_t  zombi_list[32];     /*! array of roots of released vsegs lists           */
     89    xlist_entry_t  free_list_root[CONFIG_VMM_HEAP_MAX_ORDER + 1];  /* roots of free lists   */
    8990}
    9091mmap_mgr_t;
     
    103104 * 2. The VSL contains only local vsegs, but it is implemented as an xlist, and protected by
    104105 *    a remote_rwlock, because it can be accessed by a thread running in a remote cluster.
    105  *    An exemple is the vmm_fork_copy() function.
     106 *    An example is the vmm_fork_copy() function.
    106107 * 3. The GPT in the reference cluster can be directly accessed by remote threads to handle
    107108 *    false page-fault (page is mapped in the reference GPT, but the PTE copy is missing
     
    119120
    120121    stack_mgr_t        stack_mgr;           /*! embedded STACK vsegs allocator              */
     122
    121123    mmap_mgr_t         mmap_mgr;            /*! embedded MMAP vsegs allocator               */
    122124
     
    156158 * call to the vmm_user_init() function after an exec() syscall.
    157159 * It removes from the VMM of the process identified by the <process> argument all
    158  * non kernel vsegs (i.e. all user vsegs), by calling the vmm_remove_vseg() function.
      160 * user vsegs, by calling the vmm_remove_vseg() function.
    159161 * - the vsegs are removed from the VSL.
    160162 * - the corresponding GPT entries are removed from the GPT.
     
    279281/*********************************************************************************************
    280282 * This function allocates memory for a vseg descriptor, initialises it, and registers it
    281  * in the VSL of the local process descriptor, that must be the reference process.
    282  * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments,
    283  *   but uses the specific MMAP virtual memory allocator.
     283 * in the VSL of the local process descriptor.
     284 * - For the FILE, ANON, & REMOTE types, it does not use the <base> argument, but uses
     285 *   the specific VMM MMAP allocator.
    284286 * - For the STACK type, it does not use the <base> and <size> arguments,  but uses the
    285  *   and the <base> argument the specific STACK virtual memory allocator.
      287 *   specific VMM STACK allocator.
    286288 * It checks collision with pre-existing vsegs.
    287289 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT,
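For reference, the configuration values asserted in vmm_mmap_init() and documented above give the following heap geometry, assuming the 4 Kbytes page size implied by the "1 Gbytes / 2 Gbytes" comments (a sanity-check sketch, not kernel code):

    // 0x40000 pages x 4 Kbytes = 1 Gbyte  : the user heap starts at 1 Gbyte
    #define HEAP_BASE_VPN    0x40000
    // 0xc0000 pages x 4 Kbytes = 3 Gbytes : the user stacks start at 3 Gbytes
    #define STACK_BASE_VPN   0xc0000
    // heap zone = 0x80000 pages = 2 Gbytes, initially covered by two 1 Gbyte vsegs
    #define HEAP_SIZE_VPN    (STACK_BASE_VPN - HEAP_BASE_VPN)
    // max order 18 : (1 << 18) = 0x40000 pages = 256K pages = 1 Gbyte per vseg
    #define HEAP_MAX_ORDER   18

This is why vmm_mmap_init() registers exactly two vsegs of order CONFIG_VMM_HEAP_MAX_ORDER in the free lists, and why the TODO note restricts the function to 32-bit cores.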
  • trunk/kernel/mm/vseg.c

    r635 r651  
    8181}
    8282
    83 ///////////////////////////////////
    84 void vseg_init( vseg_t      * vseg,
    85                 vseg_type_t   type,
    86                     intptr_t      base,
    87                 uint32_t      size,
    88                 vpn_t         vpn_base,
    89                 vpn_t         vpn_size,
    90                         uint32_t      file_offset,
    91                 uint32_t      file_size,
    92                 xptr_t        mapper_xp,
    93                 cxy_t         cxy )
     83/////////////////////////////////////////
     84void vseg_init_flags( vseg_t      * vseg,
     85                      vseg_type_t   type )
    9486{
    95     vseg->type        = type;
    96         vseg->min         = base;
    97         vseg->max         = base + size;
    98     vseg->vpn_base    = vpn_base;
    99         vseg->vpn_size    = vpn_size;
    100     vseg->file_offset = file_offset;
    101     vseg->file_size   = file_size;
    102         vseg->mapper_xp   = mapper_xp;
    103     vseg->cxy         = cxy;
    104 
    10587    // set vseg flags depending on type
    106         if     ( type == VSEG_TYPE_CODE )
     88        if( type == VSEG_TYPE_CODE )
    10789    {
    10890        vseg->flags = VSEG_USER    |
  • trunk/kernel/mm/vseg.h

    r640 r651  
    4141typedef enum
    4242{
    43     VSEG_TYPE_CODE   = 0,          /*! executable user code     / private / localized     */
    44     VSEG_TYPE_DATA   = 1,          /*! initialized user data    / public  / distributed   */
    45     VSEG_TYPE_STACK  = 2,          /*! execution user stack     / private / localized     */
    46     VSEG_TYPE_ANON   = 3,          /*! anonymous mmap           / public  / localized     */
    47     VSEG_TYPE_FILE   = 4,          /*! file mmap                / public  / localized     */
    48     VSEG_TYPE_REMOTE = 5,          /*! remote mmap              / public  / localized     */
     43    VSEG_TYPE_CODE   = 1,          /*! executable user code     / private / localized     */
     44    VSEG_TYPE_DATA   = 2,          /*! initialized user data    / public  / distributed   */
     45    VSEG_TYPE_STACK  = 3,          /*! execution user stack     / private / localized     */
     46    VSEG_TYPE_ANON   = 4,          /*! anonymous mmap           / public  / localized     */
     47    VSEG_TYPE_FILE   = 5,          /*! file mmap                / public  / localized     */
     48    VSEG_TYPE_REMOTE = 6,          /*! remote mmap              / public  / localized     */
    4949
    50     VSEG_TYPE_KCODE  = 6,          /*! executable kernel code   / private / localized     */
    51     VSEG_TYPE_KDATA  = 7,          /*! initialized kernel data  / private / localized     */
    52     VSEG_TYPE_KDEV   = 8,          /*! kernel peripheral device / public  / localized     */
     50    VSEG_TYPE_KCODE  = 7,          /*! executable kernel code   / private / localized     */
     51    VSEG_TYPE_KDATA  = 8,          /*! initialized kernel data  / private / localized     */
     52    VSEG_TYPE_KDEV   = 9,          /*! kernel peripheral device / public  / localized     */
    5353}
    5454vseg_type_t;
     
    115115
    116116/*******************************************************************************************
    117  * This function initializes a local vseg descriptor, from the arguments values.
    118  * It does NOT register the vseg in the local VMM.
     117 * This function initializes the "flags" field for a local <vseg> descriptor,
     118 * depending on the vseg <type>.
    119119 *******************************************************************************************
    120120 * @ vseg      : pointer on the vseg descriptor.
    121  * @ base      : vseg base address.
    122  * @ size      : vseg size (bytes).
    123  * @ vpn_base  : first page index.
    124  * @ vpn_size  : number of pages.
    125121 * @ type      : vseg type.
    126  * @ cxy       : target cluster for physical mapping.
    127122 ******************************************************************************************/
    128 void vseg_init( vseg_t      * vseg,
    129                     vseg_type_t   type,
    130                 intptr_t      base,
    131                     uint32_t      size,
    132                 vpn_t         vpn_base,
    133                 vpn_t         vpn_size,
    134                 uint32_t      file_offset,
    135                 uint32_t      file_size,
    136                 xptr_t        mapper_xp,
    137                 cxy_t         cxy );
     123void vseg_init_flags( vseg_t      * vseg,
     124                          vseg_type_t   type );
    138125
    139126/*******************************************************************************************