Changeset 433 for trunk/kernel/mm


Timestamp:
Feb 14, 2018, 3:40:19 PM
Author:
alain
Message:

blip

Location:
trunk/kernel/mm
Files:
8 edited

  • trunk/kernel/mm/kcm.c

    r407 r433  
    4747                             kcm_page_t * kcm_page )
    4848{
    49         kcm_dmsg("\n[DBG] %s : enters for %s / page %x / count = %d / active = %d\n",
    50                  __FUNCTION__ , kmem_type_str( kcm->type ) ,
    51                  (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
     49
     50#if CONFIG_DEBUG_KCM_ALLOC
     51uint32_t cycle = (uint32_t)hal_get_cycles();
     52if( CONFIG_DEBUG_KCM_ALLOC < cycle )
     53printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
     54__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
     55(intptr_t)kcm_page , kcm_page->count , kcm_page->active );
     56#endif
    5257
    5358        assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );
     
    8085                     + (index * kcm->block_size) );
    8186
    82         kcm_dmsg("\n[DBG] %s : allocated one block  %s / ptr = %p / page = %x / count = %d\n",
    83                  __FUNCTION__ , kmem_type_str( kcm->type ) , ptr ,
    84                  (intptr_t)kcm_page , kcm_page->count );
     87#if CONFIG_DEBUG_KCM_ALLOC
     88cycle = (uint32_t)hal_get_cycles();
     89if( CONFIG_DEBUG_KCM_ALLOC < cycle )
     90printk("\n[DBG] %s : thread %x exit / type  %s / ptr %p / page %x / count %d\n",
     91__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
     92(intptr_t)kcm_page , kcm_page->count );
     93#endif
    8594
    8695        return ptr;
     
    300309                kcm->active_pages_nr ++;
    301310                kcm_page->active = 1;
    302 
    303                 kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
    304                          __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
    305                          (intptr_t)kcm_page , kcm_page->count );
    306 
    307311        }
    308312        else                                    // get first page from active list
     
    310314                // get page pointer from active list
    311315                kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
    312 
    313                 kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
    314                          __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
    315                          (intptr_t)kcm_page , kcm_page->count );
    316316        }
    317317
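
This first hunk shows the pattern applied throughout the changeset: the old kcm_dmsg() macros are replaced by explicit #if CONFIG_DEBUG_* blocks that print only once the current cycle count has passed a per-feature threshold. A minimal standalone model of that gating logic, with hypothetical stand-ins for hal_get_cycles() and printk() (a sketch of the pattern, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    // threshold: printing starts once the cycle counter exceeds this value;
    // defining it as 0 compiles the whole debug block out
    #define CONFIG_DEBUG_KCM_ALLOC  1000

    // hypothetical stand-in for hal_get_cycles(): a monotonic cycle counter
    static uint64_t fake_get_cycles( void )
    {
        static uint64_t cycles = 0;
        return cycles += 600;
    }

    static void kcm_alloc_model( void )
    {
    #if CONFIG_DEBUG_KCM_ALLOC
    uint32_t cycle = (uint32_t)fake_get_cycles();
    if( CONFIG_DEBUG_KCM_ALLOC < cycle )
    printf("\n[DBG] %s : enter / cycle %u\n", __FUNCTION__ , cycle );
    #endif
        // ... allocation work would happen here ...
    }

    int main( void )
    {
        kcm_alloc_model();   // cycle == 600  : below threshold, silent
        kcm_alloc_model();   // cycle == 1200 : above threshold, prints
        return 0;
    }

The unindented #if blocks mirror the changeset's own layout, which keeps the instrumentation visually separate from the function logic.
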
  • trunk/kernel/mm/kmem.c

    r429 r433  
    198198        if( type == KMEM_PAGE )                        // PPM allocator
    199199        {
     200
     201#if CONFIG_DEBUG_KMEM_ALLOC
     202if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
     203printk("\n[DBG] in %s : thread %x enter for %d page(s)\n",
     204__FUNCTION__ , CURRENT_THREAD , 1<<size );
     205#endif
     206
    200207                // allocate the number of requested pages
    201208                ptr = (void *)ppm_alloc_pages( size );
     
    213220                          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
    214221                          (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
     222
     223#if CONFIG_DEBUG_KMEM_ALLOC
     224if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
     225printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x\n",
     226__FUNCTION__ , CURRENT_THREAD , 1<<size , ppm_page2ppn( XPTR( local_cxy , ptr ) ) );
     227#endif
     228
    215229        }
    216230        else if( type == KMEM_GENERIC )                // KHM allocator
  • trunk/kernel/mm/page.c

    r408 r433  
    4747        page->index    = 0;
    4848        page->refcount = 0;
    49         page->fork_nr  = 0;
     49        page->forks = 0;
    5050
    5151        spinlock_init( &page->lock );
  • trunk/kernel/mm/page.h

    r408 r433  
    5656 * This structure defines a physical page descriptor.
    5757 * Size is 64 bytes for a 32 bits core...
     58 * TODO : the list of waiting threads seems to be unused [AG]
     59 * TODO : the spinlock use has to be clarified [AG]
    5860 ************************************************************************************/
    5961
     
    6769    xlist_entry_t     wait_root;      /*! root of list of waiting threads      (16) */
    6870        uint32_t          refcount;       /*! reference counter                    (4)  */
    69         uint32_t          fork_nr;        /*! number of pending forks              (4)  */
    70         spinlock_t        lock;           /*! only used to set the PG_LOCKED flag  (16) */
     71        uint32_t          forks;          /*! number of pending forks              (4)  */
     72        spinlock_t        lock;           /*! To Be Defined [AG]                   (16) */
    7173}
    7274page_t;
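
The fork_nr to forks rename is not cosmetic: the later hunks read and update this counter from vmm_fork_copy(), vmm_get_pte() and vmm_unmap_vseg(), always through hal_remote_atomic_add(). A standalone model of the counter's lifecycle, using C11 atomics in place of the remote atomic primitive (illustrative names, not the kernel API):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    // minimal model of the physical page descriptor fields involved
    typedef struct model_page_s
    {
        atomic_uint forks;     // number of pending forks (the new page.h field)
        bool        released;  // model flag: page returned to the allocator
    } model_page_t;

    // fork path: each child GPT entry mapped on this page adds one pending fork
    static void on_fork_copy( model_page_t * page )
    {
        atomic_fetch_add( &page->forks , 1 );
    }

    // COW path: resolving a write on a shared page consumes one pending fork
    static void on_cow_resolved( model_page_t * page )
    {
        atomic_fetch_sub( &page->forks , 1 );
    }

    // unmap path: decrement if forks remain, release the page otherwise
    // (the real code still needs the page lock here, per the FIXME in vmm.c:
    // this load-then-update sequence is not atomic as a whole)
    static void on_unmap( model_page_t * page )
    {
        if( atomic_load( &page->forks ) ) atomic_fetch_sub( &page->forks , 1 );
        else                              page->released = true;
    }

    int main( void )
    {
        model_page_t page = { .forks = 0 , .released = false };

        on_fork_copy( &page );      // one fork pending : forks == 1
        on_cow_resolved( &page );   // COW write resolved : forks == 0
        on_unmap( &page );          // nothing pending : page is released

        printf("forks = %u / released = %d\n",
               atomic_load( &page.forks ) , page.released );
        return 0;
    }
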
  • trunk/kernel/mm/ppm.c

    r407 r433  
    193193        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    194194        ppm->free_pages_nr[current_order] ++;
    195 }
     195
     196}  // end ppm_free_pages_nolock()
    196197
    197198////////////////////////////////////////////
     
    201202        page_t   * remaining_block;
    202203        uint32_t   current_size;
     204 
     205#if CONFIG_DEBUG_PPM_ALLOC_PAGES
     206uint32_t cycle = (uint32_t)hal_get_cycles();
     207if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     208printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
     209__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     210#endif
     211
     212#if(CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)
     213if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     214ppm_print();
     215#endif
    203216
    204217        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     
    208221
    209222        page_t * block = NULL; 
    210 
    211         ppm_dmsg("\n[DBG] %s : enters / order = %d\n",
    212                  __FUNCTION__ , order );
    213223
    214224        // take lock protecting free lists
     
    231241                spinlock_unlock( &ppm->free_lock );
    232242
     243#if CONFIG_DEBUG_PPM_ALLOC_PAGES
     244cycle = (uint32_t)hal_get_cycles();
     245if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     246printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
     247__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     248#endif
     249
    233250                return NULL;
    234251        }
     
    260277        spinlock_unlock( &ppm->free_lock );
    261278
    262         ppm_dmsg("\n[DBG] %s : base = %x / order = %d\n",
    263                  __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
     279#if CONFIG_DEBUG_PPM_ALLOC_PAGES
     280cycle = (uint32_t)hal_get_cycles();
     281if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     282printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
     283__FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     284#endif
    264285
    265286        return block;
    266 }
     287
     288}  // end ppm_alloc_pages()
    267289
    268290
     
    272294        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    273295
     296#if CONFIG_DEBUG_PPM_FREE_PAGES
     297uint32_t cycle = (uint32_t)hal_get_cycles();
     298if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     299printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
     300__FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
     301#endif
     302
     303#if(CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)
     304if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     305ppm_print();
     306#endif
     307
    274308        // get lock protecting free_pages[] array
    275309        spinlock_lock( &ppm->free_lock );
     
    279313        // release lock protecting free_pages[] array
    280314        spinlock_unlock( &ppm->free_lock );
     315
     316#if CONFIG_DEBUG_PPM_FREE_PAGES
     317cycle = (uint32_t)hal_get_cycles();
     318if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     319printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
     320__FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
     321#endif
     322
    281323}
    282324
    283 ////////////////////////////
    284 void ppm_print( ppm_t * ppm,
    285                 char  * string )
     325////////////////
     326void ppm_print()
    286327{
    287328        uint32_t       order;
     
    289330        page_t       * page;
    290331
     332    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     333
    291334        // get lock protecting free lists
    292335        spinlock_lock( &ppm->free_lock );
    293336
    294         printk("\n***  PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n",
    295     local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base );
     337        printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );
    296338
    297339        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    298340        {
    299                 printk("- order = %d / free_pages = %d  [",
     341                printk("- order = %d / free_pages = %d\t: ",
    300342                       order , ppm->free_pages_nr[order] );
    301343
     
    303345                {
    304346                        page = LIST_ELEMENT( iter , page_t , list );
    305                         printk("%d," , page - ppm->pages_tbl );
     347                        printk("%x," , page - ppm->pages_tbl );
    306348                }
    307349
    308                 printk("]\n", NULL );
     350                printk("\n");
    309351        }
    310352
  • trunk/kernel/mm/ppm.h

    r409 r433  
    5252 * from the "kernel_heap" section.
    5353 * This low-level allocator implements the buddy algorithm: an allocated block is
    54  * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
     54 * an integer number n of small pages, where n is a power of 2 (n = 2^order).
    5555 ****************************************************************************************/
    5656
     
    163163
    164164/*****************************************************************************************
    165  * This function prints the PPM allocator status.
    166  *****************************************************************************************
    167  * @ ppm      : pointer on PPM allocator.
    168  * @ string   : define context of display.
     165 * This function prints the PPM allocator status in the calling thread's cluster.
    169166 ****************************************************************************************/
    170 void ppm_print( ppm_t * ppm,
    171                 char  * string );
     167void ppm_print();
    172168
    173169/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r429 r433  
    6363    intptr_t  size;
    6464
    65 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    66 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     65#if CONFIG_DEBUG_VMM_INIT
     66uint32_t cycle = (uint32_t)hal_get_cycles();
     67if( CONFIG_DEBUG_VMM_INIT < cycle )
     68printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     69__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     70#endif
    6771
    6872    // get pointer on VMM
     
    179183    hal_fence();
    180184
    181 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / entry_point = %x\n",
    182 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    183 process->pid , process->vmm.entry_point );
     185#if CONFIG_DEBUG_VMM_INIT
     186cycle = (uint32_t)hal_get_cycles();
     187if( CONFIG_DEBUG_VMM_INIT < cycle )
     188printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
     189__FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
     190#endif
    184191
    185192    return 0;
     
    211218    {
    212219        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    213         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     220        vseg    = GET_PTR( vseg_xp );
    214221
    215222        printk(" - %s : base = %X / size = %X / npages = %d\n",
     
    239246}  // vmm_display()
    240247
    241 /////////////////////i////////////////////
    242 void vmm_update_pte( process_t * process,
    243                      vpn_t       vpn,
    244                      uint32_t    attr,
    245                      ppn_t       ppn )
     248/////////////////////i//////////////////////////
     249void vmm_global_update_pte( process_t * process,
     250                            vpn_t       vpn,
     251                            uint32_t    attr,
     252                            ppn_t       ppn )
    246253{
    247254
     
    258265    cxy_t           owner_cxy;
    259266    lpid_t          owner_lpid;
     267
     268#if CONFIG_DEBUG_VMM_UPDATE_PTE
     269uint32_t cycle = (uint32_t)hal_get_cycles();
     270if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     271printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
     272__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     273#endif
     274
     275    // check cluster is reference
     276    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__,
     277    "not called in reference cluster\n");
    260278
    261279    // get extended pointer on root of process copies xlist in owner cluster
     
    271289        // get cluster and local pointer on remote process
    272290        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
    273         remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
     291        remote_process_ptr = GET_PTR( remote_process_xp );
    274292        remote_process_cxy = GET_CXY( remote_process_xp );
     293
     294#if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
     295if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     296printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
     297__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     298#endif
    275299
    276300        // get extended pointer on remote gpt
    277301        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
    278302
    279         hal_gpt_update_pte( remote_gpt_xp,
    280                             vpn,
    281                             attr,
    282                             ppn );
     303        // update remote GPT
     304        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    283305    } 
    284 }  // end vmm_update_pte()
     306
     307#if CONFIG_DEBUG_VMM_UPDATE_PTE
     308cycle = (uint32_t)hal_get_cycles();
     309if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     310printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
     311__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     312#endif
     313
     314}  // end vmm_global_update_pte()
    285315
    286316///////////////////////////////////////
     
    308338    lpid_t          owner_lpid;
    309339
    310 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    311 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     340#if CONFIG_DEBUG_VMM_SET_COW
     341uint32_t cycle = (uint32_t)hal_get_cycles();
     342if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     343printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     344__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     345#endif
    312346
    313347    // check cluster is reference
     
    333367        // get cluster and local pointer on remote process
    334368        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
    335         remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
     369        remote_process_ptr = GET_PTR( remote_process_xp );
    336370        remote_process_cxy = GET_CXY( remote_process_xp );
    337371
    338 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling process %x in cluster %x\n",
    339 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid , remote_process_cxy );
     372#if (CONFIG_DEBUG_VMM_SET_COW &0x1)
     373if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     374printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
     375__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     376#endif
    340377
    341378        // get extended pointer on remote gpt
     
    347384            // get pointer on vseg
    348385            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
    349             vseg     = (vseg_t *)GET_PTR( vseg_xp );
     386            vseg     = GET_PTR( vseg_xp );
    350387
    351388            assert( (GET_CXY( vseg_xp ) == local_cxy) , __FUNCTION__,
     
    357394            vpn_t    vpn_size = vseg->vpn_size;
    358395
    359 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
    360 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vseg_type_str(type), vpn_base, vpn_size );
    361 
    362             // set COW flag on the remote GPT depending on vseg type
     396#if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
     397if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     398printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
     399__FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );
     400#endif
     401            // only DATA, ANON and REMOTE vsegs
    363402            if( (type == VSEG_TYPE_DATA)  ||
    364403                (type == VSEG_TYPE_ANON)  ||
    365404                (type == VSEG_TYPE_REMOTE) )
    366405            {
    367                 hal_gpt_flip_cow( true,             // set_cow
    368                                   remote_gpt_xp,
    369                                   vpn_base,
    370                                   vpn_size );
    371             }
    372         }    // en loop on vsegs
     406                vpn_t      vpn;
     407                uint32_t   attr;
     408                ppn_t      ppn;
     409                xptr_t     page_xp;
     410                cxy_t      page_cxy;
     411                page_t   * page_ptr;
     412                xptr_t     forks_xp;
     413
     414                // update flags in remote GPT
     415                hal_gpt_set_cow( remote_gpt_xp,
     416                                 vpn_base,
     417                                 vpn_size );
     418
     419                // atomically increment pending forks counter in physical pages,
     420                // for all vseg pages that are mapped in reference cluster
     421                if( remote_process_cxy == local_cxy )
     422                {
     423                    // the reference GPT is the local GPT
     424                    gpt_t * gpt = GET_PTR( remote_gpt_xp );
     425
     426                    // scan all pages in vseg
     427                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
     428                    {
     429                        // get page attributes and PPN from reference GPT
     430                        hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
     431
     432                        // atomically update pending forks counter if page is mapped
     433                        if( attr & GPT_MAPPED )
     434                        {
     435                            page_xp  = ppm_ppn2page( ppn );
     436                            page_cxy = GET_CXY( page_xp );
     437                            page_ptr = GET_PTR( page_xp );
     438                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
     439                            hal_remote_atomic_add( forks_xp , 1 );
     440                        }
     441                    }   // end loop on vpn
     442                }   // end if local
     443            }   // end if vseg type
     444        }   // end loop on vsegs
    373445    }   // end loop on process copies
    374446 
    375 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
    376 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     447#if CONFIG_DEBUG_VMM_SET_COW
     448cycle = (uint32_t)hal_get_cycles();
     449if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     450printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
     451__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     452#endif
    377453
    378454}  // end vmm_set_cow()
     
    404480    ppn_t       ppn;
    405481
    406 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
    407 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     482#if CONFIG_DEBUG_VMM_FORK_COPY
     483uint32_t cycle = (uint32_t)hal_get_cycles();
     484if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     485printk("\n[DBG] %s : thread %x enter / cycle %d\n",
     486__FUNCTION__ , CURRENT_THREAD, cycle );
     487#endif
    408488
    409489    // get parent process cluster and local pointer
    410490    parent_cxy     = GET_CXY( parent_process_xp );
    411     parent_process = (process_t *)GET_PTR( parent_process_xp );
     491    parent_process = GET_PTR( parent_process_xp );
    412492
    413493    // get local pointers on parent and child VMM
     
    445525        // get local and extended pointers on current parent vseg
    446526        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    447         parent_vseg    = (vseg_t *)GET_PTR( parent_vseg_xp );
     527        parent_vseg    = GET_PTR( parent_vseg_xp );
    448528
    449529        // get vseg type
    450530        type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
    451531       
    452 
    453 vmm_dmsg("\n[DBG] %s : core[%x,%d] found parent vseg %s / vpn_base = %x\n",
    454 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
    455 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
     532#if CONFIG_DEBUG_VMM_FORK_COPY
     533cycle = (uint32_t)hal_get_cycles();
     534if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     535printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
     536__FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
     537hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     538#endif
    456539
    457540        // all parent vsegs - but STACK - must be copied in child VSL
     
    473556            vseg_attach( child_vmm , child_vseg );
    474557
    475 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child VSL : vseg %s / vpn_base = %x\n",
    476 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
    477 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
     558#if CONFIG_DEBUG_VMM_FORK_COPY
     559cycle = (uint32_t)hal_get_cycles();
     560if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     561printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
     562__FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
     563hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     564#endif
    478565
    479566            // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
     
    502589                    }
    503590
    504                     // increment page descriptor fork_nr for the referenced page if mapped
     591                    // increment pending forks counter in page if mapped
    505592                    if( mapped )
    506593                    {
    507594                        page_xp = ppm_ppn2page( ppn );
    508595                        page_cxy = GET_CXY( page_xp );
    509                         page_ptr = (page_t *)GET_PTR( page_xp );
    510                         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 );
    511 
    512 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child GPT : vpn %x\n",
    513 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     596                        page_ptr = GET_PTR( page_xp );
     597                        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
     598
     599#if CONFIG_DEBUG_VMM_FORK_COPY
     600cycle = (uint32_t)hal_get_cycles();
     601if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     602printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
     603__FUNCTION__ , CURRENT_THREAD , vpn , cycle );
     604#endif
    514605
    515606                    }
     
    558649    hal_fence();
    559650
     651#if CONFIG_DEBUG_VMM_FORK_COPY
     652cycle = (uint32_t)hal_get_cycles();
     653if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     654printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
     655__FUNCTION__ , CURRENT_THREAD , cycle );
     656#endif
     657
    560658    return 0;
    561659
     
    568666        vseg_t * vseg;
    569667
    570 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
    571 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
    572 
    573     // get pointer on VMM
     668#if CONFIG_DEBUG_VMM_DESTROY
     669uint32_t cycle = (uint32_t)hal_get_cycles();
     670if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     671printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     672__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     673#endif
     674
     675    // get pointer on local VMM
    574676    vmm_t  * vmm = &process->vmm;
    575677
     
    586688        // get pointer on first vseg in VSL
    587689                vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
    588         vseg = (vseg_t *)GET_PTR( vseg_xp );
    589 
    590         // unmap and release all pages
     690        vseg    = GET_PTR( vseg_xp );
     691
     692        // unmap and release physical pages if required
    591693        vmm_unmap_vseg( process , vseg );
    592694
     
    598700        }
    599701
    600     // release lock
     702    // release lock protecting VSL
    601703        remote_rwlock_wr_unlock( lock_xp );
    602704
     
    616718    hal_gpt_destroy( &vmm->gpt );
    617719
    618 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit\n",
    619 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     720#if CONFIG_DEBUG_VMM_DESTROY
     721cycle = (uint32_t)hal_get_cycles();
     722if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     723printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     724__FUNCTION__ , CURRENT_THREAD , cycle );
     725#endif
    620726
    621727}  // end vmm_destroy()
     
    637743        {
    638744                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    639         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     745        vseg    = GET_PTR( vseg_xp );
    640746
    641747                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
     
    766872        error_t      error;
    767873
    768 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters / process %x / base %x / size %x / %s / cxy = %x\n",
    769 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    770 process->pid , base , size , vseg_type_str(type) , cxy );
     874#if CONFIG_DEBUG_VMM_CREATE_VSEG
     875uint32_t cycle = (uint32_t)hal_get_cycles();
     876if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     877printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
     878__FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );
     879#endif
    771880
    772881    // get pointer on VMM
     
    854963        remote_rwlock_wr_unlock( lock_xp );
    855964
    856 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / base %x / size %x / type %s\n",
    857 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    858 process->pid , base , size , vseg_type_str(type) );
     965#if CONFIG_DEBUG_VMM_CREATE_VSEG
     966cycle = (uint32_t)hal_get_cycles();
     967if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     968printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
     969__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
     970#endif
    859971
    860972        return vseg;
     
    9851097    cxy_t       page_cxy;   // page descriptor cluster
    9861098    page_t    * page_ptr;   // page descriptor pointer
    987 
    988 vmm_dmsg("\n[DBG] %s : core[%x, %d] enter / process %x / vseg %s / base %x / cycle %d\n",
    989 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ,
    990 vseg_type_str( vseg->type ), vseg->vpn_base, (uint32_t)hal_get_cycles() );
    991 
    992     // get pointer on process GPT
     1099    xptr_t      forks_xp;   // extended pointer on pending forks counter
     1100    uint32_t    count;      // actual number of pending forks
     1101
     1102#if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1103uint32_t cycle = (uint32_t)hal_get_cycles();
     1104if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1105printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
     1106__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     1107#endif
     1108
     1109    // get pointer on local GPT
    9931110    gpt_t     * gpt = &process->vmm.gpt;
    9941111
     
    10071124            "an user vseg must use small pages" );
    10081125
    1009             // unmap GPT entry
     1126            // unmap GPT entry in all GPT copies
    10101127            hal_gpt_reset_pte( gpt , vpn );
    10111128
    1012             // release memory if not identity mapped
    1013             if( (vseg->flags & VSEG_IDENT)  == 0 )
     1129            // handle pending forks counter if
     1130            // 1) not identity mapped
     1131            // 2) running in reference cluster
     1132            if( ((vseg->flags & VSEG_IDENT)  == 0) &&
     1133                (GET_CXY( process->ref_xp ) == local_cxy) )
    10141134            {
    1015                 // get extended pointer on page descriptor
     1135                // get extended pointer on physical page descriptor
    10161136                page_xp  = ppm_ppn2page( ppn );
    10171137                page_cxy = GET_CXY( page_xp );
    1018                 page_ptr = (page_t *)GET_PTR( page_xp );
    1019 
    1020                 // release physical page to relevant cluster
    1021                 if( page_cxy == local_cxy )                   // local cluster
     1138                page_ptr = GET_PTR( page_xp );
     1139
     1140                // FIXME lock the physical page
     1141
     1142                // get extended pointer on pending forks counter
     1143                forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1144
     1145                // get pending forks counter
     1146                count = hal_remote_lw( forks_xp );
     1147               
     1148                if( count )  // decrement pending forks counter
    10221149                {
    1023                     req.type = KMEM_PAGE;
    1024                     req.ptr  = page_ptr;
    1025                     kmem_free( &req );
     1150                    hal_remote_atomic_add( forks_xp , -1 );
     1151                } 
     1152                else         // release physical page to relevant cluster
     1153                {
     1154                    if( page_cxy == local_cxy )   // local cluster
     1155                    {
     1156                        req.type = KMEM_PAGE;
     1157                        req.ptr  = page_ptr;
     1158                        kmem_free( &req );
     1159                    }
     1160                    else                          // remote cluster
     1161                    {
     1162                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
     1163                    }
    10261164                }
    1027                 else                                          // remote cluster
    1028                 {
    1029                     rpc_pmem_release_pages_client( page_cxy , page_ptr );
    1030                 }
     1165
     1166                // FIXME unlock the physical page
    10311167            }
    10321168        }
    10331169    }
     1170
     1171#if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1172cycle = (uint32_t)hal_get_cycles();
     1173if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1174printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
     1175__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     1176#endif
     1177
    10341178}  // end vmm_unmap_vseg()
    10351179
     
    10611205    {
    10621206        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    1063         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     1207        vseg    = GET_PTR( vseg_xp );
    10641208        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    10651209        {
     
    11851329        // get cluster and local pointer on reference process
    11861330        cxy_t       ref_cxy = GET_CXY( ref_xp );
    1187         process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     1331        process_t * ref_ptr = GET_PTR( ref_xp );
    11881332
    11891333        if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
     
    12241368                                 vpn_t    vpn )
    12251369{
     1370
     1371#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
     1372if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1373printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
     1374__FUNCTION__ , CURRENT_THREAD, vpn );
     1375#endif
     1376
    12261377    // compute target cluster
    12271378    page_t     * page_ptr;
     
    12621413    }
    12631414
     1415#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
     1416if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1417printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
     1418__FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
     1419#endif
     1420
    12641421    if( page_ptr == NULL ) return XPTR_NULL;
    12651422    else                   return XPTR( page_cxy , page_ptr );
     
    12811438    index     = vpn - vseg->vpn_base;
    12821439
    1283 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n",
    1284 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index );
     1440#if CONFIG_DEBUG_VMM_GET_ONE_PPN
     1441if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1442printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
     1443__FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );
     1444#endif
    12851445
    12861446    // FILE type : get the physical page from the file mapper
     
    12951455        // get mapper cluster and local pointer
    12961456        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1297         mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
     1457        mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    12981458
    12991459        // get page descriptor from mapper
     
    13161476    else
    13171477    {
    1318         // allocate physical page
     1478        // allocate one physical page
    13191479        page_xp = vmm_page_allocate( vseg , vpn );
    13201480
     
    13221482
    13231483        // initialise missing page from .elf file mapper for DATA and CODE types
    1324         // => the mapper_xp field is an extended pointer on the .elf file mapper
     1484        // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper)
    13251485        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
    13261486        {
     
    13331493            // get mapper cluster and local pointer
    13341494            cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1335             mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
     1495            mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    13361496
    13371497            // compute missing page offset in vseg
     
    13411501            uint32_t elf_offset = vseg->file_offset + offset;
    13421502
    1343 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n",
    1344 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset );
     1503#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1504if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1505printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
     1506__FUNCTION__, CURRENT_THREAD, vpn, elf_offset );
     1507#endif
    13451508
    13461509            // compute extended pointer on page base
     
    13521515            if( file_size < offset )                 // missing page fully in  BSS
    13531516            {
    1354 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in BSS\n",
    1355 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
     1517
     1518#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1519if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1520printk("\n[DBG] %s : thread %x for vpn = %x / fully in BSS\n",
     1521__FUNCTION__, CURRENT_THREAD, vpn );
     1522#endif
    13561523
    13571524                if( GET_CXY( page_xp ) == local_cxy )
     
    13671534            {
    13681535
    1369 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in mapper\n",
    1370 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
     1536#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1537if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1538printk("\n[DBG] %s : thread %x for vpn = %x / fully in mapper\n",
     1539__FUNCTION__, CURRENT_THREAD, vpn );
     1540#endif
    13711541
    13721542                if( mapper_cxy == local_cxy )
     
    13961566            {
    13971567
    1398 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n"
    1399          "      %d bytes from mapper / %d bytes from BSS\n",
    1400 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn,
     1568#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1569if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1570printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
     1571"      %d bytes from mapper / %d bytes from BSS\n",
     1572__FUNCTION__, CURRENT_THREAD, vpn,
    14011573file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
    1402 
     1574#endif
    14031575                // initialize mapper part
    14041576                if( mapper_cxy == local_cxy )
     
    14411613    *ppn = ppm_page2ppn( page_xp );
    14421614
    1443 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n",
    1444 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn );
     1615#if CONFIG_DEBUG_VMM_GET_ONE_PPN
     1616if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1617printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
     1618__FUNCTION__ , CURRENT_THREAD , vpn , *ppn );
     1619#endif
    14451620
    14461621    return 0;
     
    14551630                     ppn_t     * ppn )
    14561631{
    1457     vseg_t  * vseg;       // pointer on vseg containing VPN
     1632    vseg_t  * vseg;       // vseg containing VPN
    14581633    ppn_t     old_ppn;    // current PTE_PPN
    14591634    uint32_t  old_attr;   // current PTE_ATTR
     
    14661641    "not called in the reference cluster\n" );
    14671642
    1468 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x in process %x / cow = %d\n",
    1469 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , cow );
     1643#if CONFIG_DEBUG_VMM_GET_PTE
     1644uint32_t cycle = (uint32_t)hal_get_cycles();
     1645if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1646printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
     1647__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
     1648#endif
    14701649
    14711650    // get VMM pointer
    14721651    vmm_t * vmm = &process->vmm;
    14731652
    1474     // get vseg pointer from ref VSL
     1653    // get vseg pointer from reference VSL
    14751654    error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
    14761655
     
    14821661    }
    14831662
    1484 vmm_dmsg("\n[DBG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n",
    1485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    1486 vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size );
     1663#if CONFIG_DEBUG_VMM_GET_PTE
     1664cycle = (uint32_t)hal_get_cycles();
     1665if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1666printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
     1667__FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
     1668#endif
    14871669
    14881670    // access GPT to get current PTE attributes and PPN
     
    14931675    // clusters containing a copy, and return the new_ppn and new_attr
    14941676
    1495     if( cow )               ////////////// copy_on_write request ///////////
     1677    if( cow )  /////////////////////////// copy_on_write request //////////////////////
    14961678    {
    14971679        assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    14981680        "PTE must be mapped for a copy-on-write exception\n" );
    14991681
    1500 excp_dmsg("\n[DBG] %s : core[%x,%d] handling COW for vpn %x\n",
    1501 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
    1502 
    1503         // get extended pointer, cluster and local pointer on page descriptor
     1682#if CONFIG_DEBUG_VMM_GET_PTE
     1683cycle = (uint32_t)hal_get_cycles();
     1684if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1685printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
     1686__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     1687#endif
     1688
     1689        // get extended pointer, cluster and local pointer on physical page descriptor
    15041690        xptr_t   page_xp  = ppm_ppn2page( old_ppn );
    15051691        cxy_t    page_cxy = GET_CXY( page_xp );
    1506         page_t * page_ptr = (page_t *)GET_PTR( page_xp );
     1692        page_t * page_ptr = GET_PTR( page_xp );
    15071693
    15081694        // get number of pending forks in page descriptor
    1509         uint32_t count = hal_remote_lw( XPTR( page_cxy , &page_ptr->fork_nr ) );
    1510 
    1511         if( count )        // pending fork => allocate a new page, copy it, reset COW
     1695        uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
     1696
     1697        if( forks )        // pending fork => allocate a new page, copy old to new
    15121698        {
    15131699            // allocate a new physical page
     
    15391725
    15401726        // update GPT[vpn] for all GPT copies
    1541         // to maintain coherence of copies
    1542         vmm_update_pte( process,
    1543                         vpn,
    1544                         new_attr,
    1545                         new_ppn );
    1546 
    1547         // decrement fork_nr in page descriptor
    1548         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , -1 );
    1549     }
    1550     else                         /////////////// page_fault request ///////////
     1727        vmm_global_update_pte( process, vpn, new_attr, new_ppn );
     1728
     1729        // decrement pending forks counter in page descriptor
     1730        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
     1731    }
     1732    else  ////////////////////////////////// page_fault request ////////////////////////
    15511733    { 
    15521734        if( (old_attr & GPT_MAPPED) == 0 )   // true page_fault => map it
    15531735        {
    15541736
    1555 excp_dmsg("\n[DBG] %s : core[%x,%d] handling page fault for vpn %x\n",
    1556 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     1737#if CONFIG_DEBUG_VMM_GET_PTE
     1738cycle = (uint32_t)hal_get_cycles();
     1739if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1740printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
     1741__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     1742#endif
    15571743
    15581744            // allocate new_ppn, depending on vseg type
     
    15921778    }
    15931779
    1594 excp_dmsg("\n[DBG] %s : core[%x,%d] update GPT for vpn %x / ppn = %x / attr = %x\n",
    1595 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , new_ppn , new_attr );
    1596 
    1597     // retur success
     1780#if CONFIG_DEBUG_VMM_GET_PTE
     1781cycle = (uint32_t)hal_get_cycles();
     1782if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1783printk("\n[DBG] %s : thread %x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n",
     1784__FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
     1785#endif
     1786
     1787    // return success
    15981788    *ppn  = new_ppn;
    15991789    *attr = new_attr;
     
    16121802    // get reference process cluster and local pointer
    16131803    cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1614     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1804    process_t * ref_ptr = GET_PTR( process->ref_xp );
    16151805
    16161806    // get missing PTE attributes and PPN from reference cluster
     
    16511841                        vpn_t       vpn )
    16521842{
    1653     uint32_t         attr;          // missing page attributes
    1654     ppn_t            ppn;           // missing page PPN
     1843    uint32_t         attr;          // page attributes
     1844    ppn_t            ppn;           // page PPN
    16551845    error_t          error;
    16561846
     1847   
    16571848    // get reference process cluster and local pointer
    16581849    cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1659     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1850    process_t * ref_ptr = GET_PTR( process->ref_xp );
    16601851
    16611852    // get new PTE attributes and PPN from reference cluster
     
    17221913    {
    17231914        cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1724         process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1915        process_t * ref_ptr = GET_PTR( process->ref_xp );
    17251916        rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , false , &attr , &ppn , &error );
    17261917    }
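
The rewritten COW branch of vmm_get_pte() keys everything off the forks counter: a non-zero value means other process copies still reference the old frame, so a new page is allocated, the old content copied, and the counter decremented; a zero value means the faulting process is the only user and the page can simply be made writable again. A user-space model of that decision, with malloc/memcpy standing in for the kernel allocators (a sketch only, error handling omitted):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    // if the page has pending forks it is shared and must be duplicated,
    // otherwise it can be reused in place (COW flag reset, WRITE flag set)
    static uint8_t * cow_resolve( uint8_t * old_page , uint32_t * forks )
    {
        if( *forks )
        {
            uint8_t * new_page = malloc( PAGE_SIZE );
            memcpy( new_page , old_page , PAGE_SIZE );
            (*forks)--;
            return new_page;
        }
        return old_page;
    }

    int main( void )
    {
        uint8_t * page  = calloc( 1 , PAGE_SIZE );
        uint32_t  forks = 1;                              // one pending fork

        uint8_t * first  = cow_resolve( page , &forks );  // private copy
        uint8_t * second = cow_resolve( page , &forks );  // reused in place

        printf("first  %s the original page\n", (first  == page) ? "is" : "is not");
        printf("second %s the original page\n", (second == page) ? "is" : "is not");

        free( first );
        free( page );
        return 0;
    }
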
  • trunk/kernel/mm/vmm.h

    r429 r433  
    9999 *    a remote_rwlock, because it can be accessed by a thread running in a remote cluster.
    100100 *    An exemple is the vmm_fork_copy() function.
    101  * 2. In most custers, the VSL and GPT are only partial copies of the reference VSL and GPT
     101 * 2. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT
    102102 *    structures, stored in the reference cluster.
    103103 ********************************************************************************************/
     
    155155
    156156/*********************************************************************************************
    157  * This function is called by the process_fork_create() function. It partially copies
     157 * This function is called by the process_make_fork() function. It partially copies
    158158 * the content of a remote parent process VMM to the local child process VMM:
    159159 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
     
    176176
    177177/*********************************************************************************************
    178  * This function is called by the process_make_fork() function to handle the fork syscall.
     178 * This function is called by the process_make_fork() function executing the fork syscall.
    179179 * It sets the COW flag, and resets the WRITABLE flag of all GPT entries of the DATA, MMAP,
    180180 * and REMOTE vsegs of a process identified by the <process> argument.
    181181 * It must be called by a thread running in the reference cluster, that contains the complete
    182  * list of vsegs. Use the rpc_vmm_set_cow_client() when the calling thread client is remote.
     182 * VSL and GPT (use the rpc_vmm_set_cow_client() when the calling thread client is remote).
    183183 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
    184184 * using the list of copies stored in the owner process, and using remote_write accesses to
    185  * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated.
     185 * update the remote GPTs. It atomically increments the pending forks counter in all involved
     186 * physical page descriptors. It cannot fail, as only mapped entries in GPTs are updated.
    186187 *********************************************************************************************
    187188 * @ process   : local pointer on local reference process descriptor.
     
    190191
    191192/*********************************************************************************************
    192  * This function is called by the vmm_get_pte() function in case of COW exception.
    193  * It modifies both the PPN an the attributes for a GPT entry identified by the <process>
    194  * and <vpn> arguments.
     193 * This global function modifies a GPT entry identified by the <process> and <vpn>
     194 * arguments in all clusters containing a process copy.
     195 * It must be called by a thread running in the reference cluster.
    195196 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
    196197 * using the list of copies stored in the owner process, and using remote_write accesses to
     
    202203 * @ ppn       : PTE / physical page index.
    203204 ********************************************************************************************/
    204 void vmm_update_pte( struct process_s * process,
    205                      vpn_t              vpn,
    206                      uint32_t           attr,
    207                      ppn_t              ppn );
    208 
    209 /*********************************************************************************************
    210  * This function scan the list of vsegs registered in the VSL of the process
    211  * identified by the <process> argument, and for each vseg:
    212  * - it unmap from the GPT and releases all mapped pages in vseg.
    213  * - it removes the vseg from the process VSL.
    214  * - It releases the memory allocated to the vseg descriptor.
     205void vmm_global_update_pte( struct process_s * process,
     206                            vpn_t              vpn,
     207                            uint32_t           attr,
     208                            ppn_t              ppn );
     209
     210/*********************************************************************************************
     211 * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the
     212 * <process> and <vseg> arguments. It can be used for any type of vseg.
     213 * If this function is executed in the reference cluster, it handles, for each referenced
     214 * physical page, the pending forks counter:
     215 * - if counter is non-zero, it decrements it.
     216 * - if counter is zero, it releases the physical page to the local kmem allocator.
     217 *********************************************************************************************
     218 * @ process  : pointer on process descriptor.
     219 * @ vseg     : pointer on the vseg to be unmapped.
     220 ********************************************************************************************/
     221void vmm_unmap_vseg( struct process_s * process,
     222                     vseg_t           * vseg );
     223
     224/*********************************************************************************************
     225 * This function deletes, in the local cluster, all vsegs registered in the VSL
     226 * of the process identified by the <process> argument. For each vseg:
     227 * - it unmaps all vseg PTEs from the GPT (releasing the physical pages when required).
     228 * - it removes the vseg from the local VSL.
     229 * - it releases the memory allocated to the local vseg descriptors.
    215230 * Finally, it releases the memory allocated to the GPT itself.
    216231 *********************************************************************************************
     
    291306
    292307/*********************************************************************************************
    293  * This function unmaps all mapped PTEs of a given vseg, from the generic page table
    294  * associated to a given process descriptor, and releases the physical memory allocated
    295  * to all mapped GPT entries.  It can be used for any type of vseg.
    296  *********************************************************************************************
    297  * @ process  : pointer on process descriptor.
    298  * @ vseg     : pointer on the vseg to be unmapped.
    299  ********************************************************************************************/
    300 void vmm_unmap_vseg( struct process_s * process,
    301                      vseg_t           * vseg );
    302 
    303 /*********************************************************************************************
    304308 * This function removes a given region (defined by a base address and a size) from
    305309 * the VMM of a given process descriptor. This can modify the number of vsegs:
     
    340344/*********************************************************************************************
    341345 * This function is called by the generic exception handler when a page-fault event
    342  * has been detected in a given cluster.
     346 * has been detected for a given process in a given cluster.
    343347 * - If the local cluster is the reference, it calls directly the vmm_get_pte() function.
    344348 * - If the local cluster is not the reference cluster, it sends a RPC_VMM_GET_PTE
     
    355359/*********************************************************************************************
    356360 * This function is called by the generic exception handler when a copy-on-write event
    357  * has been detected in a given cluster.
    358  * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
    359  * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
    360  *   to the reference cluster to get the missing PTE attributes and PPN,
    361  *   and update the local page table.
     361 * has been detected for a given process in a given cluster.
     362 * It takes the lock protecting the physical page, and tests the pending forks counter.
     363 * If no pending fork:
     364 * - it resets the COW flag and sets the WRITE flag in the reference GPT entry, and in all
     365 *   the GPT copies.
     366
     367 * If there is a pending fork:
     368 * - It gets the involved vseg pointer.
     369 * - It allocates a new physical page from the cluster defined by the vseg type.
     370 * - It copies the old physical page content to the new physical page.
     371 * - It decrements the pending forks counter in the old physical page descriptor.
     372
    362373 *********************************************************************************************
    363374 * @ process   : pointer on process descriptor.
     
    369380
    370381/*********************************************************************************************
    371  * This function is called when a new PTE (GPT entry) is required because a "page-fault",
    372  * or "copy-on_write" event has been detected for a given <vpn> in a given <process>.
    373  * The <cow> argument defines the type of event to be handled.
     382 * This function handles both the "page-fault" and "copy-on-write" events for a given <vpn>
     383 * in a given <process>.  The <cow> argument defines the type of event to be handled.
    374384 * This function must be called by a thread running in reference cluster, and the vseg
    375  * containing the searched VPN should be registered in the reference VMM.
    376  * - for an actual page-fault, it allocates the missing physical page from the target cluster
    377  *   defined by the vseg type, initialize it, and update the reference page table.
     385 * containing the searched VPN must be registered in the reference VMM.
     386 * - for a page-fault, it allocates the missing physical page from the target cluster
     387 *   defined by the vseg type, initializes it, and updates the reference GPT, but not
     388 *   the GPT copies, which will be updated on demand.
    378389 * - for a copy-on-write, it allocates a new physical page from the target cluster,
    379  *   initialise it from the old physical page, and update the reference page table.
    380  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page if the
    381  * target cluster is not the reference cluster.
     390 *   initialises it from the old physical page, and updates the reference GPT and all
     391 *   the GPT copies, for coherence.
     392 * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when
     393 * the target cluster is not the reference cluster.
    382394 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
    383395 *********************************************************************************************
     
    400412 * (Physical Page Number) associated to a missing page defined by the <vpn> argument.
    401413 * - For the FILE type, it returns directly the physical page from the file mapper.
    402  * - For the CODE and DATA types, it allocates a new phsical page from the cluster defined
     414 * - For the CODE and DATA types, it allocates a new physical page from the cluster defined
    403415 *   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
    404416 *   and initializes this page from the .elf file mapper.
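
Both handler comments above reduce to the same dispatch: compare the local cluster with the reference cluster recorded in process->ref_xp, then either call vmm_get_pte() directly or forward the request through rpc_vmm_get_pte_client(), exactly as the vmm.c hunks show. A trivial standalone model of that routing decision (illustrative only, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t cxy_t;   // cluster identifier, as in the kernel sources

    // the reference cluster resolves the fault directly;
    // every other cluster must go through an RPC to the reference
    static const char * resolve_fault( cxy_t local_cxy , cxy_t ref_cxy )
    {
        if( local_cxy == ref_cxy ) return "direct call to vmm_get_pte()";
        else                       return "rpc_vmm_get_pte_client() to reference cluster";
    }

    int main( void )
    {
        printf("local == ref : %s\n", resolve_fault( 3 , 3 ) );
        printf("local != ref : %s\n", resolve_fault( 3 , 7 ) );
        return 0;
    }
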