Changeset 640 for trunk/kernel/mm


Ignore:
Timestamp:
Oct 1, 2019, 1:19:00 PM (5 years ago)
Author:
alain
Message:

Remove all RPCs in page-fault handling.

Location:
trunk/kernel/mm
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/vmm.c

    r635 r640  
    3232#include <printk.h>
    3333#include <memcpy.h>
    34 #include <remote_rwlock.h>
    3534#include <remote_queuelock.h>
    3635#include <list.h>
     
    313312
    314313    // initialize the lock protecting the VSL
    315         remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
     314        remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    316315
    317316
     
    425424
    426425    // take the VSL lock
    427         remote_rwlock_wr_acquire( lock_xp );
     426        remote_queuelock_acquire( lock_xp );
    428427
    429428    // scan the VSL to delete all non kernel vsegs
     
    474473
    475474    // release the VSL lock
    476         remote_rwlock_wr_release( lock_xp );
     475        remote_queuelock_release( lock_xp );
    477476
    478477// FIXME il faut gérer les process copies...
     
    491490
    492491}  // end vmm_user_reset()
     492
     493/////////////////////////////////////////////////
     494void vmm_global_delete_vseg( process_t * process,
     495                             intptr_t    base )
     496{
     497    pid_t           pid;
     498    cxy_t           owner_cxy;
     499    lpid_t          owner_lpid;
     500
     501    xlist_entry_t * process_root_ptr;
     502    xptr_t          process_root_xp;
     503    xptr_t          process_iter_xp;
     504
     505    xptr_t          remote_process_xp;
     506    cxy_t           remote_process_cxy;
     507    process_t     * remote_process_ptr;
     508
     509    xptr_t          vsl_root_xp;
     510    xptr_t          vsl_lock_xp;
     511    xptr_t          vsl_iter_xp;
     512
     513#if DEBUG_VMM_GLOBAL_DELETE_VSEG
     514uint32_t cycle = (uint32_t)hal_get_cycles();
     515thread_t * this = CURRENT_THREAD;
     516#endif
     517
     518#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
     519if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     520printk("\n[%s] thread[%x,%x] : process %x / base %x / cycle %d\n",
     521__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
     522#endif
     523
     524    // get owner process cluster and local index
     525    pid              = process->pid;
     526    owner_cxy        = CXY_FROM_PID( pid );
     527    owner_lpid       = LPID_FROM_PID( pid );
     528
     529    // get extended pointer on root of process copies xlist in owner cluster
     530    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
     531    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
     532
     533    // loop on process copies
     534    XLIST_FOREACH( process_root_xp , process_iter_xp )
     535    {
     536        // get cluster and local pointer on remote process
     537        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
     538        remote_process_ptr = GET_PTR( remote_process_xp );
     539        remote_process_cxy = GET_CXY( remote_process_xp );
     540
     541        // build extended pointers on remote VSL root and lock
     542        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
     543        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );
     544
     545        // get lock on remote VSL
     546        remote_queuelock_acquire( vsl_lock_xp );
     547
     548        // loop on vsegs in remote process VSL
     549        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
     550        {
     551            // get pointers on current vseg
     552            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
     553            vseg_t * vseg_ptr = GET_PTR( vseg_xp );
     554
     555            // get current vseg base address
     556            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
     557                                                                 &vseg_ptr->min ) );
     558
     559            if( vseg_base == base )   // found searched vseg
     560            {
     561                if( remote_process_cxy == local_cxy )
     562                {
     563                    vmm_remove_vseg( process,
     564                                     vseg_ptr );
     565                }
     566                else
     567                {
     568                    rpc_vmm_remove_vseg_client( remote_process_cxy,
     569                                                remote_process_ptr,
     570                                                vseg_ptr );
     571                }
     572
     573#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
     574if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     575printk("\n[%s] thread[%x,%x] deleted vseg %x for process %x in cluster %x\n",
     576__FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy );
     577#endif
     578
     579            }
     580        }  // end of loop on vsegs
     581
     582#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
     583if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     584hal_vmm_display( remote_process_xp , false );
     585#endif
     586
     587        // release lock on remote VSL
     588        remote_queuelock_release( vsl_lock_xp );
     589
     590    }  // end of loop on process copies
     591
     592#if DEBUG_VMM_GLOBAL_DELETE_VSEG
     593cycle = (uint32_t)hal_get_cycles();
     594if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     595printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
     596__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
     597#endif
     598
     599}  // end vmm_global_delete_vseg()
     600
     601////////////////////////////////////////////////
     602void vmm_global_resize_vseg( process_t * process,
     603                             intptr_t    base,
     604                             intptr_t    new_base,
     605                             intptr_t    new_size )
     606{
     607    pid_t           pid;
     608    cxy_t           owner_cxy;
     609    lpid_t          owner_lpid;
     610
     611    xlist_entry_t * process_root_ptr;
     612    xptr_t          process_root_xp;
     613    xptr_t          process_iter_xp;
     614
     615    xptr_t          remote_process_xp;
     616    cxy_t           remote_process_cxy;
     617    process_t     * remote_process_ptr;
     618
     619    xptr_t          vsl_root_xp;
     620    xptr_t          vsl_lock_xp;
     621    xptr_t          vsl_iter_xp;
     622
     623#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
     624uint32_t cycle = (uint32_t)hal_get_cycles();
     625thread_t * this = CURRENT_THREAD;
     626#endif
     627
     628#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
     629if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     630printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n",
     631__FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle );
     632#endif
     633
     634    // get owner process cluster and local index
     635    pid              = process->pid;
     636    owner_cxy        = CXY_FROM_PID( pid );
     637    owner_lpid       = LPID_FROM_PID( pid );
     638
     639    // get extended pointer on root of process copies xlist in owner cluster
     640    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
     641    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
     642
     643    // loop on process copies
     644    XLIST_FOREACH( process_root_xp , process_iter_xp )
     645    {
     646        // get cluster and local pointer on remote process
     647        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
     648        remote_process_ptr = GET_PTR( remote_process_xp );
     649        remote_process_cxy = GET_CXY( remote_process_xp );
     650
     651        // build extended pointers on remote VSL root and lock
     652        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
     653        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );
     654
     655        // get lock on remote VSL
     656        remote_queuelock_acquire( vsl_lock_xp );
     657
     658        // loop on vsegs in remote process VSL
     659        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
     660        {
     661            // get pointers on current vseg
     662            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
     663            vseg_t * vseg_ptr = GET_PTR( vseg_xp );
     664
     665            // get current vseg base address
     666            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
     667                                                                 &vseg_ptr->min ) );
     668
     669            if( vseg_base == base )   // found searched vseg
     670            {
     671                if( remote_process_cxy == local_cxy )
     672                {
     673                    vmm_resize_vseg( remote_process_ptr,
     674                                     vseg_ptr,
     675                                     new_base,
     676                                     new_size );
     677                }
     678                else
     679                {
     680                    rpc_vmm_resize_vseg_client( remote_process_cxy,
     681                                                remote_process_ptr,
     682                                                vseg_ptr,
     683                                                new_base,
     684                                                new_size );
     685                }
     686 
     687#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
     688if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     689printk("\n[%s] thread[%x,%x] resized vseg %x for process %x in cluster %x\n",
     690__FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy );
     691#endif
     692
     693            }
     694        }  // end of loop on vsegs
     695
     696#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
     697if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     698hal_vmm_display( remote_process_xp , false );
     699#endif
     700
     701        // release lock on remote VSL
     702        remote_queuelock_release( vsl_lock_xp );
     703    }  // end of loop on process copies
     704
     705#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
     706cycle = (uint32_t)hal_get_cycles();
     707if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     708printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
     709__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
     710#endif
     711
     712}  // end vmm_global_resize_vseg()
    493713
    494714////////////////////////////////////////////////
     
    498718                            ppn_t       ppn )
    499719{
     720    pid_t           pid;
     721    cxy_t           owner_cxy;
     722    lpid_t          owner_lpid;
     723
    500724    xlist_entry_t * process_root_ptr;
    501725    xptr_t          process_root_xp;
     
    507731    xptr_t          remote_gpt_xp;
    508732
    509     pid_t           pid;
    510     cxy_t           owner_cxy;
    511     lpid_t          owner_lpid;
    512 
    513 #if DEBUG_VMM_UPDATE_PTE
     733#if DEBUG_VMM_GLOBAL_UPDATE_PTE
    514734uint32_t cycle = (uint32_t)hal_get_cycles();
    515735thread_t * this = CURRENT_THREAD;
    516 if( DEBUG_VMM_UPDATE_PTE < cycle )
     736#endif
     737
     738
     739#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
     740if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
    517741printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / ycle %d\n",
    518742__FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle );
    519743#endif
    520744
    521     // get extended pointer on root of process copies xlist in owner cluster
     745    // get owner process cluster and local index
    522746    pid              = process->pid;
    523747    owner_cxy        = CXY_FROM_PID( pid );
    524748    owner_lpid       = LPID_FROM_PID( pid );
     749
     750    // get extended pointer on root of process copies xlist in owner cluster
    525751    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    526752    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
    527753
    528 // check local cluster is owner cluster
    529 assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n");
    530 
    531     // loop on destination process copies
     754    // loop on process copies
    532755    XLIST_FOREACH( process_root_xp , process_iter_xp )
    533756    {
     
    537760        remote_process_cxy = GET_CXY( remote_process_xp );
    538761
    539 #if (DEBUG_VMM_UPDATE_PTE & 1)
    540 if( DEBUG_VMM_UPDATE_PTE < cycle )
     762#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
     763if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
    541764printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
    542765__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
     
    550773    } 
    551774
    552 #if DEBUG_VMM_UPDATE_PTE
     775#if DEBUG_VMM_GLOBAL_UPDATE_PTE
    553776cycle = (uint32_t)hal_get_cycles();
    554 if( DEBUG_VMM_UPDATE_PTE < cycle )
     777if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
    555778printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
    556779__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
    557780#endif
    558781
    559 #if (DEBUG_VMM_UPDATE_PTE & 1)
     782#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
    560783hal_vmm_display( process , true );
    561784#endif
     
    772995    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
    773996
    774     // take the lock protecting the parent VSL in read mode
    775     remote_rwlock_rd_acquire( parent_lock_xp );
     997    // take the lock protecting the parent VSL
     998    remote_queuelock_acquire( parent_lock_xp );
    776999
    7771000    // loop on parent VSL xlist
     
    8091032            vseg_init_from_ref( child_vseg , parent_vseg_xp );
    8101033
    811             // build extended pointer on VSL lock
    812             xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
     1034            // build extended pointer on child VSL lock
     1035            xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
    8131036 
    814             // take the VSL lock in write mode
    815             remote_rwlock_wr_acquire( lock_xp );
     1037            // take the child VSL lock
     1038            remote_queuelock_acquire( child_lock_xp );
    8161039
    8171040            // register child vseg in child VSL
    8181041            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
    8191042
    820             // release the VSL lock
    821             remote_rwlock_wr_release( lock_xp );
     1043            // release the child VSL lock
     1044            remote_queuelock_release( child_lock_xp );
    8221045
    8231046#if DEBUG_VMM_FORK_COPY
     
    8661089
    8671090    // release the parent VSL lock in read mode
    868     remote_rwlock_rd_release( parent_lock_xp );
     1091    remote_queuelock_release( parent_lock_xp );
    8691092
    8701093    // initialize the child VMM STACK allocator
     
    9391162
    9401163    // take the VSL lock
    941     remote_rwlock_wr_acquire( vsl_lock_xp );
     1164    remote_queuelock_acquire( vsl_lock_xp );
    9421165
    9431166    // scan the VSL to delete all registered vsegs
     
    9681191
    9691192    // release the VSL lock
    970     remote_rwlock_wr_release( vsl_lock_xp );
     1193    remote_queuelock_release( vsl_lock_xp );
    9711194
    9721195    // remove all registered MMAP vsegs
     
    10421265
    10431266}  // end vmm_check_conflict()
    1044 
    1045 
    10461267
    10471268////////////////////////////////////////////////
     
    10601281        error_t      error;
    10611282
     1283#if DEBUG_VMM_CREATE_VSEG
     1284thread_t * this  = CURRENT_THREAD;
     1285uint32_t   cycle;
     1286#endif
     1287
    10621288#if (DEBUG_VMM_CREATE_VSEG & 1)
    1063 thread_t * this  = CURRENT_THREAD;
    1064 uint32_t   cycle = (uint32_t)hal_get_cycles();
     1289cycle = (uint32_t)hal_get_cycles();
    10651290if( DEBUG_VMM_CREATE_VSEG < cycle )
    10661291printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n",
     
    11801405 
    11811406    // take the VSL lock in write mode
    1182     remote_rwlock_wr_acquire( lock_xp );
     1407    remote_queuelock_acquire( lock_xp );
    11831408
    11841409    // attach vseg to VSL
     
    11861411
    11871412    // release the VSL lock
    1188     remote_rwlock_wr_release( lock_xp );
     1413    remote_queuelock_release( lock_xp );
    11891414
    11901415#if DEBUG_VMM_CREATE_VSEG
    11911416cycle = (uint32_t)hal_get_cycles();
    1192 if( DEBUG_VMM_CREATE_VSEG < cycle )
     1417// if( DEBUG_VMM_CREATE_VSEG < cycle )
     1418if( type == VSEG_TYPE_REMOTE )
    11931419printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n",
    11941420__FUNCTION__, this->process->pid, this->trdid,
     
    12001426}  // vmm_create_vseg()
    12011427
     1428////////////////////////////////////////////////////////////////////////////////////////////
     1429// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions.
     1430// Depending on the vseg <type>, it decrements the physical page refcount, and
     1431// conditionnally release to the relevant kmem the physical page identified by <ppn>.
     1432////////////////////////////////////////////////////////////////////////////////////////////
     1433// @ process  : local pointer on process.
     1434// @ vseg     : local pointer on vseg.
     1435// @ ppn      : released pysical page index.
     1436////////////////////////////////////////////////////////////////////////////////////////////
     1437static void vmm_ppn_release( process_t * process,
     1438                             vseg_t    * vseg,
     1439                             ppn_t       ppn )
     1440{
     1441    bool_t do_release;
     1442
     1443    // get vseg type
     1444    vseg_type_t type = vseg->type;
     1445
     1446    // compute is_ref
     1447    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
     1448
     1449    // get pointers on physical page descriptor
     1450    xptr_t   page_xp  = ppm_ppn2page( ppn );
     1451    cxy_t    page_cxy = GET_CXY( page_xp );
     1452    page_t * page_ptr = GET_PTR( page_xp );
     1453
     1454    // decrement page refcount
     1455    xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount );
     1456    hal_remote_atomic_add( count_xp , -1 );
     1457
     1458    // compute the do_release condition depending on vseg type
     1459    if( (type == VSEG_TYPE_FILE)  ||
     1460        (type == VSEG_TYPE_KCODE) ||
     1461        (type == VSEG_TYPE_KDATA) ||
     1462        (type == VSEG_TYPE_KDEV) )           
     1463    {
     1464        // no physical page release for FILE and KERNEL
     1465        do_release = false;
     1466    }
     1467    else if( (type == VSEG_TYPE_CODE)  ||
     1468             (type == VSEG_TYPE_STACK) )
     1469    {
     1470        // always release physical page for private vsegs
     1471        do_release = true;
     1472    }
     1473    else if( (type == VSEG_TYPE_ANON)  ||
     1474             (type == VSEG_TYPE_REMOTE) )
     1475    {
     1476        // release physical page if reference cluster
     1477        do_release = is_ref;
     1478    }
     1479    else if( is_ref )  // vseg_type == DATA in reference cluster
     1480    {
     1481        // get extended pointers on forks and lock field in page descriptor
     1482        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1483        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
     1484
     1485        // take lock protecting "forks" counter
     1486        remote_busylock_acquire( lock_xp );
     1487
     1488        // get number of pending forks from page descriptor
     1489        uint32_t forks = hal_remote_l32( forks_xp );
     1490
     1491        // decrement pending forks counter if required
     1492        if( forks )  hal_remote_atomic_add( forks_xp , -1 );
     1493
     1494        // release lock protecting "forks" counter
     1495        remote_busylock_release( lock_xp );
     1496
     1497        // release physical page if forks == 0
     1498        do_release = (forks == 0);
     1499    }
     1500    else              // vseg_type == DATA not in reference cluster
     1501    {
     1502        // no physical page release if not in reference cluster
     1503        do_release = false;
     1504    }
     1505
     1506    // release physical page to relevant kmem when required
     1507    if( do_release )
     1508    {
     1509        ppm_remote_free_pages( page_cxy , page_ptr );
     1510
     1511#if DEBUG_VMM_PPN_RELEASE
     1512thread_t * this = CURRENT_THREAD;
     1513if( DEBUG_VMM_PPN_RELEASE < cycle )
     1514printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n",
     1515__FUNCTION__, this->process->pid, this->trdid, ppn );
     1516#endif
     1517
     1518    }
     1519} // end vmm_ppn_release()
    12021520
    12031521//////////////////////////////////////////
     
    12051523                      vseg_t    * vseg )
    12061524{
    1207     vmm_t     * vmm;        // local pointer on process VMM
    1208     xptr_t      gpt_xp;     // extended pointer on GPT
    1209     bool_t      is_ref;     // local process is reference process
    12101525    uint32_t    vseg_type;  // vseg type
    12111526    vpn_t       vpn;        // VPN of current PTE
     
    12141529    ppn_t       ppn;        // current PTE ppn value
    12151530    uint32_t    attr;       // current PTE attributes
    1216     xptr_t      page_xp;    // extended pointer on page descriptor
    1217     cxy_t       page_cxy;   // page descriptor cluster
    1218     page_t    * page_ptr;   // page descriptor pointer
    1219     xptr_t      count_xp;   // extended pointer on page refcount
    12201531
    12211532// check arguments
     
    12231534assert( (vseg    != NULL), "vseg argument is NULL" );
    12241535
    1225     // compute is_ref
    1226     is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
    1227 
    12281536    // get pointers on local process VMM
    1229     vmm = &process->vmm;
     1537    vmm_t * vmm = &process->vmm;
    12301538
    12311539    // build extended pointer on GPT
    1232     gpt_xp = XPTR( local_cxy , &vmm->gpt );
     1540    xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt );
    12331541
    12341542    // get relevant vseg infos
     
    12401548uint32_t   cycle = (uint32_t)hal_get_cycles();
    12411549thread_t * this  = CURRENT_THREAD;
     1550#endif
     1551
     1552#if (DEBUG_VMM_REMOVE_VSEG & 1 )
    12421553if( DEBUG_VMM_REMOVE_VSEG < cycle )
    12431554printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
     
    12461557#endif
    12471558
    1248     // loop on PTEs in GPT
     1559    // loop on PTEs in GPT to unmap all mapped PTE
    12491560        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    12501561    {
     
    12571568#if( DEBUG_VMM_REMOVE_VSEG & 1 )
    12581569if( DEBUG_VMM_REMOVE_VSEG < cycle )
    1259 printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
     1570printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
     1571__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
    12601572#endif
    12611573            // unmap GPT entry in local GPT
    12621574            hal_gpt_reset_pte( gpt_xp , vpn );
    12631575
    1264             // get pointers on physical page descriptor
    1265             page_xp  = ppm_ppn2page( ppn );
    1266             page_cxy = GET_CXY( page_xp );
    1267             page_ptr = GET_PTR( page_xp );
    1268 
    1269             // decrement page refcount
    1270             count_xp = XPTR( page_cxy , &page_ptr->refcount );
    1271             hal_remote_atomic_add( count_xp , -1 );
    1272 
    1273             // compute the ppn_release condition depending on vseg type
    1274             bool_t ppn_release;
    1275             if( (vseg_type == VSEG_TYPE_FILE)  ||
    1276                 (vseg_type == VSEG_TYPE_KCODE) ||
    1277                 (vseg_type == VSEG_TYPE_KDATA) ||
    1278                 (vseg_type == VSEG_TYPE_KDEV) )           
    1279             {
    1280                 // no physical page release for FILE and KERNEL
    1281                 ppn_release = false;
    1282             }
    1283             else if( (vseg_type == VSEG_TYPE_CODE)  ||
    1284                      (vseg_type == VSEG_TYPE_STACK) )
    1285             {
    1286                 // always release physical page for private vsegs
    1287                 ppn_release = true;
    1288             }
    1289             else if( (vseg_type == VSEG_TYPE_ANON)  ||
    1290                      (vseg_type == VSEG_TYPE_REMOTE) )
    1291             {
    1292                 // release physical page if reference cluster
    1293                 ppn_release = is_ref;
    1294             }
    1295             else if( is_ref )  // vseg_type == DATA in reference cluster
    1296             {
    1297                 // get extended pointers on forks and lock field in page descriptor
    1298                 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    1299                 xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    1300 
    1301                 // take lock protecting "forks" counter
    1302                 remote_busylock_acquire( lock_xp );
    1303 
    1304                 // get number of pending forks from page descriptor
    1305                 uint32_t forks = hal_remote_l32( forks_xp );
    1306 
    1307                 // decrement pending forks counter if required
    1308                 if( forks )  hal_remote_atomic_add( forks_xp , -1 );
    1309 
    1310                 // release lock protecting "forks" counter
    1311                 remote_busylock_release( lock_xp );
    1312 
    1313                 // release physical page if forks == 0
    1314                 ppn_release = (forks == 0);
    1315             }
    1316             else              // vseg_type == DATA not in reference cluster
    1317             {
    1318                 // no physical page release if not in reference cluster
    1319                 ppn_release = false;
    1320             }
    1321 
    1322             // release physical page to relevant kmem when required
    1323             if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr );
    1324 
    1325 #if( DEBUG_VMM_REMOVE_VSEG & 1 )
    1326 if( DEBUG_VMM_REMOVE_VSEG < cycle )
    1327 {
    1328     if( ppn_release ) printk(" / released to kmem\n" );
    1329     else              printk("\n");
    1330 }
    1331 #endif
     1576            // release physical page when required
     1577            vmm_ppn_release( process , vseg , ppn );
    13321578        }
    13331579    }
     
    13681614}  // end vmm_remove_vseg()
    13691615
    1370 
    1371 ///////////////////////////////////
    1372 void vmm_delete_vseg( pid_t    pid,
    1373                       intptr_t vaddr )
     1616/////////////////////////////////////////////
     1617void vmm_resize_vseg( process_t * process,
     1618                      vseg_t    * vseg,
     1619                      intptr_t    new_base,
     1620                      intptr_t    new_size )
    13741621{
    1375     process_t * process;    // local pointer on local process
    1376     vseg_t    * vseg;       // local pointer on local vseg containing vaddr
    1377 
    1378     // get local pointer on local process descriptor
    1379     process = cluster_get_local_process_from_pid( pid );
    1380 
    1381     if( process == NULL )
    1382     {
    1383         printk("\n[WARNING] in %s : cannot get local process descriptor\n",
    1384         __FUNCTION__ );
    1385         return;
    1386     }
    1387 
    1388     // get local pointer on local vseg containing vaddr
    1389     vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
    1390 
    1391     if( vseg == NULL )
    1392     {
    1393         printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
    1394         __FUNCTION__ );
    1395         return;
    1396     }
    1397 
    1398     // call relevant function
    1399     vmm_remove_vseg( process , vseg );
    1400 
    1401 }  // end vmm_delete_vseg
    1402 
    1403 
    1404 /////////////////////////////////////////////
    1405 vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
    1406                               intptr_t   vaddr )
    1407 {
    1408     xptr_t   vseg_xp;
    1409     vseg_t * vseg;
    1410     xptr_t   iter_xp;
    1411 
    1412     // get extended pointers on VSL lock and root
    1413     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    1414     xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    1415 
    1416     // get lock protecting the VSL
    1417     remote_rwlock_rd_acquire( lock_xp );
    1418 
    1419     // scan the list of vsegs in VSL
    1420     XLIST_FOREACH( root_xp , iter_xp )
    1421     {
    1422         // get pointers on vseg
    1423         vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    1424         vseg    = GET_PTR( vseg_xp );
    1425 
    1426         // return success when match
    1427         if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    1428         {
    1429             // return success
    1430             remote_rwlock_rd_release( lock_xp );
    1431             return vseg;
    1432         }
    1433     }
    1434 
    1435     // return failure
    1436     remote_rwlock_rd_release( lock_xp );
    1437     return NULL;
    1438 
    1439 }  // end vmm_vseg_from_vaddr()
    1440 
    1441 /////////////////////////////////////////////
    1442 error_t vmm_resize_vseg( process_t * process,
    1443                          intptr_t    base,
    1444                          intptr_t    size )
    1445 {
    1446     error_t   error;
    1447     vseg_t  * new;
    1448     vpn_t     vpn_min;
    1449     vpn_t     vpn_max;
     1622    vpn_t     vpn;
     1623    ppn_t     ppn;
     1624    uint32_t  attr;
     1625
     1626// check arguments
     1627assert( (process != NULL), "process argument is NULL" );
     1628assert( (vseg    != NULL), "vseg argument is NULL" );
    14501629
    14511630#if DEBUG_VMM_RESIZE_VSEG
    14521631uint32_t   cycle = (uint32_t)hal_get_cycles();
    14531632thread_t * this  = CURRENT_THREAD;
     1633#endif
     1634
     1635#if (DEBUG_VMM_RESIZE_VSEG & 1)
    14541636if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1455 printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
    1456 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
    1457 #endif
    1458 
    1459     // get pointer on process VMM
    1460     vmm_t * vmm = &process->vmm;
    1461 
    1462     intptr_t addr_min = base;
    1463         intptr_t addr_max = base + size;
    1464 
    1465     // get pointer on vseg
    1466         vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
    1467 
    1468         if( vseg == NULL)
    1469     {
    1470         printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
    1471         __FUNCTION__, base , size );
    1472         return -1;
    1473     }
    1474 
    1475     // resize depends on unmapped region base and size
    1476         if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
    1477     {
    1478         printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
    1479         __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1480 
    1481         error = -1;
    1482     }
    1483         else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
    1484     {
     1637printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
     1638__FUNCTION__, this->process->pid, this->trdid,
     1639process->pid, vseg_type_str(vseg->type), old_base, cycle );
     1640#endif
     1641
     1642    // get existing vseg vpn_min and vpn_max
     1643    vpn_t     old_vpn_min = vseg->vpn_base;
     1644    vpn_t     old_vpn_max = old_vpn_min + vseg->vpn_size - 1;
     1645
     1646    // compute new vseg vpn_min & vpn_max 
     1647    intptr_t min          = new_base;
     1648    intptr_t max          = new_base + new_size;
     1649    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_SHIFT;
     1650    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1651
     1652    // build extended pointer on GPT
     1653    xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );
     1654
      1655    // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
     1656        for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
     1657    {
     1658        // get ppn and attr
     1659        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
     1660
     1661        if( attr & GPT_MAPPED )  // PTE is mapped
     1662        {
    14851663
    14861664#if( DEBUG_VMM_RESIZE_VSEG & 1 )
    14871665if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1488 printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
    1489 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1490 #endif
    1491         vmm_delete_vseg( process->pid , vseg->min );
    1492 
    1493 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1666printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
     1667__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
     1668#endif
     1669            // unmap GPT entry
     1670            hal_gpt_reset_pte( gpt_xp , vpn );
     1671
     1672            // release physical page when required
     1673            vmm_ppn_release( process , vseg , ppn );
     1674        }
     1675    }
     1676
     1677    // loop on PTEs in GPT to unmap PTE if (new vpn_max <= vpn < old_vpn_max)
     1678        for( vpn = new_vpn_max ; vpn < old_vpn_max ; vpn++ )
     1679    {
     1680        // get ppn and attr
     1681        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
     1682
     1683        if( attr & GPT_MAPPED )  // PTE is mapped
     1684        {
     1685
     1686#if( DEBUG_VMM_REMOVE_VSEG & 1 )
    14941687if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1495 printk("\n[%s] thread[%x,%x] deleted vseg\n",
    1496 __FUNCTION__, this->process->pid, this->trdid );
    1497 #endif
    1498         error = 0;
    1499     }
    1500         else if( vseg->min == addr_min )                               // vseg must be resized
    1501     {
    1502 
    1503 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1688printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
     1689__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
     1690#endif
     1691            // unmap GPT entry in local GPT
     1692            hal_gpt_reset_pte( gpt_xp , vpn );
     1693
     1694            // release physical page when required
     1695            vmm_ppn_release( process , vseg , ppn );
     1696        }
     1697    }
     1698
     1699    // resize vseg in VSL
     1700    vseg->min      = min;
     1701    vseg->max      = max;
     1702    vseg->vpn_base = new_vpn_min;
     1703    vseg->vpn_size = new_vpn_max - new_vpn_min + 1;
     1704
     1705#if DEBUG_VMM_RESIZE_VSEG
     1706cycle = (uint32_t)hal_get_cycles();
    15041707if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1505 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
    1506 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1507 #endif
    1508         // update vseg min address
    1509         vseg->min = addr_max;
    1510 
    1511         // update vpn_base and vpn_size
    1512         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
    1513         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1514         vseg->vpn_base = vpn_min;
    1515         vseg->vpn_size = vpn_max - vpn_min + 1;
    1516 
    1517 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1518 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1519 printk("\n[%s] thread[%x,%x] changed vseg_min\n",
    1520 __FUNCTION__, this->process->pid, this->trdid );
    1521 #endif
    1522         error = 0;
    1523     }
    1524         else if( vseg->max == addr_max )                              // vseg must be resized
    1525     {
    1526 
    1527 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1528 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1529 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
    1530 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1531 #endif
    1532         // update vseg max address
    1533         vseg->max = addr_min;
    1534 
    1535         // update vpn_base and vpn_size
    1536         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
    1537         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1538         vseg->vpn_base = vpn_min;
    1539         vseg->vpn_size = vpn_max - vpn_min + 1;
    1540 
    1541 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1542 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1543 printk("\n[%s] thread[%x,%x] changed vseg_max\n",
    1544 __FUNCTION__, this->process->pid, this->trdid );
    1545 #endif
    1546         error = 0;
    1547 
    1548     }
    1549     else                                                          // vseg cut in three regions
    1550     {
    1551 
    1552 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1553 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1554 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
    1555 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1556 #endif
    1557         // resize existing vseg
    1558         vseg->max = addr_min;
    1559 
    1560         // update vpn_base and vpn_size
    1561         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
    1562         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1563         vseg->vpn_base = vpn_min;
    1564         vseg->vpn_size = vpn_max - vpn_min + 1;
    1565 
    1566         // create new vseg
    1567         new = vmm_create_vseg( process,
    1568                                vseg->type,
    1569                                addr_min,
    1570                                (vseg->max - addr_max),
    1571                                vseg->file_offset,
    1572                                vseg->file_size,
    1573                                vseg->mapper_xp,
    1574                                vseg->cxy );
    1575 
    1576 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1577 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1578 printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n",
    1579 __FUNCTION__, this->process->pid, this->trdid );
    1580 #endif
    1581 
    1582         if( new == NULL ) error = -1;
    1583         else              error = 0;
    1584     }
    1585 
    1586 #if DEBUG_VMM_RESIZE_VSEG
    1587 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1588 printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
    1589 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
    1590 #endif
    1591 
    1592         return error;
    1593 
    1594 }  // vmm_resize_vseg()
     1708printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
     1709__FUNCTION__, this->process->pid, this->trdid,
     1710process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1711#endif
     1712
     1713}  // end vmm_resize_vseg
     1714
     1715/////////////////////////////////////////////////////////////////////////////////////////////
     1716// This static function is called twice by the vmm_get_vseg() function.
     1717// It scan the - possibly remote - VSL defined by the <vmm_xp> argument to find the vseg
     1718// containing a given virtual address <vaddr>. It uses remote accesses to access the remote
     1719// VSL if required. The VSL lock protecting the VSL must be taken by the caller.
     1720/////////////////////////////////////////////////////////////////////////////////////////////
     1721// @ vmm_xp  : extended pointer on the process VMM.
     1722// @ vaddr   : virtual address.
     1723// @ return local pointer on remote vseg if success / return NULL if not found.
     1724/////////////////////////////////////////////////////////////////////////////////////////////
     1725static vseg_t * vmm_vseg_from_vaddr( xptr_t     vmm_xp,
     1726                                     intptr_t   vaddr )
     1727{
     1728    xptr_t   iter_xp;
     1729    xptr_t   vseg_xp;
     1730    vseg_t * vseg;
     1731    intptr_t min;
     1732    intptr_t max;
     1733
     1734    // get cluster and local pointer on target VMM
     1735    vmm_t * vmm_ptr = GET_PTR( vmm_xp );
     1736    cxy_t   vmm_cxy = GET_CXY( vmm_xp );
     1737
     1738    // build extended pointer on VSL root
     1739    xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root );
     1740
     1741    // scan the list of vsegs in VSL
     1742    XLIST_FOREACH( root_xp , iter_xp )
     1743    {
     1744        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     1745        vseg    = GET_PTR( vseg_xp );
     1746
     1747        min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) );
     1748        max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) );
     1749
     1750        // return success when match
     1751        if( (vaddr >= min) && (vaddr < max) ) return vseg;
     1752    }
     1753
     1754    // return failure
     1755    return NULL;
     1756
     1757}  // end vmm_vseg_from_vaddr()
    15951758
    15961759///////////////////////////////////////////
     
    15991762                       vseg_t   ** found_vseg )
    16001763{
    1601     xptr_t    vseg_xp;
    1602     vseg_t  * vseg;
    1603     vmm_t   * vmm;
    1604     error_t   error;
    1605 
    1606     // get pointer on local VMM
    1607     vmm = &process->vmm;
     1764    xptr_t    loc_lock_xp;     // extended pointer on local VSL lock
     1765    xptr_t    ref_lock_xp;     // extended pointer on reference VSL lock
     1766    vseg_t  * loc_vseg;        // local pointer on local vseg
     1767    vseg_t  * ref_vseg;        // local pointer on reference vseg
     1768
     1769    // build extended pointer on local VSL lock
     1770    loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock );
     1771     
     1772    // get local VSL lock
     1773    remote_queuelock_acquire( loc_lock_xp );
    16081774
    16091775    // try to get vseg from local VMM
    1610     vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    1611 
    1612     if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
    1613         {
     1776    loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr );
     1777
     1778    if (loc_vseg == NULL)   // vseg not found => access reference VSL
     1779    {
    16141780        // get extended pointer on reference process
    16151781        xptr_t ref_xp = process->ref_xp;
    16161782
    1617         // get cluster and local pointer on reference process 
     1783        // get cluster and local pointer on reference process
    16181784        cxy_t       ref_cxy = GET_CXY( ref_xp );
    16191785        process_t * ref_ptr = GET_PTR( ref_xp );
    16201786
    1621         if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
    1622 
    1623         // get extended pointer on reference vseg
    1624         rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
    1625            
    1626         if( error )   return -1;                // vseg not found => illegal user vaddr
    1627        
    1628         // allocate a vseg in local cluster
    1629         vseg = vseg_alloc();
    1630 
    1631         if( vseg == NULL ) return -1;           // cannot allocate a local vseg
    1632 
    1633         // initialise local vseg from reference
    1634         vseg_init_from_ref( vseg , vseg_xp );
    1635 
    1636         // build extended pointer on VSL lock
    1637         xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    1638  
    1639         // take the VSL lock in write mode
    1640         remote_rwlock_wr_acquire( lock_xp );
    1641 
    1642         // register local vseg in local VSL
    1643         vmm_attach_vseg_to_vsl( vmm , vseg );
    1644  
    1645         // release the VSL lock
    1646         remote_rwlock_wr_release( lock_xp );
    1647     }   
    1648 
    1649     // success
    1650     *found_vseg = vseg;
    1651     return 0;
    1652 
     1787        // build extended pointer on reference VSL lock
     1788        ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock );
     1789     
     1790        // get reference VSL lock
     1791        remote_queuelock_acquire( ref_lock_xp );
     1792
     1793        // try to get vseg from reference VMM
     1794        ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr );
     1795
     1796        if( ref_vseg == NULL )  // vseg not found => return error
     1797        {
     1798            printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
     1799            __FUNCTION__, vaddr, process->pid );
     1800
     1801            // release reference VSL lock
     1802            remote_queuelock_release( ref_lock_xp );
     1803
     1804            return -1;
     1805        }
     1806        else                    // vseg found => try to update local VSL
     1807        {
     1808            // allocate a local vseg descriptor
     1809            loc_vseg = vseg_alloc();
     1810
     1811            if( loc_vseg == NULL )   // no memory => return error
     1812            {
     1813                printk("\n[ERROR] in %s : vaddr %x in process %x / no memory for local vseg\n",
     1814                __FUNCTION__, vaddr, process->pid );
     1815
     1816                // release reference VSL & local VSL locks
     1817                remote_queuelock_release( ref_lock_xp );
     1818                remote_queuelock_release( loc_lock_xp );
     1819
     1820                return -1;
     1821            }
     1822            else                     // update local VSL and return success
     1823            {
     1824                // initialize local vseg
     1825                vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) );
     1826
     1827                // register local vseg in local VSL
     1828                vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg );
     1829
     1830                // release reference VSL & local VSL locks
     1831                remote_queuelock_release( ref_lock_xp );
     1832                remote_queuelock_release( loc_lock_xp );
     1833
     1834                *found_vseg = loc_vseg;
     1835                return 0;
     1836            }
     1837        }
     1838    }
     1839    else                        // vseg found in local VSL => return success
     1840    {
     1841        // release local VSL lock
     1842        remote_queuelock_release( loc_lock_xp );
     1843
     1844        *found_vseg = loc_vseg;
     1845        return 0;
     1846    }
    16531847}  // end vmm_get_vseg()
    16541848
     
    16581852// pointer on the allocated page descriptor.
    16591853// The vseg cannot have the FILE type.
     1854//////////////////////////////////////////////////////////////////////////////////////
     1855// @ vseg   : local pointer on vseg.
     1856// @ vpn    : unmapped vpn.
     1857// @ return an extended pointer on the allocated page
    16601858//////////////////////////////////////////////////////////////////////////////////////
    16611859static xptr_t vmm_page_allocate( vseg_t * vseg,
     
    21942392#if DEBUG_VMM_HANDLE_COW
    21952393uint32_t   cycle = (uint32_t)hal_get_cycles();
    2196 if( DEBUG_VMM_HANDLE_COW < cycle )
     2394if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    21972395printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    21982396__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     
    22002398
    22012399#if ((DEBUG_VMM_HANDLE_COW & 3) == 3 )
    2202 hal_vmm_display( process , true );
     2400hal_vmm_display( XPTR( local_cxy , process ) , true );
    22032401#endif
    22042402
     
    22162414
    22172415#if DEBUG_VMM_HANDLE_COW
    2218 if( DEBUG_VMM_HANDLE_COW < cycle )
     2416if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    22192417printk("\n[%s] thread[%x,%x] get vseg %s\n",
    22202418__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
     
    22562454
    22572455#if DEBUG_VMM_HANDLE_COW
    2258 if( DEBUG_VMM_HANDLE_COW < cycle )
     2456if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    22592457printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
    22602458__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
     
    22852483
    22862484#if DEBUG_VMM_HANDLE_COW
    2287 if( DEBUG_VMM_HANDLE_COW < cycle )
     2485if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    22882486printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
    22892487__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
     
    23152513
    23162514#if DEBUG_VMM_HANDLE_COW
    2317 if( DEBUG_VMM_HANDLE_COW < cycle )
     2515if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23182516printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
    23192517__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
     
    23262524
    23272525#if DEBUG_VMM_HANDLE_COW
    2328 if( DEBUG_VMM_HANDLE_COW < cycle )
     2526if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23292527printk("\n[%s] thread[%x,%x] copied old page to new page\n",
    23302528__FUNCTION__, this->process->pid, this->trdid );
     
    23382536
    23392537#if(DEBUG_VMM_HANDLE_COW & 1)
    2340 if( DEBUG_VMM_HANDLE_COW < cycle )
     2538if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23412539printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
    23422540__FUNCTION__, this->process->pid, this->trdid, old_ppn );
     
    23492547
    23502548#if(DEBUG_VMM_HANDLE_COW & 1)
    2351 if( DEBUG_VMM_HANDLE_COW < cycle )
     2549if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23522550printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
    23532551__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
     
    23672565    else
    23682566    {
    2369         if( ref_cxy == local_cxy )                  // reference cluster is local
    2370         {
    2371             vmm_global_update_pte( process,
    2372                                    vpn,
    2373                                    new_attr,
    2374                                    new_ppn );
    2375         }
    2376         else                                        // reference cluster is remote
    2377         {
    2378             rpc_vmm_global_update_pte_client( ref_cxy,
    2379                                               ref_ptr,
    2380                                               vpn,
    2381                                               new_attr,
    2382                                               new_ppn );
    2383         }
     2567        // set new PTE in all GPT copies
     2568        vmm_global_update_pte( process,
     2569                               vpn,
     2570                               new_attr,
     2571                               new_ppn );
    23842572    }
    23852573
    23862574#if DEBUG_VMM_HANDLE_COW
    23872575cycle = (uint32_t)hal_get_cycles();
    2388 if( DEBUG_VMM_HANDLE_COW < cycle )
     2576if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23892577printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
    23902578__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     
    23922580
    23932581#if ((DEBUG_VMM_HANDLE_COW & 3) == 3)
    2394 hal_vmm_display( process , true );
     2582hal_vmm_display( XPTR( local_cxy , process ) , true );
    23952583#endif
    23962584
  • trunk/kernel/mm/vmm.h

    r635 r640  
    112112typedef struct vmm_s
    113113{
    114         remote_rwlock_t  vsl_lock;            /*! lock protecting the local VSL                 */
    115         xlist_entry_t    vsegs_root;          /*! Virtual Segment List (complete in reference)  */
    116         uint32_t         vsegs_nr;            /*! total number of local vsegs                   */
    117 
    118     gpt_t            gpt;                 /*! Generic Page Table (complete in reference)    */
    119 
    120     stack_mgr_t      stack_mgr;           /*! embedded STACK vsegs allocator                */
    121     mmap_mgr_t       mmap_mgr;            /*! embedded MMAP vsegs allocator                 */
    122 
    123         uint32_t         false_pgfault_nr;    /*! false page fault counter (for all threads)    */
    124         uint32_t         local_pgfault_nr;    /*! false page fault counter (for all threads)    */
    125         uint32_t         global_pgfault_nr;   /*! false page fault counter (for all threads)    */
    126     uint32_t         false_pgfault_cost;  /*! cumulated cost (for all threads)              */
    127     uint32_t         local_pgfault_cost;  /*! cumulated cost (for all threads)              */
    128     uint32_t         global_pgfault_cost; /*! cumulated cost (for all threads)              */
    129 
    130     vpn_t            args_vpn_base;       /*! args vseg first page                          */
    131     vpn_t            envs_vpn_base;       /*! envs vseg first page                          */
    132         vpn_t            code_vpn_base;       /*! code vseg first page                          */
    133         vpn_t            data_vpn_base;       /*! data vseg first page                          */
    134     vpn_t            heap_vpn_base;       /*! heap zone first page                          */
    135 
    136         intptr_t         entry_point;         /*! main thread entry point                       */
     114        remote_queuelock_t vsl_lock;            /*! lock protecting the local VSL               */
     115        xlist_entry_t      vsegs_root;          /*! Virtual Segment List root                   */
     116        uint32_t           vsegs_nr;            /*! total number of local vsegs                 */
     117
     118    gpt_t              gpt;                 /*! Generic Page Table descriptor               */
     119
     120    stack_mgr_t        stack_mgr;           /*! embedded STACK vsegs allocator              */
     121    mmap_mgr_t         mmap_mgr;            /*! embedded MMAP vsegs allocator               */
     122
     123        uint32_t           false_pgfault_nr;    /*! false page fault counter (for all threads)  */
      124        uint32_t           local_pgfault_nr;    /*! local page fault counter (for all threads)  */
      125        uint32_t           global_pgfault_nr;   /*! global page fault counter (for all threads) */
     126    uint32_t           false_pgfault_cost;  /*! cumulated cost (for all threads)            */
     127    uint32_t           local_pgfault_cost;  /*! cumulated cost (for all threads)            */
     128    uint32_t           global_pgfault_cost; /*! cumulated cost (for all threads)            */
     129
     130    vpn_t              args_vpn_base;       /*! args vseg first page                        */
     131    vpn_t              envs_vpn_base;       /*! envs vseg first page                        */
     132        vpn_t              code_vpn_base;       /*! code vseg first page                        */
     133        vpn_t              data_vpn_base;       /*! data vseg first page                        */
     134    vpn_t              heap_vpn_base;       /*! heap zone first page                        */
     135
     136        intptr_t           entry_point;         /*! main thread entry point                     */
    137137}
    138138vmm_t;
     
    143143 * - The GPT has been previously created, with the hal_gpt_create() function.
    144144 * - The "kernel" vsegs are previously registered, by the hal_vmm_kernel_update() function.
    145  * - The "code" and "data" vsegs are registered by the elf_load_process() function.
      145 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
    146146 * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
    147147 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall.
     
    206206
    207207/*********************************************************************************************
     208 * This function modifies the size of the vseg identified by <process> and <base> arguments
     209 * in all clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments.
     210 * This function is called by the sys_munmap() function, and can be called by a thread
     211 * running in any cluster, as it uses remote accesses.
     212 * It cannot fail, as only vseg registered  in VSL copies are updated.
     213 *********************************************************************************************
     214 * @ process   : local pointer on process descriptor.
     215 * @ base      : current vseg base address in user space.
     216 * @ new_base  : new vseg base.
     217 * @ new_size  : new vseg size.
     218 ********************************************************************************************/
     219void vmm_global_resize_vseg( struct process_s * process,
     220                             intptr_t           base,
     221                             intptr_t           new_base,
     222                             intptr_t           new_size );
     223
     224/*********************************************************************************************
     225 * This function removes the vseg identified by the <process> and <base> arguments from
     226 * the VSL and remove all associated PTE entries from the GPT.
     227 * This is done in all clusters containing a VMM copy to maintain VMM coherence.
     228 * This function can be called by a thread running in any cluster, as it uses the
     229 * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters.
      231 * It cannot fail, as only vsegs registered in VSL copies are deleted.
     231 *********************************************************************************************
      232 * @ process  : local pointer on process descriptor.
     233 * @ base     : vseg base address in user space.
     234 ********************************************************************************************/
     235void vmm_global_delete_vseg( struct process_s * process,
     236                             intptr_t           base );
     237
     238/*********************************************************************************************
    208239 * This function modifies one GPT entry identified by the <process> and <vpn> arguments
    209  * in all clusters containing a process copy. It is used to maintain coherence in GPT
    210  * copies, using remote_write accesses.
    211  * It must be called by a thread running in the process owner cluster.
    212  * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required.
     240 * in all clusters containing a process copy. It maintains coherence in GPT copies,
     241 * using remote_write accesses.
    213242 * It cannot fail, as only mapped PTE2 in GPT copies are updated.
    214243 *********************************************************************************************
     
    282311/*********************************************************************************************
    283312 * This function removes from the VMM of a process descriptor identified by the <process>
    284  * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg.
    285  * As it uses local pointers, it must be called by a local thread.
    286  * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions.
     313 * argument the vseg identified by the <vseg> argument. 
     314 * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions.
     315 * It must be called by a local thread, running in the cluster containing the modified VMM.
     316 * Use the RPC_VMM_REMOVE_VSEG if required.
    287317 * It makes a kernel panic if the process is not registered in the local cluster,
    288318 * or if the vseg is not registered in the process VSL.
    289319 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
    290320 * unmapped from local GPT. Other actions depend on the vseg type:
    291  * - Regarding the vseg descriptor release:
     321 * Regarding the vseg descriptor release:
    292322 *   . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
    293323 *   . for STACK the vseg is released to the local stack allocator.
    294324 *   . for all other types, the vseg is released to the local kmem.
    295  * - Regarding the physical pages release:
     325 * Regarding the physical pages release:
    296326 *   . for KERNEL and FILE, the pages are not released to kmem.
    297327 *   . for CODE and STACK, the pages are released to local kmem when they are not COW.
    298328 *   . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
    299329 *     the local cluster is the reference cluster.
    300  * The lock protecting the VSL must be taken by the caller.
    301  *********************************************************************************************
    302  * @ process  : local pointer on process.
    303  * @ vseg     : local pointer on vseg.
     330 * The VSL lock protecting the VSL must be taken by the caller.
     331 *********************************************************************************************
     332 * @ process  : local pointer on process descriptor.
     333 * @ vseg     : local pointer on target vseg.
    304334 ********************************************************************************************/
    305335void vmm_remove_vseg( struct process_s * process,
     
    307337
    308338/*********************************************************************************************
    309  * This function calls the vmm_remove_vseg() function to remove from the VMM of a local
    310  * process descriptor, identified by the <pid> argument, the vseg identified by the <vaddr>
    311  * virtual address in user space.
    312  * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor.
    313  *********************************************************************************************
    314  * @ pid      : process identifier.
    315  * @ vaddr    : virtual address in user space.
    316  ********************************************************************************************/
    317 void vmm_delete_vseg( pid_t    pid,
    318                       intptr_t vaddr );
    319 
    320 /*********************************************************************************************
    321  * This function removes a given region (defined by a base address and a size) from
    322  * the VMM of a given process descriptor. This can modify the number of vsegs:
    323  * (a) if the region is not entirely mapped in an existing vseg, it's an error.
    324  * (b) if the region has same base and size as an existing vseg, the vseg is removed.
    325  * (c) if the removed region cuts the vseg in two parts, it is modified.
    326  * (d) if the removed region cuts the vseg in three parts, it is modified, and a new
    327  *     vseg is created with same type.
    328  * FIXME [AG] this function should be called by a thread running in the reference cluster,
    329  *       and the VMM should be updated in all process descriptors copies.
    330  *********************************************************************************************
    331  * @ process   : pointer on process descriptor
    332  * @ base      : vseg base address
    333  * @ size      : vseg size (bytes)
    334  ********************************************************************************************/
    335 error_t vmm_resize_vseg( struct process_s * process,
    336                          intptr_t           base,
    337                          intptr_t           size );
    338 
    339 /*********************************************************************************************
    340  * This low-level function scans the local VSL in <vmm> to find the unique vseg containing
    341  * a given virtual address <vaddr>.
    342  * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions.
    343  *********************************************************************************************
    344  * @ vmm     : pointer on the process VMM.
    345  * @ vaddr   : virtual address.
    346  * @ return vseg pointer if success / return NULL if not found.
    347  ********************************************************************************************/
    348 struct vseg_s * vmm_vseg_from_vaddr( vmm_t    * vmm,
    349                                      intptr_t   vaddr );
    350 
    351 /*********************************************************************************************
    352  * This function checks that a given virtual address is contained in a registered vseg.
    353  * It can be called by any thread running in any cluster:
    354  * - if the vseg is registered in the local process VMM, it returns the local vseg pointer.
    355  * - if the vseg is missing in local VMM, it uses an RPC to get it from the reference cluster,
    356  *   register it in local VMM and returns the local vseg pointer, if success.
    357  * - it returns a user error if the vseg is missing in the reference VMM, or if there is
    358  *   not enough memory for a new vseg descriptor in the calling thread cluster.
    359  *********************************************************************************************
    360  * @ process   : [in] pointer on process descriptor
    361  * @ vaddr     : [in] virtual address
    362  * @ vseg      : [out] local pointer on local vseg
    363  * @ returns 0 if success / returns -1 if user error (out of segment).
     339 * This function resizes a local vseg identified by the <process> and <vseg> arguments.
     340 * It is called by the vmm_global_resize() function.
     341 * It must be called by a local thread, running in the cluster containing the modified VMM.
     342 * Use the RPC_VMM_RESIZE_VSEG if required.
     343 * It makes a kernel panic if the process is not registered in the local cluster,
     344 * or if the vseg is not registered in the process VSL.
     345 * The new vseg, defined by the <new_base> and <new_size> arguments must be strictly
     346 * included in the target vseg. The target VSL size and base fields are modified in the VSL.
     347 * If the new vseg contains fewer pages than the target vseg, the relevant pages are
     348 * removed from the GPT.
     349 * The lock protecting the VSL must be taken by the caller.
     350 *********************************************************************************************
     351 * @ process   : local pointer on process descriptor
     352 * @ vseg      : local pointer on target vseg
     353 * @ new_base  : vseg base address
     354 * @ new_size  : vseg size (bytes)
     355 ********************************************************************************************/
     356void vmm_resize_vseg( struct process_s * process,
     357                      struct vseg_s    * vseg,
     358                      intptr_t           new_base,
     359                      intptr_t           new_size );
     360
     361/*********************************************************************************************
     362 * This function checks that a given virtual address <vaddr> in a given <process> is
     363 * contained in a registered vseg. It can be called by any thread running in any cluster.
     364 * - if the vseg is registered in the local process VSL, it returns the local vseg pointer.
     365 * - if the vseg is missing in local VSL, it directly accesses the reference VSL.
     366 * - if the vseg is found in reference VSL, it updates the local VSL and returns this pointer.
     367 * It returns an error when the vseg is missing in the reference VMM, or when there is
     368 * not enough memory for a new vseg descriptor in the calling thread cluster.
     369 * For both the local and the reference VSL, it takes the VSL lock before scanning the VSL.
     370 *********************************************************************************************
     371 * @ process   : [in] pointer on process descriptor.
     372 * @ vaddr     : [in] virtual address.
     373 * @ vseg      : [out] local pointer on local vseg.
     374 * @ returns 0 if success / returns -1 if user error
    364375 ********************************************************************************************/
    365376error_t vmm_get_vseg( struct process_s  * process,
     
    395406 * This function is called by the generic exception handler in case of WRITE violation event,
    396407 * detected for a given <vpn>. The <process> argument is used to access the relevant VMM.
    397  * It returns a kernel panic if VPN is not in a registered vseg or is not mapped.
     408 * It returns a kernel panic if the faulty VPN is not in a registered vseg, or is not mapped.
    398409 * For a legal mapped vseg there are two cases:
    399410 * 1) If the missing VPN belongs to a private vseg (STACK), it accesses only the local GPT.
  • trunk/kernel/mm/vseg.h

    r625 r640  
    22 * vseg.h - virtual segment (vseg) related operations
    33 *
    4  * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    5  *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    75 *
    86 * Copyright (c) UPMC Sorbonne Universites
Note: See TracChangeset for help on using the changeset viewer.