Changeset 567 for trunk/kernel/mm/vmm.c


Timestamp: Oct 5, 2018, 12:01:52 AM
Author: alain
Message: Complete restructuring of kernel locks.

File: 1 edited

  • trunk/kernel/mm/vmm.c
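
Summary of the lock-API changes visible in this file (every entry below is taken from the hunks that follow):

    r561                                   r567
    -----------------------------------    -----------------------------------
    #include <rwlock.h>                    #include <remote_rwlock.h> + <remote_queuelock.h>
    remote_rwlock_init( xp )               remote_rwlock_init( xp , LOCK_VMM_VSL )
    remote_rwlock_rd_lock / rd_unlock      remote_rwlock_rd_acquire / rd_release
    remote_rwlock_wr_lock / wr_unlock      remote_rwlock_wr_acquire / wr_release
    spinlock_init / lock / unlock          busylock_init / acquire / release
    remote_spinlock_lock / unlock          remote_busylock_acquire / release
    hal_remote_lw                          hal_remote_l32
    vseg_attach / vseg_detach              vmm_vseg_attach / vmm_vseg_detach (lock taken inside)
    XLIST_FIRST_ELEMENT                    XLIST_FIRST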

Diff r561 → r567:
@@ -31,5 +31,6 @@
 #include <printk.h>
 #include <memcpy.h>
-#include <rwlock.h>
+#include <remote_rwlock.h>
+#include <remote_queuelock.h>
 #include <list.h>
 #include <xlist.h>
     
@@ -51,6 +52,5 @@
 //////////////////////////////////////////////////////////////////////////////////
 
-extern  process_t  process_zero;   // defined in cluster.c file
-
+extern  process_t  process_zero;      // allocated in cluster.c
 
 ///////////////////////////////////////
     
@@ -65,8 +65,9 @@
 
 #if DEBUG_VMM_INIT
+thread_t * this = CURRENT_THREAD;
 uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_VMM_INIT )
-printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
+printk("\n[DBG] %s : thread %x in process %x enter for process %x / cycle %d\n",
+__FUNCTION__ , this->trdid, this->process->pid, process->pid , cycle );
 #endif
 
     
@@ -77,15 +78,15 @@
     vmm->vsegs_nr = 0;
         xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
-        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) );
-
-    assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
-            <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
-
-    assert( (CONFIG_THREAD_MAX_PER_CLUSTER <= 32) ,
-            "no more than 32 threads per cluster for a single process\n");
-
-    assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) <=
-             (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
-             "STACK zone too small\n");
+        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ),LOCK_VMM_VSL );
+
+assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
+<= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
+
+assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
+"no more than 32 threads per cluster for a single process\n");
+
+assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
+(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
+"STACK zone too small\n");
 
     // register kentry vseg in VSL
     
@@ -171,5 +172,5 @@
     vmm->stack_mgr.bitmap   = 0;
     vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
-    spinlock_init( &vmm->stack_mgr.lock );
+    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );
 
     // initialize MMAP allocator
     
@@ -177,5 +178,5 @@
     vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
     vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
-    spinlock_init( &vmm->mmap_mgr.lock );
+    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
 
     uint32_t i;
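
The initializers above illustrate the new typed-lock API: each lock is now created with an identifier (LOCK_VMM_VSL, LOCK_VMM_STACK, LOCK_VMM_MMAP), presumably letting the restructured lock subsystem identify lock instances for debugging or instrumentation (an assumption; the lock implementation itself is not part of this diff). A minimal sketch of the pattern, using only the calls shown in this changeset:

    // sketch: VMM lock creation after this changeset
    remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );  // VSL lock (cross-cluster rwlock)
    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );                     // stack allocator (local busylock)
    busylock_init( &vmm->mmap_mgr.lock  , LOCK_VMM_MMAP );                      // mmap allocator (local busylock)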
     
@@ -190,6 +191,6 @@
 cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_VMM_INIT )
-printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
+printk("\n[DBG] %s : thread %x in process %x exit / process %x / entry_point = %x / cycle %d\n",
+__FUNCTION__, this->trdid, this->process->pid, process->pid , process->vmm.entry_point , cycle );
 #endif
 
     
@@ -209,5 +210,5 @@
 
     // get lock protecting the vseg list
-    remote_rwlock_rd_lock( XPTR( local_cxy , &vmm->vsegs_lock ) );
+    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
 
     // scan the list of vsegs
     
@@ -243,7 +244,48 @@
 
     // release the lock
-    remote_rwlock_rd_unlock( XPTR( local_cxy , &vmm->vsegs_lock ) );
+    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
 
 }  // vmm_display()
+
+///////////////////////////////////
+void vmm_vseg_attach( vmm_t  * vmm,
+                      vseg_t * vseg )
+{
+    // build extended pointer on rwlock protecting VSL
+    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
+
+    // get rwlock in write mode
+    remote_rwlock_wr_acquire( lock_xp );
+
+    // update vseg descriptor
+    vseg->vmm = vmm;
+
+    // add vseg in vmm list
+    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
+                    XPTR( local_cxy , &vseg->xlist ) );
+
+    // release rwlock in write mode
+    remote_rwlock_wr_release( lock_xp );
+}
+
+///////////////////////////////////
+void vmm_vseg_detach( vmm_t  * vmm,
+                      vseg_t * vseg )
+{
+    // build extended pointer on rwlock protecting VSL
+    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
+
+    // get rwlock in write mode
+    remote_rwlock_wr_acquire( lock_xp );
+
+    // update vseg descriptor
+    vseg->vmm = NULL;
+
+    // remove vseg from vmm list
+    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+    // release rwlock in write mode
+    remote_rwlock_wr_release( lock_xp );
+}
 
 /////////////////////i//////////////////////////
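
These two new helpers move the VSL write-lock acquisition inside the attach/detach operation, so every caller shrinks from an explicit lock/attach/unlock sequence to a single call (see the vmm_create_vseg and vmm_remove_vseg hunks further down). Before/after, taken directly from this changeset:

    /* r561: each caller locked the VSL around vseg_attach() */
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    remote_rwlock_wr_lock( lock_xp );
    vseg_attach( vmm , vseg );
    remote_rwlock_wr_unlock( lock_xp );

    /* r567: the helper takes and releases the rwlock itself */
    vmm_vseg_attach( vmm , vseg );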
     
@@ -274,7 +316,7 @@
 #endif
 
-    // check cluster is reference
-    assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
-    "not called in reference cluster\n");
+// check cluster is reference
+assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
+"not called in reference cluster\n");
 
     // get extended pointer on root of process copies xlist in owner cluster
     
@@ -346,7 +388,7 @@
 #endif
 
-    // check cluster is reference
-    assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
-    "local cluster is not process reference cluster\n");
+// check cluster is reference
+assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
+"local cluster is not process reference cluster\n");
 
     // get pointer on reference VMM
     
@@ -387,6 +429,6 @@
             vseg     = GET_PTR( vseg_xp );
 
-            assert( (GET_CXY( vseg_xp ) == local_cxy) ,
-            "all vsegs in reference VSL must be local\n" );
+assert( (GET_CXY( vseg_xp ) == local_cxy) ,
+"all vsegs in reference VSL must be local\n" );
 
             // get vseg type, base and size
     
@@ -444,8 +486,12 @@
                             lock_xp  = XPTR( page_cxy , &page_ptr->lock );
 
+                            // take lock protecting "forks" counter
+                            remote_busylock_acquire( lock_xp );
+
                             // increment "forks"
-                            remote_spinlock_lock( lock_xp );
                             hal_remote_atomic_add( forks_xp , 1 );
-                            remote_spinlock_unlock( lock_xp );
+
+                            // release lock protecting "forks" counter
+                            remote_busylock_release( lock_xp );
                         }
                     }   // end loop on vpn
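
Note that the increment itself was already atomic (hal_remote_atomic_add), in r561 as in r567; the surrounding lock exists because other paths must read the counter and act on its value as one step (see the vmm_get_pte hunks near the end of this changeset). The fork-side pattern introduced here:

    // fork side: bump the per-page "forks" counter under the page busylock
    remote_busylock_acquire( lock_xp );        // lock_xp -> page descriptor lock
    hal_remote_atomic_add( forks_xp , 1 );     // forks_xp -> "forks" counter
    remote_busylock_release( lock_xp );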
     
@@ -511,5 +557,5 @@
 
     // initialize the lock protecting the child VSL
-    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ) );
+    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
 
     // initialize the child VSL as empty
     
@@ -529,6 +575,6 @@
     parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
 
-    // take the lock protecting the parent VSL
-    remote_rwlock_rd_lock( parent_lock_xp );
+    // take the lock protecting the parent VSL in read mode
+    remote_rwlock_rd_acquire( parent_lock_xp );
 
     // loop on parent VSL xlist
     
@@ -540,5 +586,5 @@
 
         // get vseg type
-        type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
+        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );
 
 #if DEBUG_VMM_FORK_COPY
     
@@ -547,5 +593,5 @@
 printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
-hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
+hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
 #endif
 
     
@@ -566,5 +612,5 @@
 
             // register child vseg in child VSL
-            vseg_attach( child_vmm , child_vseg );
+            vmm_vseg_attach( child_vmm , child_vseg );
 
 #if DEBUG_VMM_FORK_COPY
     
@@ -573,5 +619,5 @@
 printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
-hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
+hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
 #endif
 
     
@@ -613,8 +659,12 @@
                         lock_xp  = XPTR( page_cxy , &page_ptr->lock );
 
+                        // get lock protecting "forks" counter
+                        remote_busylock_acquire( lock_xp );
+
                         // increment "forks"
-                        remote_spinlock_lock( lock_xp );
                         hal_remote_atomic_add( forks_xp , 1 );
-                        remote_spinlock_unlock( lock_xp );
+
+                        // release lock protecting "forks" counter
+                        remote_busylock_release( lock_xp );
 
 #if DEBUG_VMM_FORK_COPY
     
@@ -630,6 +680,6 @@
     }   // end loop on vsegs
 
-    // release the parent vsegs lock
-    remote_rwlock_rd_unlock( parent_lock_xp );
+    // release the parent VSL lock in read mode
+    remote_rwlock_rd_release( parent_lock_xp );
 
     // initialize child GPT (architecture specic)
     
@@ -703,8 +753,4 @@
     // get extended pointer on VSL root and VSL lock
     xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
-        xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-    // get lock protecting vseg list
-        remote_rwlock_wr_lock( lock_xp );
 
     // remove all user vsegs registered in VSL
     
@@ -712,5 +758,5 @@
         {
         // get pointer on first vseg in VSL
-                vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
+                vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
         vseg    = GET_PTR( vseg_xp );
 
     
@@ -719,5 +765,5 @@
 
         // remove vseg from VSL
-                vseg_detach( vseg );
+                vmm_vseg_detach( vmm , vseg );
 
         // release memory allocated to vseg descriptor
     
@@ -732,7 +778,4 @@
         }
 
-    // release lock protecting VSL
-        remote_rwlock_wr_unlock( lock_xp );
-
     // remove all vsegs from zombi_lists in MMAP allocator
     uint32_t i;
     
@@ -748,5 +791,5 @@
 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
 #endif
-                    vseg_detach( vseg );
+                    vmm_vseg_detach( vmm , vseg );
             vseg_free( vseg );
 
     
@@ -812,5 +855,5 @@
 
     // get lock on stack allocator
-    spinlock_lock( &mgr->lock );
+    busylock_acquire( &mgr->lock );
 
     // get first free slot index in bitmap
     
@@ -818,6 +861,6 @@
     if( (index < 0) || (index > 31) )
     {
-        spinlock_unlock( &mgr->lock );
-        return ENOMEM;
+        busylock_release( &mgr->lock );
+        return 0xFFFFFFFF;
     }
 
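
Beyond the lock rename, this hunk also changes the failure convention: the error path no longer returns the ENOMEM errno. The function returns a vpn_base-style unsigned value (see the "returns vpn_base, vpn_size" comment below), so the all-ones value 0xFFFFFFFF serves as an out-of-band failure marker. A hypothetical caller-side check (function and variable names assumed for illustration; they do not appear in this diff):

    // hypothetical caller of the stack allocator (names assumed)
    uint32_t vpn_base = stack_alloc( vmm , &vpn_size );
    if( vpn_base == 0xFFFFFFFF )  return ENOMEM;   // allocation failed: bitmap full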
     
@@ -826,5 +869,5 @@
 
     // release lock on stack allocator
-    spinlock_unlock( &mgr->lock );
+    busylock_release( &mgr->lock );
 
     // returns vpn_base, vpn_size (one page non allocated)
     
@@ -864,5 +907,5 @@
 
     // get lock on mmap allocator
-    spinlock_lock( &mgr->lock );
+    busylock_acquire( &mgr->lock );
 
     // get vseg from zombi_list or from mmap zone
     
@@ -892,5 +935,5 @@
 
     // release lock on mmap allocator
-    spinlock_unlock( &mgr->lock );
+    busylock_release( &mgr->lock );
 
     // returns vpn_base, vpn_size
     
@@ -1002,8 +1045,5 @@
 
     // attach vseg to VSL
-    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-        remote_rwlock_wr_lock( lock_xp );
-        vseg_attach( vmm , vseg );
-        remote_rwlock_wr_unlock( lock_xp );
+        vmm_vseg_attach( vmm , vseg );
 
 #if DEBUG_VMM_CREATE_VSEG
     
@@ -1027,8 +1067,5 @@
 
     // detach vseg from VSL
-    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-        remote_rwlock_wr_lock( lock_xp );
-        vseg_detach( vseg );
-        remote_rwlock_wr_unlock( lock_xp );
+        vmm_vseg_detach( vmm , vseg );
 
     // release the stack slot to VMM stack allocator if STACK type
     
@@ -1042,7 +1079,7 @@
 
         // update stacks_bitmap
-        spinlock_lock( &mgr->lock );
+        busylock_acquire( &mgr->lock );
         bitmap_clear( &mgr->bitmap , index );
-        spinlock_unlock( &mgr->lock );
+        busylock_release( &mgr->lock );
     }
 
     
@@ -1057,7 +1094,7 @@
 
         // update zombi_list
-        spinlock_lock( &mgr->lock );
+        busylock_acquire( &mgr->lock );
         list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
-        spinlock_unlock( &mgr->lock );
+        busylock_release( &mgr->lock );
     }
 
     
@@ -1112,7 +1149,7 @@
 #endif
 
-            // check small page
-            assert( (attr & GPT_SMALL) ,
-            "an user vseg must use small pages" );
+// check small page
+assert( (attr & GPT_SMALL) ,
+"an user vseg must use small pages" );
 
             // unmap GPT entry in all GPT copies
     
@@ -1121,5 +1158,5 @@
             // handle pending forks counter if
             // 1) not identity mapped
-            // 2) running in reference cluster
+            // 2) reference cluster
             if( ((vseg->flags & VSEG_IDENT)  == 0) &&
                 (GET_CXY( process->ref_xp ) == local_cxy) )
     
@@ -1134,9 +1171,6 @@
                 lock_xp  = XPTR( page_cxy , &page_ptr->lock );
 
-                // get lock protecting page descriptor
-                remote_spinlock_lock( lock_xp );
-
                 // get pending forks counter
-                forks = hal_remote_lw( forks_xp );
+                forks = hal_remote_l32( forks_xp );
 
                 if( forks )  // decrement pending forks counter
     
@@ -1157,7 +1191,4 @@
                     }
                 }
-
-                // release lock protecting page descriptor
-                remote_spinlock_unlock( lock_xp );
             }
         }
     
@@ -1194,5 +1225,5 @@
 
     // get lock protecting the VSL
-    remote_rwlock_rd_lock( lock_xp );
+    remote_rwlock_rd_acquire( lock_xp );
 
     // scan the list of vsegs in VSL
     
@@ -1204,5 +1235,5 @@
         {
             // return success
-            remote_rwlock_rd_unlock( lock_xp );
+            remote_rwlock_rd_release( lock_xp );
             return vseg;
         }
     
@@ -1210,5 +1241,5 @@
 
     // return failure
-    remote_rwlock_rd_unlock( lock_xp );
+    remote_rwlock_rd_release( lock_xp );
     return NULL;
 
     
@@ -1240,5 +1271,5 @@
 
     // get lock protecting VSL
-        remote_rwlock_wr_lock( lock_xp );
+        remote_rwlock_wr_acquire( lock_xp );
 
         if( (vseg->min > addr_min) || (vseg->max < addr_max) )   // region not included in vseg
     
@@ -1301,5 +1332,5 @@
 
     // release VMM lock
-        remote_rwlock_wr_unlock( lock_xp );
+        remote_rwlock_wr_release( lock_xp );
 
         return error;
     
@@ -1348,5 +1379,5 @@
 
         // register local vseg in local VMM
-        vseg_attach( &process->vmm , vseg );
+        vmm_vseg_attach( &process->vmm , vseg );
     }
 
     
@@ -1381,5 +1412,6 @@
     uint32_t     flags = vseg->flags;
 
-    assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
+// check vseg type
+assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
 
     if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
     
@@ -1400,4 +1432,7 @@
         }
         page_cxy = ( x << y_width ) + y;
+
+//      if ( LOCAL_CLUSTER->valid[x][y] == false ) page_cxy = cluster_random_select();
+
     }
     else                          // other cases => cxy specified in vseg
     
@@ -1457,6 +1492,6 @@
         xptr_t mapper_xp = vseg->mapper_xp;
 
-        assert( (mapper_xp != XPTR_NULL),
-        "mapper not defined for a FILE vseg\n" );
+assert( (mapper_xp != XPTR_NULL),
+"mapper not defined for a FILE vseg\n" );
 
         // get mapper cluster and local pointer
     
@@ -1495,6 +1530,6 @@
             xptr_t     mapper_xp = vseg->mapper_xp;
 
-            assert( (mapper_xp != XPTR_NULL),
-            "mapper not defined for a CODE or DATA vseg\n" );
+assert( (mapper_xp != XPTR_NULL),
+"mapper not defined for a CODE or DATA vseg\n" );
 
             // get mapper cluster and local pointer
     
@@ -1513,6 +1548,4 @@
 __FUNCTION__, this, vpn, elf_offset );
 #endif
-
-
             // compute extended pointer on page base
             xptr_t base_xp  = ppm_page2base( page_xp );
     
@@ -1529,6 +1562,4 @@
 __FUNCTION__, this, vpn );
 #endif
-
-
                 if( GET_CXY( page_xp ) == local_cxy )
                 {
     
@@ -1646,7 +1677,7 @@
     error_t    error;
 
-    thread_t * this  = CURRENT_THREAD;
 
 #if DEBUG_VMM_GET_PTE
+thread_t * this  = CURRENT_THREAD;
 uint32_t   cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_VMM_GET_PTE < cycle )
     
@@ -1663,6 +1694,6 @@
                            &vseg );
 
-    // vseg has been checked by the vmm_handle_page_fault() function
-    assert( (vseg != NULL) , "vseg undefined / vpn %x\n");
+// vseg has been checked by the vmm_handle_page_fault() function
+assert( (vseg != NULL) , "vseg undefined / vpn %x\n");
 
     if( cow )  //////////////// copy_on_write request //////////////////////
     
@@ -1675,6 +1706,6 @@
         hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
 
-        assert( (old_attr & GPT_MAPPED),
-          "PTE unmapped for a COW exception / vpn %x\n" );
+assert( (old_attr & GPT_MAPPED),
+"PTE unmapped for a COW exception / vpn %x\n" );
 
 #if( DEBUG_VMM_GET_PTE & 1 )
     
@@ -1693,9 +1724,9 @@
         xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
 
-        // take lock protecting page descriptor
-        remote_spinlock_lock( lock_xp );
+        // take lock protecting "forks" counter
+        remote_busylock_acquire( lock_xp );
 
         // get number of pending forks in page descriptor
-        uint32_t forks = hal_remote_lw( forks_xp );
+        uint32_t forks = hal_remote_l32( forks_xp );
 
         if( forks )        // pending fork => allocate a new page, copy old to new
     
@@ -1728,6 +1759,6 @@
         }
 
-        // release lock protecting page descriptor
-        remote_spinlock_unlock( lock_xp );
+        // release lock protecting "forks" counter
+        remote_busylock_release( lock_xp );
 
         // build new_attr : reset COW and set WRITABLE,
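
These two hunks are the consumer side of the "forks" protocol shown earlier: the busylock is held from the counter read through the copy-on-write decision, so an increment from a concurrent fork cannot slip in between. Condensed from the two hunks above, with the elided middle marked:

    // COW path of vmm_get_pte, condensed (body of the if elided)
    remote_busylock_acquire( lock_xp );            // take lock protecting "forks" counter
    uint32_t forks = hal_remote_l32( forks_xp );   // pending forks counter
    if( forks )
    {
        // pending fork => allocate a new page, copy old to new  (elided)
    }
    remote_busylock_release( lock_xp );            // release lock protecting "forks" counter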
     
@@ -1840,7 +1871,7 @@
     type = vseg->type;
 
-    // get reference process cluster and local pointer
+    // get relevant process cluster and local pointer
     // for private vsegs (CODE and DATA type),
-    // the reference is the local process descriptor.
+    // the relevant process descriptor is local.
     if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )
     {