Changeset 629 for trunk/kernel


Ignore:
Timestamp:
May 17, 2019, 9:27:04 AM (5 years ago)
Author:
alain
Message:

Remove the "giant" rwlock protecting the GPT, and
use the GPT_LOCKED attribute in each PTE to prevent
concurrent modifications of one GPT entry.
The version number has been incremented to 2.1.

Location:
trunk/kernel
Files:
20 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/fs/fatfs.h

    r628 r629  
    181181 *
    182182 * WARNING 2 : Most fields are constant values, but the <free_cluster_hint>,
    183  * <free_clusters>, <dirty_page_min>, <dirty_page_max>, <lock>, and the <fs_info_buffer>
    184  * are shared variables, that can be modified by any thread running in any cluster.
    185  * The <fs_info_buffer> contains a copy of the FS_INFO sector, and is only allocated in
    186  * the FAT cluster (i.e. in cluster 0). It is used by all to synchronously update the
    187  * free clusters info on IOC device.
     183 * <free_clusters>, <lock>, and the <fs_info_buffer> are shared variables,
     184 * that can be modified by any thread running in any cluster. The <fs_info_buffer>
     185 * contains a copy of the FS_INFO sector, and is only allocated in the FAT cluster
     186 * (cluster 0). It is used to synchronously update the free clusters info on IOC device.
    188187 *  => For all these variables, only the values stored in the FAT cluster must be used.
    189188 ****************************************************************************************/
     
    202201
    203202    /* shared variables (only the copy in FAT cluster must be used)                     */
    204     uint32_t            dirty_page_min;        /*! min dirty page index in FAT mapper   */
    205     uint32_t            dirty_page_max;        /*! max dirty page index in FAT mapper   */
    206203    uint32_t            free_cluster_hint;     /*! cluster[hint+1] is the first free    */
    207204    uint32_t            free_clusters;         /*! free clusters number                 */
  • trunk/kernel/fs/vfs.c

    r628 r629  
    26082608#if (DEBUG_VFS_LOOKUP & 1)
    26092609if( DEBUG_VFS_LOOKUP < cycle )
    2610 printk("\n[%s] thread[%x,%x] created missing inode <%s> in cluster %x\n",
     2610printk("\n[%s] thread[%x,%x] created missing inode for <%s> in cluster %x\n",
    26112611__FUNCTION__, process->pid, this->trdid, name, child_cxy );
    26122612#endif
     
    26292629
    26302630                // when the missing dentry is not in the parent mapper,
    2631                 // it is a new dentry that must be registered in parent directory mapper
     2631                // a new dentry must be registered in parent directory mapper
    26322632                if ( error )
    26332633                {
     
    28142814
    28152815    // 1. allocate one free cluster in file system to child inode,
    2816     // and update the File Allocation Table in both the TAF mapper and IOC device.
     2816    // and update the File Allocation Table in both the FAT mapper and IOC device.
    28172817    // It depends on the child inode FS type.
    28182818    vfs_ctx_t * ctx = hal_remote_lpt( XPTR( child_cxy , &child_ptr->ctx ) );
  • trunk/kernel/kern/kernel_init.c

    r628 r629  
    171171    "VFS_FILE",              // 33
    172172    "VMM_VSL",               // 34
    173     "VMM_GPT",               // 35
    174     "VFS_MAIN",              // 36
    175     "FATFS_FAT",             // 37
     173    "VFS_MAIN",              // 35
     174    "FATFS_FAT",             // 36
    176175};       
    177176
  • trunk/kernel/kern/process.c

    r626 r629  
    166166#endif
    167167
    168     // initialize GPT and VSL locks
    169     remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
     168    // initialize VSL locks
    170169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    171170
     
    426425
    427426    // initialize GPT and VSL locks
    428     remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    429427        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    430428
     
    14821480
    14831481    // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
    1484     // this includes all parnet process copies in all clusters
     1482    // this includes all parent process copies in all clusters
    14851483    if( parent_process_cxy == local_cxy )   // reference is local
    14861484    {
     
    17071705
    17081706    // initialize VSL and GPT locks
    1709         remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    1710     remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
     1707    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    17111708   
    17121709    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
  • trunk/kernel/kern/rpc.c

    r628 r629  
    104104    "VFS_FILE_CREATE",           // 14
    105105    "VFS_FILE_DESTROY",          // 15
    106     "VFS_FS_GET_DENTRY",         // 16
     106    "VFS_FS_NEW_DENTRY",         // 16
    107107    "VFS_FS_ADD_DENTRY",         // 17
    108108    "VFS_FS_REMOVE_DENTRY",      // 18
  • trunk/kernel/kern/scheduler.c

    r625 r629  
    6363// @ returns pointer on selected thread descriptor
    6464////////////////////////////////////////////////////////////////////////////////////////////
    65 thread_t * sched_select( scheduler_t * sched )
     65static thread_t * sched_select( scheduler_t * sched )
    6666{
    6767    thread_t     * thread;
     
    248248uint32_t cycle = (uint32_t)hal_get_cycles();
    249249if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    250 printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / %d threads / cycle %d\n",
     250printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted (still %d threads) / cycle %d\n",
    251251__FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, count, cycle );
     252#endif
     253
     254#if CONFIG_INSTRUMENTATION_PGFAULTS
     255uint32_t local_nr    = thread->info.local_pgfault_nr;
     256uint32_t local_cost  = (local_nr == 0)  ? 0 : (thread->info.local_pgfault_cost / local_nr);
     257uint32_t global_nr   = thread->info.global_pgfault_nr;
     258uint32_t global_cost = (global_nr == 0) ? 0 : (thread->info.global_pgfault_cost / global_nr);
     259uint32_t false_nr    = thread->info.false_pgfault_nr;
     260uint32_t false_cost  = (false_nr == 0)  ? 0 : (thread->info.false_pgfault_cost / false_nr);
     261printk("***** page faults for thread[%x,%x]\n"
     262       "  - %d local  : %d cycles\n"
     263       "  - %d global : %d cycles\n"
     264       "  - %d false  : %d cycles\n",
     265       process->pid, thread->trdid,
     266       local_nr,  local_cost,
     267       global_nr, global_cost,
     268       false_nr,  false_cost );
    252269#endif
    253270            // destroy process descriptor if last thread
     
    481498 
    482499#if (DEBUG_SCHED_YIELD & 0x1)
    483 // if( sched->trace )
    484 if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
     500if( sched->trace )
    485501sched_display( lid );
    486502#endif
     
    535551
    536552#if DEBUG_SCHED_YIELD
    537 // if( sched->trace )
    538 if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
     553if( sched->trace )
    539554printk("\n[%s] core[%x,%d] / cause = %s\n"
    540555"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
     
    553568
    554569#if (DEBUG_SCHED_YIELD & 1)
    555 // if( sched->trace )
    556 if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
     570if( sched->trace )
    557571printk("\n[%s] core[%x,%d] / cause = %s\n"
    558572"      thread %x (%s) (%x,%x) continue / cycle %d\n",
  • trunk/kernel/kern/thread.c

    r625 r629  
    907907
    908908    // update target process instrumentation counter
    909         process->vmm.pgfault_nr += thread->info.pgfault_nr;
     909        // process->vmm.pgfault_nr += thread->info.pgfault_nr;
    910910
    911911    // remove thread from process th_tbl[]
  • trunk/kernel/kern/thread.h

    r625 r629  
    100100typedef struct thread_info_s
    101101{
    102         uint32_t              pgfault_nr;    /*! cumulated number of page fault           */
    103         cycle_t               last_cycle;    /*! last cycle counter value (date)          */
    104         cycle_t               usr_cycles;    /*! user execution duration (cycles)         */
    105         cycle_t               sys_cycles;    /*! system execution duration (cycles)       */
     102        uint32_t     false_pgfault_nr;       /*! number of local page fault               */
     103    uint32_t     false_pgfault_cost;     /*! cumulated cost                           */
     104        uint32_t     local_pgfault_nr;       /*! number of local page fault               */
     105    uint32_t     local_pgfault_cost;     /*! cumulated cost                           */
     106        uint32_t     global_pgfault_nr;      /*! number of global page fault              */
     107    uint32_t     global_pgfault_cost;    /*! cumulated cost                           */
     108
     109        cycle_t      last_cycle;             /*! last cycle counter value (date)          */
     110        cycle_t      usr_cycles;             /*! user execution duration (cycles)         */
     111        cycle_t      sys_cycles;             /*! system execution duration (cycles)       */
    106112}
    107113thread_info_t;
  • trunk/kernel/kernel_config.h

    r628 r629  
    2626#define _KERNEL_CONFIG_H_
    2727
    28 #define CONFIG_ALMOS_VERSION           "Version 2.0 / April 2019"
     28#define CONFIG_ALMOS_VERSION           "Version 2.1 / May 2019"
    2929
    3030////////////////////////////////////////////////////////////////////////////////////////////
     
    4040
    4141#define DEBUG_BUSYLOCK                    0
    42 #define DEBUG_BUSYLOCK_PID                0x10001    // for busylock detailed debug
    43 #define DEBUG_BUSYLOCK_TRDID              0x10000    // for busylock detailed debug
     42#define DEBUG_BUSYLOCK_PID                0          // for busylock detailed debug
     43#define DEBUG_BUSYLOCK_TRDID              0          // for busylock detailed debug
    4444                 
    4545#define DEBUG_CHDEV_CMD_RX                0
     
    136136#define DEBUG_PROCESS_ZERO_CREATE         0
    137137
    138 #define DEBUG_QUEUELOCK_TYPE              0    // lock type (0 : undefined / 1000 : all types)
     138#define DEBUG_QUEUELOCK_TYPE              0       // lock type 0 is undefined => no debug
     139#define DEBUG_QUEUELOCK_PTR               0
     140#define DEBUG_QUEUELOCK_CXY               0
    139141
    140142#define DEBUG_RPC_CLIENT_GENERIC          0
     
    165167#define DEBUG_RPC_VMM_DELETE_VSEG         0
    166168
    167 #define DEBUG_RWLOCK_TYPE                 0    // lock type (0 : undefined / 1000 : all types)
     169#define DEBUG_RWLOCK_TYPE                 35       // lock type 0 is undefined => no debug
     170#define DEBUG_RWLOCK_PTR                  0xb1650
     171#define DEBUG_RWLOCK_CXY                  0x11
    168172
    169173#define DEBUG_SCHED_HANDLE_SIGNALS        2
     
    309313#define LOCK_VFS_FILE         33   // remote (RW) protect file descriptor state
    310314#define LOCK_VMM_VSL          34   // remote (RW) protect VSL (local list of vsegs)
    311 #define LOCK_VMM_GPT          35   // remote (RW) protect GPT (local page table)
    312 #define LOCK_VFS_MAIN         36   // remote (RW) protect vfs traversal (in root inode)
    313 #define LOCK_FATFS_FAT        37   // remote (RW) protect exclusive access to the FATFS FAT
     315#define LOCK_VFS_MAIN         35   // remote (RW) protect vfs traversal (in root inode)
     316#define LOCK_FATFS_FAT        36   // remote (RW) protect exclusive access to the FATFS FAT
    314317
    315318
     
    451454////////////////////////////////////////////////////////////////////////////////////////////
    452455
    453 #define CONFIG_INTRUMENTATION_SYSCALLS  0
     456#define CONFIG_INSTRUMENTATION_SYSCALLS  0
     457#define CONFIG_INSTRUMENTATION_PGFAULTS  1
    454458
    455459
  • trunk/kernel/libk/queuelock.c

    r623 r629  
    22 * queuelock.c - local kernel lock with waiting queue implementation.
    33 *
    4  * Authors   Alain Greiner     (2016,2017,2018)
     4 * Authors   Alain Greiner     (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    4747#if DEBUG_QUEUELOCK_TYPE
    4848thread_t * this = CURRENT_THREAD;
    49 if( DEBUG_QUEUELOCK_TYPE == type )
     49if( (type      == DEBUG_QUEUELOCK_TYPE) &&
     50    (lock      == DEBUG_QUEUELOCK_PTR ) &&
     51    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
    5052printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
    5153__FUNCTION__, this->process->pid, this->trdid,
     
    7577
    7678#if DEBUG_QUEUELOCK_TYPE
    77 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     79if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     80    (lock      == DEBUG_QUEUELOCK_PTR ) &&
     81    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
    7882printk("\n[%s ] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n",
    7983__FUNCTION__, this->process->pid, this->trdid,
     
    100104
    101105#if DEBUG_QUEUELOCK_TYPE
    102 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     106if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     107    (lock      == DEBUG_QUEUELOCK_PTR ) &&
     108    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
    103109printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n",
    104110__FUNCTION__, this->process->pid, this->trdid,
     
    126132uint32_t   lock_type = lock->lock.type;
    127133thread_t * this      = CURRENT_THREAD;
    128 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     134if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     135    (lock      == DEBUG_QUEUELOCK_PTR ) &&
     136    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
    129137printk("\n[%s] thread[%x,%x] RELEASE q_lock %s [%x,%x]\n",
    130138__FUNCTION__, this->process->pid, this->trdid,
     
    142150
    143151#if DEBUG_QUEUELOCK_TYPE
    144 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     152if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     153    (lock      == DEBUG_QUEUELOCK_PTR ) &&
     154    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
    145155printk("\n[%s] thread[%x,%x] UNBLOCK thread [%x,%x] / q_lock %s [%x,%x]\n",
    146156__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
  • trunk/kernel/libk/remote_barrier.c

    r623 r629  
    467467if( cycle > DEBUG_BARRIER_WAIT )
    468468printk("\n[%s] thread[%x,%x] exit / barrier (%x,%x) / cycle %d\n",
    469 __FUNCTION__, this->trdid, this->process->pid, barrier_cxy, barrier_ptr, cycle );
     469__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
    470470#endif
    471471
  • trunk/kernel/libk/remote_queuelock.c

    r623 r629  
    22 * remote_queuelock.c - remote kernel lock with waiting queue implementation.
    33 *
    4  * Authors   Alain Greiner     (2016,2017,2018)
     4 * Authors   Alain Greiner     (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    5656#if DEBUG_QUEUELOCK_TYPE
    5757thread_t * this = CURRENT_THREAD;
    58 if( DEBUG_QUEUELOCK_TYPE == type )
     58if( (type     == DEBUG_QUEUELOCK_TYPE) &&
     59    (lock_ptr == DEBUG_QUEUELOCK_PTR ) &&
     60    (lock_cxy == DEBUG_QUEUELOCK_CXY ) )
    5961printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
    6062__FUNCTION__, this->process->pid, this->trdid,
     
    9193
    9294#if DEBUG_QUEUELOCK_TYPE
    93 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     95if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     96    (lock_ptr  == DEBUG_QUEUELOCK_PTR ) &&
     97    (lock_cxy  == DEBUG_QUEUELOCK_CXY ) )
    9498printk("\n[%s] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n",
    9599__FUNCTION__, this->process->pid, this->trdid,
     
    117121
    118122#if DEBUG_QUEUELOCK_TYPE
    119 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     123if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     124    (lock_ptr  == DEBUG_QUEUELOCK_PTR ) &&
     125    (lock_cxy  == DEBUG_QUEUELOCK_CXY ) )
    120126printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n",
    121127__FUNCTION__, this->process->pid, this->trdid,
     
    152158thread_t * this      = CURRENT_THREAD;
    153159uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    154 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     160if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     161    (lock_ptr  == DEBUG_QUEUELOCK_PTR ) &&
     162    (lock_cxy  == DEBUG_QUEUELOCK_CXY ) )
    155163printk("\n[%s] thread[%x,%x] RELEASE q_lock %s (%x,%x)\n",
    156164__FUNCTION__, this->process->pid, this->trdid,
     
    171179
    172180#if DEBUG_QUEUELOCK_TYPE
    173 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
     181if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
     182    (lock_ptr  == DEBUG_QUEUELOCK_PTR ) &&
     183    (lock_cxy  == DEBUG_QUEUELOCK_CXY ) )
    174184{
    175185    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
  • trunk/kernel/libk/remote_rwlock.c

    r627 r629  
    5555#if DEBUG_RWLOCK_TYPE
    5656thread_t * this = CURRENT_THREAD;
    57 if( DEBUG_RWLOCK_TYPE == type )
     57if( (type               == DEBUG_RWLOCK_TYPE) &&
     58    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     59    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    5860printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
    5961__FUNCTION__, this->process->pid, this->trdid,
     
    9395
    9496#if DEBUG_RWLOCK_TYPE
    95 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     97if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     98    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     99    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    96100printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    97101__FUNCTION__, this->process->pid, this->trdid,
     
    124128
    125129#if DEBUG_RWLOCK_TYPE
    126 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    127 printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken = %d / count = %d\n",
     130if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     131    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     132    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
     133printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    128134__FUNCTION__, this->process->pid, this->trdid,
    129135lock_type_str[lock_type], lock_cxy, lock_ptr,
     
    166172
    167173#if DEBUG_RWLOCK_TYPE
    168 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     174if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     175    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     176    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    169177printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    170178__FUNCTION__, this->process->pid, this->trdid,
     
    196204
    197205#if DEBUG_RWLOCK_TYPE
    198 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     206if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     207    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     208    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    199209printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    200210__FUNCTION__, this->process->pid, this->trdid,
     
    235245uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    236246xptr_t     taken_xp  = XPTR( lock_cxy , &lock_ptr->taken );
    237 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     247if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     248    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     249    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    238250printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    239251__FUNCTION__, this->process->pid, this->trdid,
     
    258270
    259271#if DEBUG_RWLOCK_TYPE
    260 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     272if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     273    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     274    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    261275{
    262276    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    289303
    290304#if DEBUG_RWLOCK_TYPE
    291 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     305if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     306    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     307    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    292308{
    293309    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    334350uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    335351xptr_t     count_xp  = XPTR( lock_cxy , &lock_ptr->count );
    336 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     352if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     353    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     354    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    337355printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    338356__FUNCTION__, this->process->pid, this->trdid,
     
    356374
    357375#if DEBUG_RWLOCK_TYPE
    358 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     376if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     377    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     378    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    359379{
    360380    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    386406
    387407#if DEBUG_RWLOCK_TYPE
    388 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     408if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
     409    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
     410    (lock_cxy           == DEBUG_RWLOCK_CXY ) )
    389411{
    390412    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
  • trunk/kernel/libk/remote_rwlock.h

    r627 r629  
    4242 *   taken, or if the number of readers is non zero, it registers in the "wr_root" waiting
    4343 *   queue, blocks, and deschedules. It set "taken" otherwise.
    44  * - when a reader completes its access, it decrement the readers "count", unblock the
     44 * - when a reader completes its access, it decrement the readers "count", unblock
    4545 *   the first waiting writer if there is no other readers, and unblock all waiting
    4646 *   readers if there no write request.
  • trunk/kernel/libk/rwlock.c

    r623 r629  
    22 * rwlock.c - kernel local read/write lock implementation.
    33 *
    4  * Author  Alain Greiner     (2016,2017,2018)
     4 * Author  Alain Greiner     (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    5252#if DEBUG_RWLOCK_TYPE
    5353thread_t * this = CURRENT_THREAD;
    54 if( DEBUG_RWLOCK_TYPE == type )
     54if( (type           == DEBUG_RWLOCK_TYPE) &&
     55    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     56    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    5557printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
    5658__FUNCTION__, this->process->pid, this->trdid,
     
    8082
    8183#if DEBUG_RWLOCK_TYPE
    82 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     84if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     85    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     86    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    8387printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    8488__FUNCTION__, this->process->pid, this->trdid,
     
    105109
    106110#if DEBUG_RWLOCK_TYPE
    107 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     111if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     112    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     113    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    108114printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    109115__FUNCTION__, this->process->pid, this->trdid,
     
    136142
    137143#if DEBUG_RWLOCK_TYPE
    138 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     144if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     145    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     146    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    139147printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    140148__FUNCTION__, this->process->pid, this->trdid,
     
    161169
    162170#if DEBUG_RWLOCK_TYPE
    163 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     171if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     172    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     173    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    164174printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    165175__FUNCTION__, this->process->pid, this->trdid,
     
    187197thread_t * this = CURRENT_THREAD;
    188198uint32_t lock_type = lock->lock.type;
    189 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     199if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     200    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     201    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    190202printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    191203__FUNCTION__, this->process->pid, this->trdid,
     
    201213
    202214#if DEBUG_RWLOCK_TYPE
    203 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     215if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     216    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     217    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    204218printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    205219__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    223237
    224238#if DEBUG_RWLOCK_TYPE
    225 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     239if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     240    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     241    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    226242printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    227243__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    257273thread_t * this = CURRENT_THREAD;
    258274uint32_t lock_type = lock->lock.type;
    259 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     275if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     276    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     277    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    260278printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    261279__FUNCTION__, this->process->pid, this->trdid,
     
    270288
    271289#if DEBUG_RWLOCK_TYPE
    272 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     290if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     291    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     292    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    273293printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    274294__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    291311
    292312#if DEBUG_RWLOCK_TYPE
    293 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
     313if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
     314    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
     315    (local_cxy      == DEBUG_RWLOCK_CXY ) )
    294316printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    295317__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
  • trunk/kernel/libk/user_dir.c

    r619 r629  
    22 * user_dir.c - kernel DIR related operations implementation.
    33 *
    4  * Authors   Alain   Greiner (2016,2017,2018)
     4 * Authors   Alain   Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    9191    pid_t           ref_pid;           // reference process PID
    9292    xptr_t          gpt_xp;            // extended pointer on reference process GPT
    93     uint32_t        gpt_attributes;    // attributes for all mapped gpt entries
     93    uint32_t        attr;              // attributes for all GPT entries
    9494    uint32_t        dirents_per_page;  // number of dirent descriptors per page
    9595    xptr_t          page_xp;           // extended pointer on page descriptor 
     
    9999    uint32_t        total_dirents;     // total number of dirents in dirent array
    100100    uint32_t        total_pages;       // total number of pages for dirent array
    101     vpn_t           vpn;               // first page in dirent array vseg
     101    vpn_t           vpn_base;          // first page in dirent array vseg
     102    vpn_t           vpn;               // current page in dirent array vseg
    102103    ppn_t           ppn;               // ppn of currently allocated physical page
    uint32_t        entries;           // number of dirent actually copied in one page
     
    107108    uint32_t        page_id;           // page index in list of physical pages
    108109    kmem_req_t      req;               // kmem request descriptor
     110    ppn_t           fake_ppn;          // unused, but required by hal_gpt_lock_pte()
     111    uint32_t        fake_attr;         // unused, but required by hal_gpt_lock_pte()
    109112    error_t         error;
    110113
    111     // get cluster, local pointer, and pid of reference user process
     114    // get cluster, local pointer, and pid of reference process
    112115    ref_cxy = GET_CXY( ref_xp );
    113116    ref_ptr = GET_PTR( ref_xp );
     
"inconsistent vseg size for dirent array" );
    257260
    258     // build extended pointer on reference process GPT, PTE attributes and ppn
     261    // build extended pointer on reference process GPT
    259262    gpt_xp         = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
    260     gpt_attributes = GPT_MAPPED   |
    261                      GPT_SMALL    |
    262                      GPT_READABLE |
    263                      GPT_CACHABLE |
    264                      GPT_USER     ;
     263
     264    // build PTE attributes
     265    attr = GPT_MAPPED   |
     266           GPT_SMALL    |
     267           GPT_READABLE |
     268           GPT_CACHABLE |
     269           GPT_USER     ;
    265270
    266271    // get first vpn from vseg descriptor
    267     vpn = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
     272    vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
    268273
    269274    // scan the list of allocated physical pages to map
    270     // all physical pages in the in the reference process GPT
     275    // all physical pages in the reference process GPT
    271276    page_id = 0;
    272277    while( list_is_empty( &root ) == false )
     
    277282        // compute ppn
    278283        ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
     284
     285        // compute vpn
     286        vpn = vpn_base + page_id;
    279287       
    280         error = hal_gpt_set_pte( gpt_xp,
    281                                  vpn + page_id,
    282                                  gpt_attributes,
    283                                  ppn );
     288        // lock the PTE (and create PT2 if required)
     289        error = hal_gpt_lock_pte( gpt_xp,
     290                                  vpn,
     291                                  &fake_attr,
     292                                  &fake_ppn );
    284293        if( error )
    285294        {
    286295            printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n",
    287             __FUNCTION__, (vpn + page_id) );
     296            __FUNCTION__, vpn );
    288297
    289298            // delete the vseg
    290             if( ref_cxy == local_cxy) vmm_delete_vseg( ref_pid, vpn<<CONFIG_PPM_PAGE_SHIFT );
    291             else rpc_vmm_delete_vseg_client( ref_cxy, ref_pid, vpn<<CONFIG_PPM_PAGE_SHIFT );
     299            if( ref_cxy == local_cxy)
     300                vmm_delete_vseg( ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT );
     301            else
     302                rpc_vmm_delete_vseg_client( ref_cxy, ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT );
    292303
    293304            // release the user_dir descriptor
     
    298309        }
    299310
     311        // set PTE in GPT                         
     312        hal_gpt_set_pte( gpt_xp,
     313                         vpn,
     314                         attr,
     315                         ppn );
     316
    300317#if DEBUG_USER_DIR
    301318if( cycle > DEBUG_USER_DIR )
     
    317334    dir->current = 0;
    318335    dir->entries = total_dirents;
    319     dir->ident   = (intptr_t)(vpn << CONFIG_PPM_PAGE_SHIFT);
     336    dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_SHIFT);
    320337
    321338    // build extended pointers on root and lock of user_dir xlist in ref process
  • trunk/kernel/libk/user_dir.h

    r623 r629  
    22 * user_dir.h -  DIR related operations definition.
    33 *
    4  * Authors   Alain Greiner   (2016,2017,2018)
     4 * Authors   Alain Greiner   (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/vmm.c

    r625 r629  
    741741    child_vmm  = &child_process->vmm;
    742742
    743     // initialize the locks protecting the child VSL and GPT
    744     remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
     743    // initialize the lock protecting the child VSL
    745744        remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );
    746745
     
    952951    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    953952    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    954     xptr_t   gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
    955953
    956954    // take the VSL lock
     
    10221020    }
    10231021
    1024     // take the GPT lock
    1025     remote_rwlock_wr_acquire( gpt_lock_xp );
    1026 
    10271022    // release memory allocated to the GPT itself
    10281023    hal_gpt_destroy( &vmm->gpt );
    1029 
    1030     // release the GPT lock
    1031     remote_rwlock_wr_release( gpt_lock_xp );
    10321024
    10331025#if DEBUG_VMM_DESTROY
     
    12261218{
    12271219    vmm_t     * vmm;        // local pointer on process VMM
     1220    xptr_t      gpt_xp;     // extended pointer on GPT
    12281221    bool_t      is_ref;     // local process is reference process
    12291222    uint32_t    vseg_type;  // vseg type
     
    12501243    vmm = &process->vmm;
    12511244
     1245    // build extended pointer on GPT
     1246    gpt_xp = XPTR( local_cxy , &vmm->gpt );
     1247
    12521248    // get relevant vseg infos
    12531249    vseg_type = vseg->type;
     
    12681264    {
    12691265        // get ppn and attr
    1270         hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
     1266        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
    12711267
    12721268        if( attr & GPT_MAPPED )  // PTE is mapped
     
    12781274#endif
    12791275            // unmap GPT entry in local GPT
    1280             hal_gpt_reset_pte( &vmm->gpt , vpn );
     1276            hal_gpt_reset_pte( gpt_xp , vpn );
    12811277
    12821278            // get pointers on physical page descriptor
     
    19151911{
    19161912    vseg_t         * vseg;            // vseg containing vpn
    1917     uint32_t         new_attr;        // new PTE_ATTR value
    1918     ppn_t            new_ppn;         // new PTE_PPN value
     1913    uint32_t         attr;            // PTE_ATTR value
     1914    ppn_t            ppn;             // PTE_PPN value
    19191915    uint32_t         ref_attr;        // PTE_ATTR value in reference GPT
    19201916    ppn_t            ref_ppn;         // PTE_PPN value in reference GPT
     
    19221918    process_t      * ref_ptr;         // reference process for missing vpn
    19231919    xptr_t           local_gpt_xp;    // extended pointer on local GPT
    1924     xptr_t           local_lock_xp;   // extended pointer on local GPT lock
    19251920    xptr_t           ref_gpt_xp;      // extended pointer on reference GPT
    1926     xptr_t           ref_lock_xp;     // extended pointer on reference GPT lock
    19271921    error_t          error;           // value returned by called functions
    19281922
     1923    thread_t * this  = CURRENT_THREAD;
     1924
     1925#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
     1926uint32_t start_cycle = (uint32_t)hal_get_cycles();
     1927#endif
     1928
    19291929#if DEBUG_VMM_HANDLE_PAGE_FAULT
    1930 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1931 thread_t * this  = CURRENT_THREAD;
    1932 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     1930if( DEBUG_VMM_HANDLE_PAGE_FAULT < start_cycle )
    19331931printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
    1934 __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
    1935 hal_vmm_display( process , true );
     1932__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
     1933#endif
     1934
     1935#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
     1936hal_vmm_display( this->process , false );
    19361937#endif
    19371938
     
    19421943    if( error )
    19431944    {
    1944         printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n",
    1945         __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );
     1945        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in registered vseg\n",
     1946        __FUNCTION__ , vpn , process->pid, this->trdid );
    19461947       
    19471948        return EXCP_USER_ERROR;
     
    19491950
    19501951#if DEBUG_VMM_HANDLE_PAGE_FAULT
    1951 cycle = (uint32_t)hal_get_cycles();
    1952 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    1953 printk("\n[%s] threadr[%x,%x] found vseg %s / cycle %d\n",
    1954 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
    1955 #endif
    1956 
    1957     //////////////// private vseg => access only the local GPT
    1958     if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    1959     {
    1960         // build extended pointer on local GPT and local GPT lock
    1961         local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
    1962         local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
    1963 
    1964         // take local GPT lock in write mode
    1965         remote_rwlock_wr_acquire( local_lock_xp );
    1966 
    1967         // check VPN still unmapped in local GPT
    1968 
    1969         // do nothing if VPN has been mapped by a a concurrent page_fault
    1970         hal_gpt_get_pte( local_gpt_xp,
    1971                          vpn,
    1972                          &new_attr,
    1973                          &new_ppn );
    1974 
    1975         if( (new_attr & GPT_MAPPED) == 0 )       // VPN still unmapped
    1976         {
    1977             // allocate and initialise a physical page depending on the vseg type
    1978             error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
    1979 
    1980             if( error )
    1981             {
    1982                 printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
    1983                 __FUNCTION__ , process->pid , vpn );
    1984 
    1985                 // release local GPT lock in write mode
    1986                 remote_rwlock_wr_release( local_lock_xp );
    1987 
    1988                 return EXCP_KERNEL_PANIC;
    1989             }
    1990 
    1991             // define new_attr from vseg flags
    1992             new_attr = GPT_MAPPED | GPT_SMALL;
    1993             if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
    1994             if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
    1995             if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
    1996             if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;
    1997 
    1998             // set PTE (PPN & attribute) to local GPT
    1999             error = hal_gpt_set_pte( local_gpt_xp,
    2000                                      vpn,
    2001                                      new_attr,
    2002                                      new_ppn );
    2003             if ( error )
    2004             {
    2005                 printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n",
    2006                 __FUNCTION__ , process->pid , vpn );
    2007 
    2008                 // release local GPT lock in write mode
    2009                 remote_rwlock_wr_release( local_lock_xp );
    2010 
    2011                 return EXCP_KERNEL_PANIC;
    2012             }
    2013         }
    2014 
    2015         // release local GPT lock in write mode
    2016         remote_rwlock_wr_release( local_lock_xp );
    2017 
    2018 #if DEBUG_VMM_HANDLE_PAGE_FAULT
    2019 cycle = (uint32_t)hal_get_cycles();
    2020 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    2021 printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
    2022 __FUNCTION__, vpn, new_ppn, new_attr, cycle );
    2023 #endif
    2024         return EXCP_NON_FATAL;
    2025 
    2026     }   // end local GPT access
    2027 
    2028     //////////// public vseg => access reference GPT
    2029     else                               
     1952if( DEBUG_VMM_HANDLE_PAGE_FAULT < start_cycle )
     1953printk("\n[%s] thread[%x,%x] found vseg %s\n",
     1954__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
     1955#endif
     1956
     1957    // build extended pointer on local GPT
     1958    local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
     1959
     1960    // lock target PTE in local GPT and get current PPN and attributes
     1961    error = hal_gpt_lock_pte( local_gpt_xp,
     1962                              vpn,
     1963                              &attr,
     1964                              &ppn );
     1965    if( error )
     1966    {
     1967        printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n",
     1968        __FUNCTION__ , vpn , process->pid );
     1969       
     1970        return EXCP_KERNEL_PANIC;
     1971    }
     1972
     1973    // handle page fault only if PTE still unmapped after lock
     1974    if( (attr & GPT_MAPPED) == 0 )
    20301975    {
    20311976        // get reference process cluster and local pointer
     
    20331978        ref_ptr = GET_PTR( process->ref_xp );
    20341979
    2035         // build extended pointer on reference GPT and reference GPT lock
    2036         ref_gpt_xp  = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
    2037         ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
    2038 
    2039         // build extended pointer on local GPT and local GPT lock
    2040         local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
    2041         local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
    2042 
    2043         // take reference GPT lock in read mode
    2044         remote_rwlock_rd_acquire( ref_lock_xp );
    2045 
    2046         // get directly PPN & attributes from reference GPT
    2047         // this can avoids a costly RPC for a false page fault
    2048         hal_gpt_get_pte( ref_gpt_xp,
    2049                          vpn,
    2050                          &ref_attr,
    2051                          &ref_ppn );
    2052 
    2053         // release reference GPT lock in read mode
    2054         remote_rwlock_rd_release( ref_lock_xp );
    2055 
    2056         if( ref_attr & GPT_MAPPED )        // false page fault => update local GPT
     1980        // private vseg or (local == reference) => access only the local GPT
     1981        if( (vseg->type == VSEG_TYPE_STACK) ||
     1982            (vseg->type == VSEG_TYPE_CODE)  ||
     1983            (ref_cxy    == local_cxy ) )
    20571984        {
    2058             // take local GPT lock in write mode
    2059             remote_rwlock_wr_acquire( local_lock_xp );
    2060            
    2061             // check VPN still unmapped in local GPT
    2062             hal_gpt_get_pte( local_gpt_xp,
     1985            // allocate and initialise a physical page depending on the vseg type
     1986            error = vmm_get_one_ppn( vseg , vpn , &ppn );
     1987
     1988            if( error )
     1989            {
     1990                printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n",
     1991                __FUNCTION__ , process->pid , vpn );
     1992
     1993                // unlock PTE in local GPT
     1994                hal_gpt_unlock_pte( local_gpt_xp , vpn );
     1995
     1996                return EXCP_KERNEL_PANIC;
     1997            }
     1998
     1999            // define attr from vseg flags
     2000            attr = GPT_MAPPED | GPT_SMALL;
     2001            if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
     2002            if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
     2003            if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
     2004            if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
     2005
     2006            // set PTE to local GPT
     2007            hal_gpt_set_pte( local_gpt_xp,
    20632008                             vpn,
    2064                              &new_attr,
    2065                              &new_ppn );
    2066 
    2067             if( (new_attr & GPT_MAPPED) == 0 )       // VPN still unmapped
    2068             {
    2069                 // update local GPT from reference GPT
    2070                 error = hal_gpt_set_pte( local_gpt_xp,
    2071                                          vpn,
    2072                                          ref_attr,
    2073                                          ref_ppn );
    2074                 if( error )
    2075                 {
    2076                     printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n",
    2077                     __FUNCTION__ , process->pid , vpn );
    2078 
    2079                     // release local GPT lock in write mode
    2080                     remote_rwlock_wr_release( local_lock_xp );
    2081            
    2082                     return EXCP_KERNEL_PANIC;
    2083                 }
    2084             }
    2085             else    // VPN has been mapped by a a concurrent page_fault
    2086             {
    2087                 // keep PTE from local GPT
    2088                 ref_attr = new_attr;
    2089                 ref_ppn  = new_ppn;
    2090             }
    2091 
    2092             // release local GPT lock in write mode
    2093             remote_rwlock_wr_release( local_lock_xp );
    2094            
     2009                             attr,
     2010                             ppn );
     2011
     2012#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
     2013uint32_t end_cycle = (uint32_t)hal_get_cycles();
     2014#endif
     2015
    20952016#if DEBUG_VMM_HANDLE_PAGE_FAULT
    2096 cycle = (uint32_t)hal_get_cycles();
    2097 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    2098 printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
    2099 __FUNCTION__, vpn, ref_ppn, ref_attr, cycle );
     2017if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle )
     2018printk("\n[%s] local page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
     2019__FUNCTION__, vpn, ppn, attr, end_cycle );
     2020#endif
     2021
     2022#if CONFIG_INSTRUMENTATION_PGFAULTS
     2023this->info.local_pgfault_nr++;
     2024this->info.local_pgfault_cost += (end_cycle - start_cycle);
    21002025#endif
    21012026            return EXCP_NON_FATAL;
    2102         }
    2103         else                            // true page fault => update reference GPT
     2027
     2028        }   // end local GPT access
     2029
     2030        // public vseg and (local != reference) => access ref GPT to update local GPT
     2031        else                               
    21042032        {
    2105             // take reference GPT lock in write mode
    2106             remote_rwlock_wr_acquire( ref_lock_xp );
    2107            
    2108             // check VPN still unmapped in reference GPT
    2109             // do nothing if VPN has been mapped by a a concurrent page_fault
     2033            // build extended pointer on reference GPT
     2034            ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
     2035
     2036            // get current PPN and attributes from reference GPT
    21102037            hal_gpt_get_pte( ref_gpt_xp,
    21112038                             vpn,
     
    21132040                             &ref_ppn );
    21142041
    2115             if( (ref_attr & GPT_MAPPED) == 0 )       // VPN actually unmapped
    2116             {
     2042            if( ref_attr & GPT_MAPPED )        // false page fault => update local GPT
     2043            {
     2044                // update local GPT from reference GPT values
     2045                hal_gpt_set_pte( local_gpt_xp,
     2046                                 vpn,
     2047                                 ref_attr,
     2048                                 ref_ppn );
     2049
     2050#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
     2051uint32_t end_cycle = (uint32_t)hal_get_cycles();
     2052#endif
     2053
     2054#if DEBUG_VMM_HANDLE_PAGE_FAULT
     2055if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle )
     2056printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
     2057__FUNCTION__, vpn, ref_ppn, ref_attr, end_cycle );
     2058#endif
     2059
     2060#if CONFIG_INSTRUMENTATION_PGFAULTS
     2061this->info.false_pgfault_nr++;
     2062this->info.false_pgfault_cost += (end_cycle - start_cycle);
     2063#endif
     2064                return EXCP_NON_FATAL;
     2065            }
     2066            else                            // true page fault => update both GPTs
     2067            {
    21172068                // allocate and initialise a physical page depending on the vseg type
    2118                 error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
     2069                error = vmm_get_one_ppn( vseg , vpn , &ppn );
    21192070
    21202071                if( error )
     
    21232074                    __FUNCTION__ , process->pid , vpn );
    21242075
    2125                    // release reference GPT lock in write mode
    2126                    remote_rwlock_wr_release( ref_lock_xp );
     2076                    // unlock PTE in local GPT
     2077                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
    21272078                   
    2128                    return EXCP_KERNEL_PANIC;
     2079                    return EXCP_KERNEL_PANIC;
    21292080                }
    21302081
    2131                 // define new_attr from vseg flags
    2132                 new_attr = GPT_MAPPED | GPT_SMALL;
    2133                 if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
    2134                 if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
    2135                 if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
    2136                 if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;
    2137 
    2138                 // update reference GPT
    2139                 error = hal_gpt_set_pte( ref_gpt_xp,
    2140                                          vpn,
    2141                                          new_attr,
    2142                                          new_ppn );
    2143 
    2144                 // update local GPT (protected by reference GPT lock)
    2145                 error |= hal_gpt_set_pte( local_gpt_xp,
     2082                // lock PTE in reference GPT
     2083                error = hal_gpt_lock_pte( ref_gpt_xp,
    21462084                                          vpn,
    2147                                           new_attr,
    2148                                           new_ppn );
    2149 
     2085                                          &ref_attr,
     2086                                          &ref_ppn );
    21502087                if( error )
    21512088                {
    2152                     printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n",
    2153                     __FUNCTION__ , process->pid , vpn );
    2154 
    2155                     // release reference GPT lock in write mode
    2156                     remote_rwlock_wr_release( ref_lock_xp );
    2157 
     2089                    printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
     2090                    __FUNCTION__ , vpn , process->pid );
     2091       
     2092                    // unlock PTE in local GPT
     2093                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
     2094                   
    21582095                    return EXCP_KERNEL_PANIC;
    21592096                }
     2097
     2098                // define attr from vseg flags
     2099                attr = GPT_MAPPED | GPT_SMALL;
     2100                if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
     2101                if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
     2102                if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
     2103                if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
     2104
     2105                // set PTE in reference GPT
     2106                hal_gpt_set_pte( ref_gpt_xp,
     2107                                 vpn,
     2108                                 attr,
     2109                                 ppn );
     2110
     2111                // set PTE in local GPT
     2112                hal_gpt_set_pte( local_gpt_xp,
     2113                                 vpn,
     2114                                 attr,
     2115                                 ppn );
     2116
     2117#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
     2118uint32_t end_cycle = (uint32_t)hal_get_cycles();
     2119#endif
     2120
     2121#if DEBUG_VMM_HANDLE_PAGE_FAULT
     2122if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle )
     2123printk("\n[%s] global page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
     2124__FUNCTION__, vpn, ppn, attr, end_cycle );
     2125#endif
     2126
     2127#if CONFIG_INSTRUMENTATION_PGFAULTS
     2128this->info.global_pgfault_nr++;
     2129this->info.global_pgfault_cost += (end_cycle - start_cycle);
     2130#endif
     2131                return EXCP_NON_FATAL;
    21602132            }
    2161 
    2162             // release reference GPT lock in write mode
    2163             remote_rwlock_wr_release( ref_lock_xp );
    2164 
    2165 #if DEBUG_VMM_HANDLE_PAGE_FAULT
    2166 cycle = (uint32_t)hal_get_cycles();
    2167 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    2168 printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
    2169 __FUNCTION__, vpn, new_ppn, new_attr, cycle );
    2170 #endif
    2171             return EXCP_NON_FATAL;
    21722133        }
    21732134    }
     2135    else   // page has been locally mapped by another concurrent thread
     2136    {
     2137        // unlock PTE in local GPT
     2138        hal_gpt_unlock_pte( local_gpt_xp , vpn );
     2139
     2140        return EXCP_NON_FATAL;
     2141    }
     2142
    21742143}   // end vmm_handle_page_fault()
    21752144
     
    21792148{
    21802149    vseg_t         * vseg;            // vseg containing vpn
    2181     cxy_t            ref_cxy;         // reference cluster for missing vpn
    2182     process_t      * ref_ptr;         // reference process for missing vpn
    2183     xptr_t           gpt_xp;          // extended pointer on GPT
    2184     xptr_t           gpt_lock_xp;     // extended pointer on GPT lock
     2150    xptr_t           gpt_xp;          // extended pointer on GPT (local or reference)
     2151    gpt_t          * gpt_ptr;         // local pointer on GPT (local or reference)
     2152    cxy_t            gpt_cxy;         // GPT cluster identifier
    21852153    uint32_t         old_attr;        // current PTE_ATTR value
    21862154    ppn_t            old_ppn;         // current PTE_PPN value
    21872155    uint32_t         new_attr;        // new PTE_ATTR value
    21882156    ppn_t            new_ppn;         // new PTE_PPN value
     2157    cxy_t            ref_cxy;         // reference process cluster
     2158    process_t      * ref_ptr;         // local pointer on reference process
    21892159    error_t          error;
    21902160
    2191     thread_t * this = CURRENT_THREAD;
     2161    thread_t * this  = CURRENT_THREAD;
    21922162
    21932163#if DEBUG_VMM_HANDLE_COW
    2194 uint32_t   cycle   = (uint32_t)hal_get_cycles();
     2164uint32_t   cycle = (uint32_t)hal_get_cycles();
    21952165if( DEBUG_VMM_HANDLE_COW < cycle )
    21962166printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    21972167__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     2168#endif
     2169
     2170#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
    21982171hal_vmm_display( process , true );
    21992172#endif
    2200 
    2201     // access local GPT to get GPT_COW flag
    2202     bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );
    2203 
    2204     if( cow == false ) return EXCP_USER_ERROR;
    22052173
    22062174    // get local vseg
     
    22102178    if( error )
    22112179    {
    2212         printk("\n[PANIC] in %s vpn %x in thread[%x,%x] not in a registered vseg\n",
     2180        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
    22132181        __FUNCTION__, vpn, process->pid, this->trdid );
    22142182
    2215         return EXCP_KERNEL_PANIC;
    2216     }
    2217 
    2218 #if( DEBUG_VMM_HANDLE_COW & 1)
     2183        return EXCP_USER_ERROR;
     2184    }
     2185
     2186#if DEBUG_VMM_HANDLE_COW
    22192187if( DEBUG_VMM_HANDLE_COW < cycle )
    2220 printk("\n[%s] thread[%x,%x] get vseg for vpn %x\n",
    2221 __FUNCTION__, this->process->pid, this->trdid, vpn );
    2222 #endif
    2223 
    2224     // get reference GPT cluster and local pointer
     2188printk("\n[%s] thread[%x,%x] get vseg %s\n",
     2189__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
     2190#endif
     2191
     2192    // get reference process cluster and local pointer
    22252193    ref_cxy = GET_CXY( process->ref_xp );
    22262194    ref_ptr = GET_PTR( process->ref_xp );
    22272195
    2228     // build relevant extended pointers on  relevant GPT and  GPT lock
    2229     // - access local GPT for a private vseg 
    2230     // - access reference GPT for a public vseg
     2196    // build pointers on relevant GPT
     2197    // - access only local GPT for a private vseg 
     2198    // - access reference GPT and all copies for a public vseg
    22312199    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    22322200    {
    2233         gpt_xp      = XPTR( local_cxy , &process->vmm.gpt );
    2234         gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
     2201        gpt_cxy = local_cxy;
     2202        gpt_ptr = &process->vmm.gpt;
     2203        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
    22352204    }
    22362205    else
    22372206    {
    2238         gpt_xp      = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
    2239         gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
    2240     }
    2241 
    2242     // take GPT lock in write mode
    2243     remote_rwlock_wr_acquire( gpt_lock_xp );
    2244 
    2245     // get current PTE from reference GPT
    2246     hal_gpt_get_pte( gpt_xp,
    2247                      vpn,
    2248                      &old_attr,
    2249                      &old_ppn );
    2250 
    2251 #if( DEBUG_VMM_HANDLE_COW & 1)
     2207        gpt_cxy = ref_cxy;
     2208        gpt_ptr = &ref_ptr->vmm.gpt;
     2209        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
     2210    }
     2211
     2212    // lock target PTE in relevant GPT (local or reference)
     2213    error = hal_gpt_lock_pte( gpt_xp,
     2214                              vpn,
     2215                              &old_attr,
     2216                              &old_ppn );
     2217    if( error )
     2218    {
     2219        printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
     2220        __FUNCTION__ , gpt_cxy, vpn , process->pid );
     2221       
     2222        return EXCP_KERNEL_PANIC;
     2223    }
     2224
     2225#if DEBUG_VMM_HANDLE_COW
    22522226if( DEBUG_VMM_HANDLE_COW < cycle )
    22532227printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
     
    22552229#endif
    22562230
    2257     // the PTE must be mapped for a COW
    2258     if( (old_attr & GPT_MAPPED) == 0 )
    2259     {
    2260         printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n",
    2261         __FUNCTION__, vpn, process->pid );
    2262 
    2263         // release GPT lock in write mode
    2264         remote_rwlock_wr_release( gpt_lock_xp );
    2265 
    2266         return EXCP_KERNEL_PANIC;
     2231    // return user error if COW attribute not set or PTE2 unmapped
     2232    if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
     2233    {
     2234        hal_gpt_unlock_pte( gpt_xp , vpn );
     2235
     2236        return EXCP_USER_ERROR;
    22672237    }
    22682238
     
    22822252    uint32_t forks = hal_remote_l32( forks_xp );
    22832253
    2284 #if( DEBUG_VMM_HANDLE_COW & 1)
     2254#if DEBUG_VMM_HANDLE_COW
    22852255if( DEBUG_VMM_HANDLE_COW < cycle )
    22862256printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
     
    22962266        remote_busylock_release( forks_lock_xp );
    22972267
    2298         // allocate a new page
     2268        // allocate a new physical page depending on vseg type
    22992269        page_xp = vmm_page_allocate( vseg , vpn );
    23002270
     
    23042274            __FUNCTION__ , vpn, process->pid );
    23052275
    2306             // release GPT lock in write mode
    2307             remote_rwlock_wr_acquire( gpt_lock_xp );
     2276            hal_gpt_unlock_pte( gpt_xp , vpn );
    23082277
    23092278            return EXCP_KERNEL_PANIC;
     
    23132282        new_ppn = ppm_page2ppn( page_xp );
    23142283
    2315 #if( DEBUG_VMM_HANDLE_COW & 1)
     2284#if DEBUG_VMM_HANDLE_COW
    23162285if( DEBUG_VMM_HANDLE_COW < cycle )
    23172286printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
     
    23242293                           CONFIG_PPM_PAGE_SIZE );
    23252294
    2326 #if(DEBUG_VMM_HANDLE_COW & 1)
     2295#if DEBUG_VMM_HANDLE_COW
    23272296if( DEBUG_VMM_HANDLE_COW < cycle )
    23282297printk("\n[%s] thread[%x,%x] copied old page to new page\n",
     
    23442313    }
    23452314
    2346     // build new_attr : reset COW and set WRITABLE,
    2347     new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
    2348 
    2349     // update the relevant GPT
    2350     // - private vseg => update local GPT
    2351     // - public vseg => update all GPT copies
     2315    // build new_attr : set WRITABLE, reset COW, reset LOCKED
     2316    new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));
     2317
     2318    // update the relevant GPT(s)
     2319    // - private vseg => update only the local GPT
     2320    // - public vseg => update the reference GPT AND all the GPT copies
    23522321    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    23532322    {
     2323        // set the new PTE2
    23542324        hal_gpt_set_pte( gpt_xp,
    23552325                         vpn,
     
    23762346    }
    23772347
    2378     // release GPT lock in write mode
    2379     remote_rwlock_wr_release( gpt_lock_xp );
    2380 
    23812348#if DEBUG_VMM_HANDLE_COW
    23822349cycle = (uint32_t)hal_get_cycles();
  • trunk/kernel/mm/vmm.h

    r625 r629  
    106106 * 3. The GPT in the reference cluster can be directly accessed by remote threads to handle
    107107 *    false page-fault (page is mapped in the reference GPT, but the PTE copy is missing
    108  *    in the local GPT). It is also protected by a remote_rwlock.
     108 *    in the local GPT). As each PTE can be protected by a specific GPT_LOCKED attribute
     109 *    for exclusive access, it is NOT protected by a global lock.
    109110 ********************************************************************************************/
    110111
     
    115116        uint32_t         vsegs_nr;           /*! total number of local vsegs                    */
    116117
    117     remote_rwlock_t  gpt_lock;           /*! lock protecting the local GPT                  */
    118118    gpt_t            gpt;                /*! Generic Page Table (complete in reference)     */
    119119
     
    165165 * This function is called by the process_make_fork() function. It partially copies
    166166 * the content of a remote parent process VMM to the local child process VMM:
    167  * - All DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the
     167 * - The DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the
    168168 *   child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the
    169169 *   WRITABLE flag is reset and the COW flag is set.
    170  * - All CODE vsegs registered in the parent VSL are registered in the child VSL, but the
     170 * - The CODE vsegs registered in the parent VSL are registered in the child VSL, but the
    171171 *   GPT entries are not copied in the child GPT, and will be dynamically updated from
    172172 *   the .elf file when a page fault is reported.
    173  * - All FILE vsegs registered in the parent VSL are registered in the child VSL, and all
     173 * - The FILE vsegs registered in the parent VSL are registered in the child VSL, and all
    174174 *   valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
    175175 * - No STACK vseg is copied from  parent VMM to child VMM, because the child stack vseg
     
    186186
    187187/*********************************************************************************************
    188  * This function is called by the process_make_fork() function executing the fork syscall.
    189  * It set the COW flag, and reset the WRITABLE flag of all GPT entries of the DATA, MMAP,
    190  * and REMOTE vsegs of a process identified by the <process> argument.
     188 * This function is called by the process_make_fork() function to update the COW attribute
      189 * in the parent process vsegs. It sets the COW flag, and resets the WRITABLE flag of
     190 * all GPT entries of the DATA, MMAP, and REMOTE vsegs of the <process> argument.
    191191 * It must be called by a thread running in the reference cluster, that contains the complete
    192192 * VSL and GPT (use the rpc_vmm_set_cow_client() when the calling thread client is remote).
     
    201201
    202202/*********************************************************************************************
    203  * This function modifies a GPT entry identified by the <process> and <vpn> arguments
     203 * This function modifies one GPT entry identified by the <process> and <vpn> arguments
    204204 * in all clusters containing a process copy.
    205  * It must be called by a thread running in the reference cluster.
    206  * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
    207  * using the list of copies stored in the owner process, and using remote_write accesses to
    208  * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated.
     205 * It must be called by a thread running in the process owner cluster.
     206 * It is used to update to maintain coherence in GPT copies, using the list of copies
     207 * stored in the owner process, and uses remote_write accesses.
     208 * It cannot fail, as only mapped PTE2 in GPT copies are updated.
    209209 *********************************************************************************************
    210210 * @ process   : local pointer on local process descriptor.
     
    373373 *    is mapped in the reference GPT, but not in the local GPT. For this false page-fault,
    374374 *    the local GPT is simply updated from the reference GPT.
    375  * 3) if the missing VPN is public, and unmapped in the reference GPT, it's a true page fault.
     375 * 3) if the missing VPN is public, and unmapped in the ref GPT, it is a true page fault.
    376376 *    The calling thread  allocates a new physical page, computes the attributes, depending
    377377 *    on vseg type, and updates directly (without RPC) the local GPT and the reference GPT.
    378378 *    Other GPT copies  will updated on demand.
    379  * Concurrent accesses to the GPT are handled, thanks to the
    380  * remote_rwlock protecting each GPT copy.
      379 * Concurrent accesses to the GPT(s) are handled by locking the target PTE before accessing
     380 * the local and/or reference GPT(s).
    381381 *********************************************************************************************
    382382 * @ process  : local pointer on local process.
     
    392392 * It returns a kernel panic if VPN is not in a registered vseg or is not mapped.
     393393 * For a legal mapped vseg there are two cases:
    394  * 1) If the missing VPN belongs to a private vseg (STACK or CODE segment types, non
    395  *    replicated in all clusters), it access the local GPT to get the current PPN and ATTR.
     394 * 1) If the missing VPN belongs to a private vseg (STACK), it access only the local GPT.
    396395 *    It access the forks counter in the current physical page descriptor.
    397396 *    If there is a pending fork, it allocates a new physical page from the cluster defined
     
    399398 *    and decrements the pending_fork counter in old physical page descriptor.
    400399 *    Finally, it reset the COW flag and set the WRITE flag in local GPT.
    401  * 2) If the missing VPN is public, it access the reference GPT to get the current PPN and
    402  *    ATTR. It access the forks counter in the current physical page descriptor.
      400 * 2) If the missing VPN is public, it accesses only the reference GPT.
      401 *    It accesses the forks counter in the current physical page descriptor.
    403402 *    If there is a pending fork, it allocates a new physical page from the cluster defined
    404403 *    by the vseg type, copies the old physical page content to the new physical page,
     
    406405 *    Finally it calls the vmm_global_update_pte() function to reset the COW flag and set
    407406 *    the WRITE flag in all the GPT copies, using a RPC if the reference cluster is remote.
    408  * In both cases, concurrent accesses to the GPT are protected by the remote_rwlock
    409  * atached to the GPT copy in VMM.
     407 * In both cases, concurrent accesses to the GPT are handled by locking the target PTE
     408 * before accessing the GPT.
    410409 *********************************************************************************************
    411410 * @ process   : pointer on local process descriptor copy.
  • trunk/kernel/syscalls/sys_barrier.c

    r626 r629  
    184184        }
    185185        ////////
    186         default: {
     186        default:
     187        {
    187188            assert ( false, "illegal operation type <%x>", operation );
    188189        }
Note: See TracChangeset for help on using the changeset viewer.