source: trunk/kernel/mm/vmm.c @ 635

Last change on this file since 635 was 635, checked in by alain, 5 years ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This change cures a deadlock that could occur on concurrent page-faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File size: 81.9 KB
1/*
2 * vmm.c - virtual memory manager related operations definition.
3 *
4 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
5 *           Mohamed Lamine Karaoui (2015)
6 *           Alain Greiner (2016,2017,2018,2019)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_special.h>
29#include <hal_gpt.h>
30#include <hal_vmm.h>
31#include <hal_macros.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <remote_rwlock.h>
35#include <remote_queuelock.h>
36#include <list.h>
37#include <xlist.h>
38#include <bits.h>
39#include <process.h>
40#include <thread.h>
41#include <vseg.h>
42#include <cluster.h>
43#include <scheduler.h>
44#include <vfs.h>
45#include <mapper.h>
46#include <page.h>
47#include <kmem.h>
48#include <vmm.h>
49#include <hal_exception.h>
50
51////////////////////////////////////////////////////////////////////////////////////////////
52//   Extern global variables
53////////////////////////////////////////////////////////////////////////////////////////////
54
55extern  process_t  process_zero;      // allocated in cluster.c
56
57////////////////////////////////////////////////////////////////////////////////////////////
58// This static function is called by the vmm_create_vseg() function, and implements
59// the VMM STACK specific allocator.
60////////////////////////////////////////////////////////////////////////////////////////////
61// @ vmm      : [in]  pointer on VMM.
62// @ ltid     : [in]  requested slot == local user thread identifier.
63// @ vpn_base : [out] first allocated page
64// @ vpn_size : [out] number of allocated pages
65////////////////////////////////////////////////////////////////////////////////////////////
66static void vmm_stack_alloc( vmm_t  * vmm,
67                             ltid_t   ltid,
68                             vpn_t  * vpn_base,
69                             vpn_t  * vpn_size )
70{
71
72// check ltid argument
73assert( (ltid < ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
74"slot index %d too large for a user stack vseg", ltid );
75
76    // get stack allocator pointer
77    stack_mgr_t * mgr = &vmm->stack_mgr;
78
79    // get lock on stack allocator
80    busylock_acquire( &mgr->lock );
81
82// check requested slot is available
83assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
84"slot index %d already allocated", ltid );
85
86    // update bitmap
87    bitmap_set( &mgr->bitmap , ltid );
88
89    // release lock on stack allocator
90    busylock_release( &mgr->lock );
91
92    // returns vpn_base and vpn_size (the first page of the slot is not allocated)
93    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
94    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
95
96} // end vmm_stack_alloc()
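// Worked example (hypothetical CONFIG values, for illustration only): with
// CONFIG_VMM_STACK_BASE == 0xC0000 and CONFIG_VMM_STACK_SIZE == 0x100, the
// thread with ltid == 2 owns the slot [0xC0200 , 0xC0300[ , in which the
// first page is deliberately left out of the returned interval:
//
//    vpn_base = 0xC0000 + 2 * 0x100 + 1 = 0xC0201   // first usable page
//    vpn_size = 0x100 - 1               = 0xFF      // one page less than the slot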
97
98////////////////////////////////////////////////////////////////////////////////////////////
99// This static function is called by the vmm_remove_vseg() function, and implements
100// the VMM STACK specific deallocator.
101////////////////////////////////////////////////////////////////////////////////////////////
102// @ vmm      : [in] pointer on VMM.
103// @ vseg     : [in] pointer on released vseg.
104////////////////////////////////////////////////////////////////////////////////////////////
105static void vmm_stack_free( vmm_t  * vmm,
106                            vseg_t * vseg )
107{
108    // get stack allocator pointer
109    stack_mgr_t * mgr = &vmm->stack_mgr;
110
111    // compute slot index
112    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
113
114// check index
115assert( (index < ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
116"slot index %d too large for a user stack vseg", index );
117
118// check released slot is allocated
119assert( (bitmap_state( &mgr->bitmap , index ) == true),
120"released slot index %d non allocated", index );
121
122    // get lock on stack allocator
123    busylock_acquire( &mgr->lock );
124
125    // update stacks_bitmap
126    bitmap_clear( &mgr->bitmap , index );
127
128    // release lock on stack allocator
129    busylock_release( &mgr->lock );
130
131}  // end vmm_stack_free()
132
133////////////////////////////////////////////////////////////////////////////////////////////
134// This static function is called by the vmm_create_vseg() function, and implements
135// the VMM MMAP specific allocator.
136////////////////////////////////////////////////////////////////////////////////////////////
137// @ vmm      : [in] pointer on VMM.
138// @ npages   : [in] requested number of pages.
139// @ vpn_base : [out] first allocated page.
140// @ vpn_size : [out] actual number of allocated pages.
141////////////////////////////////////////////////////////////////////////////////////////////
142static error_t vmm_mmap_alloc( vmm_t * vmm,
143                               vpn_t   npages,
144                               vpn_t * vpn_base,
145                               vpn_t * vpn_size )
146{
147    uint32_t   order;
148    xptr_t     vseg_xp;
149    vseg_t   * vseg;
150    vpn_t      base;
151    vpn_t      size;
152    vpn_t      free;
153
154#if DEBUG_VMM_MMAP_ALLOC
155thread_t * this = CURRENT_THREAD;
156uint32_t cycle = (uint32_t)hal_get_cycles();
157if( DEBUG_VMM_MMAP_ALLOC < cycle )
158printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
159__FUNCTION__, this->process->pid, this->trdid, cycle );
160#endif
161
162    // number of allocated pages must be a power of 2
163    // compute actual size and order
164    size  = POW2_ROUNDUP( npages );
165    order = bits_log2( size );
166
167    // get mmap allocator pointer
168    mmap_mgr_t * mgr = &vmm->mmap_mgr;
169
170    // build extended pointer on root of zombi_list[order]
171    xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
172
173    // take lock protecting zombi_lists
174    busylock_acquire( &mgr->lock );
175
176    // get vseg from zombi_list or from mmap zone
177    if( xlist_is_empty( root_xp ) )                   // from mmap zone
178    {
179        // check overflow (the busylock must be released before reporting failure)
180        free = mgr->first_free_vpn;
181        if( (free + size) > (mgr->vpn_base + mgr->vpn_size) ) { busylock_release( &mgr->lock ); return -1; }
182
183        // update MMAP allocator
184        mgr->first_free_vpn += size;
185
186        // compute base
187        base = free;
188    }
189    else                                              // from zombi_list
190    {
191        // get pointer on zombi vseg from zombi_list
192        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
193        vseg    = GET_PTR( vseg_xp );
194
195        // remove vseg from free-list
196        xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
197
198        // compute base
199        base = vseg->vpn_base;
200    }
201
202    // release lock
203    busylock_release( &mgr->lock );
204
205#if DEBUG_VMM_MMAP_ALLOC
206cycle = (uint32_t)hal_get_cycles();
207if( DEBUG_VMM_MMAP_ALLOC < cycle )
208printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
209__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
210#endif
211
212    // returns vpn_base, vpn_size
213    *vpn_base = base;
214    *vpn_size = size;
215    return 0;
216
217}  // end vmm_mmap_alloc()
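// Worked example (illustration): the allocated size is always rounded up to a
// power of 2, so a request for npages == 5 returns vpn_size == 8 with order == 3.
// When such a vseg is later released by vmm_mmap_free(), it is linked into
// zombi_list[3], and can be recycled as-is by any later request for 5 to 8 pages,
// since any such request rounds up to the same order.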
218
219////////////////////////////////////////////////////////////////////////////////////////////
220// This static function is called by the vmm_remove_vseg() function, and implements
221// the VMM MMAP specific deallocator.
222////////////////////////////////////////////////////////////////////////////////////////////
223// @ vmm      : [in] pointer on VMM.
224// @ vseg     : [in] pointer on released vseg.
225////////////////////////////////////////////////////////////////////////////////////////////
226static void vmm_mmap_free( vmm_t  * vmm,
227                           vseg_t * vseg )
228{
229    // get pointer on mmap allocator
230    mmap_mgr_t * mgr = &vmm->mmap_mgr;
231
232    // compute zombi_list order
233    uint32_t order = bits_log2( vseg->vpn_size );
234
235    // take lock protecting zombi lists
236    busylock_acquire( &mgr->lock );
237
238    // update relevant zombi_list
239    xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
240                     XPTR( local_cxy , &vseg->xlist ) );
241
242    // release lock
243    busylock_release( &mgr->lock );
244
245}  // end of vmm_mmap_free()
246
247////////////////////////////////////////////////////////////////////////////////////////////
248// This static function registers one vseg in the VSL of a local process descriptor.
249////////////////////////////////////////////////////////////////////////////////////////////
250// vmm       : [in] pointer on VMM.
251// vseg      : [in] pointer on vseg.
252////////////////////////////////////////////////////////////////////////////////////////////
253void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
254                             vseg_t * vseg )
255{
256    // update vseg descriptor
257    vseg->vmm = vmm;
258
259    // increment vsegs number
260    vmm->vsegs_nr++;
261
262    // add vseg in vmm list
263    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
264                    XPTR( local_cxy , &vseg->xlist ) );
265
266}  // end vmm_attach_vseg_to_vsl()
267
268////////////////////////////////////////////////////////////////////////////////////////////
269// This static function removes one vseg from the VSL of a local process descriptor.
270////////////////////////////////////////////////////////////////////////////////////////////
271// vmm       : [in] pointer on VMM.
272// vseg      : [in] pointer on vseg.
273////////////////////////////////////////////////////////////////////////////////////////////
274void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
275                               vseg_t * vseg )
276{
277    // update vseg descriptor
278    vseg->vmm = NULL;
279
280    // decrement vsegs number
281    vmm->vsegs_nr--;
282
283    // remove vseg from VSL
284    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
285
286}  // end vmm_detach_vseg_from_vsl()
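// Note on locking: neither helper above takes the VSL lock itself; the caller
// is expected to hold the VSL lock in write mode, as done in vmm_create_vseg(),
// vmm_fork_copy(), vmm_user_reset() and vmm_destroy() below.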
287
288////////////////////////////////////////////
289error_t vmm_user_init( process_t * process )
290{
291    uint32_t  i;
292
293#if DEBUG_VMM_USER_INIT
294thread_t * this = CURRENT_THREAD;
295uint32_t cycle = (uint32_t)hal_get_cycles();
296if( DEBUG_VMM_USER_INIT )
297printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", 
298__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
299#endif
300
301    // get pointer on VMM
302    vmm_t   * vmm = &process->vmm;
303
304// check UTILS zone
305assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <= 
306         (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
307         "UTILS zone too small\n" );
308
309// check STACK zone
310assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
311(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
312"STACK zone too small\n");
313
314    // initialize the lock protecting the VSL
315        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
316
317
318/*
319    // register "args" vseg in VSL
320    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
321    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
322
323    vseg_args = vmm_create_vseg( process,
324                                 VSEG_TYPE_DATA,
325                                 base,
326                                 size,
327                                 0,             // file_offset unused
328                                 0,             // file_size unused
329                                 XPTR_NULL,     // mapper_xp unused
330                                 local_cxy );
331    if( vseg_args == NULL )
332    {
333        printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ );
334        return -1;
335    }
336
337    vmm->args_vpn_base = base;
338
339    // register "envs" vseg in VSL
340    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
341    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
342
343    vseg_envs = vmm_create_vseg( process,
344                                 VSEG_TYPE_DATA,
345                                 base,
346                                 size,
347                                 0,             // file_offset unused
348                                 0,             // file_size unused
349                                 XPTR_NULL,     // mapper_xp unused
350                                 local_cxy );
351    if( vseg_envs == NULL )
352    {
353        printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ );
354        return -1;
355    }
356
357    vmm->envs_vpn_base = base;
358*/
359    // initialize STACK allocator
360    vmm->stack_mgr.bitmap   = 0;
361    vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
362    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );
363
364    // initialize MMAP allocator
365    vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
366    vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
367    vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
368    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
369    for( i = 0 ; i < 32 ; i++ )
370    {
371        xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
372    }
373
374    // initialize instrumentation counters
375        vmm->false_pgfault_nr    = 0;
376        vmm->local_pgfault_nr    = 0;
377        vmm->global_pgfault_nr   = 0;
378        vmm->false_pgfault_cost  = 0;
379        vmm->local_pgfault_cost  = 0;
380        vmm->global_pgfault_cost = 0;
381
382    hal_fence();
383
384#if DEBUG_VMM_USER_INIT
385cycle = (uint32_t)hal_get_cycles();
386if( DEBUG_VMM_USER_INIT )
387printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", 
388__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
389#endif
390
391    return 0;
392
393}  // end vmm_user_init()
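// Resulting user address-space layout (page-indexed sketch, assuming the
// CONFIG_VMM_* bases are defined in increasing order, as required by the
// assertions above):
//
//   [ UTILS (args/envs) | ELF (code/data) | HEAP / MMAP zone | STACK zone ]
//   UTILS_BASE           ELF_BASE          HEAP_BASE          STACK_BASE ... VSPACE_SIZE
//
// The MMAP allocator manages [CONFIG_VMM_HEAP_BASE , CONFIG_VMM_STACK_BASE[ ,
// and the STACK allocator manages [CONFIG_VMM_STACK_BASE , CONFIG_VMM_VSPACE_SIZE[.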
394
395//////////////////////////////////////////
396void vmm_user_reset( process_t * process )
397{
398    xptr_t       vseg_xp;
399        vseg_t     * vseg;
400    vseg_type_t  vseg_type;
401
402#if DEBUG_VMM_USER_RESET
403uint32_t   cycle;
404thread_t * this = CURRENT_THREAD;
405#endif
406
407#if (DEBUG_VMM_USER_RESET & 1 )
408cycle = (uint32_t)hal_get_cycles();
409if( DEBUG_VMM_USER_RESET < cycle )
410printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
411__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
412#endif
413
414#if (DEBUG_VMM_USER_RESET & 1 )
415if( DEBUG_VMM_USER_RESET < cycle )
416hal_vmm_display( XPTR( local_cxy , process ) , true );
417#endif
418
419    // get pointer on local VMM
420    vmm_t * vmm = &process->vmm;
421
422    // build extended pointer on VSL root and VSL lock
423    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
424    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
425
426    // take the VSL lock
427        remote_rwlock_wr_acquire( lock_xp );
428
429    // scan the VSL to delete all non-kernel vsegs
430    // (we don't use a FOREACH macro because items are deleted during the scan)
431    xptr_t   iter_xp;
432    xptr_t   next_xp;
433        for( iter_xp = hal_remote_l64( root_xp ) ; 
434         iter_xp != root_xp ;
435         iter_xp = next_xp )
436        {
437        // save extended pointer on next item in xlist
438        next_xp = hal_remote_l64( iter_xp );
439
440        // get pointers on current vseg in VSL
441        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
442        vseg      = GET_PTR( vseg_xp );
443        vseg_type = vseg->type;
444
445#if( DEBUG_VMM_USER_RESET & 1 )
446if( DEBUG_VMM_USER_RESET < cycle )
447printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
448__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
449#endif
450        // delete non kernel vseg 
451        if( (vseg_type != VSEG_TYPE_KCODE) && 
452            (vseg_type != VSEG_TYPE_KDATA) && 
453            (vseg_type != VSEG_TYPE_KDEV ) )
454        {
455            // remove vseg from VSL
456            vmm_remove_vseg( process , vseg );
457
458#if( DEBUG_VMM_USER_RESET & 1 )
459if( DEBUG_VMM_USER_RESET < cycle )
460printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
461__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
462#endif
463        }
464        else
465        {
466
467#if( DEBUG_VMM_USER_RESET & 1 )
468if( DEBUG_VMM_USER_RESET < cycle )
469printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
470__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
471#endif
472        }
473        }  // end loop on vsegs in VSL
474
475    // release the VSL lock
476        remote_rwlock_wr_release( lock_xp );
477
478// FIXME: the process copies must also be handled...
479
480#if DEBUG_VMM_USER_RESET
481cycle = (uint32_t)hal_get_cycles();
482if( DEBUG_VMM_USER_RESET < cycle )
483printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
484__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
485#endif
486
487#if (DEBUG_VMM_USER_RESET & 1 )
488if( DEBUG_VMM_USER_RESET < cycle )
489hal_vmm_display( XPTR( local_cxy , process ) , true );
490#endif
491
492}  // end vmm_user_reset()
493
494////////////////////////////////////////////////
495void vmm_global_update_pte( process_t * process,
496                            vpn_t       vpn,
497                            uint32_t    attr,
498                            ppn_t       ppn )
499{
500    xlist_entry_t * process_root_ptr;
501    xptr_t          process_root_xp;
502    xptr_t          process_iter_xp;
503
504    xptr_t          remote_process_xp;
505    cxy_t           remote_process_cxy;
506    process_t     * remote_process_ptr;
507    xptr_t          remote_gpt_xp;
508
509    pid_t           pid;
510    cxy_t           owner_cxy;
511    lpid_t          owner_lpid;
512
513#if DEBUG_VMM_UPDATE_PTE
514uint32_t cycle = (uint32_t)hal_get_cycles();
515thread_t * this = CURRENT_THREAD;
516if( DEBUG_VMM_UPDATE_PTE < cycle )
517printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / cycle %d\n",
518__FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle );
519#endif
520
521    // get extended pointer on root of process copies xlist in owner cluster
522    pid              = process->pid;
523    owner_cxy        = CXY_FROM_PID( pid );
524    owner_lpid       = LPID_FROM_PID( pid );
525    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
526    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
527
528// check local cluster is owner cluster
529assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n");
530
531    // loop on destination process copies
532    XLIST_FOREACH( process_root_xp , process_iter_xp )
533    {
534        // get cluster and local pointer on remote process
535        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
536        remote_process_ptr = GET_PTR( remote_process_xp );
537        remote_process_cxy = GET_CXY( remote_process_xp );
538
539#if (DEBUG_VMM_UPDATE_PTE & 1)
540if( DEBUG_VMM_UPDATE_PTE < cycle )
541printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
542__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
543#endif
544
545        // get extended pointer on remote gpt
546        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
547
548        // update remote GPT
549        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
550    } 
551
552#if DEBUG_VMM_UPDATE_PTE
553cycle = (uint32_t)hal_get_cycles();
554if( DEBUG_VMM_UPDATE_PTE < cycle )
555printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
556__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
557#endif
558
559#if (DEBUG_VMM_UPDATE_PTE & 1)
560hal_vmm_display( XPTR( local_cxy , process ) , true );
561#endif
562
563}  // end vmm_global_update_pte()
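// Usage sketch (assumed caller, not shown in this file): when a page-fault
// handler maps a new physical page for a given vpn, it can call this function
// from the owner cluster to propagate the new (attr , ppn) couple to the GPT
// of every process copy, keeping the replicated page tables coherent.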
564
565///////////////////////////////////////
566void vmm_set_cow( process_t * process )
567{
568    vmm_t         * vmm;
569
570    xlist_entry_t * process_root_ptr;
571    xptr_t          process_root_xp;
572    xptr_t          process_iter_xp;
573
574    xptr_t          remote_process_xp;
575    cxy_t           remote_process_cxy;
576    process_t     * remote_process_ptr;
577    xptr_t          remote_gpt_xp;
578
579    xptr_t          vseg_root_xp;
580    xptr_t          vseg_iter_xp;
581
582    xptr_t          vseg_xp;
583    vseg_t        * vseg;
584
585    pid_t           pid;
586    cxy_t           owner_cxy;
587    lpid_t          owner_lpid;
588
589    // get target process PID
590    pid = process->pid;
591
592#if DEBUG_VMM_SET_COW
593uint32_t   cycle = (uint32_t)hal_get_cycles();
594thread_t * this  = CURRENT_THREAD;
595if( DEBUG_VMM_SET_COW < cycle )
596printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
597__FUNCTION__, this->process->pid, this->trdid, pid , cycle );
598#endif
599
600#if (DEBUG_VMM_SET_COW & 1)
601if( DEBUG_VMM_SET_COW < cycle )
602hal_vmm_display( XPTR( local_cxy , process ) , true );
603#endif
604
605// check cluster is reference
606assert( (XPTR( local_cxy , process ) == process->ref_xp),
607"local cluster must be process reference cluster\n");
608
609    // get pointer on reference VMM
610    vmm = &process->vmm;
611
612    // get extended pointer on root of process copies xlist in owner cluster
613    owner_cxy        = CXY_FROM_PID( pid );
614    owner_lpid       = LPID_FROM_PID( pid );
615    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
616    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
617
618    // get extended pointer on root of vsegs xlist from reference VMM
619    vseg_root_xp  = XPTR( local_cxy , &vmm->vsegs_root ); 
620
621    // loop on target process copies
622    XLIST_FOREACH( process_root_xp , process_iter_xp )
623    {
624        // get cluster and local pointer on remote process copy
625        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
626        remote_process_ptr = GET_PTR( remote_process_xp );
627        remote_process_cxy = GET_CXY( remote_process_xp );
628
629#if (DEBUG_VMM_SET_COW & 1)
630if( DEBUG_VMM_SET_COW < cycle )
631printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n",
632__FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy );
633#endif
634
635        // get extended pointer on remote gpt
636        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
637
638        // loop on vsegs in (local) reference process VSL
639        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
640        {
641            // get pointer on vseg
642            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
643            vseg     = GET_PTR( vseg_xp );
644
645            // get vseg type, base and size
646            uint32_t type     = vseg->type;
647            vpn_t    vpn_base = vseg->vpn_base;
648            vpn_t    vpn_size = vseg->vpn_size;
649
650#if (DEBUG_VMM_SET_COW & 1)
651if( DEBUG_VMM_SET_COW < cycle )
652printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n",
653__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
654#endif
655            // only DATA, ANON and REMOTE vsegs
656            if( (type == VSEG_TYPE_DATA)  ||
657                (type == VSEG_TYPE_ANON)  ||
658                (type == VSEG_TYPE_REMOTE) )
659            {
660                vpn_t      vpn;
661                uint32_t   attr;
662                ppn_t      ppn;
663                xptr_t     page_xp;
664                cxy_t      page_cxy;
665                page_t   * page_ptr;
666                xptr_t     forks_xp;
667                xptr_t     lock_xp;
668
669                // update flags in remote GPT
670                hal_gpt_set_cow( remote_gpt_xp,
671                                 vpn_base,
672                                 vpn_size ); 
673
674                // atomically increment pending forks counter in physical pages,
675                // this is only done once, when handling the reference copy
676                if( remote_process_cxy == local_cxy )
677                {
678
679#if (DEBUG_VMM_SET_COW & 1)
680if( DEBUG_VMM_SET_COW < cycle )
681printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n",
682__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
683#endif
684                    // scan all pages in vseg
685                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
686                    {
687                        // get page attributes and PPN from reference GPT
688                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn ); 
689
690                        // atomically update pending forks counter if page is mapped
691                        if( attr & GPT_MAPPED )
692                        {
693                            // get pointers and cluster on page descriptor
694                            page_xp  = ppm_ppn2page( ppn );
695                            page_cxy = GET_CXY( page_xp );
696                            page_ptr = GET_PTR( page_xp );
697
698                            // get extended pointers on "forks" and "lock"
699                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
700                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );
701
702                            // take lock protecting "forks" counter
703                            remote_busylock_acquire( lock_xp );
704
705                            // increment "forks"
706                            hal_remote_atomic_add( forks_xp , 1 );
707
708                            // release lock protecting "forks" counter
709                            remote_busylock_release( lock_xp );
710                        }
711                    }   // end loop on vpn
712
713#if (DEBUG_VMM_SET_COW & 1)
714if( DEBUG_VMM_SET_COW < cycle )
715printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n",
716__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
717#endif
718                }   // end if local
719            }   // end if vseg type
720        }   // end loop on vsegs
721    }   // end loop on process copies
722 
723#if DEBUG_VMM_SET_COW
724cycle = (uint32_t)hal_get_cycles();
725if( DEBUG_VMM_SET_COW < cycle )
726printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
727__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
728#endif
729
730}  // end vmm_set_cow()
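// Usage sketch (assumed caller, based on the "forks" counter handled above):
// the fork path marks all DATA / ANON / REMOTE pages copy-on-write in every
// GPT copy, so that a later write by parent or child raises a page-fault;
// the fault handler can then allocate a private copy of the page and decrement
// the "forks" counter, symmetrically to the decrement in vmm_remove_vseg() below.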
731
732/////////////////////////////////////////////////
733error_t vmm_fork_copy( process_t * child_process,
734                       xptr_t      parent_process_xp )
735{
736    error_t     error;
737    cxy_t       parent_cxy;
738    process_t * parent_process;
739    vmm_t     * parent_vmm;
740    xptr_t      parent_lock_xp;
741    vmm_t     * child_vmm;
742    xptr_t      iter_xp;
743    xptr_t      parent_vseg_xp;
744    vseg_t    * parent_vseg;
745    vseg_t    * child_vseg;
746    uint32_t    type;
747    vpn_t       vpn;           
748    vpn_t       vpn_base;
749    vpn_t       vpn_size;
750    xptr_t      parent_root_xp;
751    bool_t      mapped; 
752    ppn_t       ppn;
753
754#if DEBUG_VMM_FORK_COPY
755uint32_t cycle = (uint32_t)hal_get_cycles();
756thread_t * this = CURRENT_THREAD;
757if( DEBUG_VMM_FORK_COPY < cycle )
758printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
759__FUNCTION__ , this->process->pid, this->trdid, cycle );
760#endif
761
762    // get parent process cluster and local pointer
763    parent_cxy     = GET_CXY( parent_process_xp );
764    parent_process = GET_PTR( parent_process_xp );
765
766    // get local pointers on parent and child VMM
767    parent_vmm = &parent_process->vmm; 
768    child_vmm  = &child_process->vmm;
769
770    // build extended pointer on parent VSL root and lock
771    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
772    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
773
774    // take the lock protecting the parent VSL in read mode
775    remote_rwlock_rd_acquire( parent_lock_xp );
776
777    // loop on parent VSL xlist
778    XLIST_FOREACH( parent_root_xp , iter_xp )
779    {
780        // get pointers on current parent vseg
781        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
782        parent_vseg    = GET_PTR( parent_vseg_xp );
783
784        // get vseg type
785        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );
786       
787#if DEBUG_VMM_FORK_COPY
788cycle = (uint32_t)hal_get_cycles();
789if( DEBUG_VMM_FORK_COPY < cycle )
790printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
791__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
792hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
793#endif
794
795        // all parent vsegs, except the STACK and kernel vsegs, must be copied to the child VSL
796        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
797            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
798        {
799            // allocate memory for a new child vseg
800            child_vseg = vseg_alloc();
801            if( child_vseg == NULL )   // release all allocated vsegs
802            {
803                vmm_destroy( child_process );
804                printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
805                return -1;
806            }
807
808            // copy parent vseg to child vseg
809            vseg_init_from_ref( child_vseg , parent_vseg_xp );
810
811            // build extended pointer on VSL lock
812            xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
813 
814            // take the VSL lock in write mode
815            remote_rwlock_wr_acquire( lock_xp );
816
817            // register child vseg in child VSL
818            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
819
820            // release the VSL lock
821            remote_rwlock_wr_release( lock_xp );
822
823#if DEBUG_VMM_FORK_COPY
824cycle = (uint32_t)hal_get_cycles();
825if( DEBUG_VMM_FORK_COPY < cycle )
826printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
827__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
828hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
829#endif
830            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
831            if( type != VSEG_TYPE_CODE )
832            {
833                // activate the COW for DATA, ANON, REMOTE vsegs only
834                // cow = ( type != VSEG_TYPE_FILE );
835
836                vpn_base = child_vseg->vpn_base;
837                vpn_size = child_vseg->vpn_size;
838
839                // scan pages in parent vseg
840                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
841                {
842                    error = hal_gpt_pte_copy( &child_vmm->gpt,
843                                              vpn,
844                                              XPTR( parent_cxy , &parent_vmm->gpt ),
845                                              vpn,
846                                              false,      // does not handle COW flag
847                                              &ppn,       // unused
848                                              &mapped );  // unused
849                    if( error )
850                    {
851                        vmm_destroy( child_process );
852                        printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
853                        return -1;
854                    }
855
856#if DEBUG_VMM_FORK_COPY
857cycle = (uint32_t)hal_get_cycles();
858if( DEBUG_VMM_FORK_COPY < cycle )
859printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
860__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
861#endif
862                }
863            }   // end if type != CODE
864        }   // end if not STACK and not kernel
865    }   // end loop on vsegs
866
867    // release the parent VSL lock in read mode
868    remote_rwlock_rd_release( parent_lock_xp );
869
870    // initialize the child VMM STACK allocator
871    child_vmm->stack_mgr.bitmap   = 0;
872    child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
873
874    // initialize the child VMM MMAP allocator
875    uint32_t i;
876    child_vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
877    child_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
878    child_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
879    for( i = 0 ; i < 32 ; i++ ) 
880    {
881        xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
882    }
883
884    // initialize instrumentation counters
885        child_vmm->false_pgfault_nr    = 0;
886        child_vmm->local_pgfault_nr    = 0;
887        child_vmm->global_pgfault_nr   = 0;
888        child_vmm->false_pgfault_cost  = 0;
889        child_vmm->local_pgfault_cost  = 0;
890        child_vmm->global_pgfault_cost = 0;
891
892    // copy base addresses from parent VMM to child VMM
893    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
894    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
895    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
896    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
897    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));
898
899    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));
900
901    hal_fence();
902
903#if DEBUG_VMM_FORK_COPY
904cycle = (uint32_t)hal_get_cycles();
905if( DEBUG_VMM_FORK_COPY < cycle )
906printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
907__FUNCTION__ , this->process->pid, this->trdid , cycle );
908#endif
909
910    return 0;
911
912}  // vmm_fork_copy()
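// Note (assumed calling sequence): this function only replicates the parent VSL
// and GPT into the child; making the copied data pages actually copy-on-write
// is a separate step, performed by calling vmm_set_cow() on the parent
// reference process after the copy.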
913
914///////////////////////////////////////
915void vmm_destroy( process_t * process )
916{
917    xptr_t   vseg_xp;
918        vseg_t * vseg;
919
920#if DEBUG_VMM_DESTROY
921uint32_t   cycle = (uint32_t)hal_get_cycles();
922thread_t * this  = CURRENT_THREAD;
923if( DEBUG_VMM_DESTROY < cycle )
924printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
925__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
926#endif
927
928#if (DEBUG_VMM_DESTROY & 1 )
929if( DEBUG_VMM_DESTROY < cycle )
930hal_vmm_display( XPTR( local_cxy, process ) , true );
931#endif
932
933    // get pointer on local VMM
934    vmm_t  * vmm = &process->vmm;
935
936    // build extended pointers on VSL root and VSL lock
937    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
938    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
939
940    // take the VSL lock
941    remote_rwlock_wr_acquire( vsl_lock_xp );
942
943    // scan the VSL to delete all registered vsegs
944    // (we don't use a FOREACH macro because items are deleted during the scan)
945    xptr_t  iter_xp;
946    xptr_t  next_xp;
947        for( iter_xp = hal_remote_l64( vsl_root_xp ) ; 
948         iter_xp != vsl_root_xp ;
949         iter_xp = next_xp )
950        {
951        // save extended pointer on next item in xlist
952        next_xp = hal_remote_l64( iter_xp );
953
954        // get pointers on current vseg in VSL
955        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
956        vseg      = GET_PTR( vseg_xp );
957
958        // delete vseg and release physical pages
959        vmm_remove_vseg( process , vseg );
960
961#if( DEBUG_VMM_DESTROY & 1 )
962if( DEBUG_VMM_DESTROY < cycle )
963printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
964__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
965#endif
966
967        }
968
969    // release the VSL lock
970    remote_rwlock_wr_release( vsl_lock_xp );
971
972    // remove all registered MMAP vsegs
973    // from zombi_lists in MMAP allocator
974    uint32_t i;
975    for( i = 0 ; i<32 ; i++ )
976    {
977        // build extended pointer on zombi_list[i]
978        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
979 
980        // scan zombi_list[i]
981            while( !xlist_is_empty( root_xp ) )
982            {
983                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
984            vseg    = GET_PTR( vseg_xp );
985
986#if( DEBUG_VMM_DESTROY & 1 )
987if( DEBUG_VMM_DESTROY < cycle )
988printk("\n[%s] found zombi vseg / vpn_base %x / vpn_size %d\n",
989__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
990#endif
991            // clean vseg descriptor
992            vseg->vmm = NULL;
993
994            // remove vseg from  zombi_list
995            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
996
997                    // release vseg descriptor
998            vseg_free( vseg );
999
1000#if( DEBUG_VMM_DESTROY & 1 )
1001if( DEBUG_VMM_DESTROY < cycle )
1002printk("\n[%s] zombi vseg released / vpn_base %x / vpn_size %d\n",
1003__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1004#endif
1005            }
1006    }
1007
1008    // release memory allocated to the GPT itself
1009    hal_gpt_destroy( &vmm->gpt );
1010
1011#if DEBUG_VMM_DESTROY
1012cycle = (uint32_t)hal_get_cycles();
1013if( DEBUG_VMM_DESTROY < cycle )
1014printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
1015__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
1016#endif
1017
1018}  // end vmm_destroy()
1019
1020/////////////////////////////////////////////////
1021vseg_t * vmm_check_conflict( process_t * process,
1022                             vpn_t       vpn_base,
1023                             vpn_t       vpn_size )
1024{
1025    vmm_t        * vmm = &process->vmm;
1026
1027    // scan the VSL
1028        vseg_t       * vseg;
1029    xptr_t         iter_xp;
1030    xptr_t         vseg_xp;
1031    xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
1032
1033        XLIST_FOREACH( root_xp , iter_xp )
1034        {
1035                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
1036        vseg    = GET_PTR( vseg_xp );
1037
1038                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
1039             (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
1040        }
1041    return NULL;
1042
1043}  // end vmm_check_conflict()
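// The test above is the standard overlap check for half-open intervals
// [vpn_base , vpn_base + vpn_size[ : two intervals overlap if and only if each
// one starts before the other one ends. For instance a new vseg [10,20[
// conflicts with an existing vseg [15,25[ , but not with an existing [20,30[ .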
1044
1045
1046
1047////////////////////////////////////////////////
1048vseg_t * vmm_create_vseg( process_t   * process,
1049                          vseg_type_t   type,
1050                          intptr_t      base,         // ltid for VSEG_TYPE_STACK
1051                          uint32_t      size,
1052                          uint32_t      file_offset,
1053                          uint32_t      file_size,
1054                          xptr_t        mapper_xp,
1055                          cxy_t         cxy )
1056{
1057    vseg_t     * vseg;          // created vseg pointer
1058    vpn_t        vpn_base;      // first page index
1059    vpn_t        vpn_size;      // number of pages covered by vseg
1060        error_t      error;
1061
1062#if DEBUG_VMM_CREATE_VSEG
1063thread_t * this  = CURRENT_THREAD;
1064uint32_t   cycle = (uint32_t)hal_get_cycles();
1065if( DEBUG_VMM_CREATE_VSEG < cycle )
1066printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n",
1067__FUNCTION__, this->process->pid, this->trdid,
1068process->pid, vseg_type_str(type), base, cxy, cycle );
1069#endif
1070
1071    // get pointer on VMM
1072        vmm_t * vmm    = &process->vmm;
1073
1074    // compute base, size, vpn_base, vpn_size, depending on vseg type
1075    // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs
1076
1077    if( type == VSEG_TYPE_STACK )
1078    {
1079        // get vpn_base and vpn_size from STACK allocator
1080        vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );
1081
1082        // compute vseg base and size from vpn_base and vpn_size
1083        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
1084        size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
1085    }
1086    else if( type == VSEG_TYPE_FILE )
1087    {
1088        // compute page index (in mapper) for first byte
1089        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
1090
1091        // compute page index (in mapper) for last byte
1092        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
1093
1094        // compute offset in first page
1095        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
1096
1097        // compute number of pages required in virtual space
1098        vpn_t    npages      = vpn_max - vpn_min + 1;
1099
1100        // get vpn_base and vpn_size from MMAP allocator
1101        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
1102        if( error )
1103        {
1104            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
1105                   __FUNCTION__ , process->pid , local_cxy );
1106            return NULL;
1107        }
1108
1109        // set the vseg base (not always aligned for FILE)
1110        base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; 
1111    }
1112    else if( (type == VSEG_TYPE_ANON) ||
1113             (type == VSEG_TYPE_REMOTE) )
1114    {
1115        // compute number of required pages in virtual space
1116        vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
1117        if( size & CONFIG_PPM_PAGE_MASK) npages++;
1118       
1119        // get vpn_base and vpn_size from MMAP allocator
1120        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
1121        if( error )
1122        {
1123            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
1124                   __FUNCTION__ , process->pid , local_cxy );
1125            return NULL;
1126        }
1127
1128        // set vseg base (always aligned for ANON or REMOTE)
1129        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
1130    }
1131    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
1132    {
1133        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
1134        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
1135
1136        vpn_base = vpn_min;
1137            vpn_size = vpn_max - vpn_min + 1;
1138    }
1139
1140    // check collisions
1141    vseg = vmm_check_conflict( process , vpn_base , vpn_size );
1142
1143    if( vseg != NULL )
1144    {
1145        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n"
1146               "  overlaps existing vseg [vpn_base %x / vpn_size %x]\n",
1147        __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
1148        return NULL;
1149    }
1150
1151    // allocate physical memory for vseg descriptor
1152        vseg = vseg_alloc();
1153        if( vseg == NULL )
1154        {
1155            printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",
1156        __FUNCTION__ , process->pid );
1157        return NULL;
1158        }
1159
1160#if (DEBUG_VMM_CREATE_VSEG & 1)
1161if( DEBUG_VMM_CREATE_VSEG < cycle )
1162printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",
1163__FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );
1164#endif
1165
1166    // initialize vseg descriptor
1167        vseg_init( vseg,
1168               type,
1169               base,
1170               size,
1171               vpn_base,
1172               vpn_size,
1173               file_offset,
1174               file_size,
1175               mapper_xp,
1176               cxy );
1177
1178    // build extended pointer on VSL lock
1179    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1180 
1181    // take the VSL lock in write mode
1182    remote_rwlock_wr_acquire( lock_xp );
1183
1184    // attach vseg to VSL
1185        vmm_attach_vseg_to_vsl( vmm , vseg );
1186
1187    // release the VSL lock
1188    remote_rwlock_wr_release( lock_xp );
1189
1190#if DEBUG_VMM_CREATE_VSEG
1191cycle = (uint32_t)hal_get_cycles();
1192if( DEBUG_VMM_CREATE_VSEG < cycle )
1193printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n",
1194__FUNCTION__, this->process->pid, this->trdid, 
1195process->pid, vseg_type_str(type), base, cxy, cycle );
1196#endif
1197
1198        return vseg;
1199
1200}  // vmm_create_vseg()
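// Usage sketch (hypothetical call, for illustration): for an anonymous mapping
// of <size> bytes, the base argument is ignored (the MMAP allocator chooses it),
// and the file related arguments are unused:
//
//    vseg_t * vseg = vmm_create_vseg( process,
//                                     VSEG_TYPE_ANON,
//                                     0,            // base unused for ANON
//                                     size,
//                                     0,            // file_offset unused
//                                     0,            // file_size unused
//                                     XPTR_NULL,    // mapper_xp unused
//                                     local_cxy );  // mapping cluster
//    if( vseg == NULL ) { /* report out-of-memory to the caller */ }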
1201
1202
1203//////////////////////////////////////////
1204void vmm_remove_vseg( process_t * process,
1205                      vseg_t    * vseg )
1206{
1207    vmm_t     * vmm;        // local pointer on process VMM
1208    xptr_t      gpt_xp;     // extended pointer on GPT
1209    bool_t      is_ref;     // local process is reference process
1210    uint32_t    vseg_type;  // vseg type
1211    vpn_t       vpn;        // VPN of current PTE
1212    vpn_t       vpn_min;    // VPN of first PTE
1213    vpn_t       vpn_max;    // VPN of last PTE (excluded)
1214    ppn_t       ppn;        // current PTE ppn value
1215    uint32_t    attr;       // current PTE attributes
1216    xptr_t      page_xp;    // extended pointer on page descriptor
1217    cxy_t       page_cxy;   // page descriptor cluster
1218    page_t    * page_ptr;   // page descriptor pointer
1219    xptr_t      count_xp;   // extended pointer on page refcount
1220
1221// check arguments
1222assert( (process != NULL), "process argument is NULL" );
1223assert( (vseg    != NULL), "vseg argument is NULL" );
1224
1225    // compute is_ref
1226    is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
1227
1228    // get pointers on local process VMM
1229    vmm = &process->vmm;
1230
1231    // build extended pointer on GPT
1232    gpt_xp = XPTR( local_cxy , &vmm->gpt );
1233
1234    // get relevant vseg infos
1235    vseg_type = vseg->type;
1236    vpn_min   = vseg->vpn_base;
1237    vpn_max   = vpn_min + vseg->vpn_size;
1238
1239#if DEBUG_VMM_REMOVE_VSEG
1240uint32_t   cycle = (uint32_t)hal_get_cycles();
1241thread_t * this  = CURRENT_THREAD;
1242if( DEBUG_VMM_REMOVE_VSEG < cycle )
1243printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
1244__FUNCTION__, this->process->pid, this->trdid, 
1245process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
1246#endif
1247
1248    // loop on PTEs in GPT
1249        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
1250    {
1251        // get ppn and attr
1252        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
1253
1254        if( attr & GPT_MAPPED )  // PTE is mapped
1255        { 
1256
1257#if( DEBUG_VMM_REMOVE_VSEG & 1 )
1258if( DEBUG_VMM_REMOVE_VSEG < cycle )
1259printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
1260#endif
1261            // unmap GPT entry in local GPT
1262            hal_gpt_reset_pte( gpt_xp , vpn );
1263
1264            // get pointers on physical page descriptor
1265            page_xp  = ppm_ppn2page( ppn );
1266            page_cxy = GET_CXY( page_xp );
1267            page_ptr = GET_PTR( page_xp );
1268
1269            // decrement page refcount
1270            count_xp = XPTR( page_cxy , &page_ptr->refcount );
1271            hal_remote_atomic_add( count_xp , -1 );
1272
1273            // compute the ppn_release condition depending on vseg type
1274            bool_t ppn_release;
1275            if( (vseg_type == VSEG_TYPE_FILE)  ||
1276                (vseg_type == VSEG_TYPE_KCODE) || 
1277                (vseg_type == VSEG_TYPE_KDATA) || 
1278                (vseg_type == VSEG_TYPE_KDEV) )           
1279            {
1280                // no physical page release for FILE and KERNEL
1281                ppn_release = false;
1282            }
1283            else if( (vseg_type == VSEG_TYPE_CODE)  ||
1284                     (vseg_type == VSEG_TYPE_STACK) ) 
1285            {
1286                // always release physical page for private vsegs
1287                ppn_release = true;
1288            }
1289            else if( (vseg_type == VSEG_TYPE_ANON)  ||
1290                     (vseg_type == VSEG_TYPE_REMOTE) )
1291            {
1292                // release physical page if reference cluster
1293                ppn_release = is_ref;
1294            }
1295            else if( is_ref )  // vseg_type == DATA in reference cluster
1296            {
1297                // get extended pointers on forks and lock field in page descriptor
1298                xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
1299                xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
1300
1301                // take lock protecting "forks" counter
1302                remote_busylock_acquire( lock_xp );
1303
1304                // get number of pending forks from page descriptor
1305                uint32_t forks = hal_remote_l32( forks_xp );
1306
1307                // decrement pending forks counter if required
1308                if( forks )  hal_remote_atomic_add( forks_xp , -1 );
1309
1310                // release lock protecting "forks" counter
1311                remote_busylock_release( lock_xp );
1312
1313                // release physical page if forks == 0
1314                ppn_release = (forks == 0); 
1315            }
1316            else              // vseg_type == DATA not in reference cluster
1317            {
1318                // no physical page release if not in reference cluster
1319                ppn_release = false;
1320            }
1321
1322            // release physical page to relevant kmem when required
1323            if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr );
1324
1325#if( DEBUG_VMM_REMOVE_VSEG & 1 )
1326if( DEBUG_VMM_REMOVE_VSEG < cycle )
1327{
1328    if( ppn_release ) printk(" / released to kmem\n" );
1329    else              printk("\n");
1330}
1331#endif
1332        }
1333    }
1334
1335    // remove vseg from VSL
1336    vmm_detach_vseg_from_vsl( vmm , vseg );
1337
1338    // release vseg descriptor depending on vseg type
1339    if( vseg_type == VSEG_TYPE_STACK )
1340    {
1341        // release slot to local stack allocator
1342        vmm_stack_free( vmm , vseg );
1343
1344        // release vseg descriptor to local kmem
1345        vseg_free( vseg );
1346    }
1347    else if( (vseg_type == VSEG_TYPE_ANON) || 
1348             (vseg_type == VSEG_TYPE_FILE) || 
1349             (vseg_type == VSEG_TYPE_REMOTE) ) 
1350    {
1351        // release vseg to local mmap allocator
1352        vmm_mmap_free( vmm , vseg );
1353    }
1354    else
1355    {
1356        // release vseg descriptor to local kmem
1357        vseg_free( vseg );
1358    }
1359
1360#if DEBUG_VMM_REMOVE_VSEG
1361cycle = (uint32_t)hal_get_cycles();
1362if( DEBUG_VMM_REMOVE_VSEG < cycle )
1363printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
1364__FUNCTION__, this->process->pid, this->trdid, 
1365process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
1366#endif
1367
1368}  // end vmm_remove_vseg()
1369
1370
1371///////////////////////////////////
1372void vmm_delete_vseg( pid_t    pid,
1373                      intptr_t vaddr )
1374{
1375    process_t * process;    // local pointer on local process
1376    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
1377
1378    // get local pointer on local process descriptor
1379    process = cluster_get_local_process_from_pid( pid );
1380
1381    if( process == NULL )
1382    {
1383        printk("\n[WARNING] in %s : cannot get local process descriptor\n",
1384        __FUNCTION__ );
1385        return;
1386    }
1387
1388    // get local pointer on local vseg containing vaddr
1389    vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
1390
1391    if( vseg == NULL )
1392    {
1393        printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
1394        __FUNCTION__ );
1395        return;
1396    }
1397
1398    // call relevant function
1399    vmm_remove_vseg( process , vseg );
1400
1401}  // end vmm_delete_vseg
1402
1403
1404/////////////////////////////////////////////
1405vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
1406                              intptr_t   vaddr )
1407{
1408    xptr_t   vseg_xp;
1409    vseg_t * vseg;
1410    xptr_t   iter_xp;
1411
1412    // get extended pointers on VSL lock and root
1413    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1414    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
1415
1416    // get lock protecting the VSL
1417    remote_rwlock_rd_acquire( lock_xp );
1418
1419    // scan the list of vsegs in VSL
1420    XLIST_FOREACH( root_xp , iter_xp )
1421    {
1422        // get pointers on vseg
1423        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
1424        vseg    = GET_PTR( vseg_xp );
1425
1426        // return success when match
1427        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
1428        { 
1429            // return success
1430            remote_rwlock_rd_release( lock_xp );
1431            return vseg;
1432        }
1433    }
1434
1435    // return failure
1436    remote_rwlock_rd_release( lock_xp );
1437    return NULL;
1438
1439}  // end vmm_vseg_from_vaddr()
1440
1441/////////////////////////////////////////////
1442error_t vmm_resize_vseg( process_t * process,
1443                         intptr_t    base,
1444                         intptr_t    size )
1445{
1446    error_t   error;
1447    vseg_t  * new;
1448    vpn_t     vpn_min;
1449    vpn_t     vpn_max;
1450
1451#if DEBUG_VMM_RESIZE_VSEG
1452uint32_t   cycle = (uint32_t)hal_get_cycles();
1453thread_t * this  = CURRENT_THREAD;
1454if( DEBUG_VMM_RESIZE_VSEG < cycle )
1455printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
1456__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
1457#endif
1458
1459    // get pointer on process VMM
1460    vmm_t * vmm = &process->vmm;
1461
1462    intptr_t addr_min = base;
1463        intptr_t addr_max = base + size;
1464
1465    // get pointer on vseg
1466        vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
1467
1468        if( vseg == NULL)
1469    {
1470        printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
1471        __FUNCTION__, base , size );
1472        return -1;
1473    }
1474
1475    // resize depends on unmapped region base and size
1476        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
1477    {
1478        printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
1479        __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1480
1481        error = -1;
1482    }
1483        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
1484    {
1485
1486#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1487if( DEBUG_VMM_RESIZE_VSEG < cycle )
1488printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
1489__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1490#endif
1491        vmm_delete_vseg( process->pid , vseg->min );
1492
1493#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1494if( DEBUG_VMM_RESIZE_VSEG < cycle )
1495printk("\n[%s] thread[%x,%x] deleted vseg\n",
1496__FUNCTION__, this->process->pid, this->trdid );
1497#endif
1498        error = 0;
1499    }
1500    else if( vseg->min == addr_min )                               // vseg must be resized
1501    {
1502
1503#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1504if( DEBUG_VMM_RESIZE_VSEG < cycle )
1505printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
1506__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1507#endif
1508        // update vseg min address
1509        vseg->min = addr_max;
1510
1511        // update vpn_base and vpn_size
1512        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
1513        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
1514        vseg->vpn_base = vpn_min;
1515        vseg->vpn_size = vpn_max - vpn_min + 1;
1516
1517#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1518if( DEBUG_VMM_RESIZE_VSEG < cycle )
1519printk("\n[%s] thread[%x,%x] changed vseg_min\n",
1520__FUNCTION__, this->process->pid, this->trdid );
1521#endif
1522        error = 0;
1523    }
1524    else if( vseg->max == addr_max )                              // vseg must be resized
1525    {
1526
1527#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1528if( DEBUG_VMM_RESIZE_VSEG < cycle )
1529printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
1530__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1531#endif
1532        // update vseg max address
1533        vseg->max = addr_min;
1534
1535        // update vpn_base and vpn_size
1536        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
1537        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
1538        vseg->vpn_base = vpn_min;
1539        vseg->vpn_size = vpn_max - vpn_min + 1;
1540
1541#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1542if( DEBUG_VMM_RESIZE_VSEG < cycle )
1543printk("\n[%s] thread[%x,%x] changed vseg_max\n",
1544__FUNCTION__, this->process->pid, this->trdid );
1545#endif
1546        error = 0;
1547
1548    }
1549    else                                                          // vseg cut into three regions
1550    {
1551
1552#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1553if( DEBUG_VMM_RESIZE_VSEG < cycle )
1554printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
1555__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1556#endif
1557        // save initial end address, then resize existing vseg to the lower region
1558        intptr_t old_max = vseg->max;
1559        vseg->max = addr_min;
1560        // update vpn_base and vpn_size
1561        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
1562        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
1563        vseg->vpn_base = vpn_min;
1564        vseg->vpn_size = vpn_max - vpn_min + 1;
1565
1566        // create new vseg covering the upper region [addr_max , old_max[
1567        new = vmm_create_vseg( process, 
1568                               vseg->type,
1569                               addr_max, 
1570                               (old_max - addr_max),
1571                               vseg->file_offset,
1572                               vseg->file_size,
1573                               vseg->mapper_xp,
1574                               vseg->cxy ); 
1575
1576#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1577if( DEBUG_VMM_RESIZE_VSEG < cycle )
1578printk("\n[%s] thread[%x,%x] replaced vseg by two smaller vsegs\n",
1579__FUNCTION__, this->process->pid, this->trdid );
1580#endif
1581
1582        if( new == NULL ) error = -1;
1583        else              error = 0;
1584    }
1585
1586#if DEBUG_VMM_RESIZE_VSEG
1587if( DEBUG_VMM_RESIZE_VSEG < cycle )
1588printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
1589__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
1590#endif
1591
1592    return error;
1593
1594}  // vmm_resize_vseg()
1595
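////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of the kernel source) : the vpn_base/vpn_size
// update repeated in the resize cases above, factored as one helper. Worked example,
// assuming 4 Kbytes pages (page_shift == 12) : a vseg covering [0x3000 , 0x7800[ gives
// vpn_base == 3 and vpn_size == 5, because the last, partially used page (vpn 7) counts.
////////////////////////////////////////////////////////////////////////////////////////////
static inline void demo_update_vpn_range( intptr_t   min,         // vseg base address
                                          intptr_t   max,         // vseg end address (excluded)
                                          uint32_t   page_shift,  // log2( page size )
                                          vpn_t    * vpn_base,    // [out] first page index
                                          vpn_t    * vpn_size )   // [out] number of pages
{
    vpn_t vpn_min = min >> page_shift;          // page containing the first byte
    vpn_t vpn_max = (max - 1) >> page_shift;    // page containing the last byte

    *vpn_base = vpn_min;
    *vpn_size = vpn_max - vpn_min + 1;
}
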
1596///////////////////////////////////////////
1597error_t  vmm_get_vseg( process_t * process,
1598                       intptr_t    vaddr,
1599                       vseg_t   ** found_vseg )
1600{
1601    xptr_t    vseg_xp;
1602    vseg_t  * vseg;
1603    vmm_t   * vmm;
1604    error_t   error;
1605
1606    // get pointer on local VMM
1607    vmm = &process->vmm;
1608
1609    // try to get vseg from local VMM
1610    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
1611
1612    if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
1613    {
1614        // get extended pointer on reference process
1615        xptr_t ref_xp = process->ref_xp;
1616
1617        // get cluster and local pointer on reference process
1618        cxy_t       ref_cxy = GET_CXY( ref_xp );
1619        process_t * ref_ptr = GET_PTR( ref_xp );
1620
1621        if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
1622
1623        // get extended pointer on reference vseg
1624        rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
1625           
1626        if( error )   return -1;                // vseg not found => illegal user vaddr
1627       
1628        // allocate a vseg in local cluster
1629        vseg = vseg_alloc();
1630
1631        if( vseg == NULL ) return -1;           // cannot allocate a local vseg
1632
1633        // initialise local vseg from reference
1634        vseg_init_from_ref( vseg , vseg_xp );
1635
1636        // build extended pointer on VSL lock
1637        xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1638 
1639        // take the VSL lock in write mode
1640        remote_rwlock_wr_acquire( lock_xp );
1641
1642        // register local vseg in local VSL
1643        vmm_attach_vseg_to_vsl( vmm , vseg );
1644 
1645        // release the VSL lock
1646        remote_rwlock_wr_release( lock_xp );
1647    }   
1648
1649    // success
1650    *found_vseg = vseg;
1651    return 0;
1652
1653}  // end vmm_get_vseg()
1654
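////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of the kernel source) : the decision logic of
// vmm_get_vseg() above, reduced to three predicates. The subtle point is the second test:
// when the local cluster IS the reference cluster, a local VSL miss is final and no RPC
// is attempted. All "demo_" names are hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
typedef enum
{
    DEMO_HIT_LOCAL,    // vseg found in local VSL
    DEMO_HIT_REF,      // vseg fetched from reference VSL and cloned locally
    DEMO_MISS          // vaddr does not belong to any registered vseg
} demo_lookup_t;

static inline demo_lookup_t demo_vseg_lookup_path( bool found_local,   // hit in local VSL
                                                   bool local_is_ref,  // local == reference
                                                   bool found_ref )    // hit in reference VSL
{
    if( found_local )  return DEMO_HIT_LOCAL;
    if( local_is_ref ) return DEMO_MISS;        // local VSL is authoritative
    if( found_ref )    return DEMO_HIT_REF;     // clone into local VSL before returning
    return DEMO_MISS;
}
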
1655//////////////////////////////////////////////////////////////////////////////////////
1656// This static function computes the target cluster used to allocate a physical page
1657// for a given <vpn> in a given <vseg>, allocates the page, and returns an extended
1658// pointer on the allocated page descriptor.
1659// The vseg cannot have the FILE type.
1660//////////////////////////////////////////////////////////////////////////////////////
1661static xptr_t vmm_page_allocate( vseg_t * vseg,
1662                                 vpn_t    vpn )
1663{
1664
1665#if DEBUG_VMM_PAGE_ALLOCATE
1666uint32_t   cycle   = (uint32_t)hal_get_cycles();
1667thread_t * this    = CURRENT_THREAD;
1668if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
1669printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
1670__FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
1671#endif
1672
1673    xptr_t       page_xp;
1674    cxy_t        page_cxy;
1675    page_t     * page_ptr;
1676    uint32_t     index;
1677
1678    uint32_t     type   = vseg->type;
1679    uint32_t     flags  = vseg->flags;
1680    uint32_t     x_size = LOCAL_CLUSTER->x_size;
1681    uint32_t     y_size = LOCAL_CLUSTER->y_size;
1682
1683// check vseg type
1684assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
1685
1686    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
1687    {
1688        index    = vpn & ((x_size * y_size) - 1);
1689        page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );
1690
1691        // if the cluster selected from the vpn LSBs is not active, select one randomly
1692        if ( cluster_is_active( page_cxy ) == false )
1693        {
1694            page_cxy = cluster_random_select();
1695        }
1696    }
1697    else                          // other cases => cxy specified in vseg
1698    {
1699        page_cxy = vseg->cxy;
1700    }
1701
1702    // allocate one small physical page from target cluster / return XPTR_NULL on failure
1703    page_ptr = ppm_remote_alloc_pages( page_cxy , 0 );
1704
1705    page_xp = (page_ptr == NULL) ? XPTR_NULL : XPTR( page_cxy , page_ptr );
1706
1707#if DEBUG_VMM_PAGE_ALLOCATE
1708cycle = (uint32_t)hal_get_cycles();
1709if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
1710printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
1711__FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle );
1712#endif
1713
1714    return page_xp;
1715
1716}  // end vmm_page_allocate() 
1717
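////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of the kernel source) : the VSEG_DISTRIB
// placement policy used above. The mask only works if (x_size * y_size) is a power of 2,
// which is assumed here. The packing of (x,y) into a cxy_t is done by the hardware
// specific HAL_CXY_FROM_XY() macro and is left abstract. With x_size == y_size == 2,
// successive vpn values 0,1,2,3,4,... map to clusters (0,0) (0,1) (1,0) (1,1) (0,0) ...
////////////////////////////////////////////////////////////////////////////////////////////
static inline void demo_distrib_target( vpn_t      vpn,      // virtual page number
                                        uint32_t   x_size,   // number of clusters along X
                                        uint32_t   y_size,   // number of clusters along Y
                                        uint32_t * x,        // [out] target X coordinate
                                        uint32_t * y )       // [out] target Y coordinate
{
    uint32_t index = vpn & ((x_size * y_size) - 1);    // vpn modulo number of clusters

    *x = index / y_size;
    *y = index % y_size;
}
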
1718////////////////////////////////////////
1719error_t vmm_get_one_ppn( vseg_t * vseg,
1720                         vpn_t    vpn,
1721                         ppn_t  * ppn )
1722{
1723    error_t    error;
1724    xptr_t     page_xp;           // extended pointer on physical page descriptor
1725    uint32_t   page_id;           // missing page index in vseg mapper
1726    uint32_t   type;              // vseg type
1727
1728    type      = vseg->type;
1729    page_id   = vpn - vseg->vpn_base;
1730
1731#if DEBUG_VMM_GET_ONE_PPN
1732uint32_t   cycle = (uint32_t)hal_get_cycles();
1733thread_t * this  = CURRENT_THREAD;
1734if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
1735printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id  %d / cycle %d\n",
1736__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
1737#endif
1738
1739    // FILE type : get the physical page from the file mapper
1740    if( type == VSEG_TYPE_FILE )
1741    {
1742        // get extended pointer on mapper
1743        xptr_t mapper_xp = vseg->mapper_xp;
1744
1745assert( (mapper_xp != XPTR_NULL),
1746"mapper not defined for a FILE vseg\n" );
1747       
1748        // get extended pointer on page descriptor
1749        page_xp = mapper_remote_get_page( mapper_xp , page_id );
1750
1751        if ( page_xp == XPTR_NULL ) return EINVAL;
1752    }
1753
1754    // Other types : allocate a physical page from target cluster,
1755    // as defined by vseg type and vpn value
1756    else
1757    {
1758        // allocate one physical page
1759        page_xp = vmm_page_allocate( vseg , vpn );
1760
1761        if( page_xp == XPTR_NULL ) return -1;
1762
1763        // initialise missing page from .elf file mapper for DATA and CODE types
1764        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
1765        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
1766        {
1767            // get extended pointer on mapper
1768            xptr_t     mapper_xp = vseg->mapper_xp;
1769
1770assert( (mapper_xp != XPTR_NULL),
1771"mapper not defined for a CODE or DATA vseg\n" );
1772       
1773            // compute missing page offset in vseg
1774            uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;
1775
1776            // compute missing page offset in .elf file
1777            uint32_t elf_offset = vseg->file_offset + offset;
1778
1779#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
1780if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
1781printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
1782__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
1783#endif
1784            // compute extended pointer on page base
1785            xptr_t base_xp  = ppm_page2base( page_xp );
1786
1787            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
1788            uint32_t file_size = vseg->file_size;
1789
1790            if( file_size < offset )                 // missing page fully in BSS
1791            {
1792
1793#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
1794if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
1795printk("\n[%s] thread[%x,%x] for vpn  %x / fully in BSS\n",
1796__FUNCTION__, this->process->pid, this->trdid, vpn );
1797#endif
1798                if( GET_CXY( page_xp ) == local_cxy )
1799                {
1800                    memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE );
1801                }
1802                else
1803                {
1804                   hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );       
1805                }
1806            }
1807            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )  // fully in mapper
1808            {
1809
1810#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
1811if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
1812printk("\n[%s] thread[%x,%x] for vpn  %x / fully in mapper\n",
1813__FUNCTION__, this->process->pid, this->trdid, vpn );
1814#endif
1815                error = mapper_move_kernel( mapper_xp,
1816                                            true,             // to_buffer
1817                                            elf_offset,
1818                                            base_xp,
1819                                            CONFIG_PPM_PAGE_SIZE ); 
1820                if( error ) return EINVAL;
1821            }
1822            else  // both in mapper and in BSS :
1823                  // - (file_size - offset)             bytes from mapper
1824                  // - (page_size + offset - file_size) bytes from BSS
1825            {
1826
1827#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
1828if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
1829printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
1830"      %d bytes from mapper / %d bytes from BSS\n",
1831__FUNCTION__, this->process->pid, this->trdid, vpn,
1832file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
1833#endif
1834                // initialize mapper part
1835                error = mapper_move_kernel( mapper_xp,
1836                                            true,         // to buffer
1837                                            elf_offset,
1838                                            base_xp,
1839                                            file_size - offset ); 
1840                if( error ) return EINVAL;
1841
1842                // initialize BSS part
1843                if( GET_CXY( page_xp ) == local_cxy )
1844                {
1845                    memset( GET_PTR( base_xp ) + file_size - offset , 0 , 
1846                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
1847                }
1848                else
1849                {
1850                   hal_remote_memset( base_xp + file_size - offset , 0 , 
1851                                      offset + CONFIG_PPM_PAGE_SIZE - file_size );
1852                }
1853            }   
1854        }  // end initialisation for CODE or DATA types   
1855    } 
1856
1857    // return ppn
1858    *ppn = ppm_page2ppn( page_xp );
1859
1860#if DEBUG_VMM_GET_ONE_PPN
1861cycle = (uint32_t)hal_get_cycles();
1862if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
1863printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
1864__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
1865#endif
1866
1867    return 0;
1868
1869}  // end vmm_get_one_ppn()
1870
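////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of the kernel source) : the three CODE/DATA
// initialisation cases above, reduced to one pure computation. It returns the number of
// bytes to copy from the .elf mapper for the page starting at <offset> in the vseg ; the
// remaining (page_size - result) bytes belong to the BSS and must be zeroed.
// Example : offset == 0x3000 / file_size == 0x3400 / page_size == 0x1000
//           --> 0x400 bytes copied from the mapper and 0xC00 bytes zeroed.
////////////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t demo_bytes_from_mapper( uint32_t offset,      // page offset in vseg
                                               uint32_t file_size,   // bytes backed by file
                                               uint32_t page_size )  // bytes per page
{
    if( file_size < offset )                 return 0;          // page fully in BSS
    if( file_size >= (offset + page_size) )  return page_size;  // page fully in mapper
    return (file_size - offset);                                // page split mapper / BSS
}
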
1871///////////////////////////////////////////////////
1872error_t vmm_handle_page_fault( process_t * process,
1873                               vpn_t       vpn )
1874{
1875    vseg_t         * vseg;            // vseg containing vpn
1876    uint32_t         attr;            // PTE_ATTR value
1877    ppn_t            ppn;             // PTE_PPN value
1878    uint32_t         ref_attr;        // PTE_ATTR value in reference GPT
1879    ppn_t            ref_ppn;         // PTE_PPN value in reference GPT
1880    cxy_t            ref_cxy;         // reference cluster for missing vpn
1881    process_t      * ref_ptr;         // reference process for missing vpn
1882    xptr_t           local_gpt_xp;    // extended pointer on local GPT
1883    xptr_t           ref_gpt_xp;      // extended pointer on reference GPT
1884    error_t          error;           // value returned by called functions
1885
1886    thread_t * this  = CURRENT_THREAD;
1887
1888#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
1889uint32_t start_cycle = (uint32_t)hal_get_cycles();
1890#endif
1891
1892#if DEBUG_VMM_HANDLE_PAGE_FAULT
1893if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
1894printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
1895__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
1896#endif
1897
1898#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
1899if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
1900hal_vmm_display( this->process , true );
1901#endif
1902
1903    // get local vseg (access to reference VSL can be required)
1904    error = vmm_get_vseg( process, 
1905                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
1906                          &vseg );
1907    if( error )
1908    {
1909        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in registered vseg\n",
1910        __FUNCTION__ , vpn , process->pid, this->trdid );
1911       
1912        return EXCP_USER_ERROR;
1913    }
1914
1915#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
1916if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
1917printk("\n[%s] thread[%x,%x] found vseg %s\n",
1918__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
1919#endif
1920
1921    // build extended pointer on local GPT
1922    local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
1923
1924    // lock PTE in local GPT and get current PPN and attributes
1925    error = hal_gpt_lock_pte( local_gpt_xp,
1926                              vpn,
1927                              &attr,
1928                              &ppn );
1929    if( error )
1930    {
1931        printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n",
1932        __FUNCTION__ , vpn , process->pid );
1933       
1934        return EXCP_KERNEL_PANIC;
1935    }
1936
1937#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
1938if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
1939printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n",
1940__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy );
1941#endif
1942
1943    // handle page fault only if local PTE still unmapped after lock
1944    if( (attr & GPT_MAPPED) == 0 )
1945    {
1946        // get reference process cluster and local pointer
1947        ref_cxy = GET_CXY( process->ref_xp );
1948        ref_ptr = GET_PTR( process->ref_xp );
1949
1950        /////////////// private vseg or (local == reference)
1951        /////////////// => access only the local GPT
1952        if( (vseg->type == VSEG_TYPE_STACK) ||
1953            (vseg->type == VSEG_TYPE_CODE)  ||
1954            (ref_cxy    == local_cxy ) )
1955        {
1956
1957#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
1958if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
1959printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
1960__FUNCTION__, this->process->pid, this->trdid,
1961local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
1962#endif
1963            // allocate and initialise a physical page
1964            error = vmm_get_one_ppn( vseg , vpn , &ppn );
1965
1966            if( error )
1967            {
1968                printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n",
1969                __FUNCTION__ , process->pid , vpn );
1970
1971                // unlock PTE in local GPT
1972                hal_gpt_unlock_pte( local_gpt_xp , vpn );
1973
1974                return EXCP_KERNEL_PANIC;
1975            }
1976
1977            // define attr from vseg flags
1978            attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
1979            if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
1980            if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
1981            if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
1982            if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
1983
1984            // set PTE in local GPT
1985            // this also unlocks the PTE
1986            hal_gpt_set_pte( local_gpt_xp,
1987                             vpn,
1988                             attr,
1989                             ppn );
1990
1991#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
1992uint32_t end_cycle = (uint32_t)hal_get_cycles();
1993#endif
1994
1995#if DEBUG_VMM_HANDLE_PAGE_FAULT
1996if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
1997printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n",
1998__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
1999#endif
2000
2001#if CONFIG_INSTRUMENTATION_PGFAULTS
2002this->info.local_pgfault_nr++;
2003this->info.local_pgfault_cost += (end_cycle - start_cycle);
2004#endif
2005            return EXCP_NON_FATAL;
2006
2007        }   // end local GPT access
2008
2009        /////////////////// public vseg and (local != reference)
2010        /////////////////// => access ref GPT to update local GPT
2011        else                               
2012        {
2013
2014#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2015if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2016printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
2017__FUNCTION__, this->process->pid, this->trdid, 
2018local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
2019#endif
2020            // build extended pointer on reference GPT
2021            ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
2022
2023            // lock PTE in reference GPT and get current PPN and attributes
2024            error = hal_gpt_lock_pte( ref_gpt_xp,
2025                                      vpn,
2026                                      &ref_attr,
2027                                      &ref_ppn );
2028            if( error )
2029            {
2030                printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
2031                __FUNCTION__ , vpn , process->pid );
2032       
2033                // unlock PTE in local GPT
2034                hal_gpt_unlock_pte( local_gpt_xp , vpn );
2035                   
2036                return EXCP_KERNEL_PANIC;
2037            }
2038
2039#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2040if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2041printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n",
2042__FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn );
2043#endif
2044
2045            if( ref_attr & GPT_MAPPED )        // false page fault
2046            {
2047                // update local GPT from reference GPT values
2048                // this unlocks the PTE in local GPT
2049                hal_gpt_set_pte( local_gpt_xp,
2050                                 vpn,
2051                                 ref_attr,
2052                                 ref_ppn );
2053
2054#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2055if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2056printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n",
2057__FUNCTION__, this->process->pid, this->trdid );
2058#endif
2059
2060                // unlock the PTE in reference GPT
2061                hal_gpt_unlock_pte( ref_gpt_xp, vpn );
2062                             
2063#if (DEBUG_VMM_HANDLE_PAGE_FAULT &1)
2064if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2065printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n",
2066__FUNCTION__, this->process->pid, this->trdid );
2067#endif
2068
2069#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2070uint32_t end_cycle = (uint32_t)hal_get_cycles();
2071#endif
2072
2073#if DEBUG_VMM_HANDLE_PAGE_FAULT
2074if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2075printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n",
2076__FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle );
2077#endif
2078
2079#if CONFIG_INSTRUMENTATION_PGFAULTS
2080this->info.false_pgfault_nr++;
2081this->info.false_pgfault_cost += (end_cycle - start_cycle);
2082#endif
2083                return EXCP_NON_FATAL;
2084            }
2085            else                            // true page fault
2086            {
2087                // allocate and initialise a physical page depending on the vseg type
2088                error = vmm_get_one_ppn( vseg , vpn , &ppn );
2089
2090                if( error )
2091                {
2092                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
2093                    __FUNCTION__ , process->pid , vpn );
2094
2095                    // unlock PTE in local GPT and in reference GPT
2096                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
2097                    hal_gpt_unlock_pte( ref_gpt_xp   , vpn );
2098                   
2099                    return EXCP_KERNEL_PANIC;
2100                }
2101
2102                // define attr from vseg flags
2103                attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
2104                if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
2105                if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
2106                if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
2107                if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
2108
2109#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2110if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2111printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
2112__FUNCTION__, this->process->pid, this->trdid );
2113#endif
2114                // set PTE in reference GPT
2115                // this also unlocks the PTE
2116                hal_gpt_set_pte( ref_gpt_xp,
2117                                 vpn,
2118                                 attr,
2119                                 ppn );
2120
2121#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2122if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2123printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
2124__FUNCTION__, this->process->pid, this->trdid );
2125#endif
2126
2127                // set PTE in local GPT
2128                // this also unlocks the PTE
2129                hal_gpt_set_pte( local_gpt_xp,
2130                                 vpn,
2131                                 attr,
2132                                 ppn );
2133
2134#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2135uint32_t end_cycle = (uint32_t)hal_get_cycles();
2136#endif
2137
2138#if DEBUG_VMM_HANDLE_PAGE_FAULT
2139if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2140printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
2141__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
2142#endif
2143
2144#if CONFIG_INSTRUMENTATION_PGFAULTS
2145this->info.global_pgfault_nr++;
2146this->info.global_pgfault_cost += (end_cycle - start_cycle);
2147#endif
2148                return EXCP_NON_FATAL;
2149            }
2150        }
2151    }
2152    else   // page has been locally mapped by another concurrent thread
2153    {
2154        // unlock the PTE in local GPT
2155        hal_gpt_unlock_pte( local_gpt_xp , vpn );
2156
2157#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2158uint32_t end_cycle = (uint32_t)hal_get_cycles();
2159#endif
2160
2161#if DEBUG_VMM_HANDLE_PAGE_FAULT
2162if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2163printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
2164__FUNCTION__, vpn, ppn, attr, end_cycle );
2165#endif
2166
2167#if CONFIG_INSTRUMENTATION_PGFAULTS
2168this->info.false_pgfault_nr++;
2169this->info.false_pgfault_cost += (end_cycle - start_cycle);
2170#endif
2171        return EXCP_NON_FATAL;
2172    }
2173
2174}   // end vmm_handle_page_fault()
2175
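////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of the kernel source) : the GPT attribute
// construction that vmm_handle_page_fault() performs twice above (local path and true
// page-fault path), factored as a single helper. It only uses the flags and attributes
// already defined by the kernel headers.
////////////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t demo_attr_from_vseg_flags( uint32_t flags )
{
    uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;    // always set when mapping

    if( flags & VSEG_USER  ) attr |= GPT_USER;
    if( flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
    if( flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
    if( flags & VSEG_CACHE ) attr |= GPT_CACHABLE;

    return attr;
}
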
2176////////////////////////////////////////////
2177error_t vmm_handle_cow( process_t * process,
2178                        vpn_t       vpn )
2179{
2180    vseg_t         * vseg;            // vseg containing vpn
2181    xptr_t           gpt_xp;          // extended pointer on GPT (local or reference)
2182    gpt_t          * gpt_ptr;         // local pointer on GPT (local or reference)
2183    cxy_t            gpt_cxy;         // GPT cluster identifier
2184    uint32_t         old_attr;        // current PTE_ATTR value
2185    ppn_t            old_ppn;         // current PTE_PPN value
2186    uint32_t         new_attr;        // new PTE_ATTR value
2187    ppn_t            new_ppn;         // new PTE_PPN value
2188    cxy_t            ref_cxy;         // reference process cluster
2189    process_t      * ref_ptr;         // local pointer on reference process
2190    error_t          error;
2191
2192    thread_t * this  = CURRENT_THREAD;
2193
2194#if DEBUG_VMM_HANDLE_COW
2195uint32_t   cycle = (uint32_t)hal_get_cycles();
2196if( DEBUG_VMM_HANDLE_COW < cycle )
2197printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
2198__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
2199#endif
2200
2201#if ((DEBUG_VMM_HANDLE_COW & 3) == 3 )
2202hal_vmm_display( process , true );
2203#endif
2204
2205    // get local vseg
2206    error = vmm_get_vseg( process, 
2207                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
2208                          &vseg );
2209    if( error )
2210    {
2211        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
2212        __FUNCTION__, vpn, process->pid, this->trdid );
2213
2214        return EXCP_USER_ERROR;
2215    }
2216
2217#if DEBUG_VMM_HANDLE_COW
2218if( DEBUG_VMM_HANDLE_COW < cycle )
2219printk("\n[%s] thread[%x,%x] get vseg %s\n",
2220__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
2221#endif
2222
2223    // get reference process cluster and local pointer
2224    ref_cxy = GET_CXY( process->ref_xp );
2225    ref_ptr = GET_PTR( process->ref_xp );
2226
2227    // build pointers on relevant GPT
2228    // - access only local GPT for a private vseg 
2229    // - access reference GPT and all copies for a public vseg
2230    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
2231    {
2232        gpt_cxy = local_cxy;
2233        gpt_ptr = &process->vmm.gpt;
2234        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
2235    }
2236    else
2237    {
2238        gpt_cxy = ref_cxy;
2239        gpt_ptr = &ref_ptr->vmm.gpt;
2240        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
2241    }
2242
2243    // lock target PTE in relevant GPT (local or reference)
2244    // and get current PTE value
2245    error = hal_gpt_lock_pte( gpt_xp,
2246                              vpn,
2247                              &old_attr,
2248                              &old_ppn );
2249    if( error )
2250    {
2251        printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
2252        __FUNCTION__ , gpt_cxy, vpn , process->pid );
2253       
2254        return EXCP_KERNEL_PANIC;
2255    }
2256
2257#if DEBUG_VMM_HANDLE_COW
2258if( DEBUG_VMM_HANDLE_COW < cycle )
2259printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
2260__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
2261#endif
2262
2263    // return user error if COW attribute not set or PTE2 unmapped
2264    if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
2265    {
2266        hal_gpt_unlock_pte( gpt_xp , vpn );
2267
2268        return EXCP_USER_ERROR;
2269    }
2270
2271    // get pointers on physical page descriptor
2272    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
2273    cxy_t    page_cxy = GET_CXY( page_xp );
2274    page_t * page_ptr = GET_PTR( page_xp );
2275
2276    // get extended pointers on forks and lock field in page descriptor
2277    xptr_t forks_xp       = XPTR( page_cxy , &page_ptr->forks );
2278    xptr_t forks_lock_xp  = XPTR( page_cxy , &page_ptr->lock );
2279
2280    // take lock protecting "forks" counter
2281    remote_busylock_acquire( forks_lock_xp );
2282
2283    // get number of pending forks from page descriptor
2284    uint32_t forks = hal_remote_l32( forks_xp );
2285
2286#if DEBUG_VMM_HANDLE_COW
2287if( DEBUG_VMM_HANDLE_COW < cycle )
2288printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
2289__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
2290#endif
2291
2292    if( forks )        // pending fork => allocate a new page, and copy old to new
2293    {
2294        // decrement pending forks counter in page descriptor
2295        hal_remote_atomic_add( forks_xp , -1 );
2296
2297        // release lock protecting "forks" counter
2298        remote_busylock_release( forks_lock_xp );
2299
2300        // allocate a new physical page depending on vseg type
2301        page_xp = vmm_page_allocate( vseg , vpn );
2302
2303        if( page_xp == XPTR_NULL ) 
2304        {
2305            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
2306            __FUNCTION__ , vpn, process->pid );
2307
2308            hal_gpt_unlock_pte( gpt_xp , vpn ); 
2309
2310            return EXCP_KERNEL_PANIC;
2311        }
2312
2313        // compute allocated page PPN
2314        new_ppn = ppm_page2ppn( page_xp );
2315
2316#if DEBUG_VMM_HANDLE_COW
2317if( DEBUG_VMM_HANDLE_COW < cycle )
2318printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
2319__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
2320#endif
2321
2322        // copy old page content to new page
2323        hal_remote_memcpy( ppm_ppn2base( new_ppn ),
2324                           ppm_ppn2base( old_ppn ),
2325                           CONFIG_PPM_PAGE_SIZE );
2326
2327#if DEBUG_VMM_HANDLE_COW
2328if( DEBUG_VMM_HANDLE_COW < cycle )
2329printk("\n[%s] thread[%x,%x] copied old page to new page\n",
2330__FUNCTION__, this->process->pid, this->trdid );
2331#endif
2332
2333    }             
2334    else               // no pending fork => keep the existing page
2335    {
2336        // release lock protecting "forks" counter
2337        remote_busylock_release( forks_lock_xp );
2338
2339#if(DEBUG_VMM_HANDLE_COW & 1)
2340if( DEBUG_VMM_HANDLE_COW < cycle )
2341printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
2342__FUNCTION__, this->process->pid, this->trdid, old_ppn );
2343#endif
2344        new_ppn = old_ppn;
2345    }
2346
2347    // build new_attr : set WRITABLE, reset COW, reset LOCKED
2348    new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));
2349
2350#if(DEBUG_VMM_HANDLE_COW & 1)
2351if( DEBUG_VMM_HANDLE_COW < cycle )
2352printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
2353__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
2354#endif
2355
2356    // update the relevant GPT(s)
2357    // - private vseg => update only the local GPT
2358    // - public vseg => update the reference GPT AND all the GPT copies
2359    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
2360    {
2361        // set new PTE in local gpt
2362        hal_gpt_set_pte( gpt_xp,
2363                         vpn,
2364                         new_attr,
2365                         new_ppn );
2366    }
2367    else
2368    {
2369        if( ref_cxy == local_cxy )                  // reference cluster is local
2370        {
2371            vmm_global_update_pte( process,
2372                                   vpn,
2373                                   new_attr,
2374                                   new_ppn );
2375        }
2376        else                                        // reference cluster is remote
2377        {
2378            rpc_vmm_global_update_pte_client( ref_cxy,
2379                                              ref_ptr,
2380                                              vpn,
2381                                              new_attr,
2382                                              new_ppn );
2383        }
2384    }
2385
2386#if DEBUG_VMM_HANDLE_COW
2387cycle = (uint32_t)hal_get_cycles();
2388if( DEBUG_VMM_HANDLE_COW < cycle )
2389printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
2390__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
2391#endif
2392
2393#if ((DEBUG_VMM_HANDLE_COW & 3) == 3)
2394hal_vmm_display( process , true );
2395#endif
2396
2397    return EXCP_NON_FATAL;
2398
2399}   // end vmm_handle_cow()
2400
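////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of the kernel source) : the two core decisions
// of vmm_handle_cow() above. A private copy of the physical page is needed only while
// other forked processes still reference it (forks > 0) ; in both cases the new PTE
// attributes re-enable writes and clear the COW and LOCKED flags in one expression,
// exactly as computed by the handler.
////////////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t demo_cow_new_attr( uint32_t old_attr )
{
    // set WRITABLE, reset COW, reset LOCKED
    return (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));
}
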