source: trunk/kernel/mm/vmm.c @ 683

Last change on this file since 683 was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 101.4 KB
/*
 * vmm.c - virtual memory manager related operations implementation.
 *
 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
 *           Alain Greiner    (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_gpt.h>
#include <hal_vmm.h>
#include <hal_irqmask.h>
#include <hal_macros.h>
#include <printk.h>
#include <memcpy.h>
#include <remote_queuelock.h>
#include <list.h>
#include <xlist.h>
#include <bits.h>
#include <process.h>
#include <thread.h>
#include <vseg.h>
#include <cluster.h>
#include <scheduler.h>
#include <vfs.h>
#include <mapper.h>
#include <page.h>
#include <kmem.h>
#include <vmm.h>
#include <hal_exception.h>

////////////////////////////////////////////////////////////////////////////////////////////
//   Extern global variables
////////////////////////////////////////////////////////////////////////////////////////////

extern  process_t  process_zero;      // allocated in cluster.c

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_user_init() function.
// It initialises the VMM STACK allocator: it clears the stack slots bitmap and registers
// the base VPN of the STACK zone, after checking that this zone is large enough.
////////////////////////////////////////////////////////////////////////////////////////////
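// The STACK zone is statically partitioned in per-thread slots of CONFIG_VMM_STACK_SIZE
// pages: the slot of local thread <ltid> starts at vpn CONFIG_VMM_STACK_BASE +
// (ltid * CONFIG_VMM_STACK_SIZE). For example, with a hypothetical CONFIG_VMM_STACK_SIZE
// of 0x1000 pages, slot 0 starts at vpn 0xC0000 and slot 1 at vpn 0xC1000.
////////////////////////////////////////////////////////////////////////////////////////////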
static void vmm_stack_init( vmm_t * vmm )
{

// check STACK zone
assert( __FUNCTION__, ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n");

    // get pointer on STACK allocator
    stack_mgr_t * mgr = &vmm->stack_mgr;

    mgr->bitmap   = 0;
    mgr->vpn_base = CONFIG_VMM_STACK_BASE;
    busylock_init( &mgr->lock , LOCK_VMM_STACK );

}

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM STACK specific allocator. Depending on the local thread index <ltid>,
// it checks availability of the corresponding slot in the process STACKS region,
// allocates a vseg descriptor, and initializes the "vpn_base" and "vpn_size" fields.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in]  pointer on VMM.
// @ ltid     : [in]  requested slot == local user thread identifier.
////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * vmm_stack_alloc( vmm_t  * vmm,
                                 ltid_t   ltid )
{

// check ltid argument
assert( __FUNCTION__,
(ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
"slot index %d too large for a user stack vseg", ltid );

    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // get lock protecting stack allocator
    busylock_acquire( &mgr->lock );

// check requested slot is available
assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , ltid ) == false),
"slot index %d already allocated", ltid );

    // allocate a vseg descriptor
    vseg_t * vseg = vseg_alloc();

    if( vseg == NULL )
    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
__FUNCTION__ , local_cxy );
#endif
        busylock_release( &mgr->lock );
        return NULL;
    }

    // update bitmap
    bitmap_set( &mgr->bitmap , ltid );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // set "vpn_base" & "vpn_size" fields (first page not allocated)
    vseg->vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1;
    vseg->vpn_size = CONFIG_VMM_STACK_SIZE - 1;
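    // note: leaving the first page of each slot unmapped presumably provides a guard
    // page: a stack overflow then hits an unmapped page and raises a page fault,
    // instead of silently corrupting the adjacent stack slot.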

    return vseg;

} // end vmm_stack_alloc()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_remove_vseg() function, and implements
// the VMM STACK specific deallocator.
// It updates the bitmap to release the corresponding slot in the process STACKS region,
// and releases the memory allocated to the vseg descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in] pointer on VMM.
// @ vseg     : [in] pointer on released vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_stack_free( vmm_t  * vmm,
                            vseg_t * vseg )
{
    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // compute slot index
    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
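    // (vmm_stack_alloc() set vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1,
    //  so this computation recovers the original ltid)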

// check index
assert( __FUNCTION__, (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
"slot index %d too large for a user stack vseg", index );

// check released slot is allocated
assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , index ) == true),
"released slot index %d not allocated", index );

    // get lock on stack allocator
    busylock_acquire( &mgr->lock );

    // update stacks_bitmap
    bitmap_clear( &mgr->bitmap , index );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // release memory allocated to vseg descriptor
    vseg_free( vseg );

}  // end vmm_stack_free()



////////////////////////////////////////////////////////////////////////////////////////////
// This function displays the current state of the VMM MMAP allocator of a process VMM
// identified by the <vmm> argument.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_mmap_display( vmm_t * vmm )
{
    uint32_t  order;
    xptr_t    root_xp;
    xptr_t    iter_xp;

    // get pointer on process
    process_t * process = (process_t *)(((char*)vmm) - OFFSETOF( process_t , vmm ));

    // get process PID
    pid_t pid = process->pid;

    // get pointer on VMM MMAP allocator
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // display header
    printk("***** VMM MMAP allocator / process %x *****\n", pid );

    // scan the array of free lists of vsegs
    for( order = 0 ; order <= CONFIG_VMM_HEAP_MAX_ORDER ; order++ )
    {
        root_xp = XPTR( local_cxy , &mgr->free_list_root[order] );

        if( !xlist_is_empty( root_xp ) )
        {
            printk(" - %d (%x pages) : ", order , 1<<order );

            XLIST_FOREACH( root_xp , iter_xp )
            {
                xptr_t   vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
                vseg_t * vseg    = GET_PTR( vseg_xp );

                printk("%x | ", vseg->vpn_base );
            }

            printk("\n");
        }
    }
}  // end vmm_mmap_display()

////////////////////////////////////////////////////////////////////////////////////////////
// This function is called by the vmm_user_init() function.
// It initialises the free lists of vsegs used by the VMM MMAP allocator.
// TODO this function is only valid for 32 bits cores, and makes three assumptions:
// HEAP_BASE == 1 Gbytes / HEAP_SIZE == 2 Gbytes / MMAP_MAX_SIZE == 1 Gbytes
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_mmap_init( vmm_t * vmm )
{

// check HEAP base and size
assert( __FUNCTION__, (CONFIG_VMM_HEAP_BASE == 0x40000) && (CONFIG_VMM_STACK_BASE == 0xc0000),
"CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" );

// check MMAP vseg max order
assert( __FUNCTION__, (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" );

    // get pointer on MMAP allocator
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // initialize HEAP base and size
    mgr->vpn_base        = CONFIG_VMM_HEAP_BASE;
    mgr->vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;

    // initialize lock
    busylock_init( &mgr->lock , LOCK_VMM_MMAP );

    // initialize free lists
    uint32_t   i;
    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
    {
        xlist_root_init( XPTR( local_cxy , &mgr->free_list_root[i] ) );
    }

    // allocate and register first 1 Gbytes vseg
    vseg_t * vseg0 = vseg_alloc();

assert( __FUNCTION__, (vseg0 != NULL) , "cannot allocate vseg" );

    vseg0->vpn_base = CONFIG_VMM_HEAP_BASE;
    vseg0->vpn_size = CONFIG_VMM_HEAP_BASE;

    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
                     XPTR( local_cxy , &vseg0->xlist ) );

    // allocate and register second 1 Gbytes vseg
    vseg_t * vseg1 = vseg_alloc();

assert( __FUNCTION__, (vseg1 != NULL) , "cannot allocate vseg" );

    vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1;
    vseg1->vpn_size = CONFIG_VMM_HEAP_BASE;

    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
                     XPTR( local_cxy , &vseg1->xlist ) );
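    // at this point the 2 Gbytes HEAP zone [0x40000 , 0xC0000[ (in pages) is covered
    // by exactly two free vsegs of order 18 (0x40000 pages each, i.e. 1 Gbyte with
    // 4 Kbytes pages): vseg0 covers vpn [0x40000,0x7FFFF] and vseg1 vpn [0x80000,0xBFFFF].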

#if DEBUG_VMM_MMAP
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
printk("\n[%s] thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
vmm_mmap_display( vmm );
#endif

}  // end vmm_mmap_init()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM MMAP specific allocator.  Depending on the requested number of pages <npages>,
// it gets a free vseg from the relevant free_list, and initializes the "vpn_base" and
// "vpn_size" fields.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in] pointer on VMM.
// @ npages   : [in] requested number of pages.
// @ returns local pointer on vseg if success / returns NULL if failure.
////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * vmm_mmap_alloc( vmm_t * vmm,
                                vpn_t   npages )
{

#if DEBUG_VMM_MMAP
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_MMAP < cycle )
printk("\n[%s] thread[%x,%x] for %x pages / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, npages, cycle );
#endif

    // number of allocated pages must be power of 2
    // compute actual size and order
    vpn_t    required_vpn_size = POW2_ROUNDUP( npages );
    uint32_t required_order    = bits_log2( required_vpn_size );
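    // example: a request for npages == 5 is rounded up to required_vpn_size == 8,
    // i.e. required_order == 3, and is served from free_list_root[3] or above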

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // take lock protecting free lists in MMAP allocator
    busylock_acquire( &mgr->lock );

    // initialise the while loop variables
    uint32_t   current_order = required_order;
    vseg_t   * current_vseg  = NULL;

    // search a free vseg equal or larger than requested size
    while( current_order <= CONFIG_VMM_HEAP_MAX_ORDER )
    {
        // build extended pointer on free_list_root[current_order]
        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[current_order] );

        if( !xlist_is_empty( root_xp ) )
        {
            // get extended pointer on first vseg in this free_list
            xptr_t current_vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
            current_vseg = GET_PTR( current_vseg_xp );

            // build extended pointer on xlist field in vseg descriptor
            xptr_t list_entry_xp = XPTR( local_cxy , &current_vseg->xlist );

            // remove this vseg from the free_list
            xlist_unlink( list_entry_xp );

            break;
        }

        // increment loop index
        current_order++;

    }  // end while loop

    if( current_vseg == NULL )  // return failure
    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
__FUNCTION__, npages , local_cxy );
#endif
        busylock_release( &mgr->lock );
        return NULL;
    }

    // split recursively the found vseg in smaller vsegs
    // if required, and update the free-lists accordingly
    while( current_order > required_order )
    {
        // get found vseg base and size
        vpn_t  vpn_base = current_vseg->vpn_base;
        vpn_t  vpn_size = current_vseg->vpn_size;

        // allocate a new vseg for the upper half of current vseg
        vseg_t * new_vseg = vseg_alloc();

        if( new_vseg == NULL )
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
__FUNCTION__ , local_cxy );
#endif
            busylock_release( &mgr->lock );
            return NULL;
        }

        // initialise new vseg (upper half of found vseg)
        new_vseg->vmm      = vmm;
        new_vseg->vpn_base = vpn_base + (vpn_size >> 1);
        new_vseg->vpn_size = vpn_size >> 1;

        // insert new vseg in relevant free_list
        xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[current_order-1] ),
                         XPTR( local_cxy , &new_vseg->xlist ) );

        // update found vseg
        current_vseg->vpn_size = vpn_size>>1;

        // update order
        current_order --;
    }
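    // example: a required_order == 3 request served from an order 18 vseg performs
    // 15 successive splits: each split keeps the lower half and inserts the upper
    // half in the free list of the immediately smaller order (buddy algorithm)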

    // release lock protecting free lists
    busylock_release( &mgr->lock );

#if DEBUG_VMM_MMAP
vmm_mmap_display( vmm );
#endif

    return current_vseg;

}  // end vmm_mmap_alloc()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function implements the VMM MMAP specific deallocator.
// It is called by the vmm_remove_vseg() function.
// It releases the vseg to the relevant free_list, after trying (recursively) to
// merge it to the buddy vseg.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in] pointer on VMM.
// @ vseg     : [in] pointer on released vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_mmap_free( vmm_t  * vmm,
                           vseg_t * vseg )
{

#if DEBUG_VMM_MMAP
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_MMAP < cycle )
printk("\n[%s] thread[%x,%x] for vpn_base %x / vpn_size %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg->vpn_base, vseg->vpn_size, cycle );
#endif

    vseg_t * buddy_vseg;

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // take lock protecting free lists
    busylock_acquire( &mgr->lock );

    // initialise loop variables
    // released_vseg is the currently released vseg
    vseg_t * released_vseg     = vseg;
    uint32_t released_order    = bits_log2( vseg->vpn_size );

    // iteratively merge the released vseg with its buddy vseg;
    // register the current vseg and exit when the buddy is not found
    while( released_order <= CONFIG_VMM_HEAP_MAX_ORDER )
    {
        // compute buddy_vseg vpn_base
        vpn_t buddy_vpn_base = released_vseg->vpn_base ^ (1 << released_order);
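        // example: with released_order == 3, the buddy of a vseg starting at
        // vpn 0x40008 starts at vpn 0x40008 ^ 0x8 == 0x40000; a successful merge
        // produces a single order 4 vseg starting at vpn 0x40000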

        // build extended pointer on free_list_root[released_order]
        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[released_order] );

        // scan this free list to find the buddy vseg
        xptr_t   iter_xp;
        buddy_vseg = NULL;
        XLIST_FOREACH( root_xp , iter_xp )
        {
            xptr_t   current_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
            vseg_t * current_vseg    = GET_PTR( current_vseg_xp );

            if( current_vseg->vpn_base == buddy_vpn_base )
            {
                buddy_vseg = current_vseg;
                break;
            }
        }

        if( buddy_vseg != NULL )     // buddy found => merge released & buddy
        {
            // update released vseg fields
            released_vseg->vpn_size = buddy_vseg->vpn_size<<1;
            if( released_vseg->vpn_base > buddy_vseg->vpn_base )
                released_vseg->vpn_base = buddy_vseg->vpn_base;

            // remove buddy vseg from free_list
            xlist_unlink( XPTR( local_cxy , &buddy_vseg->xlist ) );

            // release memory allocated to buddy descriptor
            vseg_free( buddy_vseg );
        }
        else                         // buddy not found => register & exit
        {
            // register released vseg in free list
            xlist_add_first( root_xp , XPTR( local_cxy , &released_vseg->xlist ) );

            // exit while loop
            break;
        }

        // increment released_order
        released_order++;
    }

    // release lock
    busylock_release( &mgr->lock );

#if DEBUG_VMM_MMAP
vmm_mmap_display( vmm );
#endif

}  // end vmm_mmap_free()

////////////////////////////////////////////////////////////////////////////////////////////
// This function registers one vseg in the VSL of a local process descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// vmm       : [in] pointer on VMM.
// vseg      : [in] pointer on vseg.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
                             vseg_t * vseg )
{
    // update vseg descriptor
    vseg->vmm = vmm;

    // increment vsegs number
    vmm->vsegs_nr++;

    // add vseg in vmm list
    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
                    XPTR( local_cxy , &vseg->xlist ) );

}  // end vmm_attach_vseg_to_vsl()

////////////////////////////////////////////////////////////////////////////////////////////
// This function removes one vseg from the VSL of a local process descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// vmm       : [in] pointer on VMM.
// vseg      : [in] pointer on vseg.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
                               vseg_t * vseg )
{
    // update vseg descriptor
    vseg->vmm = NULL;

    // decrement vsegs number
    vmm->vsegs_nr--;

    // remove vseg from VSL
    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

}  // end vmm_detach_vseg_from_vsl()

////////////////////////////////////////////
error_t vmm_user_init( process_t * process )
{

#if DEBUG_VMM_USER_INIT
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_INIT )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t   * vmm = &process->vmm;

// check UTILS zone
assert( __FUNCTION__ , ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
(CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) , "UTILS zone too small\n" );

    // initialize lock protecting the VSL
    remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );

    // initialize STACK allocator
    vmm_stack_init( vmm );

    // initialize MMAP allocator
    vmm_mmap_init( vmm );

    // initialize instrumentation counters
    vmm->false_pgfault_nr    = 0;
    vmm->local_pgfault_nr    = 0;
    vmm->global_pgfault_nr   = 0;
    vmm->false_pgfault_cost  = 0;
    vmm->local_pgfault_cost  = 0;
    vmm->global_pgfault_cost = 0;

    hal_fence();

#if DEBUG_VMM_USER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_INIT )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

    return 0;

}  // end vmm_user_init()

//////////////////////////////////////////
void vmm_user_reset( process_t * process )
{
    xptr_t       vseg_xp;
    vseg_t     * vseg;
    vseg_type_t  vseg_type;

#if DEBUG_VMM_USER_RESET
uint32_t   cycle;
thread_t * this = CURRENT_THREAD;
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    // get pointer on local VMM
    vmm_t * vmm = &process->vmm;

    // build extended pointer on VSL root and VSL lock
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    // take the VSL lock
    remote_queuelock_acquire( lock_xp );

    // scan the VSL to delete all non-kernel vsegs
    // (we don't use XLIST_FOREACH because items are deleted during the scan)
    xptr_t   iter_xp;
    xptr_t   next_xp;
    for( iter_xp = hal_remote_l64( root_xp ) ;
         iter_xp != root_xp ;
         iter_xp = next_xp )
    {
        // save extended pointer on next item in xlist
        next_xp = hal_remote_l64( iter_xp );

        // get pointers on current vseg in VSL
        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg      = GET_PTR( vseg_xp );
        vseg_type = vseg->type;

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        // delete non-kernel vseg
        if( (vseg_type != VSEG_TYPE_KCODE) &&
            (vseg_type != VSEG_TYPE_KDATA) &&
            (vseg_type != VSEG_TYPE_KDEV ) )
        {
            // remove vseg from VSL
            vmm_remove_vseg( process , vseg );

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
        else
        {

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
    }  // end loop on vsegs in VSL

    // release the VSL lock
    remote_queuelock_release( lock_xp );

// FIXME the process copies must also be handled...

    // re-initialise VMM
    vmm_user_init( process );

#if DEBUG_VMM_USER_RESET
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

}  // end vmm_user_reset()

/////////////////////////////////////////////////
void vmm_global_delete_vseg( process_t * process,
                             intptr_t    base )
{
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;
    reg_t           save_sr;

    xptr_t          process_lock_xp;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;

    xptr_t          vsl_root_xp;
    xptr_t          vsl_lock_xp;
    xptr_t          vsl_iter_xp;

    rpc_desc_t      rpc;                  // shared rpc descriptor for parallel RPCs
    uint32_t        responses;            // RPC responses counter

    thread_t      * this    = CURRENT_THREAD;
    pid_t           pid     = process->pid;
    cluster_t     * cluster = LOCAL_CLUSTER;

#if DEBUG_VMM_GLOBAL_DELETE_VSEG
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enters / process %x / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
#endif

    // initialize a shared RPC descriptor
    rpc.rsp       = &responses;
    rpc.blocking  = false;                  // non-blocking behaviour for rpc_send()
    rpc.index     = RPC_VMM_REMOVE_VSEG;
    rpc.thread    = this;
    rpc.lid       = this->core->lid;
    rpc.args[0]   = this->process->pid;
    rpc.args[1]   = base;
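    // note: this single rpc descriptor is shared by all parallel RPC requests sent
    // below; the client blocks and deschedules once, and is presumably unblocked by
    // the last RPC server decrementing the <responses> counter to zero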

    // get owner process cluster and local index
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );

    // get extended pointer on root and lock of process copies xlist in owner cluster
    process_root_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
    process_lock_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );

    // mask IRQs
    hal_disable_irq( &save_sr );

    // client thread blocks itself
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    // take the lock protecting process copies
    remote_queuelock_acquire( process_lock_xp );

    // initialize responses counter
    responses = 0;

    // loop on process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

        // build extended pointers on remote VSL root and lock
        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );

        // get lock on remote VSL
        remote_queuelock_acquire( vsl_lock_xp );

        // loop on vsegs in remote process VSL
        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
        {
            // get pointers on current vseg
            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
            vseg_t * vseg_ptr = GET_PTR( vseg_xp );

            // get current vseg base address
            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
                                                                 &vseg_ptr->min ) );

            if( vseg_base == base )   // found searched vseg
            {
                // atomically increment responses counter
                hal_atomic_add( &responses , 1 );

#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
#endif
                // send RPC to remote cluster
                rpc_send( remote_process_cxy , &rpc );

                // exit loop on vsegs
                break;
            }
        }  // end of loop on vsegs

        // release lock on remote VSL
        remote_queuelock_release( vsl_lock_xp );

    }  // end of loop on process copies

    // release the lock protecting process copies
    remote_queuelock_release( process_lock_xp );

#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base );
#endif

    // client thread deschedule
    sched_yield("blocked on rpc_vmm_delete_vseg");

    // restore IRQs
    hal_restore_irq( save_sr );

#if DEBUG_VMM_GLOBAL_DELETE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
#endif

}  // end vmm_global_delete_vseg()

////////////////////////////////////////////////
void vmm_global_resize_vseg( process_t * process,
                             intptr_t    base,
                             intptr_t    new_base,
                             intptr_t    new_size )
{
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;
    reg_t           save_sr;

    xptr_t          process_lock_xp;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;

    xptr_t          vsl_root_xp;
    xptr_t          vsl_lock_xp;
    xptr_t          vsl_iter_xp;

    rpc_desc_t      rpc;                  // shared rpc descriptor for parallel RPCs
    uint32_t        responses;            // RPC responses counter

    thread_t      * this    = CURRENT_THREAD;
    pid_t           pid     = process->pid;
    cluster_t     * cluster = LOCAL_CLUSTER;

#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle );
#endif

    // initialize a shared RPC descriptor
    rpc.rsp       = &responses;
    rpc.blocking  = false;                  // non-blocking behaviour for rpc_send()
    rpc.index     = RPC_VMM_RESIZE_VSEG;
    rpc.thread    = this;
    rpc.lid       = this->core->lid;
    rpc.args[0]   = this->process->pid;
    rpc.args[1]   = base;
    rpc.args[2]   = new_base;
    rpc.args[3]   = new_size;

    // get owner process cluster and local index
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );

    // get extended pointer on root and lock of process copies xlist in owner cluster
    process_root_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
    process_lock_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );

    // mask IRQs
    hal_disable_irq( &save_sr );

    // client thread blocks itself
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    // take the lock protecting process copies
    remote_queuelock_acquire( process_lock_xp );

    // initialize responses counter
    responses = 0;

    // loop on process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

        // build extended pointers on remote VSL root and lock
        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );

        // get lock on remote VSL
        remote_queuelock_acquire( vsl_lock_xp );

        // loop on vsegs in remote process VSL
        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
        {
            // get pointers on current vseg
            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
            vseg_t * vseg_ptr = GET_PTR( vseg_xp );

            // get current vseg base address
            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
                                                                 &vseg_ptr->min ) );

            if( vseg_base == base )   // found searched vseg
            {
                // atomically increment responses counter
                hal_atomic_add( &responses , 1 );

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
#endif
                // send RPC to remote cluster
                rpc_send( remote_process_cxy , &rpc );

                // exit loop on vsegs
                break;
            }

        }  // end of loop on vsegs

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
hal_vmm_display( remote_process_xp , false );
#endif

        // release lock on remote VSL
        remote_queuelock_release( vsl_lock_xp );

    }  // end of loop on process copies

    // release the lock protecting process copies
    remote_queuelock_release( process_lock_xp );

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base );
#endif

    // client thread deschedule
    sched_yield("blocked on rpc_vmm_resize_vseg");

    // restore IRQs
    hal_restore_irq( save_sr );

#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
#endif

}  // end vmm_global_resize_vseg()

////////////////////////////////////////////////
void vmm_global_update_pte( process_t * process,
                            vpn_t       vpn,
                            uint32_t    attr,
                            ppn_t       ppn )
{
    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

#if DEBUG_VMM_GLOBAL_UPDATE_PTE
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
#endif

#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle );
#endif

    // get owner process cluster and local index
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );

    // get extended pointer on root of process copies xlist in owner cluster
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // loop on process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // update remote GPT
        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    }

#if DEBUG_VMM_GLOBAL_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

}  // end vmm_global_update_pte()

///////////////////////////////////////
void vmm_set_cow( process_t * process )
{
    vmm_t         * vmm;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    xptr_t          vseg_root_xp;
    xptr_t          vseg_iter_xp;

    xptr_t          vseg_xp;
    vseg_t        * vseg;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

    // get target process PID
    pid = process->pid;

#if DEBUG_VMM_SET_COW
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid , cycle );
#endif

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

// check cluster is reference
assert( __FUNCTION__, (XPTR( local_cxy , process ) == process->ref_xp),
"local cluster must be process reference cluster\n");

    // get pointer on reference VMM
    vmm = &process->vmm;

    // get extended pointer on root of process copies xlist in owner cluster
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // get extended pointer on root of vsegs xlist from reference VMM
    vseg_root_xp  = XPTR( local_cxy , &vmm->vsegs_root );

    // loop on target process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process copy
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // loop on vsegs in (local) reference process VSL
        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
        {
            // get pointer on vseg
            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
            vseg     = GET_PTR( vseg_xp );

            // get vseg type, base and size
            uint32_t type     = vseg->type;
            vpn_t    vpn_base = vseg->vpn_base;
            vpn_t    vpn_size = vseg->vpn_size;

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
            // only DATA, ANON and REMOTE vsegs
            if( (type == VSEG_TYPE_DATA)  ||
                (type == VSEG_TYPE_ANON)  ||
                (type == VSEG_TYPE_REMOTE) )
            {
                vpn_t      vpn;
                uint32_t   attr;
                ppn_t      ppn;
                xptr_t     page_xp;
                cxy_t      page_cxy;
                page_t   * page_ptr;
                xptr_t     forks_xp;
                xptr_t     lock_xp;

                // update flags in remote GPT
                hal_gpt_set_cow( remote_gpt_xp,
                                 vpn_base,
                                 vpn_size );
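                // note: COW pages become read-only in the GPT, so the next write
                // access raises a page fault, allowing the fault handler to give
                // the writing process a private copy of the page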

                // atomically increment pending forks counter in physical pages,
                // this is only done once, when handling the reference copy
                if( remote_process_cxy == local_cxy )
                {

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
                    // scan all pages in vseg
                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                    {
                        // get page attributes and PPN from reference GPT
                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn );

                        // atomically update pending forks counter if page is mapped
                        if( attr & GPT_MAPPED )
                        {
                            // get pointers and cluster on page descriptor
                            page_xp  = ppm_ppn2page( ppn );
                            page_cxy = GET_CXY( page_xp );
                            page_ptr = GET_PTR( page_xp );

                            // get extended pointers on "forks" and "lock"
                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                            // take lock protecting "forks" counter
                            remote_busylock_acquire( lock_xp );

                            // increment "forks"
                            hal_remote_atomic_add( forks_xp , 1 );

                            // release lock protecting "forks" counter
                            remote_busylock_release( lock_xp );
                        }
                    }   // end loop on vpn

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
                }   // end if local
            }   // end if vseg type
        }   // end loop on vsegs
    }   // end loop on process copies

#if DEBUG_VMM_SET_COW
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

}  // end vmm_set_cow()

/////////////////////////////////////////////////
error_t vmm_fork_copy( process_t * child_process,
                       xptr_t      parent_process_xp )
{
    error_t     error;
    cxy_t       parent_cxy;
    process_t * parent_process;
    vmm_t     * parent_vmm;
    xptr_t      parent_lock_xp;
    vmm_t     * child_vmm;
    xptr_t      iter_xp;
    xptr_t      parent_vseg_xp;
    vseg_t    * parent_vseg;
    vseg_t    * child_vseg;
    uint32_t    type;
    vpn_t       vpn;
    vpn_t       vpn_base;
    vpn_t       vpn_size;
    xptr_t      parent_root_xp;
    bool_t      mapped;
    ppn_t       ppn;

#if DEBUG_VMM_FORK_COPY
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, cycle );
#endif

    // get parent process cluster and local pointer
    parent_cxy     = GET_CXY( parent_process_xp );
    parent_process = GET_PTR( parent_process_xp );

    // get local pointers on parent and child VMM
    parent_vmm = &parent_process->vmm;
    child_vmm  = &child_process->vmm;

    // build extended pointer on parent VSL root and lock
    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );

    // take the lock protecting the parent VSL
    remote_queuelock_acquire( parent_lock_xp );

    // loop on parent VSL xlist
    XLIST_FOREACH( parent_root_xp , iter_xp )
    {
        // get pointers on current parent vseg
        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        parent_vseg    = GET_PTR( parent_vseg_xp );

        // get vseg type
        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

        // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
        {
            // allocate memory for a new child vseg
            child_vseg = vseg_alloc();
            if( child_vseg == NULL )   // release all allocated vsegs
            {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : cannot create vseg for child in cluster %x\n",
__FUNCTION__, local_cxy );
#endif
                vmm_destroy( child_process );
                return -1;
            }

            // copy parent vseg to child vseg
            vseg_init_from_ref( child_vseg , parent_vseg_xp );

            // build extended pointer on child VSL lock
            xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );

            // take the child VSL lock
            remote_queuelock_acquire( child_lock_xp );

            // register child vseg in child VSL
            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );

            // release the child VSL lock
            remote_queuelock_release( child_lock_xp );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif
            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
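            // (CODE vsegs are skipped: their pages are read-only, and can presumably
            // be re-mapped on demand from the file mapper when the child takes its
            // first instruction page faults)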
1325            if( type != VSEG_TYPE_CODE )
1326            {
1327                // activate the COW for DATA, ANON, REMOTE vsegs only
1328                // cow = ( type != VSEG_TYPE_FILE );
1329
1330                vpn_base = child_vseg->vpn_base;
1331                vpn_size = child_vseg->vpn_size;
1332
1333                // scan pages in parent vseg
1334                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
1335                {
1336                    error = hal_gpt_pte_copy( &child_vmm->gpt,
1337                                              vpn,
1338                                              XPTR( parent_cxy , &parent_vmm->gpt ),
1339                                              vpn,
1340                                              false,      // does not handle COW flag
1341                                              &ppn,       // unused
1342                                              &mapped );  // unused
1343                    if( error )
1344                    {
1345
1346#if DEBUG_VMM_ERROR
1347printk("\n[ERROR] in %s : cannot copy GPT\n",
1348__FUNCTION__ );
1349#endif
1350                        vmm_destroy( child_process );
1351                        return -1;
1352                    }
1353
1354#if DEBUG_VMM_FORK_COPY
1355cycle = (uint32_t)hal_get_cycles();
1356if( DEBUG_VMM_FORK_COPY < cycle )
1357printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
1358__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
1359#endif
1360                }
1361            }   // end if no code & no stack
1362        }   // end if no stack
1363    }   // end loop on vsegs
1364
1365    // release the parent VSL lock in read mode
1366    remote_queuelock_release( parent_lock_xp );
1367
1368    // copy base addresses from parent VMM to child VMM
1369    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
1370    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
1371    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
1372    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
1373    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));
1374
1375    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));
1376
1377    hal_fence();
1378
1379#if DEBUG_VMM_FORK_COPY
1380cycle = (uint32_t)hal_get_cycles();
1381if( DEBUG_VMM_FORK_COPY < cycle )
1382printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
1383__FUNCTION__ , this->process->pid, this->trdid , cycle );
1384#endif
1385
1386    return 0;
1387
1388}  // vmm_fork_copy()
1389
1390///////////////////////////////////////
1391void vmm_destroy( process_t * process )
1392{
1393    xptr_t   vseg_xp;
1394        vseg_t * vseg;
1395
1396#if DEBUG_VMM_DESTROY
1397uint32_t   cycle = (uint32_t)hal_get_cycles();
1398thread_t * this  = CURRENT_THREAD;
1399if( DEBUG_VMM_DESTROY < cycle )
1400printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
1401__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
1402#endif
1403
1404#if (DEBUG_VMM_DESTROY & 1 )
1405if( DEBUG_VMM_DESTROY < cycle )
1406hal_vmm_display( XPTR( local_cxy, process ) , true );
1407#endif
1408
1409    // get pointer on local VMM
1410    vmm_t  * vmm = &process->vmm;
1411
1412    // build extended pointer on VSL root, VSL lock and GPT lock
1413    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
1414    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1415
1416    // take the VSL lock
1417    remote_queuelock_acquire( vsl_lock_xp );
1418
1419    // scan the VSL to delete all registered vsegs
1420    // (we don't use a FOREACH in case of item deletion)
1421    xptr_t  iter_xp;
1422    xptr_t  next_xp;
1423        for( iter_xp = hal_remote_l64( vsl_root_xp ) ; 
1424         iter_xp != vsl_root_xp ;
1425         iter_xp = next_xp )
1426        {
1427        // save extended pointer on next item in xlist
1428        next_xp = hal_remote_l64( iter_xp );
1429
1430        // get pointers on current vseg in VSL
1431        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
1432        vseg      = GET_PTR( vseg_xp );
1433
1434        // delete vseg and release physical pages
1435        vmm_remove_vseg( process , vseg );
1436
1437#if( DEBUG_VMM_DESTROY & 1 )
1438if( DEBUG_VMM_DESTROY < cycle )
1439printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
1440__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1441#endif
1442
1443        }
1444
1445    // release the VSL lock
1446    remote_queuelock_release( vsl_lock_xp );
1447
    // remove all registered MMAP vsegs from the MMAP allocator free_lists
    uint32_t i;
    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
    {
        // build extended pointer on free list root
        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.free_list_root[i] );

        // scan free_list[i]
        while( !xlist_is_empty( root_xp ) )
        {
            vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
            vseg    = GET_PTR( vseg_xp );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] found %s zombi vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
            // clean vseg descriptor
            vseg->vmm = NULL;

            // remove vseg from free_list
            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] release %s zombi vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif

            // release vseg descriptor
            vseg_free( vseg );
        }
    }

    // release memory allocated to the GPT itself
    hal_gpt_destroy( &vmm->gpt );

#if DEBUG_VMM_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end vmm_destroy()

/////////////////////////////////////////////////
vseg_t * vmm_check_conflict( process_t * process,
                             vpn_t       vpn_base,
                             vpn_t       vpn_size )
{
    vmm_t        * vmm = &process->vmm;

    // scan the VSL
    vseg_t       * vseg;
    xptr_t         iter_xp;
    xptr_t         vseg_xp;
    xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
            (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
    }
    return NULL;

}  // end vmm_check_conflict()
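
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : the conflict test above is the
// classic half-open interval overlap predicate : [a,a+as) and [b,b+bs) overlap iff
// (a < b+bs) && (b < a+as). A caller could pre-check a candidate region as below.
// The <demo_region_is_free> name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
static inline bool_t demo_region_is_free( process_t * process,
                                          vpn_t       vpn_base,
                                          vpn_t       vpn_size )
{
    // the region is free when no registered vseg overlaps it
    return (vmm_check_conflict( process , vpn_base , vpn_size ) == NULL);
}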

////////////////////////////////////////////////
vseg_t * vmm_create_vseg( process_t   * process,
                          vseg_type_t   type,
                          intptr_t      base,         // ltid for VSEG_TYPE_STACK
                          uint32_t      size,
                          uint32_t      file_offset,
                          uint32_t      file_size,
                          xptr_t        mapper_xp,
                          cxy_t         cxy )
{
    vseg_t     * vseg;          // pointer on allocated vseg descriptor

#if DEBUG_VMM_CREATE_VSEG
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle;
#endif

#if (DEBUG_VMM_CREATE_VSEG & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
process->pid, vseg_type_str(type), base, cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm    = &process->vmm;

    // allocate a vseg descriptor and initialize it, depending on type
    // we use specific allocators for "stack" and "mmap" types

    /////////////////////////////
    if( type == VSEG_TYPE_STACK )
    {
        // get vseg from STACK allocator
        vseg = vmm_stack_alloc( vmm , base );    // base == ltid

        if( vseg == NULL )
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
#endif
            return NULL;
        }

        // initialize vseg
        vseg->type = type;
        vseg->vmm  = vmm;
        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
        vseg->cxy  = cxy;

        vseg_init_flags( vseg , type );
    }
    /////////////////////////////////
    else if( type == VSEG_TYPE_FILE )
    {
        // compute page index (in mapper) for first and last byte
        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_ORDER;
        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ORDER;

        // compute offset in first page and number of pages
        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
        vpn_t    npages = vpn_max - vpn_min + 1;

        // get vseg from MMAP allocator
        vseg = vmm_mmap_alloc( vmm , npages );

        if( vseg == NULL )
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
#endif
            return NULL;
        }

        // initialize vseg
        vseg->type        = type;
        vseg->vmm         = vmm;
        vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_ORDER) + offset;
        vseg->max         = vseg->min + size;
        vseg->file_offset = file_offset;
        vseg->file_size   = file_size;
        vseg->mapper_xp   = mapper_xp;
        vseg->cxy         = cxy;

        vseg_init_flags( vseg , type );
    }
    /////////////////////////////////////////////////////////////////
    else if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) )
    {
        // compute number of required pages in virtual space
        vpn_t npages = size >> CONFIG_PPM_PAGE_ORDER;
        if( size & CONFIG_PPM_PAGE_MASK ) npages++;

        // allocate vseg from MMAP allocator
        vseg = vmm_mmap_alloc( vmm , npages );

        if( vseg == NULL )
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
#endif
            return NULL;
        }

        // initialize vseg
        vseg->type = type;
        vseg->vmm  = vmm;
        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
        vseg->cxy  = cxy;

        vseg_init_flags( vseg , type );
    }
    /////////////////////////////////////////////////////////////////
    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    {
        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ORDER;
        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ORDER;

        // allocate vseg descriptor
        vseg = vseg_alloc();

        if( vseg == NULL )
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
#endif
            return NULL;
        }

        // initialize vseg
        vseg->type        = type;
        vseg->vmm         = vmm;
        vseg->min         = base;
        vseg->max         = base + size;
        vseg->vpn_base    = base >> CONFIG_PPM_PAGE_ORDER;
        vseg->vpn_size    = vpn_max - vpn_min + 1;
        vseg->file_offset = file_offset;
        vseg->file_size   = file_size;
        vseg->mapper_xp   = mapper_xp;
        vseg->cxy         = cxy;

        vseg_init_flags( vseg , type );
    }

    // check collisions
    vseg_t * existing_vseg = vmm_check_conflict( process , vseg->vpn_base , vseg->vpn_size );

    if( existing_vseg != NULL )
    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
       "        overlaps existing vseg %s [vpn_base %x / vpn_size %x]\n",
__FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size,
vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
#endif
        vseg_free( vseg );
        return NULL;
    }

    // build extended pointer on VSL lock
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    // take the VSL lock in write mode
    remote_queuelock_acquire( lock_xp );

    // attach vseg to VSL
    vmm_attach_vseg_to_vsl( vmm , vseg );

    // release the VSL lock
    remote_queuelock_release( lock_xp );

#if DEBUG_VMM_CREATE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / %s / vpn_base %x / vpn_size %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
vseg_type_str(type), vseg->vpn_base, vseg->vpn_size, cycle );
#endif

    return vseg;

}  // vmm_create_vseg()
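
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : a typical mmap-like caller only
// provides the size for an ANON vseg, because the base is chosen by the MMAP allocator,
// and the mapper related arguments are unused. The <demo_anon_map> name and the one-page
// size are hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * demo_anon_map( process_t * process )
{
    return vmm_create_vseg( process,
                            VSEG_TYPE_ANON,
                            0,                      // base unused for ANON
                            CONFIG_PPM_PAGE_SIZE,   // one page
                            0,                      // file_offset unused
                            0,                      // file_size unused
                            XPTR_NULL,              // no mapper
                            local_cxy );            // map in local cluster
}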

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions
// to update the physical page descriptor identified by the <ppn> argument.
// It decrements the refcount, sets the dirty bit when required, and releases the physical
// page to kmem depending on the vseg type:
// - KERNEL : refcount decremented / not released to kmem    / dirty bit not set
// - FILE   : refcount decremented / not released to kmem    / dirty bit set when required.
// - CODE   : refcount decremented / released to kmem        / dirty bit not set.
// - STACK  : refcount decremented / released to kmem        / dirty bit not set.
// - DATA   : refcount decremented / released to kmem if ref / dirty bit not set.
// - MMAP   : refcount decremented / released to kmem if ref / dirty bit not set.
////////////////////////////////////////////////////////////////////////////////////////////
// @ process  : local pointer on process.
// @ vseg     : local pointer on vseg.
// @ ppn      : released physical page index.
// @ dirty    : set the dirty bit in page descriptor when non-zero.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_ppn_release( process_t * process,
                             vseg_t    * vseg,
                             ppn_t       ppn,
                             uint32_t    dirty )
{
    bool_t do_kmem_release;

    // get vseg type
    vseg_type_t type = vseg->type;

    // is_ref is true when the local cluster contains the reference process
    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);

    // get pointers on physical page descriptor
    xptr_t   page_xp  = ppm_ppn2page( ppn );
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    // decrement page refcount
    xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount );
    hal_remote_atomic_add( count_xp , -1 );

    // compute the do_kmem_release condition depending on vseg type
    if( (type == VSEG_TYPE_KCODE) ||
        (type == VSEG_TYPE_KDATA) ||
        (type == VSEG_TYPE_KDEV) )
    {
        // no physical page release for KERNEL
        do_kmem_release = false;
    }
    else if( type == VSEG_TYPE_FILE )
    {
        // no physical page release for FILE (the page belongs to the file mapper)
        do_kmem_release = false;

        // set dirty bit if required
        if( dirty ) ppm_page_do_dirty( page_xp );
    }
    else if( (type == VSEG_TYPE_CODE)  ||
             (type == VSEG_TYPE_STACK) )
    {
        // always release physical page for private vsegs
        do_kmem_release = true;
    }
    else if( (type == VSEG_TYPE_ANON)  ||
             (type == VSEG_TYPE_REMOTE) )
    {
        // release physical page if reference cluster
        do_kmem_release = is_ref;
    }
    else if( is_ref )  // vseg_type == DATA in reference cluster
    {
        // get extended pointers on forks and lock field in page descriptor
        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

        // take lock protecting "forks" counter
        remote_busylock_acquire( lock_xp );

        // get number of pending forks from page descriptor
        uint32_t forks = hal_remote_l32( forks_xp );

        // decrement pending forks counter if required
        if( forks )  hal_remote_atomic_add( forks_xp , -1 );

        // release lock protecting "forks" counter
        remote_busylock_release( lock_xp );

        // release physical page if forks == 0
        do_kmem_release = (forks == 0);
    }
    else              // vseg_type == DATA not in reference cluster
    {
        // no physical page release if not in reference cluster
        do_kmem_release = false;
    }

    // release physical page to relevant kmem when required
    if( do_kmem_release )
    {
        // get physical page order
        uint32_t order = CONFIG_PPM_PAGE_ORDER +
                         hal_remote_l32( XPTR( page_cxy , &page_ptr->order ));

        // get physical page base
        void * base = GET_PTR( ppm_ppn2base( ppn ) );

        // release physical page
        kmem_remote_free( page_cxy , base , order );

#if DEBUG_VMM_PPN_RELEASE
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_PPN_RELEASE < cycle )
printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n",
__FUNCTION__, this->process->pid, this->trdid, ppn );
#endif

    }
} // end vmm_ppn_release()
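
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : the release policy implemented above,
// restated as a pure predicate, ignoring the "forks" counter handled for DATA vsegs.
// The <demo_must_release> name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
static inline bool_t demo_must_release( vseg_type_t type,
                                        bool_t      is_ref )
{
    // private vsegs : always released
    if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_STACK) )  return true;

    // public user vsegs : released only by the reference cluster
    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) ||
        (type == VSEG_TYPE_DATA) )                               return is_ref;

    // KERNEL and FILE pages are never released here
    return false;
}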

//////////////////////////////////////////
void vmm_remove_vseg( process_t * process,
                      vseg_t    * vseg )
{
    uint32_t    vseg_type;  // vseg type
    vpn_t       vpn;        // VPN of current PTE
    vpn_t       vpn_min;    // VPN of first PTE
    vpn_t       vpn_max;    // VPN of last PTE (excluded)
    ppn_t       ppn;        // current PTE ppn value
    uint32_t    attr;       // current PTE attributes

// check arguments
assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );

    // get pointer on local process VMM
    vmm_t * vmm = &process->vmm;

    // build extended pointer on GPT
    xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt );

    // get relevant vseg infos
    vseg_type = vseg->type;
    vpn_min   = vseg->vpn_base;
    vpn_max   = vpn_min + vseg->vpn_size;

#if DEBUG_VMM_REMOVE_VSEG
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
#endif

#if (DEBUG_VMM_REMOVE_VSEG & 1 )
if( DEBUG_VMM_REMOVE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enters / process %x / type %s / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
#endif

    // loop on all PTEs in the GPT to unmap every mapped PTE
    for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    {
        // get ppn and attr
        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );

        if( attr & GPT_MAPPED )  // PTE is mapped
        {

#if( DEBUG_VMM_REMOVE_VSEG & 1 )
if( DEBUG_VMM_REMOVE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / type %s\n",
__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
#endif
            // unmap GPT entry in local GPT
            hal_gpt_reset_pte( gpt_xp , vpn );

            // release physical page depending on vseg type
            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
        }
    }

    // remove vseg from VSL
    vmm_detach_vseg_from_vsl( vmm , vseg );

    // release vseg descriptor depending on vseg type
    if( vseg_type == VSEG_TYPE_STACK )
    {
        // release slot to local stack allocator
        vmm_stack_free( vmm , vseg );
    }
    else if( (vseg_type == VSEG_TYPE_ANON) ||
             (vseg_type == VSEG_TYPE_FILE) ||
             (vseg_type == VSEG_TYPE_REMOTE) )
    {
        // release vseg to local mmap allocator
        vmm_mmap_free( vmm , vseg );
    }
    else
    {
        // release vseg descriptor to local kmem
        vseg_free( vseg );
    }

// the exit trace uses the saved <vseg_type> and <vpn_min> values,
// because the vseg descriptor may have been released above
#if DEBUG_VMM_REMOVE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_REMOVE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x / type %s / vpn_base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
process->pid, vseg_type_str(vseg_type), vpn_min, cycle );
#endif

}  // end vmm_remove_vseg()

/////////////////////////////////////////////
void vmm_resize_vseg( process_t * process,
                      vseg_t    * vseg,
                      intptr_t    new_base,
                      intptr_t    new_size )
{
    vpn_t     vpn;
    ppn_t     ppn;
    uint32_t  attr;

// check arguments
assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );

#if DEBUG_VMM_RESIZE_VSEG
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
#endif

#if (DEBUG_VMM_RESIZE_VSEG & 1)
if( DEBUG_VMM_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter / process %x / %s / new_base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
process->pid, vseg_type_str(vseg->type), new_base, cycle );
#endif

    // get existing vseg vpn_min and vpn_max
    vpn_t     old_vpn_min = vseg->vpn_base;
    vpn_t     old_vpn_max = old_vpn_min + vseg->vpn_size - 1;

    // compute new vseg vpn_min & vpn_max
    intptr_t min          = new_base;
    intptr_t max          = new_base + new_size;
    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_ORDER;
    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_ORDER;

    // build extended pointer on GPT
    xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );

    // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
    for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
    {
        // get ppn and attr
        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );

        if( attr & GPT_MAPPED )  // PTE is mapped
        {

#if( DEBUG_VMM_RESIZE_VSEG & 1 )
if( DEBUG_VMM_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
#endif
            // unmap GPT entry
            hal_gpt_reset_pte( gpt_xp , vpn );

            // release physical page when required
            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
        }
    }

    // loop on PTEs in GPT to unmap PTE if (new_vpn_max < vpn <= old_vpn_max)
    for( vpn = new_vpn_max + 1 ; vpn <= old_vpn_max ; vpn++ )
    {
        // get ppn and attr
        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );

        if( attr & GPT_MAPPED )  // PTE is mapped
        {

#if( DEBUG_VMM_RESIZE_VSEG & 1 )
if( DEBUG_VMM_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
#endif
            // unmap GPT entry in local GPT
            hal_gpt_reset_pte( gpt_xp , vpn );

            // release physical page when required
            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
        }
    }

    // resize vseg in VSL
    vseg->min      = min;
    vseg->max      = max;
    vseg->vpn_base = new_vpn_min;
    vseg->vpn_size = new_vpn_max - new_vpn_min + 1;

#if DEBUG_VMM_RESIZE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
#endif

}  // end vmm_resize_vseg()
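
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : shrinking a vseg from the front,
// as a munmap() of its first page would do. The <demo_trim_first_page> name is
// hypothetical, and it assumes the vseg is larger than one page, with new_base/new_size
// staying inside the original vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void demo_trim_first_page( process_t * process,
                                  vseg_t    * vseg )
{
    intptr_t new_base = vseg->min + CONFIG_PPM_PAGE_SIZE;
    intptr_t new_size = (vseg->max - vseg->min) - CONFIG_PPM_PAGE_SIZE;

    // unmaps the dropped page and updates min/max/vpn_base/vpn_size
    vmm_resize_vseg( process , vseg , new_base , new_size );
}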

/////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called twice by the vmm_get_vseg() function.
// It scans the (possibly remote) VSL identified by the <vmm_xp> argument to find the
// vseg containing a given virtual address <vaddr>, using remote accesses when the VSL
// is not local. The VSL lock protecting the VSL must be taken by the caller.
/////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm_xp  : extended pointer on the process VMM.
// @ vaddr   : virtual address.
// @ return local pointer on vseg (valid in its cluster) if found / return NULL otherwise.
/////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * vmm_vseg_from_vaddr( xptr_t     vmm_xp,
                                     intptr_t   vaddr )
{
    xptr_t   iter_xp;
    xptr_t   vseg_xp;
    vseg_t * vseg;
    intptr_t min;
    intptr_t max;

    // get cluster and local pointer on target VMM
    vmm_t * vmm_ptr = GET_PTR( vmm_xp );
    cxy_t   vmm_cxy = GET_CXY( vmm_xp );

    // build extended pointer on VSL root
    xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root );

    // scan the list of vsegs in VSL
    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) );
        max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) );

        // return success when match
        if( (vaddr >= min) && (vaddr < max) ) return vseg;
    }

    // return failure
    return NULL;

}  // end vmm_vseg_from_vaddr()

///////////////////////////////////////////
error_t  vmm_get_vseg( process_t * process,
                       intptr_t    vaddr,
                       vseg_t   ** found_vseg )
{
    xptr_t    loc_lock_xp;     // extended pointer on local VSL lock
    xptr_t    ref_lock_xp;     // extended pointer on reference VSL lock
    vseg_t  * loc_vseg;        // local pointer on local vseg
    vseg_t  * ref_vseg;        // local pointer on reference vseg

    // build extended pointer on local VSL lock
    loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock );

    // get local VSL lock
    remote_queuelock_acquire( loc_lock_xp );

    // try to get vseg from local VSL
    loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr );

    if (loc_vseg == NULL)   // vseg not found => access reference VSL
    {
        // get extended pointer on reference process
        xptr_t ref_xp = process->ref_xp;

        // get cluster and local pointer on reference process
        cxy_t       ref_cxy = GET_CXY( ref_xp );
        process_t * ref_ptr = GET_PTR( ref_xp );

        if( ref_cxy == local_cxy )    // local is ref => return error
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
__FUNCTION__, vaddr, process->pid );
#endif
            remote_queuelock_release( loc_lock_xp );
            return -1;
        }
        else                          // ref != local => access ref VSL
        {
            // build extended pointer on reference VSL lock
            ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock );

            // get reference VSL lock
            remote_queuelock_acquire( ref_lock_xp );

            // try to get vseg from reference VSL
            ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr );

            if( ref_vseg == NULL )  // vseg not found => return error
            {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
__FUNCTION__, vaddr, process->pid );
#endif
                remote_queuelock_release( loc_lock_xp );
                remote_queuelock_release( ref_lock_xp );
                return -1;
            }
            else                    // vseg found => try to update local VSL
            {
                // allocate a local vseg descriptor
                loc_vseg = vseg_alloc();

                if( loc_vseg == NULL )   // no memory => return error
                {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
__FUNCTION__, vaddr, process->pid );
#endif
                    remote_queuelock_release( ref_lock_xp );
                    remote_queuelock_release( loc_lock_xp );
                    return -1;
                }
                else                     // update local VSL and return success
                {
                    // initialize local vseg
                    vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) );

                    // register local vseg in local VSL
                    vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg );

                    // release both VSL locks
                    remote_queuelock_release( ref_lock_xp );
                    remote_queuelock_release( loc_lock_xp );

                    *found_vseg = loc_vseg;
                    return 0;
                }
            }
        }
    }
    else                        // vseg found in local VSL => return success
    {
        // release local VSL lock
        remote_queuelock_release( loc_lock_xp );

        *found_vseg = loc_vseg;
        return 0;
    }
}  // end vmm_get_vseg()
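
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : translating a virtual address into
// a vseg descriptor, as the page-fault path below does. A zero return value means the
// vseg was found locally, or was copied from the reference VSL into the local VSL.
// The <demo_vaddr_to_vseg> name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * demo_vaddr_to_vseg( process_t * process,
                                    intptr_t    vaddr )
{
    vseg_t * vseg;

    // non-zero means <vaddr> is out of any registered segment
    if( vmm_get_vseg( process , vaddr , &vseg ) ) return NULL;

    return vseg;
}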

//////////////////////////////////////////////////////////////////////////////////////
// This static function computes the target cluster for a physical page allocation
// for a given <vpn> in a given <vseg>, allocates the page from a local or remote
// cluster (depending on the vseg type), and returns an extended pointer on the
// allocated page descriptor. The vseg cannot have the FILE type.
//////////////////////////////////////////////////////////////////////////////////////
// @ vseg   : local pointer on vseg.
// @ vpn    : unmapped vpn.
// @ return xptr on page descriptor if success / return XPTR_NULL if failure
//////////////////////////////////////////////////////////////////////////////////////
static xptr_t vmm_page_allocate( vseg_t * vseg,
                                 vpn_t    vpn )
{

#if DEBUG_VMM_PAGE_ALLOCATE
uint32_t   cycle   = (uint32_t)hal_get_cycles();
thread_t * this    = CURRENT_THREAD;
if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
#endif

    xptr_t       page_xp;
    cxy_t        page_cxy;
    uint32_t     index;

    uint32_t     type   = vseg->type;
    uint32_t     flags  = vseg->flags;
    uint32_t     x_size = LOCAL_CLUSTER->x_size;
    uint32_t     y_size = LOCAL_CLUSTER->y_size;

// check vseg type
assert( __FUNCTION__, ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );

    // compute target cluster identifier
    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
    {
        index    = vpn & ((x_size * y_size) - 1);
        page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );

        // If the cluster selected from VPN's LSBs is empty, we select one randomly
        if ( cluster_is_active( page_cxy ) == false )
        {
            page_cxy = cluster_random_select();
        }
    }
    else                          // other cases => cxy specified in vseg
    {
        page_cxy = vseg->cxy;
    }

    // get local pointer on page base
    void * ptr = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );

    if( ptr == NULL )
    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : cannot allocate memory from cluster %x\n",
__FUNCTION__, page_cxy );
#endif
        return XPTR_NULL;
    }

    // get extended pointer on page descriptor
    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );

#if DEBUG_VMM_PAGE_ALLOCATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle );
#endif

    return page_xp;

}  // end vmm_page_allocate()
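
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : for a DISTRIB vseg on a hypothetical
// 4x4 mesh (x_size = y_size = 4), successive vpn values are spread round-robin over the
// 16 clusters, because index = vpn & 15 : vpn 0,1,...,15 map to clusters (0,0),(0,1),
// ...,(3,3), and vpn 16 wraps back to (0,0). The <demo_distrib_target> name is
// hypothetical, and it ignores the empty-cluster fallback handled above.
////////////////////////////////////////////////////////////////////////////////////////////
static cxy_t demo_distrib_target( vpn_t vpn )
{
    uint32_t x_size = 4;                              // hypothetical mesh width
    uint32_t y_size = 4;                              // hypothetical mesh height
    uint32_t index  = vpn & ((x_size * y_size) - 1);  // vpn LSBs select the cluster

    return HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );
}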

////////////////////////////////////////
error_t vmm_get_one_ppn( vseg_t * vseg,
                         vpn_t    vpn,
                         ppn_t  * ppn )
{
    error_t    error;
    xptr_t     page_xp;           // extended pointer on physical page descriptor
    uint32_t   page_id;           // missing page index in vseg mapper
    uint32_t   type;              // vseg type

    type      = vseg->type;
    page_id   = vpn - vseg->vpn_base;

#if DEBUG_VMM_GET_ONE_PPN
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
#endif

#if (DEBUG_VMM_GET_ONE_PPN & 2)
if( DEBUG_VMM_GET_ONE_PPN < cycle )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

    // FILE type : get the physical page from the file mapper
    if( type == VSEG_TYPE_FILE )
    {
        // get extended pointer on mapper
        xptr_t mapper_xp = vseg->mapper_xp;

assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
"mapper not defined for a FILE vseg\n" );

        // get extended pointer on page descriptor
        page_xp = mapper_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return EINVAL;
    }

    // Other types : allocate a physical page from target cluster,
    // as defined by vseg type and vpn value
    else
    {
        // allocate one physical page
        page_xp = vmm_page_allocate( vseg , vpn );

        if( page_xp == XPTR_NULL ) return -1;

        // initialise missing page from .elf file mapper for DATA and CODE types
        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
        {
            // get extended pointer on mapper
            xptr_t     mapper_xp = vseg->mapper_xp;

assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
"mapper not defined for a CODE or DATA vseg\n" );

            // compute missing page offset in vseg
            uint32_t offset = page_id << CONFIG_PPM_PAGE_ORDER;

            // compute missing page offset in .elf file
            uint32_t elf_offset = vseg->file_offset + offset;

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
#endif
            // compute extended pointer on page base
            xptr_t base_xp  = ppm_page2base( page_xp );

            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
            uint32_t file_size = vseg->file_size;

            if( file_size < offset )                 // missing page fully in BSS
            {

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n",
__FUNCTION__, this->process->pid, this->trdid, vpn );
#endif
                if( GET_CXY( page_xp ) == local_cxy )
                {
                    memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE );
                }
                else
                {
                    hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
                }
            }
            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )  // fully in mapper
            {

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n",
__FUNCTION__, this->process->pid, this->trdid, vpn );
#endif
                error = mapper_move_kernel( mapper_xp,
                                            true,             // to_buffer
                                            elf_offset,
                                            base_xp,
                                            CONFIG_PPM_PAGE_SIZE );
                if( error ) return EINVAL;
            }
            else  // both in mapper and in BSS :
                  // - (file_size - offset)             bytes from mapper
                  // - (page_size + offset - file_size) bytes from BSS
            {

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n"
"      %d bytes from mapper / %d bytes from BSS\n",
__FUNCTION__, this->process->pid, this->trdid, vpn,
file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
#endif
                // initialize mapper part
                error = mapper_move_kernel( mapper_xp,
                                            true,         // to buffer
                                            elf_offset,
                                            base_xp,
                                            file_size - offset );
                if( error ) return EINVAL;

                // initialize BSS part
                if( GET_CXY( page_xp ) == local_cxy )
                {
                    memset( GET_PTR( base_xp ) + file_size - offset , 0 ,
                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
                }
                else
                {
                    hal_remote_memset( base_xp + file_size - offset , 0 ,
                                       offset + CONFIG_PPM_PAGE_SIZE - file_size );
                }
            }

        }  // end if CODE or DATA types
    }

    // return ppn
    *ppn = ppm_page2ppn( page_xp );

#if DEBUG_VMM_GET_ONE_PPN
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
#endif

#if (DEBUG_VMM_GET_ONE_PPN & 2)
if( DEBUG_VMM_GET_ONE_PPN < cycle )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

    return 0;

}  // end vmm_get_one_ppn()
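
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : the three initialisation cases above,
// restated as the number of bytes copied from the .elf mapper (the rest of the page is
// zeroed as BSS). With hypothetical values CONFIG_PPM_PAGE_SIZE = 4096 and
// file_size = 6000 : page at offset 0 gets 4096 bytes from the mapper, page at offset
// 4096 gets 1904 bytes from the mapper and 2192 zeroed bytes, page at offset 8192 is
// fully zeroed. The <demo_bytes_from_mapper> name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
static uint32_t demo_bytes_from_mapper( uint32_t offset,      // page offset in vseg
                                        uint32_t file_size )  // bytes in .elf mapper
{
    if( file_size <= offset )                          return 0;   // fully in BSS
    if( file_size >= offset + CONFIG_PPM_PAGE_SIZE )   return CONFIG_PPM_PAGE_SIZE;
    return file_size - offset;                         // split page
}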

///////////////////////////////////////////////////
error_t vmm_handle_page_fault( process_t * process,
                               vpn_t       vpn )
{
    vseg_t         * vseg;            // vseg containing vpn
    uint32_t         attr;            // PTE_ATTR value
    ppn_t            ppn;             // PTE_PPN value
    uint32_t         ref_attr;        // PTE_ATTR value in reference GPT
    ppn_t            ref_ppn;         // PTE_PPN value in reference GPT
    cxy_t            ref_cxy;         // reference cluster for missing vpn
    process_t      * ref_ptr;         // reference process for missing vpn
    xptr_t           local_gpt_xp;    // extended pointer on local GPT
    xptr_t           ref_gpt_xp;      // extended pointer on reference GPT
    error_t          error;           // value returned by called functions

    thread_t * this  = CURRENT_THREAD;

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t start_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
#endif

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

    // get local vseg (access to reference VSL can be required)
    error = vmm_get_vseg( process,
                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
                          &vseg );
    if( error )
    {
        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in registered vseg\n",
        __FUNCTION__ , vpn , process->pid, this->trdid );

        return EXCP_USER_ERROR;
    }

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] found vseg %s\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
#endif

    // build extended pointer on local GPT
    local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );

    // lock PTE in local GPT and get current PPN and attributes
    error = hal_gpt_lock_pte( local_gpt_xp,
                              vpn,
                              &attr,
                              &ppn );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n",
        __FUNCTION__ , vpn , process->pid );

        return EXCP_KERNEL_PANIC;
    }

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy );
#endif

    // handle page fault only if local PTE still unmapped after lock
    if( (attr & GPT_MAPPED) == 0 )
    {
        // get reference process cluster and local pointer
        ref_cxy = GET_CXY( process->ref_xp );
        ref_ptr = GET_PTR( process->ref_xp );

        /////////////// private vseg or (local == reference)
        /////////////// => access only the local GPT
        if( (vseg->type == VSEG_TYPE_STACK) ||
            (vseg->type == VSEG_TYPE_CODE)  ||
            (ref_cxy    == local_cxy ) )
        {

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
#endif
            // allocate and initialise a physical page
            error = vmm_get_one_ppn( vseg , vpn , &ppn );

            if( error )
            {
                printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n",
                __FUNCTION__ , process->pid , vpn );

                // unlock PTE in local GPT
                hal_gpt_unlock_pte( local_gpt_xp , vpn );

                return EXCP_KERNEL_PANIC;
            }

            // define attr from vseg flags
            attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
            if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
            if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
            if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
            if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;

            // set PTE to local GPT
            // it unlocks this PTE
            hal_gpt_set_pte( local_gpt_xp,
                             vpn,
                             attr,
                             ppn );

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t end_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
#endif

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t cost      = end_cycle - start_cycle;
this->info.local_pgfault_nr++;
this->info.local_pgfault_cost += cost;
if( cost > this->info.local_pgfault_max ) this->info.local_pgfault_max = cost;
#endif
            return EXCP_NON_FATAL;

        }   // end local GPT access

        /////////////////// public vseg and (local != reference)
        /////////////////// => access ref GPT to update local GPT
        else
        {

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
#endif
            // build extended pointer on reference GPT
            ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );

            // lock PTE in reference GPT and get current PPN and attributes
            error = hal_gpt_lock_pte( ref_gpt_xp,
                                      vpn,
                                      &ref_attr,
                                      &ref_ppn );
            if( error )
            {
                printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
                __FUNCTION__ , vpn , process->pid );

                // unlock PTE in local GPT
                hal_gpt_unlock_pte( local_gpt_xp , vpn );

                return EXCP_KERNEL_PANIC;
            }

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn );
#endif

            if( ref_attr & GPT_MAPPED )        // false page fault
            {
                // update local GPT from reference GPT values
                // this unlocks the PTE in local GPT
                hal_gpt_set_pte( local_gpt_xp,
                                 vpn,
                                 ref_attr,
                                 ref_ppn );

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

                // unlock the PTE in reference GPT
                hal_gpt_unlock_pte( ref_gpt_xp, vpn );

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t end_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle );
#endif

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t cost      = end_cycle - start_cycle;
this->info.false_pgfault_nr++;
this->info.false_pgfault_cost += cost;
if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
#endif
                return EXCP_NON_FATAL;
            }
            else                            // true page fault
            {
                // allocate and initialise a physical page depending on the vseg type
                error = vmm_get_one_ppn( vseg , vpn , &ppn );

                if( error )
                {
                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
                    __FUNCTION__ , process->pid , vpn );

                    // unlock PTE in local GPT and in reference GPT
                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
                    hal_gpt_unlock_pte( ref_gpt_xp   , vpn );

                    return EXCP_KERNEL_PANIC;
                }

                // define attr from vseg flags
                attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
                if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
                if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
                if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
                if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif
                // set PTE in reference GPT
                // this unlocks the PTE
                hal_gpt_set_pte( ref_gpt_xp,
                                 vpn,
                                 attr,
                                 ppn );

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

                // set PTE in local GPT
                // this unlocks the PTE
                hal_gpt_set_pte( local_gpt_xp,
                                 vpn,
                                 attr,
                                 ppn );

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t end_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
#endif

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t cost      = end_cycle - start_cycle;
this->info.global_pgfault_nr++;
this->info.global_pgfault_cost += cost;
if( cost > this->info.global_pgfault_max ) this->info.global_pgfault_max = cost;
#endif
                return EXCP_NON_FATAL;
            }
        }
    }
    else   // page has been locally mapped by another concurrent thread
    {
        // unlock the PTE in local GPT
        hal_gpt_unlock_pte( local_gpt_xp , vpn );

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t end_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, vpn, ppn, attr, end_cycle );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t cost      = end_cycle - start_cycle;
this->info.false_pgfault_nr++;
this->info.false_pgfault_cost += cost;
if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
#endif
        return EXCP_NON_FATAL;
    }

}   // end vmm_handle_page_fault()
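
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : how an MMU exception handler could
// route a read/execute fault to the function above. The <demo_mmu_exception> name and the
// <bad_vaddr> argument are hypothetical; only the vpn computation and the return codes
// come from this file. EXCP_NON_FATAL means the PTE is now mapped and the faulting
// access can simply be retried.
////////////////////////////////////////////////////////////////////////////////////////////
static error_t demo_mmu_exception( process_t * process,
                                   intptr_t    bad_vaddr )
{
    // convert faulting virtual address to virtual page number
    vpn_t vpn = bad_vaddr >> CONFIG_PPM_PAGE_ORDER;

    return vmm_handle_page_fault( process , vpn );
}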

////////////////////////////////////////////
error_t vmm_handle_cow( process_t * process,
                        vpn_t       vpn )
{
    vseg_t         * vseg;            // vseg containing vpn
    xptr_t           gpt_xp;          // extended pointer on GPT (local or reference)
    gpt_t          * gpt_ptr;         // local pointer on GPT (local or reference)
    cxy_t            gpt_cxy;         // GPT cluster identifier
    uint32_t         old_attr;        // current PTE_ATTR value
    ppn_t            old_ppn;         // current PTE_PPN value
    uint32_t         new_attr;        // new PTE_ATTR value
    ppn_t            new_ppn;         // new PTE_PPN value
    cxy_t            ref_cxy;         // reference process cluster
    process_t      * ref_ptr;         // local pointer on reference process
    error_t          error;

    thread_t * this  = CURRENT_THREAD;

#if DEBUG_VMM_HANDLE_COW
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
#endif

#if (DEBUG_VMM_HANDLE_COW & 2)
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    // get local vseg
    error = vmm_get_vseg( process,
                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
                          &vseg );
    if( error )
    {
        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
        __FUNCTION__, vpn, process->pid, this->trdid );

        return EXCP_USER_ERROR;
    }

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get vseg %s\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
#endif

    // get reference process cluster and local pointer
    ref_cxy = GET_CXY( process->ref_xp );
    ref_ptr = GET_PTR( process->ref_xp );

    // build pointers on relevant GPT
    // - access only local GPT for a private vseg
    // - access reference GPT and all copies for a public vseg
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        gpt_cxy = local_cxy;
        gpt_ptr = &process->vmm.gpt;
        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
    }
    else
    {
        gpt_cxy = ref_cxy;
        gpt_ptr = &ref_ptr->vmm.gpt;
        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
    }

    // lock target PTE in relevant GPT (local or reference)
    // and get current PTE value
    error = hal_gpt_lock_pte( gpt_xp,
                              vpn,
                              &old_attr,
                              &old_ppn );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
        __FUNCTION__ , gpt_cxy, vpn , process->pid );

        return EXCP_KERNEL_PANIC;
    }

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
#endif

    // return user error if COW attribute not set or PTE2 unmapped
    if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
    {
        hal_gpt_unlock_pte( gpt_xp , vpn );

        return EXCP_USER_ERROR;
    }

    // get pointers on physical page descriptor
    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    // get extended pointers on forks and lock field in page descriptor
    xptr_t forks_xp       = XPTR( page_cxy , &page_ptr->forks );
    xptr_t forks_lock_xp  = XPTR( page_cxy , &page_ptr->lock );

    // take lock protecting "forks" counter
    remote_busylock_acquire( forks_lock_xp );

    // get number of pending forks from page descriptor
    uint32_t forks = hal_remote_l32( forks_xp );

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
#endif

    if( forks )        // pending fork => allocate a new page, and copy old to new
    {
        // decrement pending forks counter in page descriptor
        hal_remote_atomic_add( forks_xp , -1 );

        // release lock protecting "forks" counter
        remote_busylock_release( forks_lock_xp );

        // allocate a new physical page depending on vseg type
        page_xp = vmm_page_allocate( vseg , vpn );

        if( page_xp == XPTR_NULL )
        {
            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
            __FUNCTION__ , vpn, process->pid );

            hal_gpt_unlock_pte( gpt_xp , vpn );

            return EXCP_KERNEL_PANIC;
        }

        // compute allocated page PPN
        new_ppn = ppm_page2ppn( page_xp );

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
#endif

        // copy old page content to new page
        hal_remote_memcpy( ppm_ppn2base( new_ppn ),
                           ppm_ppn2base( old_ppn ),
                           CONFIG_PPM_PAGE_SIZE );

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] copied old page to new page\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    }
    else               // no pending fork => keep the existing page
    {
        // release lock protecting "forks" counter
        remote_busylock_release( forks_lock_xp );

#if(DEBUG_VMM_HANDLE_COW & 1)
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
__FUNCTION__, this->process->pid, this->trdid, old_ppn );
#endif
        new_ppn = old_ppn;
    }

    // build new_attr : set WRITABLE, reset COW, reset LOCKED
    new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));

#if(DEBUG_VMM_HANDLE_COW & 1)
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
#endif

    // update the relevant GPT(s)
    // - private vseg => update only the local GPT
    // - public vseg => update the reference GPT AND all the GPT copies
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        // set new PTE in local gpt
        hal_gpt_set_pte( gpt_xp,
                         vpn,
                         new_attr,
                         new_ppn );
    }
    else
    {
        // set new PTE in all GPT copies
        vmm_global_update_pte( process,
                               vpn,
                               new_attr,
                               new_ppn );
    }

#if DEBUG_VMM_HANDLE_COW
cycle = (uint32_t)hal_get_cycles();
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
#endif

#if (DEBUG_VMM_HANDLE_COW & 2)
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    return EXCP_NON_FATAL;

}   // end vmm_handle_cow()
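
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not in the original source) : a write-access exception on a
// copy-on-write page would be routed to the function above. The <demo_write_fault> name
// and the <bad_vaddr> argument are hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
static error_t demo_write_fault( process_t * process,
                                 intptr_t    bad_vaddr )
{
    // EXCP_NON_FATAL means the page was duplicated (or made writable)
    // and the faulting instruction can be restarted
    return vmm_handle_cow( process , bad_vaddr >> CONFIG_PPM_PAGE_ORDER );
}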