source: trunk/kernel/mm/vmm.c @ 682

Last change on this file since 682 was 672, checked in by alain, 4 years ago

1) Introduce up to 4 command line arguments in the KSH "load" command.
These arguments are transferred to the user process through the
argc/argv mechanism, using the user space "args" vseg.

2) Introduce the named and anonymous "pipes", for inter-process communication
through the pipe() and mkfifo() syscalls.

3) Introduce the "chat" application to validate the two above mechanisms.

4) Improve printk() and assert() functions in printk.c.

File size: 101.7 KB
[1]1/*
[611]2 * vmm.c - virtual memory manager related operations definition.
[1]3 *
[672]4 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
5 *           Alain Greiner    (2016,2017,2018,2019,2020)
[21]6 *
[1]7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
28#include <hal_gpt.h>
[409]29#include <hal_vmm.h>
[641]30#include <hal_irqmask.h>
[577]31#include <hal_macros.h>
[1]32#include <printk.h>
[23]33#include <memcpy.h>
[567]34#include <remote_queuelock.h>
[1]35#include <list.h>
[408]36#include <xlist.h>
[1]37#include <bits.h>
38#include <process.h>
39#include <thread.h>
40#include <vseg.h>
41#include <cluster.h>
42#include <scheduler.h>
43#include <vfs.h>
44#include <mapper.h>
45#include <page.h>
46#include <kmem.h>
47#include <vmm.h>
[585]48#include <hal_exception.h>
[1]49
[635]50////////////////////////////////////////////////////////////////////////////////////////////
[1]51//   Extern global variables
[635]52////////////////////////////////////////////////////////////////////////////////////////////
[1]53
[567]54extern  process_t  process_zero;      // allocated in cluster.c
[1]55
[625]56////////////////////////////////////////////////////////////////////////////////////////////
[651]57// This static function is called by the vmm_user_init() function.
58// It initialises the STACK allocator embedded in the VMM : it clears the bitmap of
59// allocated stack slots, registers the base of the STACK zone, and initializes the lock.
60////////////////////////////////////////////////////////////////////////////////////////////
61static void vmm_stack_init( vmm_t * vmm )
62{
63
64// check STACK zone
[672]65assert( __FUNCTION__, ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
[651]66(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n");
67
68    // get pointer on STACK allocator
69    stack_mgr_t * mgr = &vmm->stack_mgr;
70
71    mgr->bitmap   = 0;
72    mgr->vpn_base = CONFIG_VMM_STACK_BASE;
73    busylock_init( &mgr->lock , LOCK_VMM_STACK );
74
75}
76
77////////////////////////////////////////////////////////////////////////////////////////////
[625]78// This static function is called by the vmm_create_vseg() function, and implements
[651]79// the VMM STACK specific allocator. Depending on the local thread index <ltid>,
80// it checks the availability of the corresponding slot in the process STACKS region,
81// allocates a vseg descriptor, and initializes the "vpn_base" and "vpn_size" fields.
[625]82////////////////////////////////////////////////////////////////////////////////////////////
83// @ vmm      : [in]  pointer on VMM.
84// @ ltid     : [in]  requested slot == local user thread identifier.
85////////////////////////////////////////////////////////////////////////////////////////////
[651]86static vseg_t * vmm_stack_alloc( vmm_t  * vmm,
87                                 ltid_t   ltid )
[21]88{
[625]89
90// check ltid argument
[672]91assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
[625]92"slot index %d too large for an user stack vseg", ltid );
93
94    // get stack allocator pointer
95    stack_mgr_t * mgr = &vmm->stack_mgr;
96
[651]97    // get lock protecting stack allocator
[625]98    busylock_acquire( &mgr->lock );
99
100// check requested slot is available
[672]101assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , ltid ) == false),
[625]102"slot index %d already allocated", ltid );
103
[651]104    // allocate a vseg descriptor
105    vseg_t * vseg = vseg_alloc();
106
107    if( vseg == NULL )
108        {
109        // release lock protecting free lists
110        busylock_release( &mgr->lock );
111
112        printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
113        __FUNCTION__ , local_cxy );
114
115        return NULL;
116    }
117
[625]118    // update bitmap
119    bitmap_set( &mgr->bitmap , ltid );
120
121    // release lock on stack allocator
122    busylock_release( &mgr->lock );
123
[651]124    // set "vpn_base" & "vpn_size" fields (first page non allocated)
125    vseg->vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1;
126    vseg->vpn_size = CONFIG_VMM_STACK_SIZE - 1;
[625]127
[651]128    return vseg;
129
[625]130} // end vmm_stack_alloc()
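
////////////////////////////////////////////////////////////////////////////////////////////
// Example (illustrative sketch, not part of the original file) : a simplified caller of
// the STACK allocator. In the actual kernel this is done by vmm_create_vseg(), which also
// initializes the vseg type and flags ; the "example_" name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t example_alloc_user_stack( process_t * process,
                                         ltid_t      ltid )
{
    // get a free slot in the STACKS zone and a vseg descriptor
    vseg_t * vseg = vmm_stack_alloc( &process->vmm , ltid );

    if( vseg == NULL ) return -1;

    // register the new vseg in the process VSL
    vmm_attach_vseg_to_vsl( &process->vmm , vseg );

    return 0;
}
#endif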
131
132////////////////////////////////////////////////////////////////////////////////////////////
133// This static function is called by the vmm_remove_vseg() function, and implements
[625]134// the VMM STACK specific deallocator.
[651]135// It updates the bitmap to release the corresponding slot in the process STACKS region,
136// and releases memory allocated to vseg descriptor.
[625]137////////////////////////////////////////////////////////////////////////////////////////////
138// @ vmm      : [in] pointer on VMM.
139// @ vseg     : [in] pointer on released vseg.
140////////////////////////////////////////////////////////////////////////////////////////////
141static void vmm_stack_free( vmm_t  * vmm,
142                            vseg_t * vseg )
143{
144    // get stack allocator pointer
145    stack_mgr_t * mgr = &vmm->stack_mgr;
146
147    // compute slot index
148    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
149
150// check index
[672]151assert( __FUNCTION__, (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
[625]152"slot index %d too large for an user stack vseg", index );
153
154// check released slot is allocated
[672]155assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , index ) == true),
[625]156"released slot index %d non allocated", index );
157
158    // get lock on stack allocator
159    busylock_acquire( &mgr->lock );
160
161    // update stacks_bitmap
162    bitmap_clear( &mgr->bitmap , index );
163
164    // release lock on stack allocator
165    busylock_release( &mgr->lock );
166
[651]167    // release memory allocated to vseg descriptor
168    vseg_free( vseg );
169
[625]170}  // end vmm_stack_free()
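
////////////////////////////////////////////////////////////////////////////////////////////
// Worked example (added for clarity) : vmm_stack_alloc() sets
//     vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1
// leaving the first page of each slot unmapped (presumably as a guard page), and
// vmm_stack_free() inverts that mapping :
//     index = (vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE == ltid
////////////////////////////////////////////////////////////////////////////////////////////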
171
[651]172
173
[625]174////////////////////////////////////////////////////////////////////////////////////////////
[651]175// This function displays the current state of the VMM MMAP allocator of a process VMM
176// identified by the <vmm> argument.
177////////////////////////////////////////////////////////////////////////////////////////////
178void vmm_mmap_display( vmm_t * vmm )
179{
180    uint32_t  order;
181    xptr_t    root_xp;
182    xptr_t    iter_xp;
183
184    // get pointer on process
185    process_t * process = (process_t *)(((char*)vmm) - OFFSETOF( process_t , vmm ));
186
187    // get process PID
188    pid_t pid = process->pid;
189
190    // get pointer on VMM MMAP allocator
191    mmap_mgr_t * mgr = &vmm->mmap_mgr;
192
193    // display header
194    printk("***** VMM MMAP allocator / process %x *****\n", pid );
195
196    // scan the array of free lists of vsegs
197    for( order = 0 ; order <= CONFIG_VMM_HEAP_MAX_ORDER ; order++ )
198    {
199        root_xp = XPTR( local_cxy , &mgr->free_list_root[order] );
200
201        if( !xlist_is_empty( root_xp ) )
202        {
203            printk(" - %d (%x pages) : ", order , 1<<order );
204
205            XLIST_FOREACH( root_xp , iter_xp )
206            {
207                xptr_t   vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
208                vseg_t * vseg    = GET_PTR( vseg_xp );
209
210                printk("%x | ", vseg->vpn_base );
211            }
212
213            printk("\n");
214        }
215    }
216}  // end vmm_mmap_display()
217
218////////////////////////////////////////////////////////////////////////////////////////////
219// This function is called by the vmm_user_init() function.
220// It initialises the free lists of vsegs used by the VMM MMAP allocator.
221// TODO this function is only valid for 32-bit cores, and makes three assumptions:
222// HEAP_BASE == 1 Gbytes / HEAP_SIZE == 2 Gbytes / MMAP_MAX_SIZE == 1 Gbytes
223////////////////////////////////////////////////////////////////////////////////////////////
224void vmm_mmap_init( vmm_t * vmm )
225{
226
227// check HEAP base and size
[672]228assert( __FUNCTION__, (CONFIG_VMM_HEAP_BASE == 0x40000) && (CONFIG_VMM_STACK_BASE == 0xc0000),
[651]229"CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" );
230
231// check  MMAP vseg max order
[672]232assert( __FUNCTION__, (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" );
[651]233
234    // get pointer on MMAP allocator
235    mmap_mgr_t * mgr = &vmm->mmap_mgr;
236
237    // initialize HEAP base and size
238    mgr->vpn_base        = CONFIG_VMM_HEAP_BASE;
239    mgr->vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
240
241    // initialize lock
242    busylock_init( &mgr->lock , LOCK_VMM_MMAP );
243
244    // initialize free lists
245    uint32_t   i;
246    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
247    {
248        xlist_root_init( XPTR( local_cxy , &mgr->free_list_root[i] ) );
249    }
250
251    // allocate and register first 1 Gbytes vseg
252    vseg_t * vseg0 = vseg_alloc();
253
[672]254assert( __FUNCTION__, (vseg0 != NULL) , "cannot allocate vseg" );
[651]255
256    vseg0->vpn_base = CONFIG_VMM_HEAP_BASE;
257    vseg0->vpn_size = CONFIG_VMM_HEAP_BASE;
258
259    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
260                     XPTR( local_cxy , &vseg0->xlist ) );
261
262    // allocate and register second 1 Gbytes vseg
263    vseg_t * vseg1 = vseg_alloc();
264
[672]265assert( __FUNCTION__, (vseg1 != NULL) , "cannot allocate vseg" );
[651]266
267    vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1;
268    vseg1->vpn_size = CONFIG_VMM_HEAP_BASE;
269
270    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
271                     XPTR( local_cxy , &vseg1->xlist ) );
272
273#if DEBUG_VMM_MMAP
274thread_t * this = CURRENT_THREAD;
275uint32_t cycle = (uint32_t)hal_get_cycles();
276printk("\n[%s] thread[%x,%x] / cycle %d\n",
277__FUNCTION__, this->process->pid, this->trdid, cycle );
278vmm_mmap_display( vmm );
279#endif
280
281}  // end vmm_mmap_init()
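
////////////////////////////////////////////////////////////////////////////////////////////
// Worked example (added for clarity) : with 4 Kbytes pages, CONFIG_VMM_HEAP_BASE == 0x40000
// (vpn of the 1 Gbytes boundary) and CONFIG_VMM_STACK_BASE == 0xc0000 (vpn of the 3 Gbytes
// boundary), the MMAP zone covers [0x40000 , 0xc0000) == 0x80000 pages == 2 Gbytes.
// The largest supported order is CONFIG_VMM_HEAP_MAX_ORDER == 18, i.e. 1<<18 == 0x40000
// pages == 1 Gbytes, so the zone cannot be registered as a single free vseg : it is
// registered as the two 1 Gbytes buddies built above, vseg0 at vpn 0x40000 and vseg1 at
// vpn 0x80000 (which explains the CONFIG_VMM_HEAP_BASE << 1 expression).
////////////////////////////////////////////////////////////////////////////////////////////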
282
283////////////////////////////////////////////////////////////////////////////////////////////
[625]284// This static function is called by the vmm_create_vseg() function, and implements
[651]285// the VMM MMAP specific allocator.  Depending on the requested number of pages <npages>,
286// it gets a free vseg from the relevant free_list, and initializes the "vpn_base" and
287// "vpn_size" fields.
[625]288////////////////////////////////////////////////////////////////////////////////////////////
289// @ vmm      : [in] pointer on VMM.
290// @ npages   : [in] requested number of pages.
[651]291// @ returns local pointer on vseg if success / returns NULL if failure.
[625]292////////////////////////////////////////////////////////////////////////////////////////////
[651]293static vseg_t * vmm_mmap_alloc( vmm_t * vmm,
294                                vpn_t   npages )
[625]295{
296
[651]297#if DEBUG_VMM_MMAP
[625]298thread_t * this = CURRENT_THREAD;
299uint32_t cycle = (uint32_t)hal_get_cycles();
[651]300if( DEBUG_VMM_MMAP < cycle )
301printk("\n[%s] thread[%x,%x] for %x pages / cycle %d\n",
302__FUNCTION__, this->process->pid, this->trdid, npages, cycle );
[625]303#endif
304
305    // number of allocated pages must be power of 2
306    // compute actual size and order
[651]307    vpn_t    required_vpn_size = POW2_ROUNDUP( npages );
308    uint32_t required_order    = bits_log2( required_vpn_size );
[625]309
310    // get mmap allocator pointer
311    mmap_mgr_t * mgr = &vmm->mmap_mgr;
312
[651]313    // take lock protecting free lists in MMAP allocator
[625]314    busylock_acquire( &mgr->lock );
315
[651]316    // initialises the while loop variables
317    uint32_t   current_order = required_order;
318    vseg_t   * current_vseg  = NULL;
[625]319
[651]320    // search a free vseg equal or larger than requested size
321        while( current_order <= CONFIG_VMM_HEAP_MAX_ORDER )
322        {
323        // build extended pointer on free_pages_root[current_order]
324        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[current_order] );
[625]325
[651]326                if( !xlist_is_empty( root_xp ) )
327                {
328            // get extended pointer on first vseg in this free_list
329                        xptr_t current_vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
330            current_vseg = GET_PTR( current_vseg_xp );
331
332            // build extended pointer on xlist field in vseg descriptor
333            xptr_t list_entry_xp = XPTR( local_cxy , &current_vseg->xlist );
334
335            // remove this vseg from the free_list
336                        xlist_unlink( list_entry_xp );
337
338                        break; 
339                }
340
341        // increment loop index
342        current_order++;
343
344    }  // end while loop
345
346    if( current_vseg == NULL )  // return failure
[625]347    {
[651]348        // release lock protecting free lists
349        busylock_release( &mgr->lock );
[625]350
[651]351        printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
352        __FUNCTION__, npages , local_cxy );
[625]353
[651]354        return NULL;
[625]355    }
356
[651]357        // recursively split the found vseg into smaller vsegs
358    // if required, and update the free lists accordingly
359        while( current_order > required_order )
360        {
361        // get found vseg base and size
362        vpn_t  vpn_base = current_vseg->vpn_base;
363        vpn_t  vpn_size = current_vseg->vpn_size;
364       
365        // allocate a new vseg for the upper half of current vseg
366            vseg_t * new_vseg = vseg_alloc();
[625]367
[651]368            if( new_vseg == NULL )
369        {
370                // release lock protecting free lists
371            busylock_release( &mgr->lock );
372
373            printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
374            __FUNCTION__ , local_cxy );
375
376            return NULL;
377            }
378
379        // initialise new vseg (upper half of found vseg)
380        new_vseg->vmm      = vmm;
381        new_vseg->vpn_base = vpn_base + (vpn_size >> 1);
382        new_vseg->vpn_size = vpn_size >> 1;
383
384        // insert new vseg in relevant free_list
385                xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[current_order-1] ),
386                         XPTR( local_cxy , &new_vseg->xlist ) );
387
388        // update found vseg
389        current_vseg->vpn_size = vpn_size>>1; 
390
391        // update order
392                current_order --;
393        }
394
395        // release lock protecting free lists
396        busylock_release( &mgr->lock );
397
398#if DEBUG_VMM_MMAP
399vmm_mmap_display( vmm );
[625]400#endif
401
[651]402    return current_vseg;
[625]403
404}  // end vmm_mmap_alloc()
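
////////////////////////////////////////////////////////////////////////////////////////////
// Example (illustrative sketch, not part of the original file) : the size / order
// computation performed by vmm_mmap_alloc() for a hypothetical request of 5 pages.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_mmap_order( void )
{
    vpn_t    npages = 5;                          // hypothetical request
    vpn_t    size   = POW2_ROUNDUP( npages );     // rounded up to 8 pages
    uint32_t order  = bits_log2( size );          // buddy order 3

    // if the smallest non-empty free list holds an order 5 vseg (32 pages), that vseg
    // is split twice : one 16 pages buddy and one 8 pages buddy go back to the free
    // lists, and the remaining order 3 vseg (8 pages) is returned to the caller
    printk("request %d pages -> %d pages / order %d\n", npages , size , order );
}
#endif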
405
406////////////////////////////////////////////////////////////////////////////////////////////
[641]407// This static function implements the VMM MMAP specific deallocator.
408// It is called by the vmm_remove_vseg() function.
[651]409// It releases the vseg to the relevant free_list, after trying (recursively) to
410// merge it with its buddy vseg.
[625]411////////////////////////////////////////////////////////////////////////////////////////////
412// @ vmm      : [in] pointer on VMM.
413// @ vseg     : [in] pointer on released vseg.
414////////////////////////////////////////////////////////////////////////////////////////////
415static void vmm_mmap_free( vmm_t  * vmm,
416                           vseg_t * vseg )
417{
[651]418
419#if DEBUG_VMM_MMAP
420thread_t * this = CURRENT_THREAD;
421uint32_t cycle = (uint32_t)hal_get_cycles();
422if( DEBUG_VMM_MMAP < cycle )
423printk("\n[%s] thread[%x,%x] for vpn_base %x / vpn_size %x / cycle %d\n",
424__FUNCTION__, this->process->pid, this->trdid, vseg->vpn_base, vseg->vpn_size, cycle );
425#endif
426
427    vseg_t * buddy_vseg;
428
429    // get mmap allocator pointer
[625]430    mmap_mgr_t * mgr = &vmm->mmap_mgr;
431
[651]432    // take lock protecting free lists
[625]433    busylock_acquire( &mgr->lock );
434
[651]435    // initialise loop variables
436    // released_vseg is the currently released vseg
437    vseg_t * released_vseg     = vseg;
438    uint32_t released_order    = bits_log2( vseg->vpn_size );
[625]439
[651]440        // iteratively merge the released vseg with its buddy vseg
441        // register the current vseg in the free list and exit when the buddy is not found
442    while( released_order <= CONFIG_VMM_HEAP_MAX_ORDER )
443    {
444        // compute buddy_vseg vpn_base
445                vpn_t buddy_vpn_base = released_vseg->vpn_base ^ (1 << released_order);
446       
448        // build extended pointer on free_list_root[released_order]
448        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[released_order] );
449
450        // scan this free list to find the buddy vseg
451        xptr_t   iter_xp;
452        buddy_vseg = NULL;
453        XLIST_FOREACH( root_xp , iter_xp )
454        {
455            xptr_t   current_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
456            vseg_t * current_vseg    = GET_PTR( current_vseg_xp );
457
458            if( current_vseg->vpn_base == buddy_vpn_base )
459            {
460                buddy_vseg = current_vseg;
461                break;
462            }
463        }
464       
465        if( buddy_vseg != NULL )     // buddy found => merge released & buddy
466        {
467            // update released vseg fields
468            released_vseg->vpn_size = buddy_vseg->vpn_size<<1;
469            if( released_vseg->vpn_base > buddy_vseg->vpn_base) 
470                released_vseg->vpn_base = buddy_vseg->vpn_base;
471
472            // remove buddy vseg from free_list
473            xlist_unlink( XPTR( local_cxy , &buddy_vseg->xlist ) );
474
475            // release memory allocated to buddy descriptor
476            vseg_free( buddy_vseg );
477        }
478        else                         // buddy not found => register & exit
479        {
480            // register released vseg in free list
481            xlist_add_first( root_xp , XPTR( local_cxy , &released_vseg->xlist ) );
482
483            // exit while loop
484            break;
485        }
486
487        // increment released_order
488        released_order++;
489    }
490
[625]491    // release lock
492    busylock_release( &mgr->lock );
493
[651]494#if DEBUG_VMM_MMAP
495vmm_mmap_display( vmm );
496#endif
[625]497
[651]498}  // end vmm_mmap_free()
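
////////////////////////////////////////////////////////////////////////////////////////////
// Worked example (added for clarity) : the buddy of a vseg is found by flipping one bit
// of its vpn_base. For a released vseg with vpn_base == 0x42000 and vpn_size == 0x1000
// (order 12), the buddy base is 0x42000 ^ (1 << 12) == 0x43000. If that buddy is found
// in free_list_root[12], the two vsegs merge into an order 13 vseg (vpn_base == 0x42000,
// vpn_size == 0x2000), and the search continues at order 13.
////////////////////////////////////////////////////////////////////////////////////////////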
499
[625]500////////////////////////////////////////////////////////////////////////////////////////////
501// This static function registers one vseg in the VSL of a local process descriptor.
502////////////////////////////////////////////////////////////////////////////////////////////
503// vmm       : [in] pointer on VMM.
504// vseg      : [in] pointer on vseg.
505////////////////////////////////////////////////////////////////////////////////////////////
506void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
507                             vseg_t * vseg )
508{
509    // update vseg descriptor
510    vseg->vmm = vmm;
511
512    // increment vsegs number
513    vmm->vsegs_nr++;
514
515    // add vseg in vmm list
516    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
517                    XPTR( local_cxy , &vseg->xlist ) );
518
[625]519}  // end vmm_attach_vseg_to_vsl()
520
521////////////////////////////////////////////////////////////////////////////////////////////
522// This static function removes one vseg from the VSL of a local process descriptor.
523////////////////////////////////////////////////////////////////////////////////////////////
524// vmm       : [in] pointer on VMM.
525// vseg      : [in] pointer on vseg.
526////////////////////////////////////////////////////////////////////////////////////////////
527void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
528                               vseg_t * vseg )
529{
530    // update vseg descriptor
531    vseg->vmm = NULL;
532
533    // decrement vsegs number
534    vmm->vsegs_nr--;
535
536    // remove vseg from VSL
537    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
538
539}  // end vmm_detach_vseg_from_vsl()
540
541////////////////////////////////////////////
542error_t vmm_user_init( process_t * process )
543{
[1]544
[625]545#if DEBUG_VMM_USER_INIT
[567]546thread_t * this = CURRENT_THREAD;
[433]547uint32_t cycle = (uint32_t)hal_get_cycles();
[625]548if( DEBUG_VMM_USER_INIT )
[614]549printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", 
550__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]551#endif
[204]552
[1]553    // get pointer on VMM
554    vmm_t   * vmm = &process->vmm;
555
[625]556// check UTILS zone
[672]557assert( __FUNCTION__ , ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <= 
558(CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) , "UTILS zone too small\n" );
[21]559
[651]560    // initialize lock protecting the VSL
[640]561        remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
[635]562
[651]563    // initialize STACK allocator
564    vmm_stack_init( vmm );
565
566    // initialize MMAP allocator
567    vmm_mmap_init( vmm );
568
569    // initialize instrumentation counters
570        vmm->false_pgfault_nr    = 0;
571        vmm->local_pgfault_nr    = 0;
572        vmm->global_pgfault_nr   = 0;
573        vmm->false_pgfault_cost  = 0;
574        vmm->local_pgfault_cost  = 0;
575        vmm->global_pgfault_cost = 0;
576
[124]577    hal_fence();
[1]578
[625]579#if DEBUG_VMM_USER_INIT
[433]580cycle = (uint32_t)hal_get_cycles();
[625]581if( DEBUG_VMM_USER_INIT )
[614]582printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", 
583__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]584#endif
[204]585
[415]586    return 0;
587
[625]588}  // end vmm_user_init()
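
////////////////////////////////////////////////////////////////////////////////////////////
// Layout note (added for clarity ; the zone boundaries below follow from the assertions
// and allocator initializations above, the zone contents are assumptions) :
//   UTILS  [CONFIG_VMM_UTILS_BASE  .. CONFIG_VMM_ELF_BASE)    : "args" and "envs" vsegs
//   ELF    [CONFIG_VMM_ELF_BASE    .. CONFIG_VMM_HEAP_BASE)   : "code" and "data" vsegs
//   HEAP   [CONFIG_VMM_HEAP_BASE   .. CONFIG_VMM_STACK_BASE)  : MMAP allocator zone
//   STACK  [CONFIG_VMM_STACK_BASE  .. CONFIG_VMM_VSPACE_SIZE) : STACK allocator zone
////////////////////////////////////////////////////////////////////////////////////////////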
[204]589
[611]590//////////////////////////////////////////
[625]591void vmm_user_reset( process_t * process )
[567]592{
[625]593    xptr_t       vseg_xp;
594        vseg_t     * vseg;
595    vseg_type_t  vseg_type;
[567]596
[625]597#if DEBUG_VMM_USER_RESET
[635]598uint32_t   cycle;
[625]599thread_t * this = CURRENT_THREAD;
[635]600#endif
601
602#if (DEBUG_VMM_USER_RESET & 1 )
603cycle = (uint32_t)hal_get_cycles();
[625]604if( DEBUG_VMM_USER_RESET < cycle )
605printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
606__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
607#endif
[567]608
[625]609#if (DEBUG_VMM_USER_RESET & 1 )
610if( DEBUG_VMM_USER_RESET < cycle )
[635]611hal_vmm_display( XPTR( local_cxy , process ) , true );
[625]612#endif
[567]613
[625]614    // get pointer on local VMM
615    vmm_t * vmm = &process->vmm;
[624]616
[625]617    // build extended pointer on VSL root and VSL lock
618    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
619    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
[567]620
[625]621    // take the VSL lock
[640]622        remote_queuelock_acquire( lock_xp );
[567]623
[625]624    // scan the VSL to delete all non kernel vsegs
625    // (we don't use a FOREACH in case of item deletion)
626    xptr_t   iter_xp;
627    xptr_t   next_xp;
628        for( iter_xp = hal_remote_l64( root_xp ) ; 
629         iter_xp != root_xp ;
630         iter_xp = next_xp )
631        {
632        // save extended pointer on next item in xlist
633        next_xp = hal_remote_l64( iter_xp );
[611]634
[625]635        // get pointers on current vseg in VSL
636        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
637        vseg      = GET_PTR( vseg_xp );
638        vseg_type = vseg->type;
[567]639
[625]640#if( DEBUG_VMM_USER_RESET & 1 )
641if( DEBUG_VMM_USER_RESET < cycle )
642printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
643__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
644#endif
645        // delete non kernel vseg 
646        if( (vseg_type != VSEG_TYPE_KCODE) && 
647            (vseg_type != VSEG_TYPE_KDATA) && 
648            (vseg_type != VSEG_TYPE_KDEV ) )
649        {
650            // remove vseg from VSL
651            vmm_remove_vseg( process , vseg );
[567]652
[625]653#if( DEBUG_VMM_USER_RESET & 1 )
654if( DEBUG_VMM_USER_RESET < cycle )
655printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
656__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
657#endif
658        }
659        else
660        {
[567]661
[625]662#if( DEBUG_VMM_USER_RESET & 1 )
663if( DEBUG_VMM_USER_RESET < cycle )
664printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
665__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
666#endif
667        }
668        }  // end loop on vsegs in VSL
[567]669
[625]670    // release the VSL lock
[640]671        remote_queuelock_release( lock_xp );
[567]672
[625]673// FIXME the process copies must also be handled...
[611]674
[672]675    // re-initialise VMM
676    vmm_user_init( process );
677
[625]678#if DEBUG_VMM_USER_RESET
679cycle = (uint32_t)hal_get_cycles();
680if( DEBUG_VMM_USER_RESET < cycle )
681printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
682__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
683#endif
[611]684
[635]685#if (DEBUG_VMM_USER_RESET & 1 )
686if( DEBUG_VMM_USER_RESET < cycle )
687hal_vmm_display( XPTR( local_cxy , process ) , true );
688#endif
689
[625]690}  // end vmm_user_reset()
[611]691
[640]692/////////////////////////////////////////////////
693void vmm_global_delete_vseg( process_t * process,
694                             intptr_t    base )
695{
696    cxy_t           owner_cxy;
697    lpid_t          owner_lpid;
[641]698    reg_t           save_sr;
[640]699
[641]700    xptr_t          process_lock_xp;
[640]701    xptr_t          process_root_xp;
702    xptr_t          process_iter_xp;
703
704    xptr_t          remote_process_xp;
705    cxy_t           remote_process_cxy;
706    process_t     * remote_process_ptr;
707
708    xptr_t          vsl_root_xp;
709    xptr_t          vsl_lock_xp;
710    xptr_t          vsl_iter_xp;
711
[641]712    rpc_desc_t      rpc;                  // shared rpc descriptor for parallel RPCs
713    uint32_t        responses;            // RPC responses counter
714
715    thread_t      * this    = CURRENT_THREAD;
716    pid_t           pid     = process->pid;
717    cluster_t     * cluster = LOCAL_CLUSTER;
718
[640]719#if DEBUG_VMM_GLOBAL_DELETE_VSEG
720uint32_t cycle = (uint32_t)hal_get_cycles();
721#endif
722
723#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
724if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
[641]725printk("\n[%s] thread[%x,%x] enters / process %x / base %x / cycle %d\n",
[640]726__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
727#endif
728
[641]729    // initialize a shared RPC descriptor
730    rpc.rsp       = &responses;
731    rpc.blocking  = false;                  // non blocking behaviour for rpc_send()
732    rpc.index     = RPC_VMM_REMOVE_VSEG;
733    rpc.thread    = this;
734    rpc.lid       = this->core->lid;
735    rpc.args[0]   = this->process->pid;
736    rpc.args[1]   = base;
737
[640]738    // get owner process cluster and local index
739    owner_cxy        = CXY_FROM_PID( pid );
740    owner_lpid       = LPID_FROM_PID( pid );
741
[641]742    // get extended pointer on root and lock of process copies xlist in owner cluster
743    process_root_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
744    process_lock_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );
[640]745
[641]746    // mask IRQs
747    hal_disable_irq( &save_sr );
748
749    // client thread blocks itself
750    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
751
752    // take the lock protecting process copies
753    remote_queuelock_acquire( process_lock_xp );
754
755    // initialize responses counter
756    responses = 0;
757
[640]758    // loop on process copies
759    XLIST_FOREACH( process_root_xp , process_iter_xp )
760    {
761        // get cluster and local pointer on remote process
762        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
763        remote_process_ptr = GET_PTR( remote_process_xp );
764        remote_process_cxy = GET_CXY( remote_process_xp );
765
766        // build extended pointers on remote VSL root and lock
767        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
768        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );
769
770        // get lock on remote VSL
771        remote_queuelock_acquire( vsl_lock_xp );
772
773        // loop on vsegs in remote process VSL
774        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
775        {
776            // get pointers on current vseg
777            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
778            vseg_t * vseg_ptr = GET_PTR( vseg_xp );
779
780            // get current vseg base address
781            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
782                                                                 &vseg_ptr->min ) );
783
784            if( vseg_base == base )   // found searched vseg
785            {
[641]786                // atomically increment responses counter
787                hal_atomic_add( &responses , 1 );
[640]788
789#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
790if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
[641]791printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
792__FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
[640]793#endif
[641]794                // send RPC to remote cluster
795                rpc_send( remote_process_cxy , &rpc );
[640]796
[641]797                // exit loop on vsegs
798                break;
[640]799            }
800        }  // end of loop on vsegs
801
[641]802        // release lock on remote VSL
803        remote_queuelock_release( vsl_lock_xp );
804
805    }  // end of loop on process copies
806
807    // release the lock protecting process copies
808    remote_queuelock_release( process_lock_xp );
809
[640]810#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
811if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
[641]812printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
813__FUNCTION__, this->process->pid, this->trdid, process->pid, base );
[640]814#endif
815
[641]816    // client thread deschedules
817    sched_yield("blocked on rpc_vmm_delete_vseg");
818 
819    // restore IRQs
820    hal_restore_irq( save_sr );
[640]821
822#if DEBUG_VMM_GLOBAL_DELETE_VSEG
823cycle = (uint32_t)hal_get_cycles();
824if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
[641]825printk("\n[%s] thread[%x,%x] exit / process %x / base %x / cycle %d\n",
826__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
[640]827#endif
828
829}  // end vmm_global_delete_vseg()
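
////////////////////////////////////////////////////////////////////////////////////////////
// Example (illustrative sketch, not part of the original file) : the parallel RPC pattern
// used above, reduced to a single target cluster. The client blocks itself, registers one
// expected response per posted RPC, and deschedules ; the server that brings "responses"
// back to zero unblocks the client.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_parallel_rpc( thread_t   * this,
                                  cxy_t        target_cxy,
                                  rpc_desc_t * rpc )
{
    uint32_t responses = 0;

    rpc->rsp = &responses;

    // block the client thread before posting the request
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    // one expected response for the posted RPC
    hal_atomic_add( &responses , 1 );
    rpc_send( target_cxy , rpc );

    // deschedule until the last server response unblocks the client
    sched_yield( "blocked on rpc" );
}
#endif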
830
[595]831////////////////////////////////////////////////
[640]832void vmm_global_resize_vseg( process_t * process,
833                             intptr_t    base,
834                             intptr_t    new_base,
835                             intptr_t    new_size )
836{
837    cxy_t           owner_cxy;
838    lpid_t          owner_lpid;
[641]839    reg_t           save_sr;
[640]840
[641]841    xptr_t          process_lock_xp;
[640]842    xptr_t          process_root_xp;
843    xptr_t          process_iter_xp;
844
845    xptr_t          remote_process_xp;
846    cxy_t           remote_process_cxy;
847    process_t     * remote_process_ptr;
848
849    xptr_t          vsl_root_xp;
850    xptr_t          vsl_lock_xp;
851    xptr_t          vsl_iter_xp;
852
[641]853    rpc_desc_t      rpc;                  // shared rpc descriptor for parallel RPCs
854    uint32_t        responses;            // RPC responses counter
855
856    thread_t      * this    = CURRENT_THREAD; 
857    pid_t           pid     = process->pid;
858    cluster_t     * cluster = LOCAL_CLUSTER;
859
[640]860#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
861uint32_t cycle = (uint32_t)hal_get_cycles();
862#endif
863
864#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
865if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
866printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n",
867__FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle );
868#endif
869
[641]870    // initialize a shared RPC descriptor
871    rpc.rsp       = &responses;
872    rpc.blocking  = false;                  // non blocking behaviour for rpc_send()
873    rpc.index     = RPC_VMM_REMOVE_VSEG;
874    rpc.thread    = this;
875    rpc.lid       = this->core->lid;
876    rpc.args[0]   = this->process->pid;
877    rpc.args[1]   = base;
878    rpc.args[2]   = new_base;
879    rpc.args[3]   = new_size;
880
881    // get owner process cluster and local index
[640]882    owner_cxy        = CXY_FROM_PID( pid );
883    owner_lpid       = LPID_FROM_PID( pid );
884
[641]885    // get extended pointer on root and lock of process copies xlist in owner cluster
886    process_root_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
887    process_lock_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );
[640]888
[641]889    // mask IRQs
890    hal_disable_irq( &save_sr );
891
892    // client thread blocks itself
893    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
894
895    // take the lock protecting process copies
896    remote_queuelock_acquire( process_lock_xp );
897
898    // initialize responses counter
899    responses = 0;
900
[640]901    // loop on process copies
902    XLIST_FOREACH( process_root_xp , process_iter_xp )
903    {
904        // get cluster and local pointer on remote process
905        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
906        remote_process_ptr = GET_PTR( remote_process_xp );
907        remote_process_cxy = GET_CXY( remote_process_xp );
908
909        // build extended pointers on remote VSL root and lock
910        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
911        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );
912
913        // get lock on remote VSL
914        remote_queuelock_acquire( vsl_lock_xp );
915
916        // loop on vsegs in remote process VSL
917        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
918        {
919            // get pointers on current vseg
920            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
921            vseg_t * vseg_ptr = GET_PTR( vseg_xp );
922
923            // get current vseg base address
924            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
925                                                                 &vseg_ptr->min ) );
926
927            if( vseg_base == base )   // found searched vseg
928            {
[641]929                // atomically increment responses counter
930                hal_atomic_add( &responses , 1 );
931
[640]932#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
933if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
[641]934printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
935__FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
[640]936#endif
[641]937                // send RPC to remote cluster
938                rpc_send( remote_process_cxy , & rpc );
[640]939
[641]940                // exit loop on vsegs
941                break;
[640]942            }
[641]943
[640]944        }  // end of loop on vsegs
945
946#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
947if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
948hal_vmm_display( remote_process_xp , false );
949#endif
950
951        // release lock on remote VSL
952        remote_queuelock_release( vsl_lock_xp );
[641]953
[640]954    }  // end of loop on process copies
955
[641]956    // release the lock protecting process copies
957    remote_queuelock_release( process_lock_xp );
958
959#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
960if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
961printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
962__FUNCTION__, this->process->pid, this->trdid, process->pid, base );
963#endif
964
965    // client thread deschedules
966    sched_yield("blocked on rpc_vmm_resize_vseg");
967
968    // restore IRQs
969    hal_restore_irq( save_sr );
970
[640]971#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
972cycle = (uint32_t)hal_get_cycles();
973if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
974printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
975__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
976#endif
977
978}  // end vmm_global_resize_vseg()
979
980////////////////////////////////////////////////
[433]981void vmm_global_update_pte( process_t * process,
982                            vpn_t       vpn,
983                            uint32_t    attr,
984                            ppn_t       ppn )
[23]985{
[640]986    pid_t           pid;
987    cxy_t           owner_cxy;
988    lpid_t          owner_lpid;
989
[408]990    xlist_entry_t * process_root_ptr;
991    xptr_t          process_root_xp;
992    xptr_t          process_iter_xp;
[23]993
[408]994    xptr_t          remote_process_xp;
995    cxy_t           remote_process_cxy;
996    process_t     * remote_process_ptr;
997    xptr_t          remote_gpt_xp;
[23]998
[640]999#if DEBUG_VMM_GLOBAL_UPDATE_PTE
[433]1000uint32_t cycle = (uint32_t)hal_get_cycles();
[595]1001thread_t * this = CURRENT_THREAD;
[640]1002#endif
1003
1004
1005#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
1006if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
[635]1007printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / cycle %d\n",
1008__FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle );
[433]1009#endif
1010
[640]1011    // get owner process cluster and local index
[408]1012    pid              = process->pid;
1013    owner_cxy        = CXY_FROM_PID( pid );
1014    owner_lpid       = LPID_FROM_PID( pid );
[640]1015
1016    // get extended pointer on root of process copies xlist in owner cluster
[408]1017    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
1018    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
[23]1019
[640]1020    // loop on process copies
[408]1021    XLIST_FOREACH( process_root_xp , process_iter_xp )
1022    {
1023        // get cluster and local pointer on remote process
1024        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
[433]1025        remote_process_ptr = GET_PTR( remote_process_xp );
[408]1026        remote_process_cxy = GET_CXY( remote_process_xp );
[407]1027
[640]1028#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
1029if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
[635]1030printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
[595]1031__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
[433]1032#endif
1033
[408]1034        // get extended pointer on remote gpt
1035        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
1036
[433]1037        // update remote GPT
1038        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
[408]1039    } 
1040
[640]1041#if DEBUG_VMM_GLOBAL_UPDATE_PTE
[433]1042cycle = (uint32_t)hal_get_cycles();
[640]1043if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
[595]1044printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
1045__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
[433]1046#endif
1047
[640]1048#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
[635]1049hal_vmm_display( XPTR( local_cxy , process ) , true );
1050#endif
1051
[433]1052}  // end vmm_global_update_pte()
1053
[408]1054///////////////////////////////////////
1055void vmm_set_cow( process_t * process )
1056{
1057    vmm_t         * vmm;
1058
1059    xlist_entry_t * process_root_ptr;
1060    xptr_t          process_root_xp;
1061    xptr_t          process_iter_xp;
1062
1063    xptr_t          remote_process_xp;
1064    cxy_t           remote_process_cxy;
1065    process_t     * remote_process_ptr;
1066    xptr_t          remote_gpt_xp;
1067
1068    xptr_t          vseg_root_xp;
1069    xptr_t          vseg_iter_xp;
1070
1071    xptr_t          vseg_xp;
1072    vseg_t        * vseg;
1073
1074    pid_t           pid;
1075    cxy_t           owner_cxy;
1076    lpid_t          owner_lpid;
1077
[635]1078    // get target process PID
1079    pid = process->pid;
1080
[438]1081#if DEBUG_VMM_SET_COW
[595]1082uint32_t   cycle = (uint32_t)hal_get_cycles();
1083thread_t * this  = CURRENT_THREAD;
[438]1084if( DEBUG_VMM_SET_COW < cycle )
[595]1085printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
[635]1086__FUNCTION__, this->process->pid, this->trdid, pid , cycle );
[433]1087#endif
[408]1088
[635]1089#if (DEBUG_VMM_SET_COW & 1)
1090if( DEBUG_VMM_SET_COW < cycle )
1091hal_vmm_display( XPTR( local_cxy , process ) , true );
1092#endif
1093
[567]1094// check cluster is reference
[672]1095assert( __FUNCTION__, (XPTR( local_cxy , process ) == process->ref_xp),
[635]1096"local cluster must be process reference cluster\n");
[408]1097
1098    // get pointer on reference VMM
1099    vmm = &process->vmm;
1100
1101    // get extended pointer on root of process copies xlist in owner cluster
1102    owner_cxy        = CXY_FROM_PID( pid );
1103    owner_lpid       = LPID_FROM_PID( pid );
1104    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
1105    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
1106
1107    // get extended pointer on root of vsegs xlist from reference VMM
1108    vseg_root_xp  = XPTR( local_cxy , &vmm->vsegs_root ); 
1109
[635]1110    // loop on target process copies
[408]1111    XLIST_FOREACH( process_root_xp , process_iter_xp )
1112    {
[635]1113        // get cluster and local pointer on remote process copy
[408]1114        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
[433]1115        remote_process_ptr = GET_PTR( remote_process_xp );
[408]1116        remote_process_cxy = GET_CXY( remote_process_xp );
1117
[595]1118#if (DEBUG_VMM_SET_COW & 1)
[438]1119if( DEBUG_VMM_SET_COW < cycle )
[635]1120printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n",
1121__FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy );
[433]1122#endif
[408]1123
1124        // get extended pointer on remote gpt
1125        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
1126
1127        // loop on vsegs in (local) reference process VSL
1128        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
1129        {
1130            // get pointer on vseg
1131            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
[433]1132            vseg     = GET_PTR( vseg_xp );
[408]1133
1134            // get vseg type, base and size
1135            uint32_t type     = vseg->type;
1136            vpn_t    vpn_base = vseg->vpn_base;
1137            vpn_t    vpn_size = vseg->vpn_size;
1138
[595]1139#if (DEBUG_VMM_SET_COW & 1)
[438]1140if( DEBUG_VMM_SET_COW < cycle )
[635]1141printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n",
[595]1142__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
[433]1143#endif
1144            // only DATA, ANON and REMOTE vsegs
[408]1145            if( (type == VSEG_TYPE_DATA)  ||
1146                (type == VSEG_TYPE_ANON)  ||
1147                (type == VSEG_TYPE_REMOTE) )
1148            {
[433]1149                vpn_t      vpn;
1150                uint32_t   attr;
1151                ppn_t      ppn;
1152                xptr_t     page_xp;
1153                cxy_t      page_cxy;
1154                page_t   * page_ptr;
1155                xptr_t     forks_xp;
[469]1156                xptr_t     lock_xp;
[433]1157
1158                // update flags in remote GPT
1159                hal_gpt_set_cow( remote_gpt_xp,
1160                                 vpn_base,
1161                                 vpn_size ); 
1162
1163                // atomically increment pending forks counter in physical pages,
[635]1164                // this is only done once, when handling the reference copy
[433]1165                if( remote_process_cxy == local_cxy )
1166                {
[635]1167
1168#if (DEBUG_VMM_SET_COW & 1)
1169if( DEBUG_VMM_SET_COW < cycle )
1170printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n",
1171__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
1172#endif
[433]1173                    // scan all pages in vseg
1174                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
1175                    {
1176                        // get page attributes and PPN from reference GPT
[585]1177                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn ); 
[433]1178
1179                        // atomically update pending forks counter if page is mapped
1180                        if( attr & GPT_MAPPED )
1181                        {
[469]1182                            // get pointers and cluster on page descriptor
[433]1183                            page_xp  = ppm_ppn2page( ppn );
1184                            page_cxy = GET_CXY( page_xp );
1185                            page_ptr = GET_PTR( page_xp );
[469]1186
1187                            // get extended pointers on "forks" and "lock"
[433]1188                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
[469]1189                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );
1190
[567]1191                            // take lock protecting "forks" counter
1192                            remote_busylock_acquire( lock_xp );
1193
[469]1194                            // increment "forks"
[433]1195                            hal_remote_atomic_add( forks_xp , 1 );
[567]1196
1197                            // release lock protecting "forks" counter
1198                            remote_busylock_release( lock_xp );
[433]1199                        }
1200                    }   // end loop on vpn
[635]1201
1202#if (DEBUG_VMM_SET_COW & 1)
1203if( DEBUG_VMM_SET_COW < cycle )
1204printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n",
1205__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
1206#endif
[433]1207                }   // end if local
1208            }   // end if vseg type
1209        }   // end loop on vsegs
[408]1210    }   // end loop on process copies
1211 
[438]1212#if DEBUG_VMM_SET_COW
[433]1213cycle = (uint32_t)hal_get_cycles();
[438]1214if( DEBUG_VMM_SET_COW < cycle )
[595]1215printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
1216__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
[433]1217#endif
[408]1218
[408]1219}  // end vmm_set_cow()
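
////////////////////////////////////////////////////////////////////////////////////////////
// Example (illustrative sketch, not part of the original file) : the "forks" counter
// update performed in the loop above, isolated for a single physical page identified
// by its PPN.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_forks_increment( ppn_t ppn )
{
    // get extended pointer, cluster and local pointer on page descriptor
    xptr_t   page_xp  = ppm_ppn2page( ppn );
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    // build extended pointers on the "forks" counter and its lock
    xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

    // the busylock makes the read-modify-write of "forks" safe
    // against concurrent fork / page-fault / delete paths
    remote_busylock_acquire( lock_xp );
    hal_remote_atomic_add( forks_xp , 1 );
    remote_busylock_release( lock_xp );
}
#endif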
1220
1221/////////////////////////////////////////////////
1222error_t vmm_fork_copy( process_t * child_process,
1223                       xptr_t      parent_process_xp )
1224{
1225    error_t     error;
1226    cxy_t       parent_cxy;
1227    process_t * parent_process;
1228    vmm_t     * parent_vmm;
1229    xptr_t      parent_lock_xp;
1230    vmm_t     * child_vmm;
1231    xptr_t      iter_xp;
1232    xptr_t      parent_vseg_xp;
1233    vseg_t    * parent_vseg;
1234    vseg_t    * child_vseg;
1235    uint32_t    type;
1236    vpn_t       vpn;           
1237    vpn_t       vpn_base;
1238    vpn_t       vpn_size;
1239    xptr_t      parent_root_xp;
1240    bool_t      mapped; 
1241    ppn_t       ppn;
1242
[438]1243#if DEBUG_VMM_FORK_COPY
[433]1244uint32_t cycle = (uint32_t)hal_get_cycles();
[595]1245thread_t * this = CURRENT_THREAD;
[438]1246if( DEBUG_VMM_FORK_COPY < cycle )
[595]1247printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1248__FUNCTION__ , this->process->pid, this->trdid, cycle );
[433]1249#endif
[408]1250
1251    // get parent process cluster and local pointer
1252    parent_cxy     = GET_CXY( parent_process_xp );
[433]1253    parent_process = GET_PTR( parent_process_xp );
[408]1254
1255    // get local pointers on parent and child VMM
1256    parent_vmm = &parent_process->vmm; 
1257    child_vmm  = &child_process->vmm;
1258
[625]1259    // build extended pointer on parent VSL root and lock
[408]1260    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
[625]1261    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
[408]1262
[640]1263    // take the lock protecting the parent VSL
1264    remote_queuelock_acquire( parent_lock_xp );
[415]1265
[408]1266    // loop on parent VSL xlist
1267    XLIST_FOREACH( parent_root_xp , iter_xp )
[23]1268    {
[625]1269        // get pointers on current parent vseg
[408]1270        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
[433]1271        parent_vseg    = GET_PTR( parent_vseg_xp );
[23]1272
[408]1273        // get vseg type
[567]1274        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );
[408]1275       
[438]1276#if DEBUG_VMM_FORK_COPY
[433]1277cycle = (uint32_t)hal_get_cycles();
[438]1278if( DEBUG_VMM_FORK_COPY < cycle )
[595]1279printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
1280__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
[567]1281hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
[433]1282#endif
[23]1283
[623]1284        // all parent vsegs - except STACK and kernel vsegs - must be copied into the child VSL
1285        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
1286            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
[23]1287        {
[408]1288            // allocate memory for a new child vseg
1289            child_vseg = vseg_alloc();
1290            if( child_vseg == NULL )   // release all allocated vsegs
[23]1291            {
[408]1292                vmm_destroy( child_process );
1293                printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
1294                return -1;
[23]1295            }
1296
[408]1297            // copy parent vseg to child vseg
1298            vseg_init_from_ref( child_vseg , parent_vseg_xp );
[23]1299
[640]1300            // build extended pointer on child VSL lock
1301            xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
[625]1302 
[640]1303            // take the child VSL lock
1304            remote_queuelock_acquire( child_lock_xp );
[625]1305
[408]1306            // register child vseg in child VSL
[611]1307            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
[407]1308
[640]1309            // release the child VSL lock
1310            remote_queuelock_release( child_lock_xp );
[625]1311
[438]1312#if DEBUG_VMM_FORK_COPY
[433]1313cycle = (uint32_t)hal_get_cycles();
[438]1314if( DEBUG_VMM_FORK_COPY < cycle )
[595]1315printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
1316__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
[567]1317hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
[433]1318#endif
[625]1319            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
[408]1320            if( type != VSEG_TYPE_CODE )
1321            {
[625]1322                // the COW flag is not set here : when required, it is
[635]1323                // activated later by the vmm_set_cow() function
[23]1324
[408]1325                vpn_base = child_vseg->vpn_base;
1326                vpn_size = child_vseg->vpn_size;
[23]1327
[408]1328                // scan pages in parent vseg
1329                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
1330                {
1331                    error = hal_gpt_pte_copy( &child_vmm->gpt,
[625]1332                                              vpn,
[408]1333                                              XPTR( parent_cxy , &parent_vmm->gpt ),
1334                                              vpn,
[635]1335                                              false,      // does not handle COW flag
1336                                              &ppn,       // unused
1337                                              &mapped );  // unused
[408]1338                    if( error )
1339                    {
1340                        vmm_destroy( child_process );
1341                        printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
1342                        return -1;
1343                    }
1344
[438]1345#if DEBUG_VMM_FORK_COPY
[433]1346cycle = (uint32_t)hal_get_cycles();
[438]1347if( DEBUG_VMM_FORK_COPY < cycle )
[595]1348printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
1349__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
[433]1350#endif
[408]1351                }
1352            }   // end if no code & no stack
1353        }   // end if no stack
1354    }   // end loop on vsegs
1355
[567]1356    // release the parent VSL lock
[640]1357    remote_queuelock_release( parent_lock_xp );
[408]1358
[651]1359/* deprecated [AG] : this is already done by the vmm_user_init() function
1360
[408]1361    // initialize the child VMM STACK allocator
[651]1362    vmm_stack_init( child_vmm );
[408]1363
1364    // initialize the child VMM MMAP allocator
[651]1365    vmm_mmap_init( child_vmm );
[23]1366
[178]1367    // initialize instrumentation counters
[635]1368        child_vmm->false_pgfault_nr    = 0;
1369        child_vmm->local_pgfault_nr    = 0;
1370        child_vmm->global_pgfault_nr   = 0;
1371        child_vmm->false_pgfault_cost  = 0;
1372        child_vmm->local_pgfault_cost  = 0;
1373        child_vmm->global_pgfault_cost = 0;
[651]1374*/
[408]1375    // copy base addresses from parent VMM to child VMM
1376    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
1377    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
1378    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
1379    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
1380    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));
[23]1381
[408]1382    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));
[23]1383
[124]1384    hal_fence();
[23]1385
[438]1386#if DEBUG_VMM_FORK_COPY
[433]1387cycle = (uint32_t)hal_get_cycles();
[438]1388if( DEBUG_VMM_FORK_COPY < cycle )
[595]1389printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
1390__FUNCTION__ , this->process->pid, this->trdid , cycle );
[433]1391#endif
1392
[23]1393    return 0;
1394
[408]1395}  // vmm_fork_copy()
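
////////////////////////////////////////////////////////////////////////////////////////////
// Example (illustrative sketch, not part of the original file) : a simplified fork path,
// assuming it runs in the parent reference cluster. The actual flow is implemented by
// process_make_fork() ; the "example_" name and the exact call order are assumptions.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t example_fork_vmm( process_t * child_process,
                                 xptr_t      parent_process_xp )
{
    // duplicate parent VSL and GPT entries into the child VMM
    if( vmm_fork_copy( child_process , parent_process_xp ) ) return -1;

    // activate copy-on-write in all copies of the parent process...
    vmm_set_cow( GET_PTR( parent_process_xp ) );

    // ...and in the child process
    vmm_set_cow( child_process );

    return 0;
}
#endif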
[204]1396
[1]1397///////////////////////////////////////
1398void vmm_destroy( process_t * process )
1399{
[408]1400    xptr_t   vseg_xp;
[1]1401        vseg_t * vseg;
1402
[438]1403#if DEBUG_VMM_DESTROY
[635]1404uint32_t   cycle = (uint32_t)hal_get_cycles();
1405thread_t * this  = CURRENT_THREAD;
[438]1406if( DEBUG_VMM_DESTROY < cycle )
[595]1407printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
1408__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]1409#endif
[416]1410
[438]1411#if (DEBUG_VMM_DESTROY & 1 )
[443]1412if( DEBUG_VMM_DESTROY < cycle )
[635]1413hal_vmm_display( XPTR( local_cxy, process ) , true );
[437]1414#endif
1415
[433]1416    // get pointer on local VMM
[1]1417    vmm_t  * vmm = &process->vmm;
1418
[625]1419    // build extended pointer on VSL root, VSL lock and GPT lock
1420    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
1421    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
[408]1422
[625]1423    // take the VSL lock
[640]1424    remote_queuelock_acquire( vsl_lock_xp );
[625]1425
[611]1426    // scan the VSL to delete all registered vsegs
1427    // (we don't use XLIST_FOREACH because items are deleted during the scan)
1428    xptr_t  iter_xp;
1429    xptr_t  next_xp;
1430        for( iter_xp = hal_remote_l64( vsl_root_xp ) ; 
1431         iter_xp != vsl_root_xp ;
1432         iter_xp = next_xp )
[1]1433        {
[625]1434        // save extended pointer on next item in xlist
1435        next_xp = hal_remote_l64( iter_xp );
[409]1436
[625]1437        // get pointers on current vseg in VSL
1438        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
1439        vseg      = GET_PTR( vseg_xp );
1440
[611]1441        // delete vseg and release physical pages
[625]1442        vmm_remove_vseg( process , vseg );
[409]1443
[443]1444#if( DEBUG_VMM_DESTROY & 1 )
1445if( DEBUG_VMM_DESTROY < cycle )
[611]1446printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
[443]1447__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1448#endif
1449
[1]1450        }
1451
[625]1452    // release the VSL lock
[640]1453    remote_queuelock_release( vsl_lock_xp );
[625]1454
[651]1455    // release all unused vseg descriptors cached in the MMAP allocator free_lists
[1]1456    uint32_t i;
[651]1457    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
[1]1458    {
[651]1459        // build extended pointer on free list root
1460        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.free_list_root[i] );
[625]1461 
1462        // scan free_list[i]
1463            while( !xlist_is_empty( root_xp ) )
[1]1464            {
[625]1465                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
1466            vseg    = GET_PTR( vseg_xp );
[443]1467
1468#if( DEBUG_VMM_DESTROY & 1 )
1469if( DEBUG_VMM_DESTROY < cycle )
[595]1470printk("\n[%s] found zombi vseg / vpn_base %x / vpn_size %d\n",
[443]1471__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1472#endif
[611]1473            // clean vseg descriptor
1474            vseg->vmm = NULL;
1475
[625]1476            // remove vseg from free_list
[611]1477            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
1478
1479                    // release vseg descriptor
[1]1480            vseg_free( vseg );
[443]1481
1482#if( DEBUG_VMM_DESTROY & 1 )
1483if( DEBUG_VMM_DESTROY < cycle )
[595]1484printk("\n[%s] %s vseg released / vpn_base %x / vpn_size %d\n",
[443]1485__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1486#endif
[1]1487            }
1488    }
1489
[409]1490    // release memory allocated to the GPT itself
[1]1491    hal_gpt_destroy( &vmm->gpt );
1492
[438]1493#if DEBUG_VMM_DESTROY
[433]1494cycle = (uint32_t)hal_get_cycles();
[438]1495if( DEBUG_VMM_DESTROY < cycle )
[595]1496printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
1497__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]1498#endif
[416]1499
[204]1500}  // end vmm_destroy()
1501
[1]1502/////////////////////////////////////////////////
1503vseg_t * vmm_check_conflict( process_t * process,
[21]1504                             vpn_t       vpn_base,
[1]1505                             vpn_t       vpn_size )
1506{
1507    vmm_t        * vmm = &process->vmm;
[408]1508
1509    // scan the VSL
[1]1510        vseg_t       * vseg;
[408]1511    xptr_t         iter_xp;
1512    xptr_t         vseg_xp;
1513    xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
[1]1514
[408]1515        XLIST_FOREACH( root_xp , iter_xp )
[1]1516        {
[408]1517                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
[433]1518        vseg    = GET_PTR( vseg_xp );
[204]1519
[21]1520                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
1521             (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
[1]1522        }
1523    return NULL;
1524
[204]1525}  // end vmm_check_conflict()
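
// Illustrative example of the half-open interval test above, for a new vseg
// [vpn 0x10 , vpn 0x20[ checked against an existing vseg [vpn 0x18 , vpn 0x30[ :
//   (0x10 + 0x10 > 0x18) && (0x10 < 0x18 + 0x18)   =>   conflict is reported.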
1526
[407]1527////////////////////////////////////////////////
1528vseg_t * vmm_create_vseg( process_t   * process,
1529                          vseg_type_t   type,
[635]1530                          intptr_t      base,         // ltid for VSEG_TYPE_STACK
[407]1531                          uint32_t      size,
1532                          uint32_t      file_offset,
1533                          uint32_t      file_size,
1534                          xptr_t        mapper_xp,
1535                          cxy_t         cxy )
[1]1536{
[651]1537    vseg_t     * vseg;          // pointer on allocated vseg descriptor
[1]1538
[640]1539#if DEBUG_VMM_CREATE_VSEG
1540thread_t * this  = CURRENT_THREAD;
1541uint32_t   cycle;
1542#endif
1543
[635]1544#if (DEBUG_VMM_CREATE_VSEG & 1)
[640]1545cycle = (uint32_t)hal_get_cycles();
[438]1546if( DEBUG_VMM_CREATE_VSEG < cycle )
[635]1547printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n",
1548__FUNCTION__, this->process->pid, this->trdid,
1549process->pid, vseg_type_str(type), base, cxy, cycle );
[433]1550#endif
[21]1551
[407]1552    // get pointer on VMM
1553        vmm_t * vmm    = &process->vmm;
[21]1554
[651]1555    // allocate a vseg descriptor and initialize it, depending on type
1556    // we use specific allocators for "stack" and "mmap" types
[595]1557
[651]1558    /////////////////////////////
[1]1559    if( type == VSEG_TYPE_STACK )
1560    {
[651]1561        // get vseg from STACK allocator
1562        vseg = vmm_stack_alloc( vmm , base );    // base == ltid
1563       
1564        if( vseg == NULL )
1565        {
1566            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1567            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
1568            return NULL;
1569        }
[1]1570
[651]1571        // initialize vseg
1572        vseg->type = type;
1573        vseg->vmm  = vmm;
1574        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
1575        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
1576        vseg->cxy  = cxy;
1577
1578        vseg_init_flags( vseg , type );
[1]1579    }
[651]1580    /////////////////////////////////
[595]1581    else if( type == VSEG_TYPE_FILE )
1582    {
[651]1583        // compute page index (in mapper) for first and last byte
[595]1584        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
1585        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
1586
[651]1587        // compute offset in first page and number of pages
[595]1588        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
1589        vpn_t    npages      = vpn_max - vpn_min + 1;
1590
[651]1591        // get vseg from MMAP allocator
1592        vseg = vmm_mmap_alloc( vmm , npages );
1593
1594        if( vseg == NULL )
[595]1595        {
[651]1596            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1597            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
[595]1598            return NULL;
1599        }
1600
[651]1601        // initialize vseg
1602        vseg->type        = type;
1603        vseg->vmm         = vmm;
1604        vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; 
1605        vseg->max         = vseg->min + size;
1606        vseg->file_offset = file_offset;
1607        vseg->file_size   = file_size;
1608        vseg->mapper_xp   = mapper_xp;
1609        vseg->cxy         = cxy;
1610
1611        vseg_init_flags( vseg , type );
[595]1612    }
[651]1613    /////////////////////////////////////////////////////////////////
1614    else if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) )
[1]1615    {
[595]1616        // compute number of required pages in virtual space
1617        vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
1618        if( size & CONFIG_PPM_PAGE_MASK) npages++;
1619       
[651]1620        // allocate vseg from MMAP allocator
1621        vseg = vmm_mmap_alloc( vmm , npages );
1622
1623        if( vseg == NULL )
[1]1624        {
[651]1625            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1626            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
[1]1627            return NULL;
1628        }
1629
[651]1630        // initialize vseg
1631        vseg->type = type;
1632        vseg->vmm  = vmm;
1633        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
1634        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
1635        vseg->cxy  = cxy;
1636
1637        vseg_init_flags( vseg , type );
[1]1638    }
[651]1639    /////////////////////////////////////////////////////////////////
[623]1640    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
[1]1641    {
[204]1642        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
1643        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
1644
[651]1645        // allocate vseg descriptor
1646            vseg = vseg_alloc();
1647
1648            if( vseg == NULL )
1649            {
1650            printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1651            __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
1652            return NULL;
1653            }
1654        // initialize vseg
1655        vseg->type        = type;
1656        vseg->vmm         = vmm;
1657        vseg->min         = base;
1658        vseg->max         = base + size;
1659        vseg->vpn_base    = base >> CONFIG_PPM_PAGE_SHIFT;
1660        vseg->vpn_size    = vpn_max - vpn_min + 1;
1661        vseg->file_offset = file_offset;
1662        vseg->file_size   = file_size;
1663        vseg->mapper_xp   = mapper_xp;
1664        vseg->cxy         = cxy;
1665
1666        vseg_init_flags( vseg , type );
[1]1667    }
1668
1669    // check collisions
[651]1670    vseg_t * existing_vseg = vmm_check_conflict( process , vseg->vpn_base , vseg->vpn_size );
[624]1671
[651]1672    if( existing_vseg != NULL )
[1]1673    {
[651]1674        printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
1675               "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
1676        __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 
1677        vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
1678        vseg_free( vseg );
[1]1679        return NULL;
1680    }
1681
[625]1682    // build extended pointer on VSL lock
1683    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1684 
1685    // take the VSL lock
[640]1686    remote_queuelock_acquire( lock_xp );
[625]1687
[408]1688    // attach vseg to VSL
[611]1689        vmm_attach_vseg_to_vsl( vmm , vseg );
[1]1690
[625]1691    // release the VSL lock
[640]1692    remote_queuelock_release( lock_xp );
[625]1693
[651]1694#if DEBUG_VMM_CREATE_VSEG
[433]1695cycle = (uint32_t)hal_get_cycles();
[651]1696if( DEBUG_VMM_CREATE_VSEG < cycle )
1697printk("\n[%s] thread[%x,%x] exit / %s / vpn_base %x / vpn_size %x / cycle %d\n",
1698__FUNCTION__, this->process->pid, this->trdid,
1699vseg_type_str(type), vseg->vpn_base, vseg->vpn_size, cycle );
[433]1700#endif
[21]1701
[1]1702        return vseg;
1703
[406]1704}  // vmm_create_vseg()
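
// Usage sketch (illustrative only / <process> and <size> are assumptions) :
// creating an anonymous vseg of <size> bytes mapped in the local cluster.
// The <base> argument is ignored for the ANON type, because the virtual base
// is defined by the MMAP allocator.
//
//   vseg_t * vseg = vmm_create_vseg( process,          // owner process
//                                    VSEG_TYPE_ANON,   // vseg type
//                                    0,                // base (unused for ANON)
//                                    size,             // size in bytes
//                                    0,                // file_offset (unused)
//                                    0,                // file_size (unused)
//                                    XPTR_NULL,        // no mapper
//                                    local_cxy );      // mapping cluster
//
//   if( vseg == NULL )  printk("allocation failure or vseg conflict\n");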
1705
[640]1706////////////////////////////////////////////////////////////////////////////////////////////
[656]1707// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions
1708// to update the physical page descriptor identified by the <ppn> argument.
[656]1709// It decrements the refcount, sets the dirty bit when required, and releases the physical
1710// page to kmem depending on the vseg type.
1711// - KERNEL : refcount decremented / not released to kmem    / dirty bit not set
1712// - FILE   : refcount decremented / not released to kmem    / dirty bit set when required.
1713// - CODE   : refcount decremented / released to kmem        / dirty bit not set.
[656]1714// - STACK  : refcount decremented / released to kmem        / dirty bit not set.
1715// - DATA   : refcount decremented / released to kmem if ref / dirty bit not set.
1716// - MMAP   : refcount decremented / released to kmem if ref / dirty bit not set.
[640]1717////////////////////////////////////////////////////////////////////////////////////////////
1718// @ process  : local pointer on process.
1719// @ vseg     : local pointer on vseg.
[640]1720// @ ppn      : released physical page index.
[656]1721// @ dirty    : set the dirty bit in the page descriptor when non-zero.
[640]1722////////////////////////////////////////////////////////////////////////////////////////////
1723static void vmm_ppn_release( process_t * process,
1724                             vseg_t    * vseg,
[656]1725                             ppn_t       ppn,
1726                             uint32_t    dirty )
[640]1727{
[656]1728    bool_t do_kmem_release;
[625]1729
[640]1730    // get vseg type
1731    vseg_type_t type = vseg->type;
1732
[656]1733    // compute is_ref <=> the local process is the reference process
[640]1734    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
1735
1736    // get pointers on physical page descriptor
1737    xptr_t   page_xp  = ppm_ppn2page( ppn );
1738    cxy_t    page_cxy = GET_CXY( page_xp );
1739    page_t * page_ptr = GET_PTR( page_xp );
1740
1741    // decrement page refcount
1742    xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount );
1743    hal_remote_atomic_add( count_xp , -1 );
1744
[656]1745    // compute the do_kmem_release condition depending on vseg type
1746    if( (type == VSEG_TYPE_KCODE) || 
[640]1747        (type == VSEG_TYPE_KDATA) || 
1748        (type == VSEG_TYPE_KDEV) )           
1749    {
[656]1750        // no physical page release for KERNEL
1751        do_kmem_release = false;
[640]1752    }
[656]1753    else if( type == VSEG_TYPE_FILE )
1754    {
1755        // no physical page release for FILE
1756        do_kmem_release = false;
1757
1758        // set dirty bit if required
1759        if( dirty ) ppm_page_do_dirty( page_xp );
1760    }   
[640]1761    else if( (type == VSEG_TYPE_CODE)  ||
1762             (type == VSEG_TYPE_STACK) ) 
1763    {
1764        // always release physical page for private vsegs
[656]1765        do_kmem_release = true;
[640]1766    }
1767    else if( (type == VSEG_TYPE_ANON)  ||
1768             (type == VSEG_TYPE_REMOTE) )
1769    {
1770        // release physical page if reference cluster
[656]1771        do_kmem_release = is_ref;
[640]1772    }
1773    else if( is_ref )  // vseg_type == DATA in reference cluster
1774    {
1775        // get extended pointers on forks and lock field in page descriptor
1776        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
1777        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
1778
1779        // take lock protecting "forks" counter
1780        remote_busylock_acquire( lock_xp );
1781
1782        // get number of pending forks from page descriptor
1783        uint32_t forks = hal_remote_l32( forks_xp );
1784
1785        // decrement pending forks counter if required
1786        if( forks )  hal_remote_atomic_add( forks_xp , -1 );
1787
1788        // release lock protecting "forks" counter
1789        remote_busylock_release( lock_xp );
1790
1791        // release physical page if forks == 0
[656]1792        do_kmem_release = (forks == 0); 
[640]1793    }
1794    else              // vseg_type == DATA not in reference cluster
1795    {
1796        // no physical page release if not in reference cluster
[656]1797        do_kmem_release = false;
[640]1798    }
1799
1800    // release physical page to relevant kmem when required
[656]1801    if( do_kmem_release )
[640]1802    {
[656]1803        kmem_req_t req;
1804        req.type = KMEM_PPM;
1805        req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );
[640]1806
[656]1807        kmem_remote_free( page_cxy , &req );
1808
[640]1809#if DEBUG_VMM_PPN_RELEASE
1810thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
1811if( DEBUG_VMM_PPN_RELEASE < cycle )
1812printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n",
1813__FUNCTION__, this->process->pid, this->trdid, ppn );
1814#endif
1815
1816    }
1817} // end vmm_ppn_release()
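
// Illustrative scenario for the "forks" handling above : for a DATA page in
// the reference cluster with forks == 1, a first call decrements forks to 0
// and keeps the page (a forked child still references it) ; a later call
// finds forks == 0 and releases the page to kmem.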
1818
[625]1819//////////////////////////////////////////
1820void vmm_remove_vseg( process_t * process,
1821                      vseg_t    * vseg )
[1]1822{
[625]1823    uint32_t    vseg_type;  // vseg type
[21]1824    vpn_t       vpn;        // VPN of current PTE
1825    vpn_t       vpn_min;    // VPN of first PTE
[1]1826    vpn_t       vpn_max;    // VPN of last PTE (excluded)
[409]1827    ppn_t       ppn;        // current PTE ppn value
1828    uint32_t    attr;       // current PTE attributes
[1]1829
[625]1830// check arguments
[672]1831assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
1832assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );
[409]1833
[625]1834    // get pointers on local process VMM
[640]1835    vmm_t * vmm = &process->vmm;
[611]1836
[629]1837    // build extended pointer on GPT
[640]1838    xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt );
[629]1839
[623]1840    // get relevant vseg infos
[624]1841    vseg_type = vseg->type;
1842    vpn_min   = vseg->vpn_base;
1843    vpn_max   = vpn_min + vseg->vpn_size;
[623]1844
[625]1845#if DEBUG_VMM_REMOVE_VSEG
1846uint32_t   cycle = (uint32_t)hal_get_cycles();
1847thread_t * this  = CURRENT_THREAD;
[640]1848#endif
1849
1850#if (DEBUG_VMM_REMOVE_VSEG & 1 )
[625]1851if( DEBUG_VMM_REMOVE_VSEG < cycle )
[641]1852printk("\n[%s] thread[%x,%x] enters / process %x / type %s / base %x / cycle %d\n",
[625]1853__FUNCTION__, this->process->pid, this->trdid, 
1854process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
1855#endif
1856
[640]1857    // loop on PTEs in GPT to unmap all mapped PTE
[1]1858        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
1859    {
[625]1860        // get ppn and attr
[629]1861        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
[409]1862
[625]1863        if( attr & GPT_MAPPED )  // PTE is mapped
[409]1864        { 
[437]1865
[625]1866#if( DEBUG_VMM_REMOVE_VSEG & 1 )
1867if( DEBUG_VMM_REMOVE_VSEG < cycle )
[641]1868printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / type %s\n",
[640]1869__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
[437]1870#endif
[585]1871            // unmap GPT entry in local GPT
[629]1872            hal_gpt_reset_pte( gpt_xp , vpn );
[409]1873
[656]1874            // release physical page depending on vseg type
1875            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
[409]1876        }
[1]1877    }
[433]1878
[625]1879    // remove vseg from VSL
[611]1880    vmm_detach_vseg_from_vsl( vmm , vseg );
1881
[625]1882    // release vseg descriptor depending on vseg type
1883    if( vseg_type == VSEG_TYPE_STACK )
1884    {
1885        // release slot to local stack allocator
1886        vmm_stack_free( vmm , vseg );
1887    }
1888    else if( (vseg_type == VSEG_TYPE_ANON) || 
1889             (vseg_type == VSEG_TYPE_FILE) || 
1890             (vseg_type == VSEG_TYPE_REMOTE) ) 
1891    {
1892        // release vseg to local mmap allocator
1893        vmm_mmap_free( vmm , vseg );
1894    }
1895    else
1896    {
1897        // release vseg descriptor to local kmem
1898        vseg_free( vseg );
1899    }
1900
1901#if DEBUG_VMM_REMOVE_VSEG
[433]1902cycle = (uint32_t)hal_get_cycles();
[625]1903if( DEBUG_VMM_REMOVE_VSEG < cycle )
[641]1904printk("\n[%s] thread[%x,%x] exit / process %x / type %s / base %x / cycle %d\n",
[625]1905__FUNCTION__, this->process->pid, this->trdid, 
1906process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[433]1907#endif
1908
[625]1909}  // end vmm_remove_vseg()
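
// Recycling sketch (illustrative, not a normative API contract) : the vseg
// descriptor released above is cached by the STACK or MMAP allocator for the
// STACK / ANON / FILE / REMOTE types, so a subsequent vmm_create_vseg() of a
// compatible type can reuse it without a new kmem allocation :
//
//   vmm_remove_vseg( process , vseg );                        // descriptor cached
//   vseg = vmm_create_vseg( process , VSEG_TYPE_ANON , 0 ,    // may reuse the
//                           size , 0 , 0 , XPTR_NULL , cxy ); // cached descriptor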
[1]1910
[611]1911/////////////////////////////////////////////
[640]1912void vmm_resize_vseg( process_t * process,
1913                      vseg_t    * vseg,
1914                      intptr_t    new_base,
1915                      intptr_t    new_size )
[406]1916{
[640]1917    vpn_t     vpn;
1918    ppn_t     ppn;
1919    uint32_t  attr;
[406]1920
[640]1921// check arguments
[672]1922assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
1923assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );
[406]1924
[623]1925#if DEBUG_VMM_RESIZE_VSEG
1926uint32_t   cycle = (uint32_t)hal_get_cycles();
1927thread_t * this  = CURRENT_THREAD;
[640]1928#endif
1929
1930#if (DEBUG_VMM_RESIZE_VSEG & 1)
[623]1931if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]1932printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
1933__FUNCTION__, this->process->pid, this->trdid, 
1934process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[623]1935#endif
1936
[640]1937    // get existing vseg vpn_min and vpn_max
1938    vpn_t     old_vpn_min = vseg->vpn_base;
1939    vpn_t     old_vpn_max = old_vpn_min + vseg->vpn_size - 1;
[1]1940
[640]1941    // compute new vseg vpn_min & vpn_max 
1942    intptr_t min          = new_base;
1943    intptr_t max          = new_base + new_size;
1944    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_SHIFT;
1945    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_SHIFT;
[1]1946
[640]1947    // build extended pointer on GPT
1948    xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );
[1]1949
[657]1950    // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
[640]1951        for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
[623]1952    {
[640]1953        // get ppn and attr
1954        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
[21]1955
[640]1956        if( attr & GPT_MAPPED )  // PTE is mapped
1957        { 
[623]1958
1959#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1960if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]1961printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
1962__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
[623]1963#endif
[640]1964            // unmap GPT entry
1965            hal_gpt_reset_pte( gpt_xp , vpn );
[623]1966
[640]1967            // release physical page when required
[656]1968            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
[640]1969        }
[1]1970    }
[640]1971
1972    // loop on PTEs in GPT to unmap PTE if (new_vpn_max < vpn <= old_vpn_max)
1973        for( vpn = new_vpn_max + 1 ; vpn <= old_vpn_max ; vpn++ )
[1]1974    {
[640]1975        // get ppn and attr
1976        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
[623]1977
[640]1978        if( attr & GPT_MAPPED )  // PTE is mapped
1979        { 
1980
[641]1981#if( DEBUG_VMM_RESIZE_VSEG & 1 )
[623]1982if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]1983printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
1984__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
[623]1985#endif
[640]1986            // unmap GPT entry in local GPT
1987            hal_gpt_reset_pte( gpt_xp , vpn );
[406]1988
[640]1989            // release physical page when required
[656]1990            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
[640]1991        }
[1]1992    }
[623]1993
[640]1994    // resize vseg in VSL
1995    vseg->min      = min;
1996    vseg->max      = max;
1997    vseg->vpn_base = new_vpn_min;
1998    vseg->vpn_size = new_vpn_max - new_vpn_min + 1;
1999
2000#if DEBUG_VMM_RESIZE_VSEG
2001cycle = (uint32_t)hal_get_cycles();
[623]2002if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]2003printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
2004__FUNCTION__, this->process->pid, this->trdid, 
2005process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[623]2006#endif
[406]2007
[640]2008}  // end vmm_resize_vseg
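
// Worked example (illustrative, assuming 4 Kbytes pages) : shrinking a vseg
// [0x10000 , 0x20000[ with new_base == 0x12000 and new_size == 0x8000 gives
//   old_vpn_min = 0x10 / old_vpn_max = 0x1F
//   new_vpn_min = 0x12 / new_vpn_max = 0x19
// the first loop unmaps vpn 0x10 to 0x11, the second loop unmaps vpn 0x1A
// to 0x1F, and the resized vseg covers [0x12000 , 0x1A000[.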
[623]2009
[640]2010/////////////////////////////////////////////////////////////////////////////////////////////
2011// This static function is called twice by the vmm_get_vseg() function.
[640]2012// It scans the (possibly remote) VSL identified by the <vmm_xp> argument to find the
2013// vseg containing a given virtual address <vaddr>, using remote accesses when required.
2014// The lock protecting the VSL must be taken by the caller.
2015/////////////////////////////////////////////////////////////////////////////////////////////
2016// @ vmm_xp  : extended pointer on the process VMM.
2017// @ vaddr   : virtual address.
2018// @ return local pointer on remote vseg if success / return NULL if not found.
2019/////////////////////////////////////////////////////////////////////////////////////////////
2020static vseg_t * vmm_vseg_from_vaddr( xptr_t     vmm_xp,
2021                                     intptr_t   vaddr )
2022{
2023    xptr_t   iter_xp;
2024    xptr_t   vseg_xp;
2025    vseg_t * vseg;
2026    intptr_t min;
2027    intptr_t max;
[623]2028
[640]2029    // get cluster and local pointer on target VMM
2030    vmm_t * vmm_ptr = GET_PTR( vmm_xp );
2031    cxy_t   vmm_cxy = GET_CXY( vmm_xp );
[623]2032
[640]2033    // build extended pointer on VSL root
2034    xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root );
[406]2035
[640]2036    // scan the list of vsegs in VSL
2037    XLIST_FOREACH( root_xp , iter_xp )
2038    {
2039        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
2040        vseg    = GET_PTR( vseg_xp );
[406]2041
[640]2042        min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) );
2043        max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) );
[407]2044
[640]2045        // return success when match
2046        if( (vaddr >= min) && (vaddr < max) ) return vseg;
[1]2047    }
2048
[640]2049    // return failure
2050    return NULL;
[1]2051
[640]2052}  // end vmm_vseg_from_vaddr()
[1]2053
2054///////////////////////////////////////////
[388]2055error_t  vmm_get_vseg( process_t * process,
[394]2056                       intptr_t    vaddr,
[388]2057                       vseg_t   ** found_vseg )
[1]2058{
[640]2059    xptr_t    loc_lock_xp;     // extended pointer on local VSL lock
2060    xptr_t    ref_lock_xp;     // extended pointer on reference VSL lock
2061    vseg_t  * loc_vseg;        // local pointer on local vseg
2062    vseg_t  * ref_vseg;        // local pointer on reference vseg
[1]2063
[640]2064    // build extended pointer on local VSL lock
2065    loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock );
2066     
2067    // get local VSL lock
2068    remote_queuelock_acquire( loc_lock_xp );
[1]2069
[665]2070    // try to get vseg from local VSL
[640]2071    loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr );
[440]2072
[640]2073    if (loc_vseg == NULL)   // vseg not found => access reference VSL
2074    {
[388]2075        // get extended pointer on reference process
2076        xptr_t ref_xp = process->ref_xp;
[1]2077
[640]2078        // get cluster and local pointer on reference process
[388]2079        cxy_t       ref_cxy = GET_CXY( ref_xp );
[433]2080        process_t * ref_ptr = GET_PTR( ref_xp );
[388]2081
[665]2082        if( ref_cxy == local_cxy )    // local is ref => return error
[640]2083        {
2084            printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
2085            __FUNCTION__, vaddr, process->pid );
[388]2086
[665]2087            // release local VSL lock
2088            remote_queuelock_release( loc_lock_xp );
[388]2089
[640]2090            return -1;
2091        }
[665]2092        else                          // ref != local => access ref VSL                     
[640]2093        {
[665]2094            // build extended pointer on reference VSL lock
2095            ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock );
2096     
2097            // get reference VSL lock
2098            remote_queuelock_acquire( ref_lock_xp );
[625]2099
[665]2100            // try to get vseg from reference VSL
2101            ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr );
2102
2103            if( ref_vseg == NULL )  // vseg not found => return error
[640]2104            {
[665]2105                // release both VSL locks
2106                remote_queuelock_release( loc_lock_xp );
2107                remote_queuelock_release( ref_lock_xp );
2108
2109                printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
[640]2110                __FUNCTION__, vaddr, process->pid );
[595]2111
[640]2112                return -1;
2113            }
[665]2114            else                    // vseg found => try to update local VSL
[640]2115            {
[665]2116                // allocate a local vseg descriptor
2117                loc_vseg = vseg_alloc();
[640]2118
[665]2119                if( loc_vseg == NULL )   // no memory => return error
2120                {
2121                    printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
2122                    __FUNCTION__, vaddr, process->pid );
[640]2123
[665]2124                    // release both VSL locks
2125                    remote_queuelock_release( ref_lock_xp );
2126                    remote_queuelock_release( loc_lock_xp );
[640]2127
[665]2128                    return -1;
2129                }
2130                else                     // update local VSL and return success
2131                {
2132                    // initialize local vseg
2133                    vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) );
2134
2135                    // register local vseg in local VSL
2136                    vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg );
2137
2138                    // release both VSL locks
2139                    remote_queuelock_release( ref_lock_xp );
2140                    remote_queuelock_release( loc_lock_xp );
2141
2142                    *found_vseg = loc_vseg;
2143                    return 0;
2144                }
[640]2145            }
2146        }
2147    }
2148    else                        // vseg found in local VSL => return success
2149    {
[665]2150        // release local VSL lock
[640]2151        remote_queuelock_release( loc_lock_xp );
2152
2153        *found_vseg = loc_vseg;
2154        return 0;
2155    }
[388]2156}  // end vmm_get_vseg()
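
// Usage sketch (illustrative) :
//
//   vseg_t * vseg;
//   if( vmm_get_vseg( process , vaddr , &vseg ) )   // returns -1 when not found
//       printk("vaddr %x out of any vseg\n", vaddr );
//   else
//       printk("vaddr %x in %s vseg\n", vaddr , vseg_type_str( vseg->type ) );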
2157
[407]2158//////////////////////////////////////////////////////////////////////////////////////
[407]2159// This static function computes the target cluster to allocate a physical page
[632]2160// for a given <vpn> in a given <vseg>, allocates the page and returns an extended
2161// pointer on the allocated page descriptor.
[407]2162// The vseg cannot have the FILE type.
2163//////////////////////////////////////////////////////////////////////////////////////
[640]2164// @ vseg   : local pointer on vseg.
2165// @ vpn    : unmapped vpn.
[656]2166// @ return an extended pointer on the allocated page descriptor / XPTR_NULL if failure.
[640]2167//////////////////////////////////////////////////////////////////////////////////////
[407]2168static xptr_t vmm_page_allocate( vseg_t * vseg,
2169                                 vpn_t    vpn )
2170{
[433]2171
[632]2172#if DEBUG_VMM_PAGE_ALLOCATE
[619]2173uint32_t   cycle   = (uint32_t)hal_get_cycles();
2174thread_t * this    = CURRENT_THREAD;
[632]2175if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
[595]2176printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
2177__FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
[433]2178#endif
2179
[632]2180    xptr_t       page_xp;
[407]2181    cxy_t        page_cxy;
[577]2182    uint32_t     index;
[407]2183
[577]2184    uint32_t     type   = vseg->type;
2185    uint32_t     flags  = vseg->flags;
2186    uint32_t     x_size = LOCAL_CLUSTER->x_size;
2187    uint32_t     y_size = LOCAL_CLUSTER->y_size;
[407]2188
[567]2189// check vseg type
[672]2190assert( __FUNCTION__, ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
[407]2191
[656]2192    // compute target cluster identifier
[407]2193    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
2194    {
[577]2195        index    = vpn & ((x_size * y_size) - 1);
2196        page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );
[561]2197
[577]2198        // If the cluster selected from VPN's LSBs is empty, we select one randomly
2199        if ( cluster_is_active( page_cxy ) == false )
2200        {
2201            page_cxy = cluster_random_select();
[561]2202        }
[407]2203    }
2204    else                          // other cases => cxy specified in vseg
2205    {
[561]2206        page_cxy = vseg->cxy;
[407]2207    }
2208
[635]2209    // allocate one small physical page from target cluster
[656]2210    kmem_req_t req;
2211    req.type  = KMEM_PPM;
2212    req.order = 0;
2213    req.flags = AF_ZERO;
[407]2214
[656]2215    // get local pointer on page base
2216    void * ptr = kmem_remote_alloc( page_cxy , &req );

    // return XPTR_NULL on allocation failure, as expected by the callers
    if( ptr == NULL ) return XPTR_NULL;
[635]2217
[656]2218    // get extended pointer on page descriptor
2219    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );
2220
[632]2221#if DEBUG_VMM_PAGE_ALLOCATE
[595]2222cycle = (uint32_t)hal_get_cycles();
[632]2223if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
[635]2224printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
2225__FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle );
[433]2226#endif
2227
[632]2228    return page_xp;
[407]2229
2230}  // end vmm_page_allocate() 
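
// Illustrative example of the distributed placement above : on a 4x4 mesh
// (x_size == y_size == 4), the 4 LSBs of the vpn select the target cluster :
//   vpn = 0x1237  =>  index = 0x7  =>  page_cxy = HAL_CXY_FROM_XY( 1 , 3 )
// (assuming cluster (1,3) is active ; otherwise a random cluster is selected).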
2231
[313]2232////////////////////////////////////////
2233error_t vmm_get_one_ppn( vseg_t * vseg,
2234                         vpn_t    vpn,
2235                         ppn_t  * ppn )
2236{
2237    error_t    error;
[407]2238    xptr_t     page_xp;           // extended pointer on physical page descriptor
[606]2239    uint32_t   page_id;           // missing page index in vseg mapper
[406]2240    uint32_t   type;              // vseg type;
[313]2241
[406]2242    type      = vseg->type;
[606]2243    page_id   = vpn - vseg->vpn_base;
[313]2244
[438]2245#if DEBUG_VMM_GET_ONE_PPN
[595]2246uint32_t   cycle = (uint32_t)hal_get_cycles();
2247thread_t * this  = CURRENT_THREAD;
[656]2248if( DEBUG_VMM_GET_ONE_PPN < cycle )
2249printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id  %d / cycle %d\n",
[606]2250__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
[433]2251#endif
[313]2252
[656]2253#if (DEBUG_VMM_GET_ONE_PPN & 2)
2254if( DEBUG_VMM_GET_ONE_PPN < cycle )
2255hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2256#endif
2257
[406]2258    // FILE type : get the physical page from the file mapper
[313]2259    if( type == VSEG_TYPE_FILE )
2260    {
[406]2261        // get extended pointer on mapper
[407]2262        xptr_t mapper_xp = vseg->mapper_xp;
[313]2263
[672]2264assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
[567]2265"mapper not defined for a FILE vseg\n" );
[406]2266       
[606]2267        // get extended pointer on page descriptor
[657]2268        page_xp = mapper_get_page( mapper_xp , page_id );
[406]2269
[606]2270        if ( page_xp == XPTR_NULL ) return EINVAL;
[313]2271    }
2272
[406]2273    // Other types : allocate a physical page from target cluster,
[407]2274    // as defined by vseg type and vpn value
[313]2275    else
2276    {
[433]2277        // allocate one physical page
[407]2278        page_xp = vmm_page_allocate( vseg , vpn );
[406]2279
[635]2280        if( page_xp == XPTR_NULL ) return -1;
[313]2281
[406]2282        // initialise missing page from .elf file mapper for DATA and CODE types
[440]2283        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
[313]2284        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
2285        {
[406]2286            // get extended pointer on mapper
2287            xptr_t     mapper_xp = vseg->mapper_xp;
[313]2288
[672]2289assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
[567]2290"mapper not defined for a CODE or DATA vseg\n" );
[406]2291       
2292            // compute missing page offset in vseg
[606]2293            uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;
[406]2294
[313]2295            // compute missing page offset in .elf file
[406]2296            uint32_t elf_offset = vseg->file_offset + offset;
[313]2297
[438]2298#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2299if( DEBUG_VMM_GET_ONE_PPN < cycle )
[595]2300printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
2301__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
[433]2302#endif
[406]2303            // compute extended pointer on page base
[407]2304            xptr_t base_xp  = ppm_page2base( page_xp );
[313]2305
[406]2306            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
2307            uint32_t file_size = vseg->file_size;
2308
2309            if( file_size < offset )                 // missing page fully in  BSS
[313]2310            {
[406]2311
[438]2312#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2313if( DEBUG_VMM_GET_ONE_PPN < cycle )
[595]2314printk("\n[%s] thread[%x,%x] for vpn  %x / fully in BSS\n",
2315__FUNCTION__, this->process->pid, this->trdid, vpn );
[433]2316#endif
[407]2317                if( GET_CXY( page_xp ) == local_cxy )
[313]2318                {
[315]2319                    memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE );
[313]2320                }
2321                else
2322                {
[315]2323                   hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );       
[313]2324                }
2325            }
[406]2326            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )  // fully in  mapper
[315]2327            {
[406]2328
[438]2329#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2330if( DEBUG_VMM_GET_ONE_PPN < cycle )
[595]2331printk("\n[%s] thread[%x,%x] for vpn  %x / fully in mapper\n",
2332__FUNCTION__, this->process->pid, this->trdid, vpn );
[433]2333#endif
[606]2334                error = mapper_move_kernel( mapper_xp,
2335                                            true,             // to_buffer
2336                                            elf_offset,
2337                                            base_xp,
2338                                            CONFIG_PPM_PAGE_SIZE ); 
[313]2339                if( error ) return EINVAL;
2340            }
[406]2341            else  // both in mapper and in BSS :
2342                  // - (file_size - offset)             bytes from mapper
2343                  // - (page_size + offset - file_size) bytes from BSS
[313]2344            {
[406]2345
[438]2346#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2347if( DEBUG_VMM_GET_ONE_PPN < cycle )
[610]2348printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
[433]2349"      %d bytes from mapper / %d bytes from BSS\n",
[595]2350__FUNCTION__, this->process->pid, this->trdid, vpn,
[407]2351file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
[433]2352#endif
[313]2353                // initialize mapper part
[606]2354                error = mapper_move_kernel( mapper_xp,
2355                                            true,         // to buffer
2356                                            elf_offset,
2357                                            base_xp,
2358                                            file_size - offset ); 
[313]2359                if( error ) return EINVAL;
2360
2361                // initialize BSS part
[407]2362                if( GET_CXY( page_xp ) == local_cxy )
[313]2363                {
[406]2364                    memset( GET_PTR( base_xp ) + file_size - offset , 0 , 
2365                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
[313]2366                }
2367                else
2368                {
[406]2369                   hal_remote_memset( base_xp + file_size - offset , 0 , 
2370                                      offset + CONFIG_PPM_PAGE_SIZE - file_size );
[313]2371                }
2372            }   
[656]2373
2374        }  // end if CODE or DATA types   
[313]2375    } 
2376
2377    // return ppn
[407]2378    *ppn = ppm_page2ppn( page_xp );
[406]2379
[438]2380#if DEBUG_VMM_GET_ONE_PPN
[656]2381if( DEBUG_VMM_GET_ONE_PPN < cycle )
[635]2382printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
[595]2383__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
[433]2384#endif
[406]2385
[656]2386#if (DEBUG_VMM_GET_ONE_PPN & 2)
2387if( DEBUG_VMM_GET_ONE_PPN < cycle )
2388hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2389#endif
2390
[313]2391    return 0;
2392
2393}  // end vmm_get_one_ppn()
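
// Worked example (illustrative, assuming 4 Kbytes pages) : for a DATA vseg
// with file_size == 0x1800 in the .elf mapper, the three cases above give
//   page_id 0 : offset = 0x0000 => fully in mapper (0x1000 bytes copied)
//   page_id 1 : offset = 0x1000 => 0x800 bytes from mapper + 0x800 bytes of BSS
//   page_id 2 : offset = 0x2000 => fully in BSS (page simply zeroed)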
2394
[585]2395///////////////////////////////////////////////////
2396error_t vmm_handle_page_fault( process_t * process,
2397                               vpn_t       vpn )
[1]2398{
[585]2399    vseg_t         * vseg;            // vseg containing vpn
[629]2400    uint32_t         attr;            // PTE_ATTR value
2401    ppn_t            ppn;             // PTE_PPN value
[585]2402    uint32_t         ref_attr;        // PTE_ATTR value in reference GPT
2403    ppn_t            ref_ppn;         // PTE_PPN value in reference GPT
2404    cxy_t            ref_cxy;         // reference cluster for missing vpn
2405    process_t      * ref_ptr;         // reference process for missing vpn
2406    xptr_t           local_gpt_xp;    // extended pointer on local GPT
2407    xptr_t           ref_gpt_xp;      // extended pointer on reference GPT
2408    error_t          error;           // value returned by called functions
[1]2409
[629]2410    thread_t * this  = CURRENT_THREAD;
2411
2412#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2413uint32_t start_cycle = (uint32_t)hal_get_cycles();
2414#endif
2415
[625]2416#if DEBUG_VMM_HANDLE_PAGE_FAULT
[656]2417if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[625]2418printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
[629]2419__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
[625]2420#endif
2421
[656]2422#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
[635]2423if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[656]2424hal_vmm_display( XPTR( local_cxy , this->process ) , true );
[629]2425#endif
2426
[585]2427    // get local vseg (access to reference VSL can be required)
2428    error = vmm_get_vseg( process, 
2429                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
2430                          &vseg );
2431    if( error )
2432    {
[629]2433        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in registered vseg\n",
2434        __FUNCTION__ , vpn , process->pid, this->trdid );
[585]2435       
2436        return EXCP_USER_ERROR;
2437    }
2438
[635]2439#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2440if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[634]2441printk("\n[%s] thread[%x,%x] found vseg %s\n",
2442__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
[433]2443#endif
[406]2444
[629]2445    // build extended pointer on local GPT
2446    local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
2447
[632]2448    // lock PTE in local GPT and get current PPN and attributes
[629]2449    error = hal_gpt_lock_pte( local_gpt_xp,
2450                              vpn,
2451                              &attr,
2452                              &ppn );
2453    if( error )
[438]2454    {
[629]2455        printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n",
2456        __FUNCTION__ , vpn , process->pid );
2457       
2458        return EXCP_KERNEL_PANIC;
2459    }
[407]2460
[635]2461#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2462if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2463printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n",
[634]2464__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy );
[632]2465#endif
2466
2467    // handle page fault only if local PTE still unmapped after lock
[629]2468    if( (attr & GPT_MAPPED) == 0 )
2469    {
2470        // get reference process cluster and local pointer
2471        ref_cxy = GET_CXY( process->ref_xp );
2472        ref_ptr = GET_PTR( process->ref_xp );
[407]2473
[630]2474        /////////////// private vseg or (local == reference)
2475        /////////////// => access only the local GPT
[629]2476        if( (vseg->type == VSEG_TYPE_STACK) ||
2477            (vseg->type == VSEG_TYPE_CODE)  ||
2478            (ref_cxy    == local_cxy ) )
2479        {
[632]2480
[635]2481#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2482if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2483printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
2484__FUNCTION__, this->process->pid, this->trdid,
2485local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
[632]2486#endif
2487            // allocate and initialise a physical page
[629]2488            error = vmm_get_one_ppn( vseg , vpn , &ppn );
[407]2489
[585]2490            if( error )
[408]2491            {
[629]2492                printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n",
[408]2493                __FUNCTION__ , process->pid , vpn );
[1]2494
[629]2495                // unlock PTE in local GPT
2496                hal_gpt_unlock_pte( local_gpt_xp , vpn );
[406]2497
[585]2498                return EXCP_KERNEL_PANIC;
[407]2499            }
2500
[629]2501            // define attr from vseg flags
[632]2502            attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
[629]2503            if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
2504            if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
2505            if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
2506            if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
[407]2507
[629]2508            // set PTE to local GPT
[632]2509            // it unlocks this PTE
[629]2510            hal_gpt_set_pte( local_gpt_xp,
2511                             vpn,
2512                             attr,
2513                             ppn );
[585]2514
[629]2515#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2516uint32_t end_cycle = (uint32_t)hal_get_cycles();
2517#endif
[585]2518
2519#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2520if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2521printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n",
2522__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
[585]2523#endif
2524
[656]2525#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
2526if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2527hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2528#endif
2529
[629]2530#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2531uint32_t cost      = end_cycle - start_cycle;
[629]2532this->info.local_pgfault_nr++;
[641]2533this->info.local_pgfault_cost += cost;
2534if( cost > this->info.local_pgfault_max ) this->info.local_pgfault_max = cost;
[629]2535#endif
2536            return EXCP_NON_FATAL;
[585]2537
[629]2538        }   // end local GPT access
[585]2539
[630]2540        /////////////////// public vseg and (local != reference)
2541        /////////////////// => access ref GPT to update local GPT
[629]2542        else                               
2543        {
[632]2544
[635]2545#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2546if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2547printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
2548__FUNCTION__, this->process->pid, this->trdid, 
2549local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
[632]2550#endif
[629]2551            // build extended pointer on reference GPT
2552            ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
[585]2553
[632]2554            // lock PTE in reference GPT and get current PPN and attributes
2555            error = hal_gpt_lock_pte( ref_gpt_xp,
2556                                      vpn,
2557                                      &ref_attr,
2558                                      &ref_ppn );
2559            if( error )
2560            {
2561                printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
2562                __FUNCTION__ , vpn , process->pid );
2563       
2564                // unlock PTE in local GPT
2565                hal_gpt_unlock_pte( local_gpt_xp , vpn );
2566                   
2567                return EXCP_KERNEL_PANIC;
2568            }
[1]2569
[635]2570#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2571if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2572printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n",
2573__FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn );
2574#endif
2575
2576            if( ref_attr & GPT_MAPPED )        // false page fault
[585]2577            {
[629]2578                // update local GPT from reference GPT values
[632]2579                // this unlocks the PTE in local GPT
[629]2580                hal_gpt_set_pte( local_gpt_xp,
2581                                 vpn,
2582                                 ref_attr,
2583                                 ref_ppn );
[585]2584
[635]2585#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2586if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2587printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n",
2588__FUNCTION__, this->process->pid, this->trdid );
2589#endif
2590
2591                // unlock the PTE in reference GPT
2592                hal_gpt_unlock_pte( ref_gpt_xp, vpn );
2593                             
[635]2594#if (DEBUG_VMM_HANDLE_PAGE_FAULT &1)
2595if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2596printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n",
2597__FUNCTION__, this->process->pid, this->trdid );
2598#endif
2599
[629]2600#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2601uint32_t end_cycle = (uint32_t)hal_get_cycles();
2602#endif
2603
[585]2604#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2605if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2606printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n",
2607__FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle );
[433]2608#endif
[406]2609
[656]2610#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
2611if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2612hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2613#endif
2614
[629]2615#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2616uint32_t cost      = end_cycle - start_cycle;
[629]2617this->info.false_pgfault_nr++;
[641]2618this->info.false_pgfault_cost += cost;
2619if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
[629]2620#endif
2621                return EXCP_NON_FATAL;
2622            }
[632]2623            else                            // true page fault
[629]2624            {
[585]2625                // allocate and initialise a physical page depending on the vseg type
[629]2626                error = vmm_get_one_ppn( vseg , vpn , &ppn );
[1]2627
[585]2628                if( error )
2629                {
2630                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
2631                    __FUNCTION__ , process->pid , vpn );
[313]2632
[632]2633                    // unlock PTE in local GPT and in reference GPT
[629]2634                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
[632]2635                    hal_gpt_unlock_pte( ref_gpt_xp   , vpn );
[585]2636                   
[629]2637                    return EXCP_KERNEL_PANIC;
[585]2638                }
[1]2639
[629]2640                // define attr from vseg flags
[632]2641                attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
[629]2642                if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
2643                if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
2644                if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
2645                if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
[585]2646
[635]2647#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2648if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2649printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
2650__FUNCTION__, this->process->pid, this->trdid );
2651#endif
[629]2652                // set PTE in reference GPT
[632]2653                // this unlocks the PTE
[629]2654                hal_gpt_set_pte( ref_gpt_xp,
2655                                 vpn,
2656                                 attr,
2657                                 ppn );
2658
[635]2659#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2660if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2661printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
2662__FUNCTION__, this->process->pid, this->trdid );
2663#endif
2664
[629]2665                // set PTE in local GPT
[632]2666                // this unlocks the PTE
[629]2667                hal_gpt_set_pte( local_gpt_xp,
2668                                 vpn,
2669                                 attr,
2670                                 ppn );
2671
2672#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2673uint32_t end_cycle = (uint32_t)hal_get_cycles();
2674#endif
2675
[440]2676#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2677if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2678printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
2679__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
[435]2680#endif
[629]2681
[656]2682#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
2683if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2684hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2685#endif
2686
[629]2687#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2688uint32_t cost      = end_cycle - start_cycle;
[629]2689this->info.global_pgfault_nr++;
[641]2690this->info.global_pgfault_cost += cost;
2691if( cost > this->info.global_pgfault_max ) this->info.global_pgfault_max = cost;
[629]2692#endif
2693                return EXCP_NON_FATAL;
2694            }
[585]2695        }
2696    }
[629]2697    else   // page has been locally mapped by another concurrent thread
2698    {
[632]2699        // unlock the PTE in local GPT
[629]2700        hal_gpt_unlock_pte( local_gpt_xp , vpn );
2701
[632]2702#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2703uint32_t end_cycle = (uint32_t)hal_get_cycles();
2704#endif
2705
2706#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2707if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2708printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
2709__FUNCTION__, vpn, ppn, attr, end_cycle );
2710#endif
2711
2712#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2713uint32_t cost      = end_cycle - start_cycle;
[632]2714this->info.false_pgfault_nr++;
[641]2715this->info.false_pgfault_cost += cost;
2716if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
[632]2717#endif
[629]2718        return EXCP_NON_FATAL;
2719    }
2720
[585]2721}   // end vmm_handle_page_fault()
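
// Caller-side sketch (illustrative / the actual dispatch is architecture
// specific and lives in the hal_exception handler) :
//
//   error = vmm_handle_page_fault( process , bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
//   if     ( error == EXCP_USER_ERROR   ) { /* deliver fatal signal to user  */ }
//   else if( error == EXCP_KERNEL_PANIC ) { /* kernel panic                  */ }
//   else                                  { /* EXCP_NON_FATAL : retry access */ }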
[435]2722
[585]2723////////////////////////////////////////////
2724error_t vmm_handle_cow( process_t * process,
2725                        vpn_t       vpn )
2726{
2727    vseg_t         * vseg;            // vseg containing vpn
[629]2728    xptr_t           gpt_xp;          // extended pointer on GPT (local or reference)
2729    gpt_t          * gpt_ptr;         // local pointer on GPT (local or reference)
2730    cxy_t            gpt_cxy;         // GPT cluster identifier
[585]2731    uint32_t         old_attr;        // current PTE_ATTR value
2732    ppn_t            old_ppn;         // current PTE_PPN value
2733    uint32_t         new_attr;        // new PTE_ATTR value
2734    ppn_t            new_ppn;         // new PTE_PPN value
[629]2735    cxy_t            ref_cxy;         // reference process cluster
2736    process_t      * ref_ptr;         // local pointer on reference process
[585]2737    error_t          error;
[1]2738
[629]2739    thread_t * this  = CURRENT_THREAD;
[625]2740
[585]2741#if DEBUG_VMM_HANDLE_COW
[629]2742uint32_t   cycle = (uint32_t)hal_get_cycles();
[640]2743if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[595]2744printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
[619]2745__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
[629]2746#endif
2747
[656]2748#if (DEBUG_VMM_HANDLE_COW & 2)
[640]2749hal_vmm_display( XPTR( local_cxy , process ) , true );
[585]2750#endif
2751
2752    // get local vseg
2753    error = vmm_get_vseg( process, 
2754                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
2755                          &vseg );
[440]2756    if( error )
[1]2757    {
[629]2758        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
[625]2759        __FUNCTION__, vpn, process->pid, this->trdid );
[585]2760
[629]2761        return EXCP_USER_ERROR;
[440]2762    }
[407]2763
[629]2764#if DEBUG_VMM_HANDLE_COW
[640]2765if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[629]2766printk("\n[%s] thread[%x,%x] get vseg %s\n",
2767__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
[619]2768#endif
2769
[629]2770    // get reference process cluster and local pointer
[585]2771    ref_cxy = GET_CXY( process->ref_xp );
2772    ref_ptr = GET_PTR( process->ref_xp );
[407]2773
[629]2774    // build pointers on relevant GPT
2775    // - access only local GPT for a private vseg 
2776    // - access reference GPT and all copies for a public vseg
[585]2777    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[440]2778    {
[629]2779        gpt_cxy = local_cxy;
2780        gpt_ptr = &process->vmm.gpt;
2781        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
[1]2782    }
[440]2783    else
[1]2784    {
[629]2785        gpt_cxy = ref_cxy;
2786        gpt_ptr = &ref_ptr->vmm.gpt;
2787        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
[1]2788    }
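
    // STACK and CODE vsegs are private (each cluster has its own mapping),
    // so only the local GPT is involved ; for all other (public) vseg types,
    // the reference GPT owns the up-to-date mapping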
2789
[629]2790    // lock target PTE in relevant GPT (local or reference)
[632]2791    // and get current PTE value
[629]2792    error = hal_gpt_lock_pte( gpt_xp,
2793                              vpn,
2794                              &old_attr,
2795                              &old_ppn );
2796    if( error )
2797    {
2798        printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
2799        __FUNCTION__ , gpt_cxy, vpn , process->pid );
2800       
2801        return EXCP_KERNEL_PANIC;
2802    }
[441]2803
[629]2804#if DEBUG_VMM_HANDLE_COW
[640]2805if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2806printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
2807__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
2808#endif
2809
[629]2810    // return user error if COW attribute not set or PTE2 unmapped
2811    if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
[585]2812    {
[629]2813        hal_gpt_unlock_pte( gpt_xp , vpn );
[407]2814
[629]2815        return EXCP_USER_ERROR;
[407]2816    }
2817
[619]2818    // get pointers on physical page descriptor
[585]2819    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
2820    cxy_t    page_cxy = GET_CXY( page_xp );
2821    page_t * page_ptr = GET_PTR( page_xp );
[435]2822
[585]2823    // get extended pointers on forks and lock field in page descriptor
2824    xptr_t forks_xp       = XPTR( page_cxy , &page_ptr->forks );
2825    xptr_t forks_lock_xp  = XPTR( page_cxy , &page_ptr->lock );
[407]2826
[585]2827    // take lock protecting "forks" counter
2828    remote_busylock_acquire( forks_lock_xp );
[407]2829
[585]2830    // get number of pending forks from page descriptor
2831    uint32_t forks = hal_remote_l32( forks_xp );
[441]2832
[629]2833#if DEBUG_VMM_HANDLE_COW
[640]2834if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2835printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
2836__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
2837#endif
2838
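    // two cases, depending on the number of pending forks registered
    // in the physical page descriptor :
    // - forks > 0  : the page is still shared with child process(es),
    //   and the writer must receive a private copy of the page
    // - forks == 0 : the page has no other user, and can simply be
    //   made writable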
[585]2839    if( forks )        // pending fork => allocate a new page, and copy old to new
2840    {
[619]2841        // decrement pending forks counter in page descriptor
2842        hal_remote_atomic_add( forks_xp , -1 );
2843
2844        // release lock protecting "forks" counter
2845        remote_busylock_release( forks_lock_xp );
2846
[629]2847        // allocate a new physical page depending on vseg type
[585]2848        page_xp = vmm_page_allocate( vseg , vpn );
[619]2849
[585]2850        if( page_xp == XPTR_NULL ) 
2851        {
2852            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
2853            __FUNCTION__ , vpn, process->pid );
[441]2854
[629]2855            hal_gpt_unlock_pte( gpt_xp , vpn ); 
[441]2856
[585]2857            return EXCP_KERNEL_PANIC;
2858        }
[441]2859
[585]2860        // compute allocated page PPN
2861        new_ppn = ppm_page2ppn( page_xp );
[441]2862
[629]2863#if DEBUG_VMM_HANDLE_COW
[640]2864if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2865printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
2866__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
2867#endif
2868
[585]2869        // copy old page content to new page
[619]2870        hal_remote_memcpy( ppm_ppn2base( new_ppn ),
2871                           ppm_ppn2base( old_ppn ),
2872                           CONFIG_PPM_PAGE_SIZE );
[441]2873
[629]2874#if DEBUG_VMM_HANDLE_COW
[640]2875if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2876printk("\n[%s] thread[%x,%x] copied old page to new page\n",
2877__FUNCTION__, this->process->pid, this->trdid );
[585]2878#endif
[440]2879
[585]2880    }             
2881    else               // no pending fork => keep the existing page
2882    {
[619]2883        // release lock protecting "forks" counter
2884        remote_busylock_release( forks_lock_xp );
[1]2885
[585]2886#if(DEBUG_VMM_HANDLE_COW & 1)
[640]2887if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[635]2888printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
[619]2889__FUNCTION__, this->process->pid, this->trdid, old_ppn );
[585]2890#endif
2891        new_ppn = old_ppn;
2892    }
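
    // from this point, new_ppn is either the freshly allocated private copy,
    // or the existing physical page when no fork was pending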
[1]2893
[629]2894    // build new_attr : set WRITABLE, reset COW, reset LOCKED
2895    new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));
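
    // for example, assuming single-bit flags, an old_attr equal to
    // (GPT_MAPPED | GPT_COW | GPT_LOCKED) becomes (GPT_MAPPED | GPT_WRITABLE)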
[585]2896
[635]2897#if(DEBUG_VMM_HANDLE_COW & 1)
[640]2898if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[635]2899printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
2900__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
2901#endif
2902
[629]2903    // update the relevant GPT(s)
2904    // - private vseg => update only the local GPT
2905    // - public vseg => update the reference GPT AND all the GPT copies
[585]2906    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[1]2907    {
[635]2908        // set new PTE in local GPT
[585]2909        hal_gpt_set_pte( gpt_xp,
2910                         vpn,
2911                         new_attr,
2912                         new_ppn );
[1]2913    }
[585]2914    else
[1]2915    {
[640]2916        // set new PTE in all GPT copies
2917        vmm_global_update_pte( process,
2918                               vpn,
2919                               new_attr,
2920                               new_ppn );
[1]2921    }
2922
[585]2923#if DEBUG_VMM_HANDLE_COW
2924cycle = (uint32_t)hal_get_cycles();
[640]2925if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[595]2926printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
[619]2927__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
[585]2928#endif
[313]2929
[656]2930#if (DEBUG_VMM_HANDLE_COW & 2)
[640]2931hal_vmm_display( XPTR( local_cxy , process ) , true );
[635]2932#endif
2933
[585]2934    return EXCP_NON_FATAL;
[1]2935
[585]2936}   // end vmm_handle_cow()
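
////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled) : a possible dispatch from the architecture
// specific exception handler. Only vmm_handle_cow() and the EXCP_* return codes come
// from this file ; the function name, its signature, and the comments on the return
// codes reflect assumptions, not a definitive implementation.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
error_t vmm_write_violation_dispatch( process_t * process,
                                      vpn_t       vpn )
{
    // a write access violation is first handled as a possible copy-on-write
    error_t error = vmm_handle_cow( process , vpn );

    // EXCP_NON_FATAL    => the faulty instruction can be safely restarted
    // EXCP_USER_ERROR   => illegal write access : the process must be killed
    // EXCP_KERNEL_PANIC => unrecoverable kernel error
    return error;
}
#endif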
2937