source: trunk/kernel/mm/vmm.c

Last change on this file was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application,
including error recovery in case of packet loss.

File size: 101.4 KB
/*
 * vmm.c - virtual memory manager related operations implementation.
 *
 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
 *           Alain Greiner    (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_gpt.h>
#include <hal_vmm.h>
#include <hal_irqmask.h>
#include <hal_macros.h>
#include <printk.h>
#include <memcpy.h>
#include <remote_queuelock.h>
#include <list.h>
#include <xlist.h>
#include <bits.h>
#include <process.h>
#include <thread.h>
#include <vseg.h>
#include <cluster.h>
#include <scheduler.h>
#include <vfs.h>
#include <mapper.h>
#include <page.h>
#include <kmem.h>
#include <vmm.h>
#include <hal_exception.h>

////////////////////////////////////////////////////////////////////////////////////////////
//   Extern global variables
////////////////////////////////////////////////////////////////////////////////////////////

extern  process_t  process_zero;      // allocated in cluster.c

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_user_init() function.
// It initialises the STACK allocator used by the VMM: it clears the bitmap of
// allocated stack slots and registers the STACK zone base (CONFIG_VMM_STACK_BASE).
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_stack_init( vmm_t * vmm )
{

// check STACK zone
assert( __FUNCTION__, ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n");

    // get pointer on STACK allocator
    stack_mgr_t * mgr = &vmm->stack_mgr;

    mgr->bitmap   = 0;
    mgr->vpn_base = CONFIG_VMM_STACK_BASE;
    busylock_init( &mgr->lock , LOCK_VMM_STACK );

}

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM STACK specific allocator. Depending on the local thread index <ltid>,
// it checks availability of the corresponding slot in the process STACKS region,
// allocates a vseg descriptor, and initializes the "vpn_base" and "vpn_size" fields.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in]  pointer on VMM.
// @ ltid     : [in]  requested slot == local user thread identifier.
////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * vmm_stack_alloc( vmm_t  * vmm,
                                 ltid_t   ltid )
{

// check ltid argument
assert( __FUNCTION__,
(ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
"slot index %d too large for a user stack vseg", ltid );

    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // get lock protecting stack allocator
    busylock_acquire( &mgr->lock );

// check requested slot is available
assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , ltid ) == false),
"slot index %d already allocated", ltid );

    // allocate a vseg descriptor
    vseg_t * vseg = vseg_alloc();

    if( vseg == NULL )
    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
__FUNCTION__ , local_cxy );
#endif
        busylock_release( &mgr->lock );
        return NULL;
    }

    // update bitmap
    bitmap_set( &mgr->bitmap , ltid );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // set "vpn_base" & "vpn_size" fields (first page not allocated)
    vseg->vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1;
    vseg->vpn_size = CONFIG_VMM_STACK_SIZE - 1;

    return vseg;

}  // end vmm_stack_alloc()

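////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original source): the slot arithmetic used by
// vmm_stack_alloc() above. The first page of each slot is left unmapped, so a stack
// overflow hits an unmapped guard page and raises a page fault instead of silently
// corrupting the next thread stack. The numeric values below are hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0   // example: with CONFIG_VMM_STACK_BASE == 0xc0000 and
        // CONFIG_VMM_STACK_SIZE == 0x1000 (hypothetical), thread ltid == 2 gets
        //     vpn_base = 0xc0000 + (2 * 0x1000) + 1 = 0xc2001
        //     vpn_size = 0x1000 - 1                 = 0xfff
static inline vpn_t stack_slot_vpn_base( stack_mgr_t * mgr , ltid_t ltid )
{
    return mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1;
}
#endif
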
////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_remove_vseg() function, and implements
// the VMM STACK specific deallocator.
// It updates the bitmap to release the corresponding slot in the process STACKS region,
// and releases the memory allocated to the vseg descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in] pointer on VMM.
// @ vseg     : [in] pointer on released vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_stack_free( vmm_t  * vmm,
                            vseg_t * vseg )
{
    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // compute slot index
    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;

// check index
assert( __FUNCTION__, (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
"slot index %d too large for a user stack vseg", index );

// check released slot is allocated
assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , index ) == true),
"released slot index %d not allocated", index );

    // get lock on stack allocator
    busylock_acquire( &mgr->lock );

    // update stacks_bitmap
    bitmap_clear( &mgr->bitmap , index );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // release memory allocated to vseg descriptor
    vseg_free( vseg );

}  // end vmm_stack_free()

////////////////////////////////////////////////////////////////////////////////////////////
// This function displays the current state of the VMM MMAP allocator of a process VMM
// identified by the <vmm> argument.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_mmap_display( vmm_t * vmm )
{
    uint32_t  order;
    xptr_t    root_xp;
    xptr_t    iter_xp;

    // get pointer on process
    process_t * process = (process_t *)(((char*)vmm) - OFFSETOF( process_t , vmm ));

    // get process PID
    pid_t pid = process->pid;

    // get pointer on VMM MMAP allocator
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // display header
    printk("***** VMM MMAP allocator / process %x *****\n", pid );

    // scan the array of free lists of vsegs
    for( order = 0 ; order <= CONFIG_VMM_HEAP_MAX_ORDER ; order++ )
    {
        root_xp = XPTR( local_cxy , &mgr->free_list_root[order] );

        if( !xlist_is_empty( root_xp ) )
        {
            printk(" - %d (%x pages) : ", order , 1<<order );

            XLIST_FOREACH( root_xp , iter_xp )
            {
                xptr_t   vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
                vseg_t * vseg    = GET_PTR( vseg_xp );

                printk("%x | ", vseg->vpn_base );
            }

            printk("\n");
        }
    }
}  // end vmm_mmap_display()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_user_init() function.
// It initialises the free lists of vsegs used by the VMM MMAP allocator.
// TODO this function is only valid for 32 bits cores, and makes three assumptions:
// HEAP_BASE == 1 Gbytes / HEAP_SIZE == 2 Gbytes / MMAP_MAX_SIZE == 1 Gbytes
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_mmap_init( vmm_t * vmm )
{

// check HEAP base and size
assert( __FUNCTION__, (CONFIG_VMM_HEAP_BASE == 0x40000) && (CONFIG_VMM_STACK_BASE == 0xc0000),
"CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" );

// check MMAP vseg max order
assert( __FUNCTION__, (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" );

    // get pointer on MMAP allocator
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // initialize HEAP base and size
    mgr->vpn_base        = CONFIG_VMM_HEAP_BASE;
    mgr->vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;

    // initialize lock
    busylock_init( &mgr->lock , LOCK_VMM_MMAP );

    // initialize free lists
    uint32_t   i;
    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
    {
        xlist_root_init( XPTR( local_cxy , &mgr->free_list_root[i] ) );
    }

    // allocate and register first 1 Gbytes vseg
    vseg_t * vseg0 = vseg_alloc();

assert( __FUNCTION__, (vseg0 != NULL) , "cannot allocate vseg" );

    vseg0->vpn_base = CONFIG_VMM_HEAP_BASE;
    vseg0->vpn_size = CONFIG_VMM_HEAP_BASE;

    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
                     XPTR( local_cxy , &vseg0->xlist ) );

    // allocate and register second 1 Gbytes vseg
    vseg_t * vseg1 = vseg_alloc();

assert( __FUNCTION__, (vseg1 != NULL) , "cannot allocate vseg" );

    vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1;
    vseg1->vpn_size = CONFIG_VMM_HEAP_BASE;

    xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ),
                     XPTR( local_cxy , &vseg1->xlist ) );

#if DEBUG_VMM_MMAP
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
printk("\n[%s] thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
vmm_mmap_display( vmm );
#endif

}  // end vmm_mmap_init()

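////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not part of the original source): with the 4 Kbytes pages assumed
// by the 32 bits configuration above, CONFIG_VMM_HEAP_BASE == 0x40000 pages is the
// 1 Gbytes boundary, and the two registered vsegs of 0x40000 pages each tile the whole
// [1 Gbytes , 3 Gbytes[ HEAP/MMAP zone:
//     vseg0 : vpn_base = 0x40000 / vpn_size = 0x40000   -> [1 Gbytes , 2 Gbytes[
//     vseg1 : vpn_base = 0x80000 / vpn_size = 0x40000   -> [2 Gbytes , 3 Gbytes[
// Note that 0x40000 == 1 << 18, matching CONFIG_VMM_HEAP_MAX_ORDER == 18.
////////////////////////////////////////////////////////////////////////////////////////////
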
////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM MMAP specific allocator. Depending on the requested number of pages <npages>,
// it gets a free vseg from the relevant free_list, and initializes the "vpn_base" and
// "vpn_size" fields.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in] pointer on VMM.
// @ npages   : [in] requested number of pages.
// @ returns local pointer on vseg if success / returns NULL if failure.
////////////////////////////////////////////////////////////////////////////////////////////
static vseg_t * vmm_mmap_alloc( vmm_t * vmm,
                                vpn_t   npages )
{

#if DEBUG_VMM_MMAP
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_MMAP < cycle )
printk("\n[%s] thread[%x,%x] for %x pages / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, npages, cycle );
#endif

    // number of allocated pages must be a power of 2
    // compute actual size and order
    vpn_t    required_vpn_size = POW2_ROUNDUP( npages );
    uint32_t required_order    = bits_log2( required_vpn_size );

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // take lock protecting free lists in MMAP allocator
    busylock_acquire( &mgr->lock );

    // initialise the while loop variables
    uint32_t   current_order = required_order;
    vseg_t   * current_vseg  = NULL;

    // search a free vseg equal or larger than requested size
    while( current_order <= CONFIG_VMM_HEAP_MAX_ORDER )
    {
        // build extended pointer on free_list_root[current_order]
        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[current_order] );

        if( !xlist_is_empty( root_xp ) )
        {
            // get extended pointer on first vseg in this free_list
            xptr_t current_vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
            current_vseg = GET_PTR( current_vseg_xp );

            // build extended pointer on xlist field in vseg descriptor
            xptr_t list_entry_xp = XPTR( local_cxy , &current_vseg->xlist );

            // remove this vseg from the free_list
            xlist_unlink( list_entry_xp );

            break;
        }

        // increment loop index
        current_order++;

    }  // end while loop

    if( current_vseg == NULL )  // return failure
    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
__FUNCTION__, npages , local_cxy );
#endif
        busylock_release( &mgr->lock );
        return NULL;
    }

    // split recursively the found vseg in smaller vsegs
    // if required, and update the free-lists accordingly
    while( current_order > required_order )
    {
        // get found vseg base and size
        vpn_t  vpn_base = current_vseg->vpn_base;
        vpn_t  vpn_size = current_vseg->vpn_size;

        // allocate a new vseg for the upper half of current vseg
        vseg_t * new_vseg = vseg_alloc();

        if( new_vseg == NULL )
        {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
__FUNCTION__ , local_cxy );
#endif
            busylock_release( &mgr->lock );
            return NULL;
        }

        // initialise new vseg (upper half of found vseg)
        new_vseg->vmm      = vmm;
        new_vseg->vpn_base = vpn_base + (vpn_size >> 1);
        new_vseg->vpn_size = vpn_size >> 1;

        // insert new vseg in relevant free_list
        xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[current_order-1] ),
                         XPTR( local_cxy , &new_vseg->xlist ) );

        // update found vseg
        current_vseg->vpn_size = vpn_size>>1;

        // update order
        current_order --;
    }

    // release lock protecting free lists
    busylock_release( &mgr->lock );

#if DEBUG_VMM_MMAP
vmm_mmap_display( vmm );
#endif

    return current_vseg;

}  // end vmm_mmap_alloc()

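////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not part of the original source): a worked example of the buddy
// split performed by vmm_mmap_alloc() above. Requesting npages == 5 rounds up to
// 8 pages (required_order == 3). If the smallest non-empty free list holds an
// order 5 vseg (32 pages), the splitting loop runs twice:
//     [32 pages] -> keep lower 16 / put upper 16 in free_list_root[4]
//     [16 pages] -> keep lower  8 / put upper  8 in free_list_root[3]
// and the remaining order 3 vseg (8 pages) is returned to the caller.
////////////////////////////////////////////////////////////////////////////////////////////
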
////////////////////////////////////////////////////////////////////////////////////////////
// This static function implements the VMM MMAP specific deallocator.
// It is called by the vmm_remove_vseg() function.
// It releases the vseg to the relevant free_list, after trying (recursively) to
// merge it with its buddy vseg.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in] pointer on VMM.
// @ vseg     : [in] pointer on released vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_mmap_free( vmm_t  * vmm,
                           vseg_t * vseg )
{

#if DEBUG_VMM_MMAP
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_MMAP < cycle )
printk("\n[%s] thread[%x,%x] for vpn_base %x / vpn_size %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg->vpn_base, vseg->vpn_size, cycle );
#endif

    vseg_t * buddy_vseg;

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // take lock protecting free lists
    busylock_acquire( &mgr->lock );

    // initialise loop variables
    // released_vseg is the currently released vseg
    vseg_t * released_vseg     = vseg;
    uint32_t released_order    = bits_log2( vseg->vpn_size );

    // iteratively merge the released vseg with its buddy vseg
    // register the current vseg and exit when buddy not found
    while( released_order <= CONFIG_VMM_HEAP_MAX_ORDER )
    {
        // compute buddy_vseg vpn_base
        vpn_t buddy_vpn_base = released_vseg->vpn_base ^ (1 << released_order);

        // build extended pointer on free_list_root[released_order]
        xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[released_order] );

        // scan this free list to find the buddy vseg
        xptr_t   iter_xp;
        buddy_vseg = NULL;
        XLIST_FOREACH( root_xp , iter_xp )
        {
            xptr_t   current_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
            vseg_t * current_vseg    = GET_PTR( current_vseg_xp );

            if( current_vseg->vpn_base == buddy_vpn_base )
            {
                buddy_vseg = current_vseg;
                break;
            }
        }

        if( buddy_vseg != NULL )     // buddy found => merge released & buddy
        {
            // update released vseg fields
            released_vseg->vpn_size = buddy_vseg->vpn_size<<1;
            if( released_vseg->vpn_base > buddy_vseg->vpn_base)
                released_vseg->vpn_base = buddy_vseg->vpn_base;

            // remove buddy vseg from free_list
            xlist_unlink( XPTR( local_cxy , &buddy_vseg->xlist ) );

            // release memory allocated to buddy descriptor
            vseg_free( buddy_vseg );
        }
        else                         // buddy not found => register & exit
        {
            // register released vseg in free list
            xlist_add_first( root_xp , XPTR( local_cxy , &released_vseg->xlist ) );

            // exit while loop
            break;
        }

        // increment released_order
        released_order++;
    }

    // release lock
    busylock_release( &mgr->lock );

#if DEBUG_VMM_MMAP
vmm_mmap_display( vmm );
#endif

}  // end vmm_mmap_free()

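////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the kernel build): the buddy address computation
// used by vmm_mmap_free() above. For a block of (1 << order) pages, the buddy vseg
// base differs from the released base only in bit <order> of the vpn.
////////////////////////////////////////////////////////////////////////////////////////////
#if 0
static inline vpn_t mmap_buddy_vpn_base( vpn_t    vpn_base,
                                         uint32_t order )
{
    // e.g. vpn_base == 0x40008 and order == 3 => buddy == 0x40000
    return vpn_base ^ (1 << order);
}
#endif
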
////////////////////////////////////////////////////////////////////////////////////////////
// This static function registers one vseg in the VSL of a local process descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// vmm       : [in] pointer on VMM.
// vseg      : [in] pointer on vseg.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
                             vseg_t * vseg )
{
    // update vseg descriptor
    vseg->vmm = vmm;

    // increment vsegs number
    vmm->vsegs_nr++;

    // add vseg in vmm list
    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
                    XPTR( local_cxy , &vseg->xlist ) );

}  // end vmm_attach_vseg_to_vsl()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function removes one vseg from the VSL of a local process descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// vmm       : [in] pointer on VMM.
// vseg      : [in] pointer on vseg.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
                               vseg_t * vseg )
{
    // update vseg descriptor
    vseg->vmm = NULL;

    // decrement vsegs number
    vmm->vsegs_nr--;

    // remove vseg from VSL
    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

}  // end vmm_detach_vseg_from_vsl()

////////////////////////////////////////////
error_t vmm_user_init( process_t * process )
{

#if DEBUG_VMM_USER_INIT
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_INIT )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t   * vmm = &process->vmm;

// check UTILS zone
assert( __FUNCTION__ , ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
(CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) , "UTILS zone too small\n" );

    // initialize lock protecting the VSL
    remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );

    // initialize STACK allocator
    vmm_stack_init( vmm );

    // initialize MMAP allocator
    vmm_mmap_init( vmm );

    // initialize instrumentation counters
    vmm->false_pgfault_nr    = 0;
    vmm->local_pgfault_nr    = 0;
    vmm->global_pgfault_nr   = 0;
    vmm->false_pgfault_cost  = 0;
    vmm->local_pgfault_cost  = 0;
    vmm->global_pgfault_cost = 0;

    hal_fence();

#if DEBUG_VMM_USER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_INIT )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

    return 0;

}  // end vmm_user_init()

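////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not part of the original source): the user virtual space layout
// assumed by the allocators initialised above, for 32 bits cores. The exact zone bounds
// come from the CONFIG_VMM_* parameters checked in the asserts:
//
//   0            ELF_BASE           HEAP_BASE (1 Gbytes)   STACK_BASE (3 Gbytes)   4 Gbytes
//   | UTILS      | ELF              | HEAP / MMAP zone     | STACKS zone           |
//   | args, envs | code + data      | buddy allocator      | one slot per thread   |
////////////////////////////////////////////////////////////////////////////////////////////
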
//////////////////////////////////////////
void vmm_user_reset( process_t * process )
{
    xptr_t       vseg_xp;
    vseg_t     * vseg;
    vseg_type_t  vseg_type;

#if DEBUG_VMM_USER_RESET
uint32_t   cycle;
thread_t * this = CURRENT_THREAD;
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    // get pointer on local VMM
    vmm_t * vmm = &process->vmm;

    // build extended pointer on VSL root and VSL lock
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    // take the VSL lock
    remote_queuelock_acquire( lock_xp );

    // scan the VSL to delete all non kernel vsegs
    // (we don't use a FOREACH in case of item deletion)
    xptr_t   iter_xp;
    xptr_t   next_xp;
    for( iter_xp = hal_remote_l64( root_xp ) ;
         iter_xp != root_xp ;
         iter_xp = next_xp )
    {
        // save extended pointer on next item in xlist
        next_xp = hal_remote_l64( iter_xp );

        // get pointers on current vseg in VSL
        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg      = GET_PTR( vseg_xp );
        vseg_type = vseg->type;

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        // delete non kernel vseg
        if( (vseg_type != VSEG_TYPE_KCODE) &&
            (vseg_type != VSEG_TYPE_KDATA) &&
            (vseg_type != VSEG_TYPE_KDEV ) )
        {
            // remove vseg from VSL
            vmm_remove_vseg( process , vseg );

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
        else
        {

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
    }  // end loop on vsegs in VSL

    // release the VSL lock
    remote_queuelock_release( lock_xp );

// FIXME: the process copies must also be handled...

    // re-initialise VMM
    vmm_user_init( process );

#if DEBUG_VMM_USER_RESET
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

}  // end vmm_user_reset()

/////////////////////////////////////////////////
void vmm_global_delete_vseg( process_t * process,
                             intptr_t    base )
{
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;
    reg_t           save_sr;

    xptr_t          process_lock_xp;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;

    xptr_t          vsl_root_xp;
    xptr_t          vsl_lock_xp;
    xptr_t          vsl_iter_xp;

    rpc_desc_t      rpc;                  // shared rpc descriptor for parallel RPCs
    uint32_t        responses;            // RPC responses counter

    thread_t      * this    = CURRENT_THREAD;
    pid_t           pid     = process->pid;
    cluster_t     * cluster = LOCAL_CLUSTER;

#if DEBUG_VMM_GLOBAL_DELETE_VSEG
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enters / process %x / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
#endif

    // initialize a shared RPC descriptor
    rpc.rsp       = &responses;
    rpc.blocking  = false;                  // non blocking behaviour for rpc_send()
    rpc.index     = RPC_VMM_REMOVE_VSEG;
    rpc.thread    = this;
    rpc.lid       = this->core->lid;
    rpc.args[0]   = this->process->pid;
    rpc.args[1]   = base;

    // get owner process cluster and local index
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );

    // get extended pointer on root and lock of process copies xlist in owner cluster
    process_root_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
    process_lock_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );

    // mask IRQs
    hal_disable_irq( &save_sr );

    // client thread blocks itself
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    // take the lock protecting process copies
    remote_queuelock_acquire( process_lock_xp );

    // initialize responses counter
    responses = 0;

    // loop on process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

        // build extended pointers on remote VSL root and lock
        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );

        // get lock on remote VSL
        remote_queuelock_acquire( vsl_lock_xp );

        // loop on vsegs in remote process VSL
        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
        {
            // get pointers on current vseg
            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
            vseg_t * vseg_ptr = GET_PTR( vseg_xp );

            // get current vseg base address
            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
                                                                 &vseg_ptr->min ) );

            if( vseg_base == base )   // found searched vseg
            {
                // atomically increment responses counter
                hal_atomic_add( &responses , 1 );

#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
#endif
                // send RPC to remote cluster
                rpc_send( remote_process_cxy , &rpc );

                // exit loop on vsegs
                break;
            }
        }  // end of loop on vsegs

        // release lock on remote VSL
        remote_queuelock_release( vsl_lock_xp );

    }  // end of loop on process copies

    // release the lock protecting process copies
    remote_queuelock_release( process_lock_xp );

#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base );
#endif

    // client thread deschedules
    sched_yield("blocked on rpc_vmm_delete_vseg");

    // restore IRQs
    hal_restore_irq( save_sr );

#if DEBUG_VMM_GLOBAL_DELETE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
#endif

}  // end vmm_global_delete_vseg()

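////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not part of the original source): the parallel RPC protocol used
// by vmm_global_delete_vseg() above, as suggested by the shared rpc_desc_t. The client
// blocks itself, then sends one non-blocking RPC per process copy containing the vseg.
// The shared <responses> counter, incremented before each rpc_send(), tracks expected
// replies; presumably each RPC server decrements it, and the server handling the last
// reply unblocks the client thread, which resumes after sched_yield().
////////////////////////////////////////////////////////////////////////////////////////////
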
////////////////////////////////////////////////
void vmm_global_resize_vseg( process_t * process,
                             intptr_t    base,
                             intptr_t    new_base,
                             intptr_t    new_size )
{
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;
    reg_t           save_sr;

    xptr_t          process_lock_xp;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;

    xptr_t          vsl_root_xp;
    xptr_t          vsl_lock_xp;
    xptr_t          vsl_iter_xp;

    rpc_desc_t      rpc;                  // shared rpc descriptor for parallel RPCs
    uint32_t        responses;            // RPC responses counter

    thread_t      * this    = CURRENT_THREAD;
    pid_t           pid     = process->pid;
    cluster_t     * cluster = LOCAL_CLUSTER;

#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle );
#endif

    // initialize a shared RPC descriptor
    rpc.rsp       = &responses;
    rpc.blocking  = false;                  // non blocking behaviour for rpc_send()
    rpc.index     = RPC_VMM_RESIZE_VSEG;
    rpc.thread    = this;
    rpc.lid       = this->core->lid;
    rpc.args[0]   = this->process->pid;
    rpc.args[1]   = base;
    rpc.args[2]   = new_base;
    rpc.args[3]   = new_size;

    // get owner process cluster and local index
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );

    // get extended pointer on root and lock of process copies xlist in owner cluster
    process_root_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
    process_lock_xp  = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );

    // mask IRQs
    hal_disable_irq( &save_sr );

    // client thread blocks itself
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    // take the lock protecting process copies
    remote_queuelock_acquire( process_lock_xp );

    // initialize responses counter
    responses = 0;

    // loop on process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

        // build extended pointers on remote VSL root and lock
        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );

        // get lock on remote VSL
        remote_queuelock_acquire( vsl_lock_xp );

        // loop on vsegs in remote process VSL
        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
        {
            // get pointers on current vseg
            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
            vseg_t * vseg_ptr = GET_PTR( vseg_xp );

            // get current vseg base address
            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
                                                                 &vseg_ptr->min ) );

            if( vseg_base == base )   // found searched vseg
            {
                // atomically increment responses counter
                hal_atomic_add( &responses , 1 );

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
#endif
                // send RPC to remote cluster
                rpc_send( remote_process_cxy , &rpc );

                // exit loop on vsegs
                break;
            }

        }  // end of loop on vsegs

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
hal_vmm_display( remote_process_xp , false );
#endif

        // release lock on remote VSL
        remote_queuelock_release( vsl_lock_xp );

    }  // end of loop on process copies

    // release the lock protecting process copies
    remote_queuelock_release( process_lock_xp );

#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, base );
#endif

    // client thread deschedules
    sched_yield("blocked on rpc_vmm_resize_vseg");

    // restore IRQs
    hal_restore_irq( save_sr );

#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
#endif

}  // end vmm_global_resize_vseg()

////////////////////////////////////////////////
void vmm_global_update_pte( process_t * process,
                            vpn_t       vpn,
                            uint32_t    attr,
                            ppn_t       ppn )
{
    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

#if DEBUG_VMM_GLOBAL_UPDATE_PTE
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
#endif

#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle );
#endif

    // get owner process cluster and local index
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );

    // get extended pointer on root of process copies xlist in owner cluster
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // loop on process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // update remote GPT
        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    }

#if DEBUG_VMM_GLOBAL_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

}  // end vmm_global_update_pte()

///////////////////////////////////////
void vmm_set_cow( process_t * process )
{
    vmm_t         * vmm;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    xptr_t          vseg_root_xp;
    xptr_t          vseg_iter_xp;

    xptr_t          vseg_xp;
    vseg_t        * vseg;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

    // get target process PID
    pid = process->pid;

#if DEBUG_VMM_SET_COW
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid , cycle );
#endif

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

// check cluster is reference
assert( __FUNCTION__, (XPTR( local_cxy , process ) == process->ref_xp),
"local cluster must be process reference cluster\n");

    // get pointer on reference VMM
    vmm = &process->vmm;

    // get extended pointer on root of process copies xlist in owner cluster
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // get extended pointer on root of vsegs xlist from reference VMM
    vseg_root_xp  = XPTR( local_cxy , &vmm->vsegs_root );

    // loop on target process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process copy
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // loop on vsegs in (local) reference process VSL
        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
        {
            // get pointer on vseg
            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
            vseg     = GET_PTR( vseg_xp );

            // get vseg type, base and size
            uint32_t type     = vseg->type;
            vpn_t    vpn_base = vseg->vpn_base;
            vpn_t    vpn_size = vseg->vpn_size;

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
            // only DATA, ANON and REMOTE vsegs
            if( (type == VSEG_TYPE_DATA)  ||
                (type == VSEG_TYPE_ANON)  ||
                (type == VSEG_TYPE_REMOTE) )
            {
                vpn_t      vpn;
                uint32_t   attr;
                ppn_t      ppn;
                xptr_t     page_xp;
                cxy_t      page_cxy;
                page_t   * page_ptr;
                xptr_t     forks_xp;
                xptr_t     lock_xp;

                // update flags in remote GPT
                hal_gpt_set_cow( remote_gpt_xp,
                                 vpn_base,
                                 vpn_size );

                // atomically increment pending forks counter in physical pages,
                // this is only done once, when handling the reference copy
                if( remote_process_cxy == local_cxy )
                {

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
                    // scan all pages in vseg
                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                    {
                        // get page attributes and PPN from reference GPT
                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn );

                        // atomically update pending forks counter if page is mapped
                        if( attr & GPT_MAPPED )
                        {
                            // get pointers and cluster on page descriptor
                            page_xp  = ppm_ppn2page( ppn );
                            page_cxy = GET_CXY( page_xp );
                            page_ptr = GET_PTR( page_xp );

                            // get extended pointers on "forks" and "lock"
                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                            // take lock protecting "forks" counter
                            remote_busylock_acquire( lock_xp );

                            // increment "forks"
                            hal_remote_atomic_add( forks_xp , 1 );

                            // release lock protecting "forks" counter
                            remote_busylock_release( lock_xp );
                        }
                    }   // end loop on vpn

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
                }   // end if local
            }   // end if vseg type
        }   // end loop on vsegs
    }   // end loop on process copies

#if DEBUG_VMM_SET_COW
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

}  // end vmm_set_cow()

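////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not part of the original source): the expected copy-on-write life
// cycle, as suggested by the code above. vmm_set_cow() marks the pages of DATA, ANON
// and REMOTE vsegs as read-only + COW in all GPT copies, and counts in each physical
// page descriptor ("forks") how many forked processes may still reference the page;
// presumably the page-fault handler later allocates a private copy on the first write
// and decrements this counter.
////////////////////////////////////////////////////////////////////////////////////////////
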
/////////////////////////////////////////////////
error_t vmm_fork_copy( process_t * child_process,
                       xptr_t      parent_process_xp )
{
    error_t     error;
    cxy_t       parent_cxy;
    process_t * parent_process;
    vmm_t     * parent_vmm;
    xptr_t      parent_lock_xp;
    vmm_t     * child_vmm;
    xptr_t      iter_xp;
    xptr_t      parent_vseg_xp;
    vseg_t    * parent_vseg;
    vseg_t    * child_vseg;
    uint32_t    type;
    vpn_t       vpn;
    vpn_t       vpn_base;
    vpn_t       vpn_size;
    xptr_t      parent_root_xp;
    bool_t      mapped;
    ppn_t       ppn;

#if DEBUG_VMM_FORK_COPY
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, cycle );
#endif

    // get parent process cluster and local pointer
    parent_cxy     = GET_CXY( parent_process_xp );
    parent_process = GET_PTR( parent_process_xp );

    // get local pointers on parent and child VMM
    parent_vmm = &parent_process->vmm;
    child_vmm  = &child_process->vmm;

    // build extended pointer on parent VSL root and lock
    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );

    // take the lock protecting the parent VSL
    remote_queuelock_acquire( parent_lock_xp );

    // loop on parent VSL xlist
    XLIST_FOREACH( parent_root_xp , iter_xp )
    {
        // get pointers on current parent vseg
        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        parent_vseg    = GET_PTR( parent_vseg_xp );

        // get vseg type
        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

        // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
        {
            // allocate memory for a new child vseg
            child_vseg = vseg_alloc();
            if( child_vseg == NULL )   // release all allocated vsegs
            {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : cannot create vseg for child in cluster %x\n",
__FUNCTION__, local_cxy );
#endif
                vmm_destroy( child_process );
                return -1;
            }

            // copy parent vseg to child vseg
            vseg_init_from_ref( child_vseg , parent_vseg_xp );

            // build extended pointer on child VSL lock
            xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );

            // take the child VSL lock
            remote_queuelock_acquire( child_lock_xp );

            // register child vseg in child VSL
            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );

            // release the child VSL lock
            remote_queuelock_release( child_lock_xp );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif
            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
            if( type != VSEG_TYPE_CODE )
            {
                // activate the COW for DATA, ANON, REMOTE vsegs only
                // cow = ( type != VSEG_TYPE_FILE );

                vpn_base = child_vseg->vpn_base;
                vpn_size = child_vseg->vpn_size;

                // scan pages in parent vseg
                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                {
                    error = hal_gpt_pte_copy( &child_vmm->gpt,
                                              vpn,
                                              XPTR( parent_cxy , &parent_vmm->gpt ),
                                              vpn,
                                              false,      // does not handle COW flag
                                              &ppn,       // unused
                                              &mapped );  // unused
                    if( error )
                    {

#if DEBUG_VMM_ERROR
printk("\n[ERROR] in %s : cannot copy GPT\n",
__FUNCTION__ );
#endif
                        vmm_destroy( child_process );
                        return -1;
                    }

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
#endif
                }
            }   // end if no code & no stack
        }   // end if no stack
    }   // end loop on vsegs

    // release the parent VSL lock
    remote_queuelock_release( parent_lock_xp );

    // copy base addresses from parent VMM to child VMM
    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));

    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));

    hal_fence();

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , cycle );
#endif

    return 0;

}  // vmm_fork_copy()

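////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not part of the original source): vmm_fork_copy() only duplicates
// the parent VSL and GPT entries (hal_gpt_pte_copy() is called with the COW argument
// set to false). The read-only + COW marking of the shared pages is done separately,
// presumably by the fork system call path invoking vmm_set_cow() on the parent process
// once the copy has succeeded.
////////////////////////////////////////////////////////////////////////////////////////////
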
[1]1390///////////////////////////////////////
1391void vmm_destroy( process_t * process )
1392{
[408]1393    xptr_t   vseg_xp;
[1]1394        vseg_t * vseg;
1395
[438]1396#if DEBUG_VMM_DESTROY
[635]1397uint32_t   cycle = (uint32_t)hal_get_cycles();
1398thread_t * this  = CURRENT_THREAD;
[438]1399if( DEBUG_VMM_DESTROY < cycle )
[595]1400printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
1401__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]1402#endif
[416]1403
[438]1404#if (DEBUG_VMM_DESTROY & 1 )
[443]1405if( DEBUG_VMM_DESTROY < cycle )
[635]1406hal_vmm_display( XPTR( local_cxy, process ) , true );
[437]1407#endif
1408
[433]1409    // get pointer on local VMM
[1]1410    vmm_t  * vmm = &process->vmm;
1411
[625]1412    // build extended pointer on VSL root, VSL lock and GPT lock
1413    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
1414    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
[408]1415
[625]1416    // take the VSL lock
[640]1417    remote_queuelock_acquire( vsl_lock_xp );
[625]1418
[611]1419    // scan the VSL to delete all registered vsegs
[625]1420    // (we don't use a FOREACH in case of item deletion)
1421    xptr_t  iter_xp;
1422    xptr_t  next_xp;
1423        for( iter_xp = hal_remote_l64( vsl_root_xp ) ; 
1424         iter_xp != vsl_root_xp ;
1425         iter_xp = next_xp )
[1]1426        {
[625]1427        // save extended pointer on next item in xlist
1428        next_xp = hal_remote_l64( iter_xp );
[409]1429
[625]1430        // get pointers on current vseg in VSL
1431        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
1432        vseg      = GET_PTR( vseg_xp );
1433
[611]1434        // delete vseg and release physical pages
[625]1435        vmm_remove_vseg( process , vseg );
[409]1436
[443]1437#if( DEBUG_VMM_DESTROY & 1 )
1438if( DEBUG_VMM_DESTROY < cycle )
[611]1439printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
[443]1440__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1441#endif
1442
[1]1443        }
1444
[625]1445    // release the VSL lock
[640]1446    remote_queuelock_release( vsl_lock_xp );
[625]1447
[651]1448    // remove all registered MMAP vsegs from free_lists in MMAP allocator
[1]1449    uint32_t i;
[651]1450    for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ )
[1]1451    {
[651]1452        // build extended pointer on free list root
1453        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.free_list_root[i] );
[625]1454 
1455            // scan free_list[i]
1456            while( !xlist_is_empty( root_xp ) )
[1]1457            {
[625]1458                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
1459            vseg    = GET_PTR( vseg_xp );
[443]1460
1461#if( DEBUG_VMM_DESTROY & 1 )
1462if( DEBUG_VMM_DESTROY < cycle )
[595]1463printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
[443]1464__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1465#endif
[611]1466            // clean vseg descriptor
1467            vseg->vmm = NULL;
1468
[625]1469            // remove vseg from free list
[611]1470            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
1471
1472                    // release vseg descriptor
[1]1473            vseg_free( vseg );
[443]1474
1475#if( DEBUG_VMM_DESTROY & 1 )
1476if( DEBUG_VMM_DESTROY < cycle )
[595]1477printk("\n[%s] %s vseg released / vpn_base %x / vpn_size %d\n",
[443]1478__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1479#endif
[1]1480            }
1481    }
1482
[409]1483    // release memory allocated to the GPT itself
[1]1484    hal_gpt_destroy( &vmm->gpt );
1485
[438]1486#if DEBUG_VMM_DESTROY
[433]1487cycle = (uint32_t)hal_get_cycles();
[438]1488if( DEBUG_VMM_DESTROY < cycle )
[595]1489printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
1490__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]1491#endif
[416]1492
[204]1493}  // end vmm_destroy()
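
// Illustrative recap (not compiled) : the VSL scan above cannot use the XLIST_FOREACH
// macro, because the current item is deleted during the traversal. The idiom is to
// save the pointer on the next item before the current one is unlinked :
//
//     for( iter_xp = hal_remote_l64( root_xp ) ;
//          iter_xp != root_xp ;
//          iter_xp = next_xp )
//     {
//         next_xp = hal_remote_l64( iter_xp );    // save next before deletion
//         /* unlink and release the current item */
//     }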
1494
[1]1495/////////////////////////////////////////////////
1496vseg_t * vmm_check_conflict( process_t * process,
[21]1497                             vpn_t       vpn_base,
[1]1498                             vpn_t       vpn_size )
1499{
1500    vmm_t        * vmm = &process->vmm;
[408]1501
1502    // scan the VSL
[1]1503        vseg_t       * vseg;
[408]1504    xptr_t         iter_xp;
1505    xptr_t         vseg_xp;
1506    xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
[1]1507
[408]1508        XLIST_FOREACH( root_xp , iter_xp )
[1]1509        {
[408]1510                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
[433]1511        vseg    = GET_PTR( vseg_xp );
[204]1512
[21]1513                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
1514             (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
[1]1515        }
1516    return NULL;
1517
[204]1518}  // end vmm_check_conflict()
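
// Worked example (illustrative) : a candidate vseg [vpn_base 0x108 / vpn_size 0x10]
// conflicts with an existing vseg [vpn_base 0x100 / vpn_size 0x10], because
// (0x108 + 0x10 > 0x100) and (0x108 < 0x100 + 0x10) ; a candidate starting at
// vpn_base 0x110 would not conflict, as the two intervals only touch.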
1519
[407]1520////////////////////////////////////////////////
1521vseg_t * vmm_create_vseg( process_t   * process,
1522                              vseg_type_t   type,
[635]1523                          intptr_t      base,         // ltid for VSEG_TYPE_STACK
[407]1524                              uint32_t      size,
1525                          uint32_t      file_offset,
1526                          uint32_t      file_size,
1527                          xptr_t        mapper_xp,
1528                          cxy_t         cxy )
[1]1529{
[651]1530    vseg_t     * vseg;          // pointer on allocated vseg descriptor
[1]1531
[640]1532#if DEBUG_VMM_CREATE_VSEG
1533thread_t * this  = CURRENT_THREAD;
1534uint32_t   cycle;
1535#endif
1536
[635]1537#if (DEBUG_VMM_CREATE_VSEG & 1)
[640]1538cycle = (uint32_t)hal_get_cycles();
[438]1539if( DEBUG_VMM_CREATE_VSEG < cycle )
[635]1540printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n",
1541__FUNCTION__, this->process->pid, this->trdid,
1542process->pid, vseg_type_str(type), base, cxy, cycle );
[433]1543#endif
[21]1544
[407]1545    // get pointer on VMM
1546        vmm_t * vmm    = &process->vmm;
[21]1547
[651]1548    // allocate a vseg descriptor and initialize it, depending on the vseg type :
1549    // specific allocators are used for the "stack" and "mmap" types
[595]1550
[651]1551    /////////////////////////////
[1]1552    if( type == VSEG_TYPE_STACK )
1553    {
[651]1554        // get vseg from STACK allocator
1555        vseg = vmm_stack_alloc( vmm , base );    // base == ltid
1556       
1557        if( vseg == NULL )
1558        {
[683]1559
1560#if DEBUG_VMM_ERROR
1561printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1562__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
1563#endif
[651]1564            return NULL;
1565        }
[1]1566
[651]1567        // initialize vseg
1568        vseg->type = type;
1569        vseg->vmm  = vmm;
[683]1570        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
1571        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
[651]1572        vseg->cxy  = cxy;
1573
1574        vseg_init_flags( vseg , type );
[1]1575    }
[651]1576    /////////////////////////////////
[595]1577    else if( type == VSEG_TYPE_FILE )
1578    {
[651]1579        // compute page index (in mapper) for first and last byte
[683]1580        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_ORDER;
1581        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ORDER;
[595]1582
[651]1583        // compute offset in first page and number of pages
[595]1584        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
1585        vpn_t    npages      = vpn_max - vpn_min + 1;
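        // Worked example (illustrative, assuming 4 Kbytes pages) : file_offset = 0x1800
        // and size = 0x2400 give vpn_min = 1 / vpn_max = 3 / offset = 0x800 / npages = 3.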
1586
[651]1587        // get vseg from MMAP allocator
1588        vseg = vmm_mmap_alloc( vmm , npages );
1589
1590        if( vseg == NULL )
[595]1591        {
[683]1592
1593#if DEBUG_VMM_ERROR
1594printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1595__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
1596#endif
[595]1597            return NULL;
1598        }
1599
[651]1600        // initialize vseg
1601        vseg->type        = type;
1602        vseg->vmm         = vmm;
[683]1603        vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_ORDER) + offset; 
[651]1604        vseg->max         = vseg->min + size;
1605        vseg->file_offset = file_offset;
1606        vseg->file_size   = file_size;
1607        vseg->mapper_xp   = mapper_xp;
1608        vseg->cxy         = cxy;
1609
1610        vseg_init_flags( vseg , type );
[595]1611    }
[651]1612    /////////////////////////////////////////////////////////////////
1613    else if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) )
[1]1614    {
[595]1615        // compute number of required pages in virtual space
[683]1616        vpn_t npages = size >> CONFIG_PPM_PAGE_ORDER;
[595]1617        if( size & CONFIG_PPM_PAGE_MASK) npages++;
1618       
[651]1619        // allocate vseg from MMAP allocator
1620        vseg = vmm_mmap_alloc( vmm , npages );
1621
1622        if( vseg == NULL )
[1]1623        {
[683]1624
1625#if DEBUG_VMM_ERROR
1626printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1627__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
1628#endif
[1]1629            return NULL;
1630        }
1631
[651]1632        // initialize vseg
1633        vseg->type = type;
1634        vseg->vmm  = vmm;
[683]1635        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
1636        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
[651]1637        vseg->cxy  = cxy;
1638
1639        vseg_init_flags( vseg , type );
[1]1640    }
[651]1641    /////////////////////////////////////////////////////////////////
[623]1642    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
[1]1643    {
[683]1644        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ORDER;
1645        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ORDER;
[204]1646
[651]1647        // allocate vseg descriptor
1648            vseg = vseg_alloc();
1649
1650            if( vseg == NULL )
1651            {
[683]1652
1653#if DEBUG_VMM_ERROR
1654printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
1655__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
1656#endif
[651]1657            return NULL;
1658            }
[683]1659
[651]1660        // initialize vseg
1661        vseg->type        = type;
1662        vseg->vmm         = vmm;
1663        vseg->min         = base;
1664        vseg->max         = base + size;
[683]1665        vseg->vpn_base    = base >> CONFIG_PPM_PAGE_ORDER;
[651]1666        vseg->vpn_size    = vpn_max - vpn_min + 1;
1667        vseg->file_offset = file_offset;
1668        vseg->file_size   = file_size;
1669        vseg->mapper_xp   = mapper_xp;
1670        vseg->cxy         = cxy;
1671
1672        vseg_init_flags( vseg , type );
[1]1673    }
1674
1675    // check collisions
[651]1676    vseg_t * existing_vseg = vmm_check_conflict( process , vseg->vpn_base , vseg->vpn_size );
[624]1677
[651]1678    if( existing_vseg != NULL )
[1]1679    {
[683]1680
1681#if DEBUG_VMM_ERROR
1682printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
1683       "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
1684__FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 
1685vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
1686#endif
[651]1687        vseg_free( vseg );
[1]1688        return NULL;
1689    }
1690
[625]1691    // build extended pointer on VSL lock
1692    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1693 
1694    // take the VSL lock
[640]1695    remote_queuelock_acquire( lock_xp );
[625]1696
[408]1697    // attach vseg to VSL
[611]1698        vmm_attach_vseg_to_vsl( vmm , vseg );
[1]1699
[625]1700    // release the VSL lock
[640]1701    remote_queuelock_release( lock_xp );
[625]1702
[651]1703#if DEBUG_VMM_CREATE_VSEG
[433]1704cycle = (uint32_t)hal_get_cycles();
[651]1705if( DEBUG_VMM_CREATE_VSEG < cycle )
1706printk("\n[%s] thread[%x,%x] exit / %s / vpn_base %x / vpn_size %x / cycle %d\n",
1707__FUNCTION__, this->process->pid, this->trdid,
1708vseg_type_str(type), vseg->vpn_base, vseg->vpn_size, cycle );
[433]1709#endif
[21]1710
[1]1711        return vseg;
1712
[406]1713}  // vmm_create_vseg()
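
// Usage sketch (illustrative only, not part of the kernel build) : creating a
// one-page ANON vseg in the local cluster, as typically done by an mmap() path :
//
//     vseg_t * vseg = vmm_create_vseg( process,
//                                      VSEG_TYPE_ANON,
//                                      0,                       // base unused for ANON
//                                      CONFIG_PPM_PAGE_SIZE,    // size
//                                      0,                       // file_offset unused
//                                      0,                       // file_size unused
//                                      XPTR_NULL,               // no mapper
//                                      local_cxy );
//     if( vseg == NULL ) printk("\n[ERROR] cannot create ANON vseg\n");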
1714
[640]1715////////////////////////////////////////////////////////////////////////////////////////////
[656]1716// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions
1717// to update the physical page descriptor identified by the <ppn> argument.
1718// It decrements the refcount, sets the dirty bit when required, and releases the physical
1719// page to kmem depending on the vseg type.
1720// - KERNEL : refcount decremented / not released to kmem    / dirty bit not set
1721// - FILE   : refcount decremented / not released to kmem    / dirty bit set when required.
1722// - CODE   : refcount decremented / released to kmem        / dirty bit not set.
1723// - STACK  : refcount decremented / released to kmem        / dirty bit not set.
1724// - DATA   : refcount decremented / released to kmem if (ref & no fork) / dirty bit not set.
1725// - MMAP   : refcount decremented / released to kmem if ref / dirty bit not set.
[640]1726////////////////////////////////////////////////////////////////////////////////////////////
1727// @ process  : local pointer on process.
1728// @ vseg     : local pointer on vseg.
[640]1729// @ ppn      : released physical page index.
[656]1730// @ dirty    : set the dirty bit in page descriptor when non zero.
[640]1731////////////////////////////////////////////////////////////////////////////////////////////
1732static void vmm_ppn_release( process_t * process,
1733                             vseg_t    * vseg,
[656]1734                             ppn_t       ppn,
1735                             uint32_t    dirty )
[640]1736{
[656]1737    bool_t do_kmem_release;
[625]1738
[640]1739    // get vseg type
1740    vseg_type_t type = vseg->type;
1741
[656]1742    // compute is_ref : true when the local cluster is the reference cluster
[640]1743    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
1744
1745    // get pointers on physical page descriptor
1746    xptr_t   page_xp  = ppm_ppn2page( ppn );
1747    cxy_t    page_cxy = GET_CXY( page_xp );
1748    page_t * page_ptr = GET_PTR( page_xp );
1749
1750    // decrement page refcount
1751    xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount );
1752    hal_remote_atomic_add( count_xp , -1 );
1753
[656]1754    // compute the do_kmem_release condition depending on vseg type
1755    if( (type == VSEG_TYPE_KCODE) || 
[640]1756        (type == VSEG_TYPE_KDATA) || 
1757        (type == VSEG_TYPE_KDEV) )           
1758    {
[656]1759        // no physical page release for KERNEL
1760        do_kmem_release = false;
[640]1761    }
[656]1762    else if( type == VSEG_TYPE_FILE )
1763    {
1764        // no physical page release for FILE
1765        do_kmem_release = false;
1766
1767        // set dirty bit if required
1768        if( dirty ) ppm_page_do_dirty( page_xp );
1769    }   
[640]1770    else if( (type == VSEG_TYPE_CODE)  ||
1771             (type == VSEG_TYPE_STACK) ) 
1772    {
1773        // always release physical page for private vsegs
[656]1774        do_kmem_release = true;
[640]1775    }
1776    else if( (type == VSEG_TYPE_ANON)  ||
1777             (type == VSEG_TYPE_REMOTE) )
1778    {
1779        // release physical page if reference cluster
[656]1780        do_kmem_release = is_ref;
[640]1781    }
1782    else if( is_ref )  // vseg_type == DATA in reference cluster
1783    {
1784        // get extended pointers on forks and lock field in page descriptor
1785        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
1786        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
1787
1788        // take lock protecting "forks" counter
1789        remote_busylock_acquire( lock_xp );
1790
1791        // get number of pending forks from page descriptor
1792        uint32_t forks = hal_remote_l32( forks_xp );
1793
1794        // decrement pending forks counter if required
1795        if( forks )  hal_remote_atomic_add( forks_xp , -1 );
1796
1797        // release lock protecting "forks" counter
1798        remote_busylock_release( lock_xp );
1799
1800        // release physical page if forks == 0
[656]1801        do_kmem_release = (forks == 0); 
[640]1802    }
1803    else              // vseg_type == DATA not in reference cluster
1804    {
1805        // no physical page release if not in reference cluster
[656]1806        do_kmem_release = false;
[640]1807    }
1808
1809    // release physical page to relevant kmem when required
[656]1810    if( do_kmem_release )
[640]1811    {
[683]1812        // get physical page order
1813        uint32_t order = CONFIG_PPM_PAGE_ORDER +
1814                         hal_remote_l32( XPTR( page_cxy , &page_ptr->order ));
[640]1815
[683]1816        // get physical page base
1817        void * base = GET_PTR( ppm_ppn2base( ppn ) );
[656]1818
[683]1819        // release physical page
1820        kmem_remote_free( page_cxy , base , order );
1821
[640]1822#if DEBUG_VMM_PPN_RELEASE
1823thread_t * this = CURRENT_THREAD;
1824if( DEBUG_VMM_PPN_RELEASE < (uint32_t)hal_get_cycles() )
1825printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n",
1826__FUNCTION__, this->process->pid, this->trdid, ppn );
1827#endif
1828
1829    }
1830} // end vmm_ppn_release()
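
// Illustrative scenario for the DATA case above : if a DATA page in the reference
// cluster is released while the "forks" counter contains 2, the first two calls to
// vmm_ppn_release() only decrement the counter (2 -> 1 -> 0), and the physical page
// is actually released to kmem by the third call, which observes forks == 0.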
1831
[625]1832//////////////////////////////////////////
1833void vmm_remove_vseg( process_t * process,
1834                      vseg_t    * vseg )
[1]1835{
[625]1836    uint32_t    vseg_type;  // vseg type
[21]1837    vpn_t       vpn;        // VPN of current PTE
1838    vpn_t       vpn_min;    // VPN of first PTE
[1]1839    vpn_t       vpn_max;    // VPN of last PTE (excluded)
[409]1840    ppn_t       ppn;        // current PTE ppn value
1841    uint32_t    attr;       // current PTE attributes
[1]1842
[625]1843// check arguments
[672]1844assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
1845assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );
[409]1846
[625]1847    // get pointers on local process VMM
[640]1848    vmm_t * vmm = &process->vmm;
[611]1849
[629]1850    // build extended pointer on GPT
[640]1851    xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt );
[629]1852
[623]1853    // get relevant vseg infos
[624]1854    vseg_type = vseg->type;
1855    vpn_min   = vseg->vpn_base;
1856    vpn_max   = vpn_min + vseg->vpn_size;
[623]1857
[625]1858#if DEBUG_VMM_REMOVE_VSEG
1859uint32_t   cycle = (uint32_t)hal_get_cycles();
1860thread_t * this  = CURRENT_THREAD;
[640]1861#endif
1862
1863#if (DEBUG_VMM_REMOVE_VSEG & 1 )
[625]1864if( DEBUG_VMM_REMOVE_VSEG < cycle )
[641]1865printk("\n[%s] thread[%x,%x] enters / process %x / type %s / base %x / cycle %d\n",
[625]1866__FUNCTION__, this->process->pid, this->trdid, 
1867process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
1868#endif
1869
[683]1870    // loop on all PTEs in GPT to unmap the mapped PTEs
1871    for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
[1]1872    {
[625]1873        // get ppn and attr
[629]1874        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
[409]1875
[625]1876        if( attr & GPT_MAPPED )  // PTE is mapped
[409]1877        { 
[437]1878
[625]1879#if( DEBUG_VMM_REMOVE_VSEG & 1 )
1880if( DEBUG_VMM_REMOVE_VSEG < cycle )
[641]1881printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / type %s\n",
[640]1882__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
[437]1883#endif
[585]1884            // unmap GPT entry in local GPT
[629]1885            hal_gpt_reset_pte( gpt_xp , vpn );
[409]1886
[656]1887            // release physical page depending on vseg type
1888            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
[409]1889        }
[1]1890    }
[433]1891
[625]1892    // remove vseg from VSL
[611]1893    vmm_detach_vseg_from_vsl( vmm , vseg );
1894
[625]1895    // release vseg descriptor depending on vseg type
1896    if( vseg_type == VSEG_TYPE_STACK )
1897    {
1898        // release slot to local stack allocator
1899        vmm_stack_free( vmm , vseg );
1900    }
1901    else if( (vseg_type == VSEG_TYPE_ANON) || 
1902             (vseg_type == VSEG_TYPE_FILE) || 
1903             (vseg_type == VSEG_TYPE_REMOTE) ) 
1904    {
1905        // release vseg to local mmap allocator
1906        vmm_mmap_free( vmm , vseg );
1907    }
1908    else
1909    {
1910        // release vseg descriptor to local kmem
1911        vseg_free( vseg );
1912    }
1913
1914#if DEBUG_VMM_REMOVE_VSEG
[433]1915cycle = (uint32_t)hal_get_cycles();
[625]1916if( DEBUG_VMM_REMOVE_VSEG < cycle )
[641]1917printk("\n[%s] thread[%x,%x] exit / process %x / type %s / base %x / cycle %d\n",
[625]1918__FUNCTION__, this->process->pid, this->trdid, 
1919process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[433]1920#endif
1921
[625]1922}  // end vmm_remove_vseg()
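
// Usage sketch (illustrative only) : a munmap()-like path first translates the user
// address <addr> (hypothetical variable) to a vseg, then removes it ; VSL locking,
// done as in vmm_destroy() above, is omitted from this sketch :
//
//     vseg_t * vseg;
//     if( vmm_get_vseg( process , (intptr_t)addr , &vseg ) == 0 )
//         vmm_remove_vseg( process , vseg );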
[1]1923
[611]1924/////////////////////////////////////////////
[640]1925void vmm_resize_vseg( process_t * process,
1926                      vseg_t    * vseg,
1927                      intptr_t    new_base,
1928                      intptr_t    new_size )
[406]1929{
[640]1930    vpn_t     vpn;
1931    ppn_t     ppn;
1932    uint32_t  attr;
[406]1933
[640]1934// check arguments
[672]1935assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
1936assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );
[406]1937
[623]1938#if DEBUG_VMM_RESIZE_VSEG
1939uint32_t   cycle = (uint32_t)hal_get_cycles();
1940thread_t * this  = CURRENT_THREAD;
[640]1941#endif
1942
1943#if (DEBUG_VMM_RESIZE_VSEG & 1)
[623]1944if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]1945printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
1946__FUNCTION__, this->process->pid, this->trdid, 
1947process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[623]1948#endif
1949
[640]1950    // get existing vseg vpn_min and vpn_max
1951    vpn_t     old_vpn_min = vseg->vpn_base;
1952    vpn_t     old_vpn_max = old_vpn_min + vseg->vpn_size - 1;
[1]1953
[640]1954    // compute new vseg vpn_min & vpn_max 
1955    intptr_t min          = new_base;
1956    intptr_t max          = new_base + new_size;
[683]1957    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_ORDER;
1958    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_ORDER;
[1]1959
[640]1960    // build extended pointer on GPT
1961    xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );
[1]1962
[657]1963    // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
[640]1964        for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
[623]1965    {
[640]1966        // get ppn and attr
1967        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
[21]1968
[640]1969        if( attr & GPT_MAPPED )  // PTE is mapped
1970        { 
[623]1971
1972#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1973if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]1974printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
1975__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
[623]1976#endif
[640]1977            // unmap GPT entry
1978            hal_gpt_reset_pte( gpt_xp , vpn );
[623]1979
[640]1980            // release physical page when required
[656]1981            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
[640]1982        }
[1]1983    }
[640]1984
[640]1985    // loop on PTEs in GPT to unmap PTE if (new_vpn_max < vpn <= old_vpn_max)
1986        for( vpn = new_vpn_max + 1 ; vpn <= old_vpn_max ; vpn++ )
[1]1987    {
[640]1988        // get ppn and attr
1989        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
[623]1990
[640]1991        if( attr & GPT_MAPPED )  // PTE is mapped
1992        { 
1993
[641]1994#if( DEBUG_VMM_RESIZE_VSEG & 1 )
[623]1995if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]1996printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
1997__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
[623]1998#endif
[640]1999            // unmap GPT entry in local GPT
2000            hal_gpt_reset_pte( gpt_xp , vpn );
[406]2001
[640]2002            // release physical page when required
[656]2003            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
[640]2004        }
[1]2005    }
[623]2006
[640]2007    // resize vseg in VSL
2008    vseg->min      = min;
2009    vseg->max      = max;
2010    vseg->vpn_base = new_vpn_min;
2011    vseg->vpn_size = new_vpn_max - new_vpn_min + 1;
2012
2013#if DEBUG_VMM_RESIZE_VSEG
2014cycle = (uint32_t)hal_get_cycles();
[623]2015if( DEBUG_VMM_RESIZE_VSEG < cycle )
[640]2016printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
2017__FUNCTION__, this->process->pid, this->trdid, 
2018process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[623]2019#endif
[406]2020
[640]2021}  // end vmm_resize_vseg
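
// Worked example (illustrative, assuming 4 Kbytes pages) : shrinking a vseg from
// [base 0x100000 / size 0x4000] to [new_base 0x101000 / new_size 0x2000] gives
// old_vpn_min = 0x100 / old_vpn_max = 0x103 / new_vpn_min = 0x101 / new_vpn_max = 0x102 :
// vpn 0x100 (below the new base) and vpn 0x103 (above the new limit) are unmapped.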
[623]2022
[640]2023/////////////////////////////////////////////////////////////////////////////////////////////
2024// This static function is called twice by the vmm_get_vseg() function.
2025// It scans the (possibly remote) VSL defined by the <vmm_xp> argument to find the vseg
2026// containing a given virtual address <vaddr>, using remote accesses when the VSL is
2027// remote. The lock protecting the target VSL must be taken by the caller.
2028/////////////////////////////////////////////////////////////////////////////////////////////
2029// @ vmm_xp  : extended pointer on the process VMM.
2030// @ vaddr   : virtual address.
2031// @ return local pointer on remote vseg if success / return NULL if not found.
2032/////////////////////////////////////////////////////////////////////////////////////////////
2033static vseg_t * vmm_vseg_from_vaddr( xptr_t     vmm_xp,
2034                                     intptr_t   vaddr )
2035{
2036    xptr_t   iter_xp;
2037    xptr_t   vseg_xp;
2038    vseg_t * vseg;
2039    intptr_t min;
2040    intptr_t max;
[623]2041
[640]2042    // get cluster and local pointer on target VMM
2043    vmm_t * vmm_ptr = GET_PTR( vmm_xp );
2044    cxy_t   vmm_cxy = GET_CXY( vmm_xp );
[623]2045
[640]2046    // build extended pointer on VSL root
2047    xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root );
[406]2048
[640]2049    // scan the list of vsegs in VSL
2050    XLIST_FOREACH( root_xp , iter_xp )
2051    {
2052        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
2053        vseg    = GET_PTR( vseg_xp );
[406]2054
[640]2055        min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) );
2056        max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) );
[407]2057
[640]2058        // return success when match
2059        if( (vaddr >= min) && (vaddr < max) ) return vseg;
[1]2060    }
2061
[640]2062    // return failure
2063    return NULL;
[1]2064
[640]2065}  // end vmm_vseg_from_vaddr()
[1]2066
2067///////////////////////////////////////////
[388]2068error_t  vmm_get_vseg( process_t * process,
[394]2069                       intptr_t    vaddr,
[388]2070                       vseg_t   ** found_vseg )
[1]2071{
[640]2072    xptr_t    loc_lock_xp;     // extended pointer on local VSL lock
2073    xptr_t    ref_lock_xp;     // extended pointer on reference VSL lock
2074    vseg_t  * loc_vseg;        // local pointer on local vseg
2075    vseg_t  * ref_vseg;        // local pointer on reference vseg
[1]2076
[640]2077    // build extended pointer on local VSL lock
2078    loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock );
2079     
2080    // get local VSL lock
2081    remote_queuelock_acquire( loc_lock_xp );
[1]2082
[665]2083    // try to get vseg from local VSL
[640]2084    loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr );
[440]2085
[640]2086    if (loc_vseg == NULL)   // vseg not found => access reference VSL
2087    {
[388]2088        // get extended pointer on reference process
2089        xptr_t ref_xp = process->ref_xp;
[1]2090
[640]2091        // get cluster and local pointer on reference process
[388]2092        cxy_t       ref_cxy = GET_CXY( ref_xp );
[433]2093        process_t * ref_ptr = GET_PTR( ref_xp );
[388]2094
[665]2095        if( ref_cxy == local_cxy )    // local is ref => return error
[640]2096        {
[388]2097
[683]2098#if DEBUG_VMM_ERROR
2099printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
2100__FUNCTION__, vaddr, process->pid );
2101#endif
[665]2102            remote_queuelock_release( loc_lock_xp );
[640]2103            return -1;
2104        }
[665]2105        else                          // ref != local => access ref VSL                     
[640]2106        {
[665]2107            // build extended pointer on reference VSL lock
2108            ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock );
2109     
2110            // get reference VSL lock
2111            remote_queuelock_acquire( ref_lock_xp );
[625]2112
[665]2113            // try to get vseg from reference VSL
2114            ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr );
2115
2116            if( ref_vseg == NULL )  // vseg not found => return error
[640]2117            {
[683]2118
2119#if DEBUG_VMM_ERROR
2120printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
2121__FUNCTION__, vaddr, process->pid );
2122#endif
[665]2123                remote_queuelock_release( loc_lock_xp );
2124                remote_queuelock_release( ref_lock_xp );
[640]2125                return -1;
2126            }
[665]2127            else                    // vseg found => try to update local VSL
[640]2128            {
[665]2129                // allocate a local vseg descriptor
2130                loc_vseg = vseg_alloc();
[640]2131
[665]2132                if( loc_vseg == NULL )   // no memory => return error
2133                {
[640]2134
[683]2135#if DEBUG_VMM_ERROR
2136printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
2137__FUNCTION__, vaddr, process->pid );
2138#endif
[665]2139                    remote_queuelock_release( ref_lock_xp );
2140                    remote_queuelock_release( loc_lock_xp );
2141                    return -1;
2142                }
2143                else                     // update local VSL and return success
2144                {
2145                    // initialize local vseg
2146                    vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) );
2147
2148                    // register local vseg in local VSL
2149                    vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg );
2150
2151                    // release both VSL locks
2152                    remote_queuelock_release( ref_lock_xp );
2153                    remote_queuelock_release( loc_lock_xp );
2154
2155                    *found_vseg = loc_vseg;
2156                    return 0;
2157                }
[640]2158            }
2159        }
2160    }
2161    else                        // vseg found in local VSL => return success
2162    {
[665]2163        // release local VSL lock
[640]2164        remote_queuelock_release( loc_lock_xp );
2165
2166        *found_vseg = loc_vseg;
2167        return 0;
2168    }
[388]2169}  // end vmm_get_vseg()
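
// Usage sketch (illustrative only) : translating a faulting virtual address <vaddr>
// to its vseg, as done by the page-fault handler below :
//
//     vseg_t * vseg;
//     if( vmm_get_vseg( process , vaddr , &vseg ) )
//         printk("\n[ERROR] vaddr %x out of segment\n", vaddr );
//     else
//         printk("vaddr %x belongs to a %s vseg\n", vaddr, vseg_type_str( vseg->type ) );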
2170
[407]2171//////////////////////////////////////////////////////////////////////////////////////
[407]2172// This static function computes the target cluster to allocate a physical page
[683]2173// for a given <vpn> in a given <vseg>, allocates the physical page from a local
2174// or remote cluster (depending on the vseg type), and returns an extended pointer
2175// on the allocated page descriptor.
[407]2176// The vseg cannot have the FILE type.
2177//////////////////////////////////////////////////////////////////////////////////////
[640]2178// @ vseg   : local pointer on vseg.
2179// @ vpn    : unmapped vpn.
[683]2180// @ return xptr on page descriptor if success / return XPTR_NULL if failure
[640]2181//////////////////////////////////////////////////////////////////////////////////////
[407]2182static xptr_t vmm_page_allocate( vseg_t * vseg,
2183                                 vpn_t    vpn )
2184{
[433]2185
[632]2186#if DEBUG_VMM_PAGE_ALLOCATE
[619]2187uint32_t   cycle   = (uint32_t)hal_get_cycles();
2188thread_t * this    = CURRENT_THREAD;
[632]2189if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
[595]2190printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
2191__FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
[433]2192#endif
2193
[632]2194    xptr_t       page_xp;
[407]2195    cxy_t        page_cxy;
[577]2196    uint32_t     index;
[407]2197
[577]2198    uint32_t     type   = vseg->type;
2199    uint32_t     flags  = vseg->flags;
2200    uint32_t     x_size = LOCAL_CLUSTER->x_size;
2201    uint32_t     y_size = LOCAL_CLUSTER->y_size;
[407]2202
[567]2203// check vseg type
[672]2204assert( __FUNCTION__, ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
[407]2205
[656]2206    // compute target cluster identifier
[407]2207    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
2208    {
[577]2209        index    = vpn & ((x_size * y_size) - 1);
2210        page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );
[561]2211
[577]2212        // If the cluster selected from VPN's LSBs is empty, we select one randomly
2213        if ( cluster_is_active( page_cxy ) == false )
2214        {
2215            page_cxy = cluster_random_select();
[561]2216        }
[407]2217    }
2218    else                          // other cases => cxy specified in vseg
2219    {
[561]2220        page_cxy = vseg->cxy;
[407]2221    }
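    // Worked example (illustrative) : in a 4x4 mesh (x_size == y_size == 4), a
    // distributed vseg with vpn == 0x1234 gives index == 0x1234 & 0xF == 0x4, hence
    // page_cxy == HAL_CXY_FROM_XY( 1 , 0 ) : successive pages are spread over the
    // 16 clusters of the mesh.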
2222
[656]2223    // get local pointer on page base
[683]2224    void * ptr = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );
[635]2225
[683]2226    if( ptr == NULL )
2227    {
2228
2229#if DEBUG_VMM_ERROR
2230printk("\n[ERROR] in %s : cannot allocate memory from cluster %x\n",
2231__FUNCTION__, page_cxy );
2232#endif
2233        return XPTR_NULL;
2234    }     
[656]2235    // get extended pointer on page descriptor
2236    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );
2237
[632]2238#if DEBUG_VMM_PAGE_ALLOCATE
[595]2239cycle = (uint32_t)hal_get_cycles();
[632]2240if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
[635]2241printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
2242__FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle );
[433]2243#endif
2244
[632]2245    return page_xp;
[407]2246
2247}  // end vmm_page_allocate() 
2248
[313]2249////////////////////////////////////////
2250error_t vmm_get_one_ppn( vseg_t * vseg,
2251                         vpn_t    vpn,
2252                         ppn_t  * ppn )
2253{
2254    error_t    error;
[407]2255    xptr_t     page_xp;           // extended pointer on physical page descriptor
[606]2256    uint32_t   page_id;           // missing page index in vseg mapper
[406]2257    uint32_t   type;              // vseg type;
[313]2258
[406]2259    type      = vseg->type;
[606]2260    page_id   = vpn - vseg->vpn_base;
[313]2261
[438]2262#if DEBUG_VMM_GET_ONE_PPN
[595]2263uint32_t   cycle = (uint32_t)hal_get_cycles();
2264thread_t * this  = CURRENT_THREAD;
[656]2265if( DEBUG_VMM_GET_ONE_PPN < cycle )
2266printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id  %d / cycle %d\n",
[606]2267__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
[433]2268#endif
[313]2269
[656]2270#if (DEBUG_VMM_GET_ONE_PPN & 2)
2271if( DEBUG_VMM_GET_ONE_PPN < cycle )
2272hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2273#endif
2274
[406]2275    // FILE type : get the physical page from the file mapper
[313]2276    if( type == VSEG_TYPE_FILE )
2277    {
[406]2278        // get extended pointer on mapper
[407]2279        xptr_t mapper_xp = vseg->mapper_xp;
[313]2280
[672]2281assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
[567]2282"mapper not defined for a FILE vseg\n" );
[406]2283       
[606]2284        // get extended pointer on page descriptor
[657]2285        page_xp = mapper_get_page( mapper_xp , page_id );
[406]2286
[606]2287        if ( page_xp == XPTR_NULL ) return EINVAL;
[313]2288    }
2289
[406]2290    // Other types : allocate a physical page from target cluster,
[407]2291    // as defined by vseg type and vpn value
[313]2292    else
2293    {
[433]2294        // allocate one physical page
[407]2295        page_xp = vmm_page_allocate( vseg , vpn );
[406]2296
[635]2297        if( page_xp == XPTR_NULL ) return -1;
[313]2298
[406]2299        // initialise missing page from .elf file mapper for DATA and CODE types
[440]2300        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
[313]2301        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
2302        {
[406]2303            // get extended pointer on mapper
2304            xptr_t     mapper_xp = vseg->mapper_xp;
[313]2305
[672]2306assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
[567]2307"mapper not defined for a CODE or DATA vseg\n" );
[406]2308       
2309            // compute missing page offset in vseg
[683]2310            uint32_t offset = page_id << CONFIG_PPM_PAGE_ORDER;
[406]2311
[313]2312            // compute missing page offset in .elf file
[406]2313            uint32_t elf_offset = vseg->file_offset + offset;
[313]2314
[438]2315#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2316if( DEBUG_VMM_GET_ONE_PPN < cycle )
[595]2317printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
2318__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
[433]2319#endif
[406]2320            // compute extended pointer on page base
[407]2321            xptr_t base_xp  = ppm_page2base( page_xp );
[313]2322
[406]2323            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
2324            uint32_t file_size = vseg->file_size;
2325
2326            if( file_size < offset )                 // missing page fully in  BSS
[313]2327            {
[406]2328
[438]2329#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2330if( DEBUG_VMM_GET_ONE_PPN < cycle )
[595]2331printk("\n[%s] thread[%x,%x] for vpn  %x / fully in BSS\n",
2332__FUNCTION__, this->process->pid, this->trdid, vpn );
[433]2333#endif
[407]2334                if( GET_CXY( page_xp ) == local_cxy )
[313]2335                {
[315]2336                    memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE );
[313]2337                }
2338                else
2339                {
[315]2340                   hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );       
[313]2341                }
2342            }
[406]2343            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )  // fully in  mapper
[315]2344            {
[406]2345
[438]2346#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2347if( DEBUG_VMM_GET_ONE_PPN < cycle )
[595]2348printk("\n[%s] thread[%x,%x] for vpn  %x / fully in mapper\n",
2349__FUNCTION__, this->process->pid, this->trdid, vpn );
[433]2350#endif
[606]2351                error = mapper_move_kernel( mapper_xp,
2352                                            true,             // to_buffer
2353                                            elf_offset,
2354                                            base_xp,
2355                                            CONFIG_PPM_PAGE_SIZE ); 
[313]2356                if( error ) return EINVAL;
2357            }
[406]2358            else  // both in mapper and in BSS :
2359                  // - (file_size - offset)             bytes from mapper
2360                  // - (page_size + offset - file_size) bytes from BSS
[313]2361            {
[406]2362
[438]2363#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[656]2364if( DEBUG_VMM_GET_ONE_PPN < cycle )
[610]2365printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
[433]2366"      %d bytes from mapper / %d bytes from BSS\n",
[595]2367__FUNCTION__, this->process->pid, this->trdid, vpn,
[407]2368file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
[433]2369#endif
[313]2370                // initialize mapper part
[606]2371                error = mapper_move_kernel( mapper_xp,
2372                                            true,         // to buffer
2373                                            elf_offset,
2374                                            base_xp,
2375                                            file_size - offset ); 
[313]2376                if( error ) return EINVAL;
2377
2378                // initialize BSS part
[407]2379                if( GET_CXY( page_xp ) == local_cxy )
[313]2380                {
[406]2381                    memset( GET_PTR( base_xp ) + file_size - offset , 0 , 
2382                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
[313]2383                }
2384                else
2385                {
[406]2386                   hal_remote_memset( base_xp + file_size - offset , 0 , 
2387                                      offset + CONFIG_PPM_PAGE_SIZE - file_size );
[313]2388                }
2389            }   
[656]2390
2391        }  // end if CODE or DATA types   
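        // Worked example (illustrative, assuming 4 Kbytes pages) : with file_size == 0x2800,
        // the page at vseg offset 0x1000 is fully initialized from the mapper, the page at
        // offset 0x2000 is the split case (0x800 bytes from the mapper and 0x800 zeroed
        // bytes of BSS), and the page at offset 0x3000 is fully zeroed (BSS).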
[313]2392    } 
2393
2394    // return ppn
[407]2395    *ppn = ppm_page2ppn( page_xp );
[406]2396
[438]2397#if DEBUG_VMM_GET_ONE_PPN
[656]2398if( DEBUG_VMM_GET_ONE_PPN < cycle )
[635]2399printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
[595]2400__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
[433]2401#endif
[406]2402
[656]2403#if (DEBUG_VMM_GET_ONE_PPN & 2)
2404if( DEBUG_VMM_GET_ONE_PPN < cycle )
2405hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2406#endif
2407
[313]2408    return 0;
2409
2410}  // end vmm_get_one_ppn()
2411
[585]2412///////////////////////////////////////////////////
2413error_t vmm_handle_page_fault( process_t * process,
2414                               vpn_t       vpn )
[1]2415{
[585]2416    vseg_t         * vseg;            // vseg containing vpn
[629]2417    uint32_t         attr;            // PTE_ATTR value
2418    ppn_t            ppn;             // PTE_PPN value
[585]2419    uint32_t         ref_attr;        // PTE_ATTR value in reference GPT
2420    ppn_t            ref_ppn;         // PTE_PPN value in reference GPT
2421    cxy_t            ref_cxy;         // reference cluster for missing vpn
2422    process_t      * ref_ptr;         // reference process for missing vpn
2423    xptr_t           local_gpt_xp;    // extended pointer on local GPT
2424    xptr_t           ref_gpt_xp;      // extended pointer on reference GPT
2425    error_t          error;           // value returned by called functions
[1]2426
[629]2427    thread_t * this  = CURRENT_THREAD;
2428
2429#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2430uint32_t start_cycle = (uint32_t)hal_get_cycles();
2431#endif
2432
[625]2433#if DEBUG_VMM_HANDLE_PAGE_FAULT
[656]2434if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[625]2435printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
[629]2436__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
[625]2437#endif
2438
[656]2439#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
[635]2440if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[656]2441hal_vmm_display( XPTR( local_cxy , this->process ) , true );
[629]2442#endif
2443
[585]2444    // get local vseg (access to reference VSL can be required)
2445    error = vmm_get_vseg( process, 
[683]2446                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
[585]2447                          &vseg );
2448    if( error )
2449    {
[629]2450        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
2451        __FUNCTION__ , vpn , process->pid, this->trdid );
[585]2452       
2453        return EXCP_USER_ERROR;
2454    }
2455
[635]2456#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2457if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[634]2458printk("\n[%s] thread[%x,%x] found vseg %s\n",
2459__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
[433]2460#endif
[406]2461
[629]2462    // build extended pointer on local GPT
2463    local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
2464
[632]2465    // lock PTE in local GPT and get current PPN and attributes
[629]2466    error = hal_gpt_lock_pte( local_gpt_xp,
2467                              vpn,
2468                              &attr,
2469                              &ppn );
2470    if( error )
[438]2471    {
[629]2472        printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n",
2473        __FUNCTION__ , vpn , process->pid );
2474       
2475        return EXCP_KERNEL_PANIC;
2476    }
[407]2477
[635]2478#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2479if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2480printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n",
[634]2481__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy );
[632]2482#endif
2483
2484    // handle page fault only if local PTE still unmapped after lock
[629]2485    if( (attr & GPT_MAPPED) == 0 )
2486    {
2487        // get reference process cluster and local pointer
2488        ref_cxy = GET_CXY( process->ref_xp );
2489        ref_ptr = GET_PTR( process->ref_xp );
[407]2490
[630]2491        /////////////// private vseg or (local == reference)
2492        /////////////// => access only the local GPT
[629]2493        if( (vseg->type == VSEG_TYPE_STACK) ||
2494            (vseg->type == VSEG_TYPE_CODE)  ||
2495            (ref_cxy    == local_cxy ) )
2496        {
[632]2497
[635]2498#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2499if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2500printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
2501__FUNCTION__, this->process->pid, this->trdid,
2502local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
[632]2503#endif
2504            // allocate and initialise a physical page
[629]2505            error = vmm_get_one_ppn( vseg , vpn , &ppn );
[407]2506
[585]2507            if( error )
[408]2508            {
[629]2509                printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n",
[408]2510                __FUNCTION__ , process->pid , vpn );
[1]2511
[629]2512                // unlock PTE in local GPT
2513                hal_gpt_unlock_pte( local_gpt_xp , vpn );
[406]2514
[585]2515                return EXCP_KERNEL_PANIC;
[407]2516            }
2517
[629]2518            // define attr from vseg flags
[632]2519            attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
[629]2520            if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
2521            if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
2522            if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
2523            if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
[407]2524
[629]2525            // set PTE in local GPT
[632]2526            // it unlocks this PTE
[629]2527            hal_gpt_set_pte( local_gpt_xp,
2528                             vpn,
2529                             attr,
2530                             ppn );
[585]2531
[629]2532#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2533uint32_t end_cycle = (uint32_t)hal_get_cycles();
2534#endif
[585]2535
2536#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2537if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2538printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n",
2539__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
[585]2540#endif
2541
[656]2542#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
2543if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2544hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2545#endif
2546
[629]2547#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2548uint32_t cost      = end_cycle - start_cycle;
[629]2549this->info.local_pgfault_nr++;
[641]2550this->info.local_pgfault_cost += cost;
2551if( cost > this->info.local_pgfault_max ) this->info.local_pgfault_max = cost;
[629]2552#endif
2553            return EXCP_NON_FATAL;
[585]2554
[629]2555        }   // end local GPT access
[585]2556
[630]2557        /////////////////// public vseg and (local != reference)
2558        /////////////////// => access ref GPT to update local GPT
[629]2559        else                               
2560        {
[632]2561
[635]2562#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2563if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2564printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n",
2565__FUNCTION__, this->process->pid, this->trdid, 
2566local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() );
[632]2567#endif
[629]2568            // build extended pointer on reference GPT
2569            ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
[585]2570
[632]2571            // lock PTE in reference GPT and get current PPN and attributes
2572            error = hal_gpt_lock_pte( ref_gpt_xp,
2573                                      vpn,
2574                                      &ref_attr,
2575                                      &ref_ppn );
2576            if( error )
2577            {
2578                printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
2579                __FUNCTION__ , vpn , process->pid );
2580       
2581                // unlock PTE in local GPT
2582                hal_gpt_unlock_pte( local_gpt_xp , vpn );
2583                   
2584                return EXCP_KERNEL_PANIC;
2585            }
[1]2586
[635]2587#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2588if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2589printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n",
2590__FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn );
2591#endif
2592
2593            if( ref_attr & GPT_MAPPED )        // false page fault
[585]2594            {
[629]2595                // update local GPT from reference GPT values
[632]2596                // this unlocks the PTE in local GPT
[629]2597                hal_gpt_set_pte( local_gpt_xp,
2598                                 vpn,
2599                                 ref_attr,
2600                                 ref_ppn );
[585]2601
[635]2602#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2603if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2604printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n",
2605__FUNCTION__, this->process->pid, this->trdid );
2606#endif
2607
2608                // unlock the PTE in reference GPT
2609                hal_gpt_unlock_pte( ref_gpt_xp, vpn );
2610                             
[635]2611#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2612if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2613printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n",
2614__FUNCTION__, this->process->pid, this->trdid );
2615#endif
2616
[629]2617#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2618uint32_t end_cycle = (uint32_t)hal_get_cycles();
2619#endif
2620
[585]2621#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2622if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2623printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n",
2624__FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle );
[433]2625#endif
[406]2626
[656]2627#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
2628if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2629hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2630#endif
2631
[629]2632#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2633uint32_t cost      = end_cycle - start_cycle;
[629]2634this->info.false_pgfault_nr++;
[641]2635this->info.false_pgfault_cost += cost;
2636if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
[629]2637#endif
2638                return EXCP_NON_FATAL;
2639            }
[632]2640            else                            // true page fault
[629]2641            {
[585]2642                // allocate and initialise a physical page depending on the vseg type
[629]2643                error = vmm_get_one_ppn( vseg , vpn , &ppn );
[1]2644
[585]2645                if( error )
2646                {
2647                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
2648                    __FUNCTION__ , process->pid , vpn );
[313]2649
[632]2650                    // unlock PTE in local GPT and in reference GPT
[629]2651                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
[632]2652                    hal_gpt_unlock_pte( ref_gpt_xp   , vpn );
[585]2653                   
[629]2654                    return EXCP_KERNEL_PANIC;
[585]2655                }
[1]2656
[629]2657                // define attr from vseg flags
[632]2658                attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
[629]2659                if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
2660                if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
2661                if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
2662                if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
[585]2663
[635]2664#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2665if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2666printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
2667__FUNCTION__, this->process->pid, this->trdid );
2668#endif
[629]2669                // set PTE in reference GPT
[632]2670                // this unlocks the PTE
[629]2671                hal_gpt_set_pte( ref_gpt_xp,
2672                                 vpn,
2673                                 attr,
2674                                 ppn );
2675
[635]2676#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
2677if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2678printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
2679__FUNCTION__, this->process->pid, this->trdid );
2680#endif
2681
[629]2682                // set PTE in local GPT
[632]2683                // this unlocks the PTE
[629]2684                hal_gpt_set_pte( local_gpt_xp,
2685                                 vpn,
2686                                 attr,
2687                                 ppn );
2688
2689#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2690uint32_t end_cycle = (uint32_t)hal_get_cycles();
2691#endif
2692
[440]2693#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2694if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2695printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
2696__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
[435]2697#endif
[629]2698
[656]2699#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
2700if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
2701hal_vmm_display( XPTR( local_cxy , this->process ) , true );
2702#endif
2703
[629]2704#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2705uint32_t cost      = end_cycle - start_cycle;
[629]2706this->info.global_pgfault_nr++;
[641]2707this->info.global_pgfault_cost += cost;
2708if( cost > this->info.global_pgfault_max ) this->info.global_pgfault_max = cost;
[629]2709#endif
2710                return EXCP_NON_FATAL;
2711            }
[585]2712        }
2713    }
[629]2714    else   // page has been locally mapped by another concurrent thread
2715    {
[632]2716        // unlock the PTE in local GPT
[629]2717        hal_gpt_unlock_pte( local_gpt_xp , vpn );
2718
[632]2719#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
2720uint32_t end_cycle = (uint32_t)hal_get_cycles();
2721#endif
2722
2723#if DEBUG_VMM_HANDLE_PAGE_FAULT
[635]2724if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
[632]2725printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
2726__FUNCTION__, vpn, ppn, attr, end_cycle );
2727#endif
2728
2729#if CONFIG_INSTRUMENTATION_PGFAULTS
[656]2730uint32_t cost      = end_cycle - start_cycle;
[632]2731this->info.false_pgfault_nr++;
[641]2732this->info.false_pgfault_cost += cost;
2733if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
[632]2734#endif
[629]2735        return EXCP_NON_FATAL;
2736    }
2737
[585]2738}   // end vmm_handle_page_fault()
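
// Locking protocol recap (illustrative summary of the function above) :
//
//     lock local PTE
//     if already mapped          : unlock local PTE  (handled by a concurrent thread)
//     else if private vseg or
//             (local == ref)     : allocate ppn / set local PTE (this unlocks it)
//     else                       : lock reference PTE
//         if mapped in ref GPT   : copy ref PTE to local GPT          (false page fault)
//         else                   : allocate ppn / set ref PTE, then local PTE (true fault)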
[435]2739
[585]2740////////////////////////////////////////////
2741error_t vmm_handle_cow( process_t * process,
2742                        vpn_t       vpn )
2743{
2744    vseg_t         * vseg;            // vseg containing vpn
[629]2745    xptr_t           gpt_xp;          // extended pointer on GPT (local or reference)
2746    gpt_t          * gpt_ptr;         // local pointer on GPT (local or reference)
2747    cxy_t            gpt_cxy;         // GPT cluster identifier
[585]2748    uint32_t         old_attr;        // current PTE_ATTR value
2749    ppn_t            old_ppn;         // current PTE_PPN value
2750    uint32_t         new_attr;        // new PTE_ATTR value
2751    ppn_t            new_ppn;         // new PTE_PPN value
[629]2752    cxy_t            ref_cxy;         // reference process cluster
2753    process_t      * ref_ptr;         // local pointer on reference process
[585]2754    error_t          error;
[1]2755
[629]2756    thread_t * this  = CURRENT_THREAD;
[625]2757
[585]2758#if DEBUG_VMM_HANDLE_COW
[629]2759uint32_t   cycle = (uint32_t)hal_get_cycles();
[640]2760if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[595]2761printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
[619]2762__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
[629]2763#endif
2764
[656]2765#if (DEBUG_VMM_HANDLE_COW & 2)
[640]2766hal_vmm_display( XPTR( local_cxy , process ) , true );
[585]2767#endif
2768
2769    // get local vseg
2770    error = vmm_get_vseg( process, 
[683]2771                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
[585]2772                          &vseg );
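    // illustrative note (not in the original source) : the shift above converts the
    // page index <vpn> into a virtual address ; assuming CONFIG_PPM_PAGE_ORDER == 12
    // (4 Kbytes pages), vpn == 0x80 corresponds to virtual address 0x80000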
[440]2773    if( error )
[1]2774    {
[629]2775        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
[625]2776        __FUNCTION__, vpn, process->pid, this->trdid );
[585]2777
[629]2778        return EXCP_USER_ERROR;
[440]2779    }
[407]2780
[629]2781#if DEBUG_VMM_HANDLE_COW
[640]2782if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[629]2783printk("\n[%s] thread[%x,%x] get vseg %s\n",
2784__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
[619]2785#endif
2786
[629]2787    // get reference process cluster and local pointer
[585]2788    ref_cxy = GET_CXY( process->ref_xp );
2789    ref_ptr = GET_PTR( process->ref_xp );
[407]2790
[629]2791    // build pointers on relevant GPT
2792    // - access only local GPT for a private vseg 
2793    // - access reference GPT and all copies for a public vseg
[585]2794    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[440]2795    {
[629]2796        gpt_cxy = local_cxy;
2797        gpt_ptr = &process->vmm.gpt;
2798        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
[1]2799    }
[440]2800    else
[1]2801    {
[629]2802        gpt_cxy = ref_cxy;
2803        gpt_ptr = &ref_ptr->vmm.gpt;
2804        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
[1]2805    }
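
    // illustrative note (not in the original source) : the private types tested above
    // are STACK and CODE ; all other vseg types (such as DATA, FILE, ANON, or REMOTE)
    // are public, and their PTE updates must be propagated to all GPT copies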
2806
[629]2807    // lock target PTE in relevant GPT (local or reference)
[632]2808    // and get current PTE value
[629]2809    error = hal_gpt_lock_pte( gpt_xp,
2810                              vpn,
2811                              &old_attr,
2812                              &old_ppn );
2813    if( error )
2814    {
2815        printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
2816        __FUNCTION__ , gpt_cxy, vpn , process->pid );
2817       
2818        return EXCP_KERNEL_PANIC;
2819    }
[441]2820
[629]2821#if DEBUG_VMM_HANDLE_COW
[640]2822if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2823printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
2824__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
2825#endif
2826
[629]2827    // return user error if COW attribute not set or PTE2 unmapped
2828    if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
[585]2829    {
[629]2830        hal_gpt_unlock_pte( gpt_xp , vpn );
[407]2831
[629]2832        return EXCP_USER_ERROR;
[407]2833    }
2834
[619]2835    // get pointers on physical page descriptor
[585]2836    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
2837    cxy_t    page_cxy = GET_CXY( page_xp );
2838    page_t * page_ptr = GET_PTR( page_xp );
[435]2839
[585]2840    // get extended pointers on forks and lock field in page descriptor
2841    xptr_t forks_xp       = XPTR( page_cxy , &page_ptr->forks );
2842    xptr_t forks_lock_xp  = XPTR( page_cxy , &page_ptr->lock );
[407]2843
[585]2844    // take lock protecting "forks" counter
2845    remote_busylock_acquire( forks_lock_xp );
[407]2846
[585]2847    // get number of pending forks from page descriptor
2848    uint32_t forks = hal_remote_l32( forks_xp );
[441]2849
[629]2850#if DEBUG_VMM_HANDLE_COW
[640]2851if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2852printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
2853__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
2854#endif
2855
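    // illustrative note (not in the original source) : if, for example, two fork()
    // operations registered this page, forks == 2 : the COW write below allocates a
    // private copy and decrements forks to 1 ; once forks reaches 0, a COW write
    // simply makes the existing page writable again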
[585]2856    if( forks )        // pending fork => allocate a new page, and copy old to new
2857    {
[619]2858        // decrement pending forks counter in page descriptor
2859        hal_remote_atomic_add( forks_xp , -1 );
2860
2861        // release lock protecting "forks" counter
2862        remote_busylock_release( forks_lock_xp );
2863
[629]2864        // allocate a new physical page depending on vseg type
[585]2865        page_xp = vmm_page_allocate( vseg , vpn );
[619]2866
[585]2867        if( page_xp == XPTR_NULL ) 
2868        {
2869            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
2870            __FUNCTION__ , vpn, process->pid );
[441]2871
[629]2872            hal_gpt_unlock_pte( gpt_xp , vpn ); 
[441]2873
[585]2874            return EXCP_KERNEL_PANIC;
2875        }
[441]2876
[585]2877        // compute allocated page PPN
2878        new_ppn = ppm_page2ppn( page_xp );
[441]2879
[629]2880#if DEBUG_VMM_HANDLE_COW
[640]2881if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2882printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
2883__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
2884#endif
2885
[585]2886        // copy old page content to new page
[619]2887        hal_remote_memcpy( ppm_ppn2base( new_ppn ),
2888                           ppm_ppn2base( old_ppn ),
2889                           CONFIG_PPM_PAGE_SIZE );
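
        // illustrative note (not in the original source) : ppm_ppn2base() builds
        // extended pointers, so this copy is correct even when the old and the new
        // physical pages are allocated in two different clusters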
[441]2890
[629]2891#if DEBUG_VMM_HANDLE_COW
[640]2892if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[619]2893printk("\n[%s] thread[%x,%x] copied old page to new page\n",
2894__FUNCTION__, this->process->pid, this->trdid );
[585]2895#endif
[440]2896
[585]2897    }             
2898    else               // no pending fork => keep the existing page
2899    {
[619]2900        // release lock protecting "forks" counter
2901        remote_busylock_release( forks_lock_xp );
[1]2902
[585]2903#if(DEBUG_VMM_HANDLE_COW & 1)
[640]2904if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[635]2905printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
[619]2906__FUNCTION__, this->process->pid, this->trdid, old_ppn );
[585]2907#endif
2908        new_ppn = old_ppn;
2909    }
[1]2910
[629]2911    // build new_attr : set WRITABLE, reset COW, reset LOCKED
2912    new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));
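
    // illustrative note (not in the original source) : if, for example, old_attr ==
    // (GPT_MAPPED | GPT_READABLE | GPT_COW | GPT_LOCKED), the expression above gives
    // new_attr == (GPT_MAPPED | GPT_READABLE | GPT_WRITABLE)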
[585]2913
[635]2914#if(DEBUG_VMM_HANDLE_COW & 1)
[640]2915if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[635]2916printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
2917__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
2918#endif
2919
[629]2920    // update the relevant GPT(s)
2921    // - private vseg => update only the local GPT
2922    // - public vseg => update the reference GPT AND all the GPT copies
[585]2923    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[1]2924    {
[635]2925        // set new PTE in the local GPT
[585]2926        hal_gpt_set_pte( gpt_xp,
2927                         vpn,
2928                         new_attr,
2929                         new_ppn );
[1]2930    }
[585]2931    else
[1]2932    {
[640]2933        // set new PTE in all GPT copies
2934        vmm_global_update_pte( process,
2935                               vpn,
2936                               new_attr,
2937                               new_ppn );
[1]2938    }
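
    // illustrative note (not in the original source) : since new_attr is built with
    // GPT_LOCKED reset, writing it in the GPT(s) also releases the lock taken by
    // hal_gpt_lock_pte(), which is why this success path contains no explicit unlock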
2939
[585]2940#if DEBUG_VMM_HANDLE_COW
2941cycle = (uint32_t)hal_get_cycles();
[640]2942if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
[595]2943printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
[619]2944__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
[585]2945#endif
[313]2946
[656]2947#if (DEBUG_VMM_HANDLE_COW & 2)
[640]2948hal_vmm_display( XPTR( local_cxy , process ) , true );
[635]2949#endif
2950
[585]2951    return EXCP_NON_FATAL;
[1]2952
[585]2953}   // end vmm_handle_cow()
2954
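////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original vmm.c) : a hal-level exception handler
// is expected to dispatch MMU exceptions to the two services implemented above roughly
// as follows. The vmm_demo_dispatch() name and the <is_cow> flag are hypothetical ;
// the actual decoding of the MMU error code is done in hal_exception.c.
////////////////////////////////////////////////////////////////////////////////////////////
static inline error_t vmm_demo_dispatch( vpn_t  vpn,
                                         bool_t is_cow )
{
    process_t * process = CURRENT_THREAD->process;

    // a write access to a mapped page with the COW attribute set is handled
    // as a copy-on-write ; an access to an unmapped page is a true page fault
    if( is_cow ) return vmm_handle_cow( process , vpn );
    else         return vmm_handle_page_fault( process , vpn );
}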