Timestamp:
Jun 26, 2019, 11:42:37 AM (5 years ago)
Author:
alain
Message:

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This change cures a deadlock that could occur on concurrent page faults.

This version 2.2 has been tested on a TSAR architecture with 4 clusters
and 2 cores per cluster, for both the "sort" and the "fft" applications.
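
The practical effect on callers is visible in the diffs below: instead of sending an
RPC, a thread fills a kmem_req_t and calls kmem_alloc() for a local allocation, or
kmem_remote_alloc() for an allocation in another cluster. The following sketch only
rearranges calls that appear in the hal_context.c and hal_gpt.c hunks; the full kmem
API is defined outside this changeset, and remote_cxy is just a placeholder for the
target cluster identifier (the hunks use gpt_cxy).

    // sketch of the new allocation pattern (calls taken from the hunks below)
    // KMEM_KCM serves small kernel objects (order = log2 of the object size),
    // KMEM_PPM serves physical pages, kmem_remote_alloc() targets a given cluster
    kmem_req_t req;

    // small object in the local cluster (e.g. a CPU context)
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(hal_cpu_context_t) );
    req.flags = AF_KERNEL | AF_ZERO;
    hal_cpu_context_t * ctx = kmem_alloc( &req );

    // one physical page in a remote cluster (e.g. a PT2), without any RPC
    req.type  = KMEM_PPM;
    req.order = 0;                       // one small page
    req.flags = AF_KERNEL | AF_ZERO;
    void * pt2 = kmem_remote_alloc( remote_cxy , &req );

    // release uses the same request structure
    req.type = KMEM_KCM;
    req.ptr  = ctx;
    kmem_free( &req );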

Location:
trunk/hal/tsar_mips32/core
Files:
4 edited

  • trunk/hal/tsar_mips32/core/hal_context.c

    r625 r635  
    22 * hal_context.c - implementation of Thread Context API for TSAR-MIPS32
    33 *
    4  * Author  Alain Greiner    (2016)
     4 * Author  Alain Greiner    (2016,2017,2018,2019)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    3030#include <printk.h>
    3131#include <vmm.h>
     32#include <bits.h>
    3233#include <core.h>
    3334#include <cluster.h>
     
    3637
    3738/////////////////////////////////////////////////////////////////////////////////////////
    38 //       Define various SR initialisation values for TSAR-MIPS32
     39//       Define various SR initialisation values for the TSAR-MIPS32 architecture.
    3940/////////////////////////////////////////////////////////////////////////////////////////
    4041
     
    4445
    4546/////////////////////////////////////////////////////////////////////////////////////////
    46 // This structure defines the CPU context for TSAR MIPS32.
     47// This structure defines the CPU context for the TSAR-MIPS32 architecture.
    4748// The following registers are saved/restored at each context switch:
    4849// - GPR : all, but (zero, k0, k1), plus (hi, lo)
     
    5152//
    5253// WARNING : check the two CONFIG_CPU_CTX_SIZE & CONFIG_FPU_CTX_SIZE configuration
    53 //           parameterss when modifying this structure.
     54//           parameters when modifying this structure.
    5455/////////////////////////////////////////////////////////////////////////////////////////
    5556
     
    101102
    102103/////////////////////////////////////////////////////////////////////////////////////////
    103 // This structure defines the fpu_context for TSAR MIPS32.
     104// This structure defines the fpu_context for the TSAR MIPS32 architecture.
    104105/////////////////////////////////////////////////////////////////////////////////////////
    105106
     
    124125    // allocate memory for cpu_context
    125126    kmem_req_t  req;
    126     req.type   = KMEM_CPU_CTX;
     127    req.type   = KMEM_KCM;
     128    req.order  = bits_log2( sizeof(hal_cpu_context_t) );
    127129    req.flags  = AF_KERNEL | AF_ZERO;
    128130
    129     hal_cpu_context_t * context = (hal_cpu_context_t *)kmem_alloc( &req );
     131    hal_cpu_context_t * context = kmem_alloc( &req );
     132
    130133    if( context == NULL ) return -1;
    131134
     
    175178void hal_cpu_context_fork( xptr_t child_xp )
    176179{
    177     // get pointer on calling thread
    178     thread_t * this = CURRENT_THREAD;
    179 
     180    cxy_t               parent_cxy;        // parent thread cluster
     181    thread_t          * parent_ptr;        // local pointer on parent thread
     182    hal_cpu_context_t * parent_context;    // local pointer on parent cpu_context
     183    uint32_t          * parent_uzone;      // local_pointer on parent uzone (in kernel stack)
     184    char              * parent_ksp;        // kernel stack pointer on parent kernel stack
     185    uint32_t            parent_us_base;    // parent user stack base value
     186
     187    cxy_t               child_cxy;         // parent thread cluster
     188    thread_t          * child_ptr;         // local pointer on child thread
     189    hal_cpu_context_t * child_context;     // local pointer on child cpu_context
     190    uint32_t          * child_uzone;       // local_pointer on child uzone (in kernel stack)
     191    char              * child_ksp;         // kernel stack pointer on child kernel stack
     192    uint32_t            child_us_base;     // child user stack base value
     193
     194    process_t         * child_process;     // local pointer on child processs
     195    uint32_t            child_pt_ppn;      // PPN of child process PT1
     196    vseg_t            * child_us_vseg;     // local pointer on child user stack vseg
     197   
    180198    // allocate a local CPU context in parent kernel stack
    181     hal_cpu_context_t  context;
    182 
    183     // get local parent thread cluster and local pointer
    184     cxy_t      parent_cxy = local_cxy;
    185     thread_t * parent_ptr = CURRENT_THREAD;
    186 
    187     // get remote child thread cluster and local pointer
    188     cxy_t      child_cxy = GET_CXY( child_xp );
    189     thread_t * child_ptr = GET_PTR( child_xp );
    190 
    191     // get local pointer on remote child cpu context
    192     char * child_context_ptr = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );
     199    hal_cpu_context_t context;
     200
     201    // get (local) parent thread cluster and local pointer
     202    parent_cxy = local_cxy;
     203    parent_ptr = CURRENT_THREAD;
     204
     205    // get (remote) child thread cluster and local pointer
     206    child_cxy = GET_CXY( child_xp );
     207    child_ptr = GET_PTR( child_xp );
     208
     209    // get local pointer on (local) parent CPU context
     210    parent_context = parent_ptr->cpu_context;
     211
     212    // get local pointer on (remote) child CPU context
     213    child_context = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );
    193214
    194215    // get local pointer on remote child process
    195     process_t * process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
     216    child_process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
    196217
    197218    // get ppn of remote child process page table
    198     uint32_t pt_ppn = hal_remote_l32( XPTR(child_cxy , &process->vmm.gpt.ppn) );
    199 
    200     // get local pointer on parent uzone from parent thread descriptor
    201     uint32_t * parent_uzone = parent_ptr->uzone_current;
    202 
    203     // compute  local pointer on child uzone
    204     uint32_t * child_uzone  = (uint32_t *)( (intptr_t)parent_uzone +
    205                                             (intptr_t)child_ptr    -
    206                                             (intptr_t)parent_ptr  );
     219    child_pt_ppn = hal_remote_l32( XPTR(child_cxy , &child_process->vmm.gpt.ppn) );
     220
     221    // get local pointer on local parent uzone (in parent kernel stack)
     222    parent_uzone = parent_ptr->uzone_current;
     223
     224    // compute local pointer on remote child uzone (in child kernel stack)
     225    child_uzone  = (uint32_t *)( (intptr_t)parent_uzone +
     226                                 (intptr_t)child_ptr    -
     227                                 (intptr_t)parent_ptr  );
    207228
    208229    // update the uzone pointer in child thread descriptor
     
    213234if( DEBUG_HAL_CONTEXT < cycle )
    214235printk("\n[%s] thread[%x,%x] parent_uzone %x / child_uzone %x / cycle %d\n",
    215 __FUNCTION__, this->process->pid, this->trdid, parent_uzone, child_uzone, cycle );
    216 #endif
    217 
    218     // copy parent kernel stack to child thread descriptor
     236__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid, parent_uzone, child_uzone, cycle );
     237#endif
     238
     239    // get user stack base for parent thread
     240    parent_us_base = parent_ptr->user_stack_vseg->min;
     241
     242    // get user stack base for child thread
     243    child_us_vseg  = hal_remote_lpt( XPTR( child_cxy , &child_ptr->user_stack_vseg ) );
     244    child_us_base  = hal_remote_l32( XPTR( child_cxy , &child_us_vseg->min ) );
     245
     246#if DEBUG_HAL_CONTEXT
     247if( DEBUG_HAL_CONTEXT < cycle )
     248printk("\n[%s] thread[%x,%x] parent_ustack_base %x / child_ustack_base %x\n",
     249__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid, parent_us_base, child_us_base );
     250#endif
     251
     252    // get current value of kernel stack pointer in parent kernel stack
     253    parent_ksp = (char *)hal_get_sp();
     254
     255    // compute value of kernel stack pointer in child kernel stack
     256    child_ksp  = (char *)((intptr_t)parent_ksp +
     257                          (intptr_t)child_ptr  -
     258                          (intptr_t)parent_ptr );
     259
     260#if DEBUG_HAL_CONTEXT
     261if( DEBUG_HAL_CONTEXT < cycle )
     262printk("\n[%s] thread[%x,%x] parent_ksp %x / child_ksp %x\n",
     263__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid, parent_ksp, child_ksp );
     264#endif
     265
     266    // compute number of bytes to be copied, depending on current value of parent_ksp
     267    uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_ksp;   
     268
     269    // copy parent kernel stack content to child thread descriptor
    219270    // (this includes the uzone, that is allocated in the kernel stack)
    220     char * parent_ksp = (char *)hal_get_sp();
    221     char * child_ksp  = (char *)((intptr_t)parent_ksp +
    222                                  (intptr_t)child_ptr  -
    223                                  (intptr_t)parent_ptr );
    224 
    225     uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_ksp;
    226 
    227271    hal_remote_memcpy( XPTR( child_cxy , child_ksp ),
    228272                       XPTR( local_cxy , parent_ksp ),
     
    230274
    231275#if DEBUG_HAL_CONTEXT
    232 cycle = (uint32_t)hal_get_cycles();
    233 printk("\n[%s] thread[%x,%x] copied kstack from parent %x to child %x / cycle %d\n",
    234 __FUNCTION__, this->process->pid, this->trdid, parent_ptr, child_ptr, cycle );
    235 #endif
    236 
    237     // patch the user stack pointer slot in the child uzone[UZ_SP]
    238     // because parent and child use the same offset to access the user stack,
    239     // but parent and child do not have the same user stack base address.
    240     uint32_t parent_us_base = parent_ptr->user_stack_vseg->min;
    241     vseg_t * child_us_vseg  = hal_remote_lpt( XPTR( child_cxy , &child_ptr->user_stack_vseg ) );
    242     uint32_t child_us_base  = hal_remote_l32( XPTR( child_cxy , &child_us_vseg->min ) );
    243     uint32_t parent_usp     = parent_uzone[UZ_SP];
    244     uint32_t child_usp      = parent_usp + child_us_base - parent_us_base;
    245 
    246     hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_SP] ) , child_usp );
    247 
    248 #if DEBUG_HAL_CONTEXT
    249 cycle = (uint32_t)hal_get_cycles();
    250 printk("\n[%s] thread[%x,%x] parent_usp %x / child_usp %x / cycle %d\n",
    251 __FUNCTION__, this->process->pid, this->trdid, parent_usp, child_usp, cycle );
    252 #endif
    253 
    254     // save current values of CPU registers to local CPU context
     276if( DEBUG_HAL_CONTEXT < cycle )
     277printk("\n[%s] thread[%x,%x] copied kstack from parent (%x) to child (%x)\n",
     278__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid, parent_ptr, child_ptr );
     279#endif
     280
     281    // save current values of CPU registers to local copy of CPU context
    255282    hal_do_cpu_save( &context );
    256283
    257     // From this point, both parent and child can execute the following code,
     284    // update  three slots in this local CPU context
     285    context.sp_29   = (uint32_t)child_ksp;
     286    context.c0_th   = (uint32_t)child_ptr;
     287    context.c2_ptpr = (uint32_t)child_pt_ppn >> 1;
     288
     289    // From this point, both parent and child execute the following code,
    258290    // but child thread will only execute it after being unblocked by parent thread.
    259291    // They can be distinguished by the (CURRENT_THREAD,local_cxy) values,
    260292    // and we must re-initialise the calling thread pointer from c0_th register
    261293
    262     this = CURRENT_THREAD;
     294    thread_t * this = CURRENT_THREAD;
    263295
    264296    if( (this == parent_ptr) && (local_cxy == parent_cxy) )   // parent thread
    265297    {
    266         // patch 4 slots in the local CPU context: the sp_29 / c0_th / C0_sr / c2_ptpr
    267         // slots are not identical in parent and child
    268         context.sp_29   = context.sp_29 + (intptr_t)child_ptr - (intptr_t)parent_ptr;
    269         context.c0_th   = (uint32_t)child_ptr;
    270         context.c0_sr   = SR_SYS_MODE;
    271         context.c2_ptpr = pt_ppn >> 1;
    272 
    273         // copy this patched context to remote child context
    274         hal_remote_memcpy( XPTR( child_cxy , child_context_ptr ),
     298        // parent thread must update four slots in child uzone
     299        // - UZ_TH   : parent and child have different threads descriptors
     300        // - UZ_SP   : parent and child have different user stack base addresses.
     301        // - UZ_PTPR : parent and child use different Generic Page Tables
     302
     303        // parent thread computes values for child thread
     304        uint32_t child_sp    = parent_uzone[UZ_SP]  + child_us_base - parent_us_base;
     305        uint32_t child_th    = (uint32_t)child_ptr;
     306        uint32_t child_ptpr  = (uint32_t)child_pt_ppn >> 1;
     307
     308#if DEBUG_HAL_CONTEXT
     309if( DEBUG_HAL_CONTEXT < cycle )
     310printk("\n[%s] thread[%x,%x] : parent_uz_sp %x / child_uz_sp %x\n",
     311__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid,
     312parent_uzone[UZ_SP], child_sp );
     313#endif
     314
     315        // parent thread updates the child uzone
     316        hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_SP]   ) , child_sp );
     317        hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_TH]   ) , child_th );
     318        hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_PTPR] ) , child_ptpr );
     319
     320        // parent thread copies the local context to remote child context
     321        hal_remote_memcpy( XPTR( child_cxy , child_context ),
    275322                           XPTR( local_cxy  , &context ) ,
    276323                           sizeof( hal_cpu_context_t ) );
    277324#if DEBUG_HAL_CONTEXT
     325if( DEBUG_HAL_CONTEXT < cycle )
     326printk("\n[%s] thread[%x,%x] copied parent CPU context to child CPU context\n",
     327__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid );
     328#endif
     329
     330        // parent thread unblocks child thread
     331        thread_unblock( XPTR( child_cxy , child_ptr ) , THREAD_BLOCKED_GLOBAL );
     332
     333#if DEBUG_HAL_CONTEXT
    278334cycle = (uint32_t)hal_get_cycles();
    279 printk("\n[%s] thread[%x,%x] copied CPU context to child / cycle %d\n",
    280 __FUNCTION__, this->process->pid, this->trdid, cycle );
    281 #endif
    282 
    283         // parent thread unblock child thread
    284         thread_unblock( XPTR( child_cxy , child_ptr ) , THREAD_BLOCKED_GLOBAL );
    285 
    286 #if DEBUG_HAL_CONTEXT
    287 cycle = (uint32_t)hal_get_cycles();
    288 printk("\n[%s] thread[%x,%x] unblocked child thread / cycle %d\n",
    289 __FUNCTION__, this->process->pid, this->trdid, cycle );
     335trdid_t child_trdid = hal_remote_l32( XPTR( child_cxy , &child_ptr->trdid ) );
     336pid_t   child_pid   = hal_remote_l32( XPTR( child_cxy , &child_process->pid ) );
     337printk("\n[%s] thread[%x,%x] unblocked child thread[%x,%x] / cycle %d\n",
     338__FUNCTION__, parent_ptr->process->pid, parent_ptr->trdid, child_pid, child_trdid, cycle );
    290339#endif
    291340
     
    347396    if( ctx != NULL )
    348397    {   
    349         req.type = KMEM_CPU_CTX;
     398        req.type = KMEM_KCM;
    350399        req.ptr  = ctx;
    351400        kmem_free( &req );
     
    366415    // allocate memory for fpu_context
    367416    kmem_req_t  req;
    368     req.type   = KMEM_FPU_CTX;
     417    req.type   = KMEM_KCM;
    369418    req.flags  = AF_KERNEL | AF_ZERO;
    370 
    371     hal_fpu_context_t * context = (hal_fpu_context_t *)kmem_alloc( &req );
     419    req.order  = bits_log2( sizeof(hal_fpu_context_t) );
     420
     421    hal_fpu_context_t * context = kmem_alloc( &req );
     422
    372423    if( context == NULL ) return -1;
    373424
     
    414465    if( context != NULL )
    415466    {   
    416         req.type = KMEM_FPU_CTX;
     467        req.type = KMEM_KCM;
    417468        req.ptr  = context;
    418469        kmem_free( &req );
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r632 r635  
    189189    uint32_t         excp_code;
    190190
    191     // check thread type
    192    if( CURRENT_THREAD->type != THREAD_USER )
    193     {
    194         printk("\n[PANIC] in %s : illegal thread type %s\n",
    195         __FUNCTION__, thread_type_str(CURRENT_THREAD->type) );
    196 
    197         return EXCP_KERNEL_PANIC;
    198     }
    199 
    200191    // get faulty thread process 
    201192    process = this->process;
     
    447438            else                                                // undefined coprocessor
    448439            {
    449                 printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
     440                printk("\n[USER_ERROR] in %s for thread[%x,%x] / cycle %d\n"
    450441                "   undefined coprocessor / epc %x\n",
    451                 __FUNCTION__, this->process->pid, this->trdid, excPC );
     442                __FUNCTION__, this->process->pid, this->trdid,
     443                (uint32_t)hal_get_cycles() , excPC );
    452444
    453445                        error = EXCP_USER_ERROR;
     
    457449        case XCODE_OVR:    // Arithmetic Overflow : user fatal error
    458450        {
    459             printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
     451            printk("\n[USER_ERROR] in %s for thread[%x,%x] / cycle %d\n"
    460452            "   arithmetic overflow / epc %x\n",
    461             __FUNCTION__, this->process->pid, this->trdid, excPC );
     453            __FUNCTION__, this->process->pid, this->trdid,
     454            (uint32_t)hal_get_cycles() , excPC );
    462455
    463456                    error = EXCP_USER_ERROR;
     
    466459        case XCODE_RI:     // Reserved Instruction : user fatal error
    467460        {
    468             printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
     461            printk("\n[USER_ERROR] in %s for thread[%x,%x] / cycle %d\n"
    469462            "   reserved instruction / epc %x\n",
    470             __FUNCTION__, this->process->pid, this->trdid, excPC );
     463            __FUNCTION__, this->process->pid, this->trdid,
     464            (uint32_t)hal_get_cycles() , excPC );
    471465
    472466                    error = EXCP_USER_ERROR;
     
    475469        case XCODE_ADEL:   // user fatal error
    476470        {
    477             printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
     471            printk("\n[USER_ERROR] in %s for thread[%x,%x] / cycle %d\n"
    478472            "   illegal data load address / epc %x / bad_address %x\n",
    479             __FUNCTION__, this->process->pid, this->trdid, excPC, hal_get_bad_vaddr() );
     473            __FUNCTION__, this->process->pid, this->trdid,
     474            (uint32_t)hal_get_cycles(), excPC, hal_get_bad_vaddr() );
    480475
    481476                    error = EXCP_USER_ERROR;
     
    484479        case XCODE_ADES:   //   user fatal error
    485480        {
    486             printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
     481            printk("\n[USER_ERROR] in %s for thread[%x,%x] / cycle %d\n"
    487482            "   illegal data store address / epc %x / bad_address %x\n",
    488             __FUNCTION__, this->process->pid, this->trdid, excPC, hal_get_bad_vaddr() );
     483            __FUNCTION__, this->process->pid, this->trdid,
     484            (uint32_t)hal_get_cycles(), excPC, hal_get_bad_vaddr() );
    489485
    490486                    error = EXCP_USER_ERROR;
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r633 r635  
    22 * hal_gpt.c - implementation of the Generic Page Table API for TSAR-MIPS32
    33 *
    4  * Author   Alain Greiner (2016,2017,2018)
     4 * Author   Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    7070#define TSAR_MMU_IX2_FROM_VPN( vpn )       (vpn & 0x1FF)
    7171
    72 #define TSAR_MMU_PTBA_FROM_PTE1( pte1 )    (pte1 & 0x0FFFFFFF)
    73 #define TSAR_MMU_PPN_FROM_PTE1( pte1 )     ((pte1 & 0x0007FFFF)<<9)
     72#define TSAR_MMU_PPN2_FROM_PTE1( pte1 )    (pte1 & 0x0FFFFFFF)
     73#define TSAR_MMU_PPN1_FROM_PTE1( pte1 )    ((pte1 & 0x0007FFFF)<<9)
    7474#define TSAR_MMU_ATTR_FROM_PTE1( pte1 )    (pte1 & 0xFFC00000)
    7575
     
    138138error_t hal_gpt_create( gpt_t * gpt )
    139139{
    140         page_t   * page;
    141     xptr_t     page_xp;
     140    void * base;
    142141
    143142    thread_t * this = CURRENT_THREAD;
     
    146145uint32_t cycle = (uint32_t)hal_get_cycles();
    147146if( DEBUG_HAL_GPT_CREATE < cycle )
    148 printk("\n[%s] : thread[%x,%x] enter / cycle %d\n",
     147printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    149148__FUNCTION__, this->process->pid, this->trdid, cycle );
    150149#endif
    151150
    152151// check page size
    153 assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "for TSAR, the page size must be 4 Kbytes\n" );
     152assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "the TSAR page size must be 4 Kbytes\n" );
    154153
    155154    // allocates 2 physical pages for PT1
    156155        kmem_req_t req;
    157         req.type  = KMEM_PAGE;
    158         req.size = 1;                     // 2 small pages
     156        req.type  = KMEM_PPM;
     157        req.order = 1;                     // 2 small pages
    159158        req.flags = AF_KERNEL | AF_ZERO;
    160         page = (page_t *)kmem_alloc( &req );
    161 
    162         if( page == NULL )
     159        base = kmem_alloc( &req );
     160
     161        if( base == NULL )
    163162    {
    164163        printk("\n[PANIC] in %s : no memory for PT1 / process %x / cluster %x\n",
     
    167166    }
    168167
    169     // initialize generic page table descriptor
    170     page_xp   = XPTR( local_cxy , page );
    171         gpt->ptr  = GET_PTR( ppm_page2base( page_xp ) );
    172         gpt->ppn  = ppm_page2ppn( page_xp );
     168    gpt->ptr = base;
     169        gpt->ppn = ppm_base2ppn( XPTR( local_cxy , base ) );
    173170
    174171#if DEBUG_HAL_GPT_CREATE
    175172cycle = (uint32_t)hal_get_cycles();
    176173if( DEBUG_HAL_GPT_CREATE < cycle )
    177 printk("\n[%s] : thread[%x,%x] exit / cycle %d\n",
    178 __FUNCTION__, this->process->pid, this->trdid, cycle );
     174printk("\n[%s] thread[%x,%x] exit / pt1_base %x / pt1_ppn %x / cycle %d\n",
     175__FUNCTION__, this->process->pid, this->trdid, gpt->ptr, gpt->ppn, cycle );
    179176#endif
    180177
     
    199196thread_t * this  = CURRENT_THREAD;
    200197if( DEBUG_HAL_GPT_DESTROY < cycle )
    201 printk("\n[%s] : thread[%x,%x] enter / cycle %d\n",
     198printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    202199__FUNCTION__, this->process->pid, this->trdid, cycle );
    203200#endif
     
    221218            {
    222219                // get local pointer on PT2
    223                 pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    224                 xptr_t base_xp = ppm_ppn2base( pt2_ppn );
    225                 pt2     = GET_PTR( base_xp );
     220                pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
     221                pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    226222
    227223                // scan the PT2
     
    238234
    239235                // release the page allocated for the PT2
    240                 req.type = KMEM_PAGE;
    241                 req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt2 ) ) );
     236                req.type = KMEM_PPM;
     237                req.ptr  = pt2;
    242238                kmem_free( &req );
    243239            }
     
    246242
    247243    // release the PT1
    248     req.type = KMEM_PAGE;
    249     req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt1 ) ) );
     244    req.type = KMEM_PPM;
     245    req.ptr  = pt1;
    250246    kmem_free( &req );
    251247
     
    253249cycle = (uint32_t)hal_get_cycles();
    254250if( DEBUG_HAL_GPT_DESTROY < cycle )
    255 printk("\n[%s] : thread[%x,%x] exit / cycle %d\n",
     251printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    256252__FUNCTION__, this->process->pid, this->trdid, cycle );
    257253#endif
    258254
    259255} // end hal_gpt_destroy()
    260 
    261 /*
    262 
    263 /////////////////////////////////////////////////////////////////////////////////////
    264 // This static function can be used for debug.
    265 /////////////////////////////////////////////////////////////////////////////////////
    266 static void hal_gpt_display( process_t * process )
    267 {
    268     gpt_t    * gpt;
    269         uint32_t   ix1;
    270         uint32_t   ix2;
    271         uint32_t * pt1;
    272     uint32_t   pte1;
    273     ppn_t      pt2_ppn;
    274     uint32_t * pt2;
    275     uint32_t   pte2_attr;
    276     ppn_t      pte2_ppn;
    277     vpn_t      vpn;
    278 
    279 // check argument
    280 assert( (process != NULL) , "NULL process pointer\n");
    281 
    282     // get pointer on gpt
    283     gpt = &(process->vmm.gpt);
    284 
    285     // get pointer on PT1
    286     pt1 = (uint32_t *)gpt->ptr;
    287 
    288     printk("\n***** Tsar Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
    289     process->pid , gpt , pt1 );
    290 
    291     // scan the PT1
    292         for( ix1 = 0 ; ix1 < 2048 ; ix1++ )
    293         {
    294         pte1 = pt1[ix1];
    295                 if( (pte1 & TSAR_PTE_MAPPED) != 0 )
    296         {
    297             if( (pte1 & TSAR_PTE_SMALL) == 0 )  // BIG page
    298             {
    299                 vpn = ix1 << 9;
    300                 printk(" - BIG   : vpn = %x / pt1[%d] = %X\n", vpn , ix1 , pte1 );
    301             }
    302             else                           // SMALL pages
    303             {
    304                 pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    305                 xptr_t base_xp = ppm_ppn2base ( pt2_ppn );
    306                 pt2     = GET_PTR( base_xp );
    307 
    308                 // scan the PT2
    309                     for( ix2 = 0 ; ix2 < 512 ; ix2++ )
    310                 {
    311                     pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
    312                     pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
    313 
    314                             if( (pte2_attr & TSAR_PTE_MAPPED) != 0 )
    315                     {
    316                         vpn = (ix1 << 9) | ix2;
    317                         printk(" - SMALL : vpn %X / ppn %X / attr %X\n",
    318                         vpn , pte2_ppn , tsar2gpt(pte2_attr) );
    319                     }
    320                 }
    321             }
    322         }
    323         }
    324 } // end hal_gpt_display()
    325 
    326 */
    327256
    328257////////////////////////////////////////////
     
    332261                          ppn_t    * ppn )
    333262{
    334     uint32_t          * pt1_ptr;         // local pointer on PT1 base
    335     xptr_t              ptd1_xp;         // extended pointer on PT1[x1] entry
    336         uint32_t            ptd1;            // value of PT1[x1] entry
    337 
    338     xptr_t              page_xp;
    339 
     263    uint32_t          * pt1;             // local pointer on PT1 base
     264    xptr_t              pte1_xp;         // extended pointer on PT1[x1] entry
     265        uint32_t            pte1;            // value of PT1[x1] entry
     266
     267    kmem_req_t          req;             // kmem request fro PT2 allocation
     268
     269    uint32_t          * pt2;             // local pointer on PT2 base
    340270        ppn_t               pt2_ppn;         // PPN of page containing PT2
    341     uint32_t          * pt2_ptr;         // local pointer on PT2 base
    342271        xptr_t              pte2_xp;         // extended pointer on PT2[ix2].attr
    343272    uint32_t            pte2_attr;       // PT2[ix2].attr current value   
     
    357286uint32_t   cycle = (uint32_t)hal_get_cycles();
    358287if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    359 printk("\n[%s] : thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
     288printk("\n[%s] thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
    360289__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
    361290#endif
    362291
    363292    // get indexes in PTI & PT2 from vpn
    364     uint32_t  ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );    // index in PT1
    365     uint32_t  ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );    // index in PT2
     293    uint32_t  ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
     294    uint32_t  ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    366295
    367296    // get local pointer on PT1
    368     pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    369 
    370     // build extended pointer on PTD1 == PT1[ix1]
    371         ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
     297    pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
     298
     299    // build extended pointer on PTE1 == PT1[ix1]
     300        pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );
    372301
    373302    // get current PT1 entry value
    374     ptd1 = hal_remote_l32( ptd1_xp );
    375 
    376     // If PTD1 is unmapped and unlocked, try to atomically lock this PT1 entry.
    377     // This PTD1 lock prevent multiple concurrent PT2 allocations
    378     // - only the thread that successfully locked the PTD1 allocates a new PT2
    379     //   and updates the PTD1
    380     // - all other threads simply wait until the missing PTD1 is mapped.
    381 
    382     if( ptd1 == 0 ) 
     303    pte1 = hal_remote_l32( pte1_xp );
     304
     305    // If PTE1 is unmapped and unlocked, try to atomically lock this PT1 entry.
     306    // This PTE1 locking prevent multiple concurrent PT2 allocations
     307    // - only the thread that successfully locked the PTE1 allocates a new PT2
     308    //   and updates the PTE1
     309    // - all other threads simply wait until the missing PTE1 is mapped.
     310
     311    if( pte1 == 0 ) 
    383312        {
    384         // try to atomically lock the PTD1 to prevent concurrent PT2 allocations
    385         atomic = hal_remote_atomic_cas( ptd1_xp,
    386                                         ptd1,
    387                                         ptd1 | TSAR_PTE_LOCKED );
     313        // try to atomically lock the PTE1 to prevent concurrent PT2 allocations
     314        atomic = hal_remote_atomic_cas( pte1_xp,
     315                                        pte1,
     316                                        pte1 | TSAR_PTE_LOCKED );
    388317        if( atomic ) 
    389318                {
    390319            // allocate one 4 Kbytes physical page for PT2
    391             page_xp = ppm_remote_alloc_pages( gpt_cxy , 0 );
    392 
    393             if( page_xp == XPTR_NULL )
     320            req.type  = KMEM_PPM;
     321            req.order = 0; 
     322            req.flags = AF_ZERO | AF_KERNEL;
     323            pt2       = kmem_remote_alloc( gpt_cxy , &req );
     324
     325            if( pt2 == NULL )
    394326            {
    395                 printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
     327                printk("\n[ERROR] in %s : cannot allocate memory for PT2 in cluster %d\n",
     328                __FUNCTION__, gpt_cxy );
    396329                return -1;
    397330            }
    398331
    399332            // get the PT2 PPN
    400             pt2_ppn = ppm_page2ppn( page_xp );
    401 
    402             // build  PTD1
    403             ptd1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
    404 
    405             // set the PTD1 value in PT1
    406             // this unlocks the PTD1
    407             hal_remote_s32( ptd1_xp , ptd1 );
     333            pt2_ppn = ppm_base2ppn( XPTR( gpt_cxy , pt2 ) );
     334
     335            // build  PTE1
     336            pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
     337
     338            // set the PTE1 value in PT1
     339            // this unlocks the PTE1
     340            hal_remote_s32( pte1_xp , pte1 );
    408341            hal_fence();
    409342
    410343#if (DEBUG_HAL_GPT_LOCK_PTE & 1)
    411344if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    412 printk("\n[%s] : thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
     345printk("\n[%s] thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
    413346__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
    414347#endif
    415348
    416349        }  // end if atomic
    417     }  // end if (ptd1 == 0)
    418 
    419     // wait until PTD1 is mapped by another thread
    420     while( (ptd1 & TSAR_PTE_MAPPED) == 0 )
     350    }  // end if (pte1 == 0)
     351
     352    // wait until PTE1 is mapped by another thread
     353    while( (pte1 & TSAR_PTE_MAPPED) == 0 )
    421354    {
    422         ptd1 = hal_remote_l32( ptd1_xp );
     355        pte1 = hal_remote_l32( pte1_xp );
    423356
    424357#if GPT_LOCK_WATCHDOG
     
    426359{
    427360    thread_t * thread = CURRENT_THREAD;
    428     printk("\n[PANIC] in %s : thread[%x,%x] waiting PTD1 / vpn %x / cxy %x / %d iterations\n",
     361    printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE1 / vpn %x / cxy %x / %d iterations\n",
    429362    __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
    430363    hal_core_sleep();
     
    435368    }
    436369
    437 // check ptd1 because only small page can be locked
    438 assert( (ptd1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
     370// check pte1 because only small page can be locked
     371assert( (pte1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
    439372
    440373#if (DEBUG_HAL_GPT_LOCK_PTE & 1)
    441374if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    442 printk("\n[%s] : thread[%x,%x] get ptd1 %x for vpn %x in cluster %x\n",
    443 __FUNCTION__, this->process->pid, this->trdid, ptd1, vpn, gpt_cxy );
    444 #endif
    445 
    446     // get pointer on PT2 base from PTD1
    447     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( ptd1 );
    448     pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     375printk("\n[%s] thread[%x,%x] get pte1 %x for vpn %x in cluster %x\n",
     376__FUNCTION__, this->process->pid, this->trdid, pte1, vpn, gpt_cxy );
     377#endif
     378
     379    // get pointer on PT2 base
     380    pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
     381    pt2    = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    449382
    450383    // build extended pointers on PT2[ix2].attr 
    451     pte2_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
     384    pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
    452385
    453386    // wait until PTE2 atomically set using a remote CAS
     
    491424cycle = (uint32_t)hal_get_cycles();
    492425if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    493 printk("\n[%s] : thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
     426printk("\n[%s] thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
    494427__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, pte2_attr, pte2_ppn, cycle );
    495428#endif
     
    506439                         vpn_t   vpn )
    507440{
    508     uint32_t * pt1_ptr;         // local pointer on PT1 base
    509     xptr_t     ptd1_xp;         // extended pointer on PT1[ix1]
    510         uint32_t   ptd1;            // value of PT1[ix1] entry
    511 
     441    uint32_t * pt1;             // local pointer on PT1 base
     442    xptr_t     pte1_xp;         // extended pointer on PT1[ix1]
     443        uint32_t   pte1;            // value of PT1[ix1] entry
     444
     445    uint32_t * pt2;             // PT2 base address
    512446        ppn_t      pt2_ppn;         // PPN of page containing PT2
    513     uint32_t * pt2_ptr;         // PT2 base address
    514447        xptr_t     pte2_xp;         // extended pointer on PT2[ix2].attr
    515448        uint32_t   pte2_attr;       // PTE2 attribute
     
    523456uint32_t   cycle = (uint32_t)hal_get_cycles();
    524457if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    525 printk("\n[%s] : thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",
     458printk("\n[%s] thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",
    526459__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
    527460#endif
    528461
    529462    // compute indexes in P1 and PT2
    530     uint32_t  ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );    // index in PT1
    531     uint32_t  ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );    // index in PT2
     463    uint32_t  ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
     464    uint32_t  ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    532465
    533466    // get local pointer on PT1
    534     pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    535 
    536     // build extended pointer on PTD1 == PT1[ix1]
    537         ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
    538 
    539     // get current ptd1 value
    540     ptd1 = hal_remote_l32( ptd1_xp );
    541 
    542 // check PTD1 attributes
    543 assert( ((ptd1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");
    544 assert( ((ptd1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");
    545 
    546     // get pointer on PT2 base from PTD1
    547         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( ptd1 );
    548         pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     467    pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
     468
     469    // build extended pointer on PTE1 == PT1[ix1]
     470        pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );
     471
     472    // get current pte1 value
     473    pte1 = hal_remote_l32( pte1_xp );
     474
     475// check PTE1 attributes
     476assert( ((pte1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");
     477assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");
     478
     479    // get pointer on PT2 base
     480        pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
     481        pt2    = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    549482
    550483    // build extended pointers on PT2[ix2].attr 
    551     pte2_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
     484    pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
    552485
    553486    // get PT2[ix2].attr
     
    564497cycle = (uint32_t)hal_get_cycles();
    565498if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    566 printk("\n[%s] : thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",
     499printk("\n[%s] thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",
    567500__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
    568501#endif
     
    580513    gpt_t             * gpt_ptr;             // target GPT local pointer
    581514
    582     uint32_t          * pt1_ptr;             // local pointer on PT1 base
     515    uint32_t          * pt1;                 // local pointer on PT1 base
    583516        xptr_t              pte1_xp;             // extended pointer on PT1 entry
    584517        uint32_t            pte1;                // PT1 entry value if PTE1
    585518
     519        uint32_t          * pt2;                 // local pointer on PT2 base
    586520        ppn_t               pt2_ppn;             // PPN of PT2
    587         uint32_t          * pt2_ptr;             // local pointer on PT2 base
    588521    xptr_t              pte2_attr_xp;        // extended pointer on PT2[ix2].attr
    589522    xptr_t              pte2_ppn_xp;         // extended pointer on PT2[ix2].ppn
     
    604537    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    605538
    606     pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    607         small   = attr & GPT_SMALL;
     539#if DEBUG_HAL_GPT_SET_PTE
     540thread_t * this  = CURRENT_THREAD;
     541uint32_t   cycle = (uint32_t)hal_get_cycles();
     542if( DEBUG_HAL_GPT_SET_PTE < cycle )
     543printk("\n[%s] thread[%x,%x] enter gpt (%x,%x) / vpn %x / attr %x / ppn %x\n",
     544__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, &gpt_ptr->ptr, vpn, attr, ppn );
     545#endif
     546
     547        small = attr & GPT_SMALL;
     548
     549    // get local pointer on PT1
     550    pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    608551
    609552    // compute tsar attributes from generic attributes
     
    611554
    612555    // build extended pointer on PTE1 = PT1[ix1]
    613         pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
     556        pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );
    614557
    615558    // get current pte1 value
     
    634577
    635578#if DEBUG_HAL_GPT_SET_PTE
    636 thread_t * this  = CURRENT_THREAD;
    637 uint32_t   cycle = (uint32_t)hal_get_cycles();
    638579if( DEBUG_HAL_GPT_SET_PTE < cycle )
    639 printk("\n[%s] : thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
    640 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
     580printk("\n[%s] thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
     581__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1, pte1 );
    641582#endif
    642583
     
    648589assert( (pte1 & TSAR_PTE_MAPPED), "PTE1 must be mapped\n" );
    649590
    650         // get PT2 base from PTE1
    651             pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    652             pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     591        // get PT2 base
     592            pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
     593            pt2    = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    653594
    654595        // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn
    655         pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
    656         pte2_ppn_xp  = XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] );
     596        pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
     597        pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );
    657598
    658599        // get current value of PTE2.attr
     
    672613uint32_t   cycle = (uint32_t)hal_get_cycles();
    673614if( DEBUG_HAL_GPT_SET_PTE < cycle )
    674 printk("\n[%s] : thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
    675 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix2, pt2_ptr, tsar_attr, ppn );
     615printk("\n[%s] thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
     616__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix2, pt2, tsar_attr, ppn );
    676617#endif
    677618
     
    689630    uint32_t   ix2;            // index in PT2
    690631
    691     uint32_t * pt1_ptr;        // PT1 base address
     632    uint32_t * pt1;            // PT1 base address
    692633    xptr_t     pte1_xp;        // extended pointer on PT1[ix1]
    693634    uint32_t   pte1;           // PT1 entry value
    694635
     636    uint32_t * pt2;            // PT2 base address
    695637    ppn_t      pt2_ppn;        // PPN of PT2
    696     uint32_t * pt2_ptr;        // PT2 base address
    697638    xptr_t     pte2_attr_xp;   // extended pointer on PT2[ix2].attr
    698639    xptr_t     pte2_ppn_xp;    // extended pointer on PT2[ix2].ppn
     
    707648
    708649    // get local pointer on PT1 base
    709     pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
     650    pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    710651
    711652    // build extended pointer on PTE1 = PT1[ix1]
    712         pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
     653        pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );
    713654
    714655    // get current PTE1 value
     
    729670uint32_t   cycle = (uint32_t)hal_get_cycles();
    730671if( DEBUG_HAL_GPT_RESET_PTE < cycle )
    731 printk("\n[%s] : thread[%x,%x] unmap PTE1 / cxy %x / vpn %x / ix1 %x\n",
     672printk("\n[%s] thread[%x,%x] unmap PTE1 / cxy %x / vpn %x / ix1 %x\n",
    732673__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, vpn, ix1 );
    733674#endif
     
    737678    else                                    // it's a PTE2 => unmap it from PT2
    738679    {
    739         // compute PT2 base address
    740         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    741         pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     680        // get PT2 base
     681        pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
     682        pt2    = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    742683       
    743684        // build extended pointer on PT2[ix2].attr and PT2[ix2].ppn
    744         pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
    745         pte2_ppn_xp  = XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] );
     685        pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
     686        pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );
    746687
    747688        // unmap the PTE2
     
    755696uint32_t   cycle = (uint32_t)hal_get_cycles();
    756697if( DEBUG_HAL_GPT_RESET_PTE < cycle )
    757 printk("\n[%s] : thread[%x,%x] unmap PTE2 / cxy %x / vpn %x / ix2 %x\n",
     698printk("\n[%s] thread[%x,%x] unmap PTE2 / cxy %x / vpn %x / ix2 %x\n",
    758699__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, vpn, ix2 );
    759700#endif
     
    804745        if( (pte1 & TSAR_PTE_SMALL) == 0 )     // it's a PTE1
    805746        {
    806         // get PPN & ATTR from PT1
     747        // get PPN & ATTR
    807748                *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
    808         *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
     749        *ppn  = TSAR_MMU_PPN1_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
    809750        }
    810751    else                                  // it's a PTE2
    811752    {
    812753        // compute PT2 base address
    813         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
     754        pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
    814755        pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    815756
     
    849790        uint32_t   * src_pt1;   // local pointer on SRC PT1
    850791        uint32_t   * dst_pt1;   // local pointer on DST PT1
     792
    851793    uint32_t   * src_pt2;   // local pointer on SRC PT2
    852794    uint32_t   * dst_pt2;   // local pointer on DST PT2
     
    874816thread_t * this  = CURRENT_THREAD;
    875817if( DEBUG_HAL_GPT_COPY < cycle )
    876 printk("\n[%s] : thread[%x,%x] enter / src_cxy %x / dst_cxy %x / cycle %d\n",
     818printk("\n[%s] thread[%x,%x] enter / src_cxy %x / dst_cxy %x / cycle %d\n",
    877819__FUNCTION__, this->process->pid, this->trdid, src_cxy, local_cxy, cycle );
    878820#endif
    879 
    880     // get remote src_gpt cluster and local pointer
    881     src_cxy = GET_CXY( src_gpt_xp );
    882     src_gpt = GET_PTR( src_gpt_xp );
    883821
    884822    // get remote src_pt1 and local dst_pt1
     
    907845        dst_pte1 = dst_pt1[dst_ix1];
    908846
    909         // map dst_pte1 if required
     847        // map dst_pte1 when this entry is not mapped
    910848        if( (dst_pte1 & TSAR_PTE_MAPPED) == 0 )
    911849        {
    912850            // allocate one physical page for a new PT2
    913                 req.type  = KMEM_PAGE;
    914                 req.size = 0;                     // 1 small page
     851                req.type  = KMEM_PPM;
     852                req.order = 0;                     // 1 small page
    915853                req.flags = AF_KERNEL | AF_ZERO;
    916                 page = (page_t *)kmem_alloc( &req );
    917 
    918             if( page == NULL )
     854                dst_pt2   = kmem_alloc( &req );
     855
     856            if( dst_pt2 == NULL )
    919857            {
    920858                        printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
     
    926864
    927865            // get PPN for this new PT2
    928             dst_pt2_ppn = (ppn_t)ppm_page2ppn( page_xp );
    929 
    930             // build the new dst_pte1
     866            dst_pt2_ppn = ppm_base2ppn( XPTR( local_cxy , dst_pt2 ) );
     867
     868            // build new dst_pte1
    931869            dst_pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | dst_pt2_ppn;
    932870
     
    936874
    937875        // get pointer on src_pt2
    938         src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
     876        src_pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( src_pte1 );
    939877        src_pt2     = GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
    940878
    941879        // get pointer on dst_pt2
    942         dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
     880        dst_pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( dst_pte1 );
    943881        dst_pt2     = GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
    944882
     
    970908cycle = (uint32_t)hal_get_cycles;
    971909if( DEBUG_HAL_GPT_COPY < cycle )
    972 printk("\n[%s] : thread[%x,%x] exit / copy done for src_vpn %x / dst_vpn %x / cycle %d\n",
     910printk("\n[%s] thread[%x,%x] exit / copy done for src_vpn %x / dst_vpn %x / cycle %d\n",
    973911__FUNCTION__, this->process->pid, this->trdid, src_vpn, dst_vpn, cycle );
    974912#endif
     
    987925cycle = (uint32_t)hal_get_cycles;
    988926if( DEBUG_HAL_GPT_COPY < cycle )
    989 printk("\n[%s] : thread[%x,%x] exit / nothing done / cycle %d\n",
     927printk("\n[%s] thread[%x,%x] exit / nothing done / cycle %d\n",
    990928__FUNCTION__, this->process->pid, this->trdid, cycle );
    991929#endif
     
    1005943    gpt_t    * gpt_ptr;
    1006944
    1007     vpn_t      vpn;
    1008 
    1009     uint32_t   ix1;
    1010     uint32_t   ix2;
     945    uint32_t   ix1;       // current
     946    uint32_t   ix2;       // current
     947
     948    vpn_t      vpn_min;
     949    vpn_t      vpn_max;   // included
     950
     951    uint32_t   ix1_min;
     952    uint32_t   ix1_max;   // included
     953
     954    uint32_t   ix2_min;
     955    uint32_t   ix2_max;   // included
    1011956
    1012957    uint32_t * pt1;
     
    1021966    gpt_ptr = GET_PTR( gpt_xp );
    1022967
    1023     // get local PT1 pointer
     968#if DEBUG_HAL_GPT_SET_COW
     969uint32_t   cycle = (uint32_t)hal_get_cycles();
     970thread_t * this  = CURRENT_THREAD;
     971if(DEBUG_HAL_GPT_SET_COW < cycle )
     972printk("\n[%s] thread[%x,%x] enter / gpt[%x,%x] / vpn_base %x / vpn_size %x / cycle %d\n",
     973__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, gpt_ptr, vpn_base, vpn_size, cycle );
     974#endif
     975
     976    // get PT1 pointer
    1024977    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    1025978
    1026     // loop on pages
    1027     for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
     979#if (DEBUG_HAL_GPT_SET_COW & 1)
     980if(DEBUG_HAL_GPT_SET_COW < cycle )
     981printk("\n[%s] thread[%x,%x] get pt1 = %x\n",
     982__FUNCTION__, this->process->pid, this->trdid, pt1 );
     983#endif
     984
     985    vpn_min = vpn_base;
     986    vpn_max = vpn_base + vpn_size - 1;
     987
     988    ix1_min = TSAR_MMU_IX1_FROM_VPN( vpn_base );
     989    ix1_max = TSAR_MMU_IX1_FROM_VPN( vpn_max );
     990
     991    for( ix1 = ix1_min ; ix1 <= ix1_max ; ix1++ )
    1028992    {
    1029         ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    1030         ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    1031 
     993
     994#if (DEBUG_HAL_GPT_SET_COW & 1)
     995if(DEBUG_HAL_GPT_SET_COW < cycle )
     996printk("\n[%s] thread[%x,%x] : &pt1[%x] = %x\n",
     997__FUNCTION__, this->process->pid, this->trdid, ix1,  &pt1[ix1] );
     998#endif
    1032999        // get PTE1 value
    10331000        pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
     1001
     1002#if (DEBUG_HAL_GPT_SET_COW & 1)
     1003if(DEBUG_HAL_GPT_SET_COW < cycle )
     1004printk("\n[%s] thread[%x,%x] : pt1[%x] = %x\n",
     1005__FUNCTION__, this->process->pid, this->trdid, ix1, pte1 );
     1006#endif
    10341007
    10351008        // only MAPPED & SMALL PTEs are modified
    10361009            if( (pte1 & TSAR_PTE_MAPPED) && (pte1 & TSAR_PTE_SMALL) )
    10371010        {
    1038             // compute PT2 base address
    1039             pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
     1011            // get PT2 pointer
     1012            pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
    10401013            pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    10411014
    1042             assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ),
    1043             "PT2 and PT1 must be in the same cluster\n");
    1044  
    1045             // get current PTE2 attributes
    1046             attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) );
    1047 
    1048             // only MAPPED PTEs are modified       
    1049             if( attr & TSAR_PTE_MAPPED )
     1015#if (DEBUG_HAL_GPT_SET_COW & 1)
     1016if(DEBUG_HAL_GPT_SET_COW < cycle )
     1017printk("\n[%s] thread[%x,%x] : get pt2 = %x\n",
     1018__FUNCTION__, this->process->pid, this->trdid, pt2 );
     1019#endif
     1020            ix2_min = (ix1 == ix1_min) ? TSAR_MMU_IX2_FROM_VPN(vpn_min) : 0;
     1021            ix2_max = (ix1 == ix1_max) ? TSAR_MMU_IX2_FROM_VPN(vpn_max) : 511;
     1022 
     1023            for( ix2 = ix2_min ; ix2 <= ix2_max ; ix2++ )
    10501024            {
    1051                 attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
    1052                 hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
    1053             }
    1054         }
    1055     }   // end loop on pages
     1025
     1026#if (DEBUG_HAL_GPT_SET_COW & 1)
     1027if(DEBUG_HAL_GPT_SET_COW < cycle )
     1028printk("\n[%s] thread[%x,%x] : &pte2[%x] = %x\n",
     1029__FUNCTION__, this->process->pid, this->trdid, 2*ix2, &pt2[2*ix2] );
     1030#endif
     1031                // get current PTE2 attributes
     1032                attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) );
     1033
     1034#if (DEBUG_HAL_GPT_SET_COW & 1)
     1035if(DEBUG_HAL_GPT_SET_COW < cycle )
     1036printk("\n[%s] thread[%x,%x] : pte2[%x] (attr) = %x\n",
     1037__FUNCTION__, this->process->pid, this->trdid, 2*ix2, attr );
     1038#endif
     1039                // only MAPPED PTEs are modified       
     1040                if( attr & TSAR_PTE_MAPPED )
     1041                {
     1042                    attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
     1043                    hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
     1044                }
     1045            }  // end loop on ix2
     1046        }
     1047    }  // end loop on ix1
     1048
     1049#if DEBUG_HAL_GPT_SET_COW
     1050cycle = (uint32_t)hal_get_cycles();
     1051if(DEBUG_HAL_GPT_SET_COW < cycle )
     1052printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     1053__FUNCTION__, this->process->pid, this->trdid, cycle );
     1054#endif
    10561055
    10571056}  // end hal_gpt_set_cow()
     
    10681067        ppn_t               pt2_ppn;             // PPN of PT2
    10691068        uint32_t          * pt2;                 // PT2 base address
    1070     xptr_t              pte2_xp;             // exended pointer on PTE2
     1069    xptr_t              pte2_attr_xp;        // exended pointer on pte2.attr
     1070    xptr_t              pte2_ppn_xp;         // exended pointer on pte2.ppn
    10711071
    10721072    uint32_t            ix1;                 // index in PT1
    10731073    uint32_t            ix2;                 // index in PT2
    1074 
    1075 
    1076     uint32_t            tsar_attr;           // PTE attributes for TSAR MMU
    10771074
    10781075// check MAPPED, SMALL, and not LOCKED in attr argument
     
    10921089    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    10931090
    1094     // compute tsar_attr from generic attributes
    1095     tsar_attr = gpt2tsar( attr );
    1096 
    10971091    // get PTE1 value
    10981092    pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
    10991093
    11001094// check MAPPED and SMALL in target PTE1
    1101 assert( ((pte1 & GPT_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
    1102 assert( ((pte1 & GPT_SMALL ) != 0), "attribute SMALL  must be set in target PTE1\n" );
    1103 
    1104     // get PT2 base from PTE1
    1105     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
     1095assert( ((pte1 & TSAR_PTE_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
     1096assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "attribute SMALL  must be set in target PTE1\n" );
     1097
     1098    // get PT2 base
     1099    pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
    11061100    pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    11071101
    1108     // get extended pointer on PTE2
    1109     pte2_xp = XPTR( gpt_cxy , &pt2[2*ix2] );
     1102    // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn
     1103    pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
     1104    pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );
     1105
    11101106   
    11111107// check MAPPED in target PTE2
    1112 assert( ((hal_remote_l32(pte2_xp) & GPT_MAPPED) != 0),
     1108assert( ((hal_remote_l32(pte2_attr_xp) & TSAR_PTE_MAPPED) != 0),
    11131109"attribute MAPPED must be set in target PTE2\n" );
    11141110
    11151111    // set PTE2 in this order
    1116         hal_remote_s32( pte2_xp    , ppn );
     1112        hal_remote_s32( pte2_ppn_xp , ppn );
    11171113        hal_fence();
    1118         hal_remote_s32( pte2_xp + 4 , tsar_attr );
     1114        hal_remote_s32( pte2_attr_xp , gpt2tsar( attr ) );
    11191115        hal_fence();
    11201116
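The two stores above are deliberately ordered: the PPN word is written and fenced before the attribute word that carries TSAR_PTE_MAPPED, so a concurrent reader that observes a mapped entry also reads a valid PPN. An annotated sketch of this pattern, using only the primitives shown in the diff (pte2_ppn_xp and pte2_attr_xp are the extended pointers built above):

    hal_remote_s32( pte2_ppn_xp , ppn );                 // 1. publish the physical page number
    hal_fence();                                         // 2. order the PPN store before the attributes
    hal_remote_s32( pte2_attr_xp , gpt2tsar( attr ) );   // 3. publish the attributes (sets MAPPED)
    hal_fence();                                         // 4. make the complete mapping globally visible

A reader such as a page-fault handler can therefore test TSAR_PTE_MAPPED in the attribute word and then read the PPN word knowing it is not stale.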
  • trunk/hal/tsar_mips32/core/hal_vmm.c

    r633 r635  
    4444extern process_t            process_zero;
    4545extern chdev_directory_t    chdev_dir;
     46extern char               * lock_type_str[];
    4647
    4748//////////////////////////////////////////////////////////////////////////////////////////
    4849// This function is called by the process_zero_init() function during kernel_init.
    4950// It initializes the VMM of the kernel process_zero (containing all kernel threads)
    50 // in the local cluster: For TSAR, it registers one "kcode" vseg in kernel VSL,
    51 // and registers one big page in slot[0] of kernel GPT.
     51// in the local cluster.
     52// For TSAR, it registers one "kcode" vseg in kernel VSL, and registers one big page
     53// in slot[0] of kernel GPT.
    5254//////////////////////////////////////////////////////////////////////////////////////////
    5355error_t  hal_vmm_kernel_init( boot_info_t * info )
     
    5860    gpt_t * gpt = &process_zero.vmm.gpt;
    5961
    60     // get cluster identifier
    61     cxy_t cxy = local_cxy;
     62#if DEBUG_HAL_VMM
     63thread_t * this = CURRENT_THREAD;
     64printk("\n[%s] thread[%x,%x] enter in cluster %x\n",
     65__FUNCTION__, this->process->pid, this->trdid, local_cxy );
     66#endif
    6267
    6368    // allocate memory for kernel GPT
     
    6772    {
    6873        printk("\n[PANIC] in %s : cannot allocate kernel GPT in cluster %x\n",
    69         __FUNCTION__ , cxy );
     74        __FUNCTION__ , local_cxy );
    7075        hal_core_sleep();
    7176    }
    7277
    7378#if DEBUG_HAL_VMM
    74 thread_t * this = CURRENT_THREAD;
    75 printk("\n[%s] thread[%x,%x] enter in cluster %x / gpt %x\n",
     79printk("\n[%s] thread[%x,%x] created GPT PT1 in cluster %x / gpt %x\n",
    7680__FUNCTION__, this->process->pid, this->trdid, local_cxy, gpt );
    7781#endif
     
    7983    // compute attr and ppn for one PTE1
    8084    uint32_t attr = GPT_MAPPED | GPT_READABLE | GPT_CACHABLE | GPT_EXECUTABLE | GPT_GLOBAL;
    81     uint32_t ppn  = cxy << 20;   
     85    uint32_t ppn  = local_cxy << 20;   
    8286
    8387    // set PT1[0]
    84     hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn );
    85 
    86 #if DEBUG_HAL_VMM
    87 printk("\n[%s] thread[%x,%x] created PT1[0] : ppn %x / attr %x\n",
    88 __FUNCTION__, this->process->pid, this->trdid, ppn, attr );
     88    hal_gpt_set_pte( XPTR( local_cxy , gpt ) , 0 , attr , ppn );
     89
     90#if DEBUG_HAL_VMM
     91printk("\n[%s] thread[%x,%x] mapped PT1[0] in cluster %d : ppn %x / attr %x\n",
     92__FUNCTION__, this->process->pid, this->trdid, local_cxy, ppn, attr );
    8993#endif
    9094
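As a worked example of the big-page mapping above (assuming the usual TSAR layout of 4 Kbyte pages with the cluster identifier in the high-order bits of the 40-bit physical address, which is an assumption not stated in the diff):

    // Hypothetical illustration of the PT1[0] mapping set just above.
    uint32_t ppn  = local_cxy << 20;        // e.g. local_cxy == 0x2  =>  ppn == 0x200000
    uint64_t base = (uint64_t)ppn << 12;    // physical base of the local cluster: 0x200000000

so the single PTE1 in slot[0] maps the kernel code at the base of the local cluster's physical memory.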
     
    9498                                     info->kcode_base,
    9599                                     info->kcode_size,
    96                                      0, 0,                  // file ofset and file size (unused)
    97                                      XPTR_NULL,             // no mapper
     100                                     0, 0,               // file offset and file size (unused)
     101                                     XPTR_NULL,          // no mapper
    98102                                     local_cxy );
    99103    if( vseg == NULL )
    100104    {
    101105        printk("\n[PANIC] in %s : cannot register vseg to VSL in cluster %x\n",
    102         __FUNCTION__ , cxy );
     106        __FUNCTION__ , local_cxy );
    103107        hal_core_sleep();
    104108    }
    105109
    106110#if DEBUG_HAL_VMM
    107 printk("\n[%s] thread[%x,%x] registered kcode vseg[%x,%x]\n",
    108 __FUNCTION__, this->process->pid, this->trdid, info->kcode_base, info->kcode_size );
     111printk("\n[%s] thread[%x,%x] registered kcode vseg[%x,%x] in cluster %x\n",
     112__FUNCTION__, this->process->pid, this->trdid, info->kcode_base, info->kcode_size, local_cxy );
    109113hal_vmm_display( &process_zero , true );
    110114#endif
     
    194198
    195199//////////////////////////////////////////
    196 void hal_vmm_display( process_t * process,
    197                       bool_t      mapping )
     200void hal_vmm_display( xptr_t   process_xp,
     201                      bool_t   mapping )
    198202{
    199     // get pointer on process VMM
    200     vmm_t * vmm = &process->vmm;
     203    // get target process cluster and local pointer
     204    process_t * process_ptr = GET_PTR( process_xp );
     205    cxy_t       process_cxy = GET_CXY( process_xp );
     206
     207    // get local pointer on target process VMM
     208    vmm_t * vmm = &process_ptr->vmm;
    201209
    202210    // get pointers on TXT0 chdev
     
    205213    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    206214
    207     // build extended pointer on TXT0 lock and VSL lock
     215    // build extended pointer on TXT0 lock
    208216    xptr_t  txt_lock_xp = XPTR( txt0_cxy  , &txt0_ptr->wait_lock );
    209     xptr_t  vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    210 
    211     // get root of vsegs list
    212     xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    213 
    214     // get the locks protecting TXT0, VSL, and GPT
     217
     218    // build extended pointers on VSL lock and VSL root
     219    xptr_t vsl_root_xp = XPTR( process_cxy , &vmm->vsegs_root );
     220    xptr_t vsl_lock_xp = XPTR( process_cxy , &vmm->vsl_lock );
     221
     222    // get the locks protecting TXT0 and VSL
    215223    remote_rwlock_rd_acquire( vsl_lock_xp );
    216224    remote_busylock_acquire( txt_lock_xp );
    217225
    218     nolock_printk("\n***** VSL and GPT for process %x in cluster %x / PT1 = %x\n",
    219     process->pid , local_cxy , vmm->gpt.ptr );
    220 
    221     if( xlist_is_empty( root_xp ) )
     226    // get PID and PT1 values
     227    pid_t      pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     228    uint32_t * pt1 = hal_remote_lpt( XPTR( process_cxy , &vmm->gpt.ptr ) );
     229
     230    nolock_printk("\n***** VSL and GPT / process %x / cluster %x / PT1 %x / cycle %d\n",
     231    pid , process_cxy , pt1 , (uint32_t)hal_get_cycles() );
     232
     233    if( xlist_is_empty( vsl_root_xp ) )
    222234    {
    223235        nolock_printk("   ... no vsegs registered\n");
     
    227239        xptr_t         iter_xp;
    228240        xptr_t         vseg_xp;
    229         vseg_t       * vseg;
    230 
    231         XLIST_FOREACH( root_xp , iter_xp )
     241        vseg_t       * vseg_ptr;
     242        cxy_t          vseg_cxy;
     243        intptr_t       min;
     244        intptr_t       max;
     245        uint32_t       type;
     246        intptr_t       vpn_base;
     247        intptr_t       vpn_size;
     248
     249        XLIST_FOREACH( vsl_root_xp , iter_xp )
    232250        {
    233             vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    234             vseg    = GET_PTR( vseg_xp );
     251            vseg_xp  = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     252            vseg_ptr = GET_PTR( vseg_xp );
     253            vseg_cxy = GET_CXY( vseg_xp );
     254
     255            type     =           hal_remote_l32( XPTR( vseg_cxy , &vseg_ptr->type ) );
     256            min      = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->min ) );
     257            max      = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->max ) );
     258            vpn_size = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->vpn_size ) );
     259            vpn_base = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->vpn_base ) );
    235260
    236261            nolock_printk(" - %s : base = %X / size = %X / npages = %d\n",
    237             vseg_type_str(vseg->type), vseg->min, vseg->max - vseg->min, vseg->vpn_size );
     262            vseg_type_str(type), min, max - min, vpn_size );
    238263
    239264            if( mapping ) 
    240265            {
    241                 vpn_t    vpn     = vseg->vpn_base;
    242                 vpn_t    vpn_max = vpn + vseg->vpn_size;
     266                vpn_t    vpn     = vpn_base;
     267                vpn_t    vpn_max = vpn_base + vpn_size;
    243268                ppn_t    ppn;
    244269                uint32_t attr;
     
    246271                while( vpn < vpn_max )   // scan the PTEs
    247272                {
    248                     hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
     273                    hal_gpt_get_pte( XPTR( process_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
    249274
    250275                    if( attr & GPT_MAPPED )
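Since hal_vmm_display() now takes an extended pointer and performs all its accesses with hal_remote_l32() / hal_remote_lpt(), it can be called from any cluster on a process descriptor located in another cluster. A minimal usage sketch (owner_cxy and process_ptr are hypothetical names for an already known cluster identifier and local pointer):

    // Display the VSL, and optionally the GPT mapping, of a possibly remote process.
    xptr_t process_xp = XPTR( owner_cxy , process_ptr );
    hal_vmm_display( process_xp , true );    // true => also display the GPT mapping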