Changeset 587


Ignore:
Timestamp:
Nov 1, 2018, 12:39:27 PM (3 years ago)
Author:
alain
Message:

Modify the GPT (Generic Page Table) API to support remote accesses,
in order to improve page faults and COW handling.

Location:
trunk/hal
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/hal/generic/hal_atomic.h

    r505 r587  
    6060 *****************************************************************************************
    6161 * @ ptr     : pointer on the shared variable (signed or unsigned)
    62  * @ val     : signed value to add
     62 * @ val     : value to add
    6363 * @ return shared variable value before add
    6464 ****************************************************************************************/
  • trunk/hal/generic/hal_exception.h

    r480 r587  
    3535//   => The hal_do_exception() function call the generic vmm_handle_page_fault(),
    3636//      or the fpu_handle_exception() function, and the calling thread resumes execution
    37 //      when the exception has been handled.
     37//      when the exception can be successfully handled.
    3838//
    3939// - USER_ERROR : exceptions such a "illegal vaddr" or "illegal write access" are fatal.
     
    5151
    5252/*****************************************************************************************
     53 * This enum defines the global exception types after analysis by the exception handler.
     54 ****************************************************************************************/
     55
     56typedef enum
     57{
     58    EXCP_NON_FATAL,
     59    EXCP_USER_ERROR,
     60    EXCP_KERNEL_PANIC,
     61}
     62exception_handling_type_t;
     63
     64
     65/*****************************************************************************************
    5366 * This function is called by the hal_kentry() function when an exception is detected by
    5467 * the hardware for a given thread running on a given core.
  • trunk/hal/generic/hal_gpt.h

    r457 r587  
    126126
    127127/****************************************************************************************
    128  * This function map a local GPT entry identified by its VPN, from values defined
    129  * by the ppn and attr arguments. It allocates physical memory for the local generic
    130  * page table itself if required.
     128 * This function map a - local or remote - GPT entry identified by its VPN, from values
     129 * defined by the <ppn> and <attr> arguments. It allocates physical memory in remote
     130 * cluster for the GPT PT2, using a RPC_PMEM_GET_PAGES, if required.
    131131 ****************************************************************************************
    132132 * @ gpt       : [in] pointer on the page table
     
    136136 * @ returns 0 if success / returns ENOMEM if error
    137137 ***************************************************************************************/
    138 error_t hal_gpt_set_pte( gpt_t    * gpt,
     138error_t hal_gpt_set_pte( xptr_t     gpt_xp,
    139139                         vpn_t      vpn,
    140140                         uint32_t   attr,
     
    143143/****************************************************************************************
    144144 * This function unmaps a page table entry identified by the <vpn> argument in the
    145  * local page table identified by the <gpt> argument.
     145 * local GPT identified by the <gpt> argument.
    146146 * It does NOT release the physical memory allocated for the unmapped page.
    147147 ****************************************************************************************
     
    153153
    154154/****************************************************************************************
    155  * This function returns in the <attr> and <ppn> arguments the current values
    156  * stored in a GPT entry, identified by the <gpt> and <vpn> arguments.
    157  ****************************************************************************************
    158  * @ gpt_xp    : [in]  pointer on the page table
     155 * This function returns in the <attr> and <ppn> arguments the current values stored
     156 * in a -local or remote - GPT entry, identified by the <gpt> and <vpn> arguments.
     157 ****************************************************************************************
     158 * @ gpt_xp    : [in]  extended pointer on the page table
    159159 * @ vpn       : [in]  virtual page number
    160160 * @ attr      : [out] generic attributes
    161161 * @ ppn       : [out] physical page number
    162162 ***************************************************************************************/
    163 void hal_gpt_get_pte( gpt_t    * gpt,
     163void hal_gpt_get_pte( xptr_t     gpt_xp,
    164164                      vpn_t      vpn,
    165165                      uint32_t * attr,
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r570 r587  
    4646
    4747//////////////////////////////////////////////////////////////////////////////////////////
    48 // This enum defines the global exception types after analysis by the exception handler.
    49 //////////////////////////////////////////////////////////////////////////////////////////
    50 
    51 typedef enum
    52 {
    53     EXCP_NON_FATAL,
    54     EXCP_USER_ERROR,
    55     EXCP_KERNEL_PANIC,
    56 }
    57 exception_handling_type_t;
    58 
    59 //////////////////////////////////////////////////////////////////////////////////////////
    6048// This enum defines the mask values for an MMU exception code reported by the mips32.
    6149//////////////////////////////////////////////////////////////////////////////////////////
     
    219207uint32_t cycle = (uint32_t)hal_get_cycles();
    220208if( DEBUG_HAL_EXCEPTIONS < cycle )
    221 printk("\n[DBG] %s : thread %x in process %x enter / is_ins %d / %s / vaddr %x / cycle %d\n",
    222 __FUNCTION__, this->trdid, process->pid,
     209printk("\n[DBG] %s : thread[%x,%x] enter / is_ins %d / %s / vaddr %x / cycle %d\n",
     210__FUNCTION__, process->pid, this->trdid,
    223211is_ins, hal_mmu_exception_str(excp_code), bad_vaddr, cycle);
    224212#endif
     
    234222            // try to map the unmapped PTE
    235223            error = vmm_handle_page_fault( process,
    236                                            bad_vaddr >> CONFIG_PPM_PAGE_SHIFT,  // vpn
    237                                            false );                             // not a COW
    238             if( error )   
    239             {
    240                 printk("\n[USER ERROR] in %s for thread %x in process %x\n"
    241                 "   cannot map vaddr = %x / is_ins %d / epc %x\n",
    242                 __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
    243 
    244                         return EXCP_USER_ERROR;
    245             }
    246             else            // page fault successful
     224                                           bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
     225
     226            if( error == EXCP_NON_FATAL )            // page-fault successfully handled
    247227            {
    248228
     
    250230cycle = (uint32_t)hal_get_cycles();
    251231if( DEBUG_HAL_EXCEPTIONS < cycle )
    252 printk("\n[DBG] %s : thread %x in process %x exit / page-fault handled for vaddr = %x\n",
    253 __FUNCTION__, this->trdid, process->pid, bad_vaddr );
     232printk("\n[DBG] %s : thread[%x,%x] exit / page-fault handled for vaddr = %x\n",
     233__FUNCTION__, process->pid, this->trdid, bad_vaddr );
    254234#endif
    255235 
    256236                return EXCP_NON_FATAL;
    257237            }
     238            else if( error == EXCP_USER_ERROR )      // illegal vaddr
     239            {
     240                printk("\n[USER ERROR] in %s for thread %x in process %x\n"
     241                "   illegal vaddr = %x / is_ins %d / epc %x\n",
     242                __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     243
     244                        return EXCP_USER_ERROR;
     245            }
     246            else  // error == EXCP_KERNEL_PANIC 
     247            {
     248                printk("\n[KERNEL ERROR] in %s for thread %x in process %x\n"
     249                "   no memory to map vaddr = %x / is_ins %d / epc %x\n",
     250                __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     251
     252                        return EXCP_KERNEL_PANIC;
     253            }
    258254        }
    259255        case MMU_WRITE_PRIVILEGE_VIOLATION:  // illegal access user error
    260256        case MMU_READ_PRIVILEGE_VIOLATION:
    261257        {
    262             printk("\n[USER ERROR] in %s for thread %x in process %x\n"
     258            printk("\n[USER ERROR] in %s : thread %x in process %x\n"
    263259            "   illegal user access to vaddr = %x / is_ins %d / epc %x\n",
    264260            __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     
    275271            {
    276272                // try to allocate and copy the page
    277                 error = vmm_handle_page_fault( process,
    278                                                bad_vaddr >> CONFIG_PPM_PAGE_SHIFT,  // vpn
    279                                                true );                              // COW
    280                 if( error )
    281                 {
    282                     printk("\n[USER ERROR] in %s for thread %x in process %x\n"
    283                     "   cannot cow vaddr = %x / is_ins %d / epc %x\n",
    284                     __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
    285 
    286                             return EXCP_USER_ERROR;
    287                 }
    288                 else         // Copy on write successful
     273                error = vmm_handle_cow( process,
     274                                        bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
     275
     276                if( error == EXCP_NON_FATAL )        // Copy on write successful
    289277                {
    290278
     
    292280cycle = (uint32_t)hal_get_cycles();
    293281if( DEBUG_HAL_EXCEPTIONS < cycle )
    294 printk("\n[DBG] %s : thread %x in process %x exit / copy-on-write handled for vaddr = %x\n",
    295 __FUNCTION__, this->trdid, process->pid, bad_vaddr );
     282printk("\n[DBG] %s : thread[%x,%x] exit / copy-on-write handled for vaddr = %x\n",
     283__FUNCTION__, process->pid, this->trdid, bad_vaddr );
    296284#endif
    297285
    298286                    return EXCP_NON_FATAL;
    299287                }
     288                else if( error == EXCP_USER_ERROR )  // illegal user access
     289                {
     290                    printk("\n[USER ERROR] in %s : thread %x in process %x\n"
     291                    "   cannot cow vaddr = %x / is_ins %d / epc %x\n",
     292                    __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     293
     294                            return EXCP_USER_ERROR;
     295                }
     296                else   // error == EXCP_KERNEL_PANIC
     297                {
     298                    printk("\n[KERNEL ERROR] in %s : thread %x in process %x\n"
     299                "   no memory to cow vaddr = %x / is_ins %d / epc %x\n",
     300                    __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     301
     302                            return EXCP_USER_ERROR;
     303                }
    300304            }
    301305            else                             // non writable user error
    302306            {
    303                 printk("\n[USER ERROR] in %s for thread %x in process %x\n"
     307                printk("\n[USER ERROR] in %s : thread %x in process %x\n"
    304308                "   non-writable vaddr = %x / is_ins %d / epc %x\n",
    305309                __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     
    310314        case MMU_READ_EXEC_VIOLATION:        // user error
    311315        {
    312             printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
     316            printk("\n[USER_ERROR] in %s : thread %x in process %x\n"
    313317            "   non-executable vaddr = %x / is_ins %d / epc %x\n",
    314318            __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
     
    318322        default:                             // this is a kernel error   
    319323        {
    320             printk("\n[KERNEL ERROR] in %s for thread %x in process %x\n"
     324            printk("\n[KERNEL ERROR] in %s : thread %x in process %x\n"
    321325            "  epc %x / badvaddr %x / is_ins %d\n",
    322326            __FUNCTION__, this->trdid, this->process->pid, excPC, bad_vaddr, is_ins );
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r570 r587  
    132132    xptr_t     page_xp;
    133133
     134    thread_t * this = CURRENT_THREAD;
     135
    134136#if DEBUG_HAL_GPT_CREATE
    135 uint32_t cycle = (uint32_t)hal_get_cycles;
     137uint32_t cycle = (uint32_t)hal_get_cycles();
    136138if( DEBUG_HAL_GPT_CREATE < cycle )
    137 printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    138 __FUNCTION__, CURRENT_THREAD, cycle );
     139printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
     140__FUNCTION__, this->process->pid, this->trdid, cycle );
    139141#endif
    140142
     
    152154        if( page == NULL )
    153155    {
    154         printk("\n[ERROR] in %s : cannot allocate memory for PT1\n", __FUNCTION__ );
     156        printk("\n[PANIC] in %s : no memory for PT1 / process %x / cluster %x\n",
     157        __FUNCTION__, this->process->pid, local_cxy );
    155158        return ENOMEM;
    156159    }
     
    162165
    163166#if DEBUG_HAL_GPT_CREATE
    164 cycle = (uint32_t)hal_get_cycles;
     167cycle = (uint32_t)hal_get_cycles();
    165168if( DEBUG_HAL_GPT_CREATE < cycle )
    166 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    167 __FUNCTION__, CURRENT_THREAD, cycle );
     169printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
     170__FUNCTION__, this->process->pid, this->trdid, cycle );
    168171#endif
    169172
     
    188191
    189192#if DEBUG_HAL_GPT_DESTROY
    190 uint32_t cycle = (uint32_t)hal_get_cycles;
     193uint32_t   cycle = (uint32_t)hal_get_cycles();
     194thread_t * this  = CURRENT_THREAD;
    191195if( DEBUG_HAL_GPT_DESTROY < cycle )
    192 printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    193 __FUNCTION__, CURRENT_THREAD, cycle );
     196printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
     197__FUNCTION__, this->process->pid, this->trdid, cycle );
    194198#endif
    195199
     
    230234                pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    231235                xptr_t base_xp = ppm_ppn2base( pt2_ppn );
    232                 pt2 = (uint32_t *)GET_PTR( base_xp );
     236                pt2     = GET_PTR( base_xp );
    233237
    234238                // scan the PT2 to release all entries VALID and USER if reference cluster
     
    261265
    262266#if DEBUG_HAL_GPT_DESTROY
    263 cycle = (uint32_t)hal_get_cycles;
     267cycle = (uint32_t)hal_get_cycles();
    264268if( DEBUG_HAL_GPT_DESTROY < cycle )
    265 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    266 __FUNCTION__, CURRENT_THREAD, cycle );
     269printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
     270__FUNCTION__, this->process->pid, this->trdid, cycle );
    267271#endif
    268272
     
    309313                pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    310314                xptr_t base_xp = ppm_ppn2base ( pt2_ppn );
    311                 pt2 = (uint32_t *)GET_PTR( base_xp );
     315                pt2     = GET_PTR( base_xp );
    312316
    313317                // scan the PT2
     
    330334
    331335
    332 ///////////////////////////////////////
    333 error_t hal_gpt_set_pte( gpt_t   * gpt,
     336//////////////////////////////////////////
     337error_t hal_gpt_set_pte( xptr_t    gpt_xp,
    334338                         vpn_t     vpn,
    335                          uint32_t  attr,     // generic GPT attributes
     339                         uint32_t  attr,     // GPT attributes
    336340                         ppn_t     ppn )
    337341{
    338     uint32_t          * pt1;                 // PT1 base addres
    339         uint32_t          * pte1_ptr;            // pointer on PT1 entry
    340         uint32_t            pte1;                // PT1 entry value
     342    cxy_t               gpt_cxy;             // target GPT cluster
     343    gpt_t             * gpt_ptr;             // target GPT local pointer
     344    uint32_t          * pt1_ptr;             // local pointer on PT1
     345        xptr_t              pte1_xp;             // extended pointer on PT1 entry
     346        uint32_t            pte1;                // PT1 entry value if PTE1
    341347
    342348        ppn_t               pt2_ppn;             // PPN of PT2
    343         uint32_t          * pt2;                 // PT2 base address
     349        uint32_t          * pt2_ptr;             // PT2 base address
    344350
    345351        uint32_t            small;               // requested PTE is for a small page
    346     bool_t              success;             // exit condition for while loop below
    347352
    348353        page_t            * page;                // pointer on new physical page descriptor
     
    354359    uint32_t            tsar_attr;           // PTE attributes for TSAR MMU
    355360
    356 #if DEBUG_HAL_GPT_ACCESS
    357 uint32_t cycle = (uint32_t)hal_get_cycles;
    358 if( DEBUG_HAL_GPT_ACCESS < cycle )
    359 printk("\n[DBG] %s : thread %x enter / vpn %x / attr %x / ppn %x / cycle %d\n",
    360 __FUNCTION__, CURRENT_THREAD, vpn, attr, ppn, cycle );
     361    thread_t * this = CURRENT_THREAD;
     362
     363    // get cluster and local pointer on GPT
     364    gpt_cxy = GET_CXY( gpt_xp );
     365    gpt_ptr = GET_PTR( gpt_xp );
     366
     367#if DEBUG_HAL_GPT_SET_PTE
     368uint32_t cycle = (uint32_t)hal_get_cycles();
     369if( DEBUG_HAL_GPT_SET_PTE < cycle )
     370printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
     371__FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
    361372#endif
    362373
     
    365376    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    366377
    367     pt1   = gpt->ptr;
    368         small = attr & GPT_SMALL;
     378    pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
     379        small   = attr & GPT_SMALL;
    369380
    370381    // compute tsar attributes from generic attributes
    371382    tsar_attr = gpt2tsar( attr );
    372383
    373 #if (DEBUG_HAL_GPT_ACCESS & 1)
    374 if( DEBUG_HAL_GPT_ACCESS < cycle )
    375 printk("\n[DBG] %s : thread %x / vpn %x / &pt1 %x / tsar_attr %x\n",
    376 __FUNCTION__, CURRENT_THREAD, vpn, pt1, tsar_attr );
    377 #endif
    378 
    379     // get pointer on PT1[ix1]
    380         pte1_ptr  = &pt1[ix1];
    381 
    382     // PTE1 (big page) are only set for the kernel vsegs, in the kernel init phase.
    383     // There is no risk of concurrent access.
    384         if( small == 0 )
    385     {
    386         // get current pte1 value
    387         pte1 = *pte1_ptr;
    388 
     384    // build extended pointer on PTE1 = PT1[ix1]
     385        pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
     386
     387    // get current pte1 value
     388    pte1 = hal_remote_l32( pte1_xp );
     389
     390        if( small == 0 )     // map a big page in PT1
     391    {
    389392        assert( (pte1 == 0) ,
    390393                "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
    391394     
    392         // set the PTE1
    393                 *pte1_ptr = (tsar_attr  & TSAR_MMU_PTE1_ATTR_MASK) |
    394                     ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
     395        // set the PTE1 value in PT1
     396        pte1 = (tsar_attr  & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
     397        hal_remote_s32( pte1_xp , pte1 );
    395398                hal_fence();
     399
     400#if DEBUG_HAL_GPT_SET_PTE
     401if( DEBUG_HAL_GPT_SET_PTE < cycle )
     402printk("\n[DBG] %s : thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
     403__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
     404#endif
     405
    396406                return 0;
    397407        }
    398 
    399     // From this point, the requested PTE is a PTE2 (small page)
    400 
    401     // loop to access PTE1 and get pointer on PT2
    402     success = false;
    403     do
    404     {
    405         // get current pte1 value
    406         pte1 = *pte1_ptr;
    407        
    408 #if (DEBUG_HAL_GPT_ACCESS & 1)
    409 if( DEBUG_HAL_GPT_ACCESS < cycle )
    410 printk("\n[DBG] %s : thread %x / vpn %x / current_pte1 %x\n",
    411 __FUNCTION__, CURRENT_THREAD, vpn, pte1 );
    412 #endif
    413        
    414         // allocate a PT2 if PT1 entry not valid
    415         if( (pte1 & TSAR_MMU_MAPPED) == 0 )             // PT1 entry not valid
    416             {
    417             // allocate one physical page for the PT2
    418                 kmem_req_t req;
    419                 req.type  = KMEM_PAGE;
    420                 req.size  = 0;                       // 1 small page
    421                 req.flags = AF_KERNEL | AF_ZERO;
    422             page = (page_t *)kmem_alloc( &req );
     408    else                 // map a small page in PT1 & PT2
     409    {
     410        if( (pte1 & TSAR_MMU_MAPPED) == 0 )    // PT1 entry unmapped => map it
     411        {
     412            // allocate one physical page for PT2
     413            if( gpt_cxy == local_cxy )
     414            {
     415                    kmem_req_t req;
     416                    req.type  = KMEM_PAGE;
     417                    req.size  = 0;                     // 1 small page
     418                    req.flags = AF_KERNEL | AF_ZERO;
     419                    page = (page_t *)kmem_alloc( &req );
     420            }
     421            else
     422            {
     423                rpc_pmem_get_pages_client( gpt_cxy , 0 , &page );
     424            }
     425
    423426            if( page == NULL )
    424427            {
    425                         printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
     428                printk("\n[PANIC] in %s : no memory for GPT PT2 / process %x / cluster %x\n",
     429                __FUNCTION__, this->process->pid, gpt_cxy );
    426430                return ENOMEM;
    427431            }
    428432
    429433            // get the PT2 PPN
    430             page_xp = XPTR( local_cxy , page );       
     434            page_xp = XPTR( gpt_cxy , page );       
    431435            pt2_ppn = ppm_page2ppn( page_xp );
    432436
    433             // try to atomicaly set the PT1 entry
     437            // build PTD1 value
    434438            pte1 = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn;
    435                     success = hal_atomic_cas( pte1_ptr , 0 , pte1 );
    436 
    437             // release allocated PT2 if PT1 entry modified by another thread
    438             if( success == false ) ppm_free_pages( page );
     439
     440            // set the PTD1 value in PT1
     441            hal_remote_s32( pte1_xp , pte1 );
     442
     443#if DEBUG_HAL_GPT_SET_PTE
     444if( DEBUG_HAL_GPT_SET_PTE < cycle )
     445printk("\n[DBG] %s : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
     446__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
     447#endif
    439448        }
    440         else                                           // PT1 entry is valid
     449        else                                   // pt1 entry mapped => use it
    441450        {
    442             // This valid entry must be a PTD1
    443             assert( (pte1 & TSAR_MMU_SMALL) ,
    444             "try to set a small page in a big PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
    445 
    446             success = true;
     451
     452#if DEBUG_HAL_GPT_SET_PTE
     453if( DEBUG_HAL_GPT_SET_PTE < cycle )
     454printk("\n[DBG] %s : thread[%x,%x] get PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
     455__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
     456#endif
     457
    447458        }
    448459
    449460        // get PT2 base from pte1
    450461            pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    451             pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
    452 
    453 #if (DEBUG_HAL_GPT_ACCESS & 1)
    454 if( DEBUG_HAL_GPT_ACCESS < cycle )
    455 printk("\n[DBG] %s : thread %x / vpn %x / pte1 %x / &pt2 %x\n",
    456 __FUNCTION__, CURRENT_THREAD, vpn, pte1, pt2 );
    457 #endif
    458        
     462            pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     463
     464        // set PTE2 in PT2 (in this order)
     465            hal_remote_s32( XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] ) , ppn );
     466            hal_fence();
     467            hal_remote_s32( XPTR( gpt_cxy , &pt2_ptr[2 * ix2] ) , tsar_attr );
     468            hal_fence();
     469
     470#if DEBUG_HAL_GPT_SET_PTE
     471if( DEBUG_HAL_GPT_SET_PTE < cycle )
     472printk("\n[DBG] %s : thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
     473__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix2, pt2_ptr, tsar_attr, ppn );
     474#endif
     475
     476            return 0;
    459477    }
    460     while (success == false);
    461 
    462     // set PTE2 in this order
    463         pt2[2 * ix2 + 1] = ppn;
    464         hal_fence();
    465         pt2[2 * ix2]     = tsar_attr;
    466         hal_fence();
    467 
    468 #if DEBUG_HAL_GPT_ACCESS
    469 cycle = (uint32_t)hal_get_cycles;
    470 if( DEBUG_HAL_GPT_ACCESS < cycle )
    471 printk("\n[DBG] %s : thread %x exit / vpn %x / pte2_attr %x / pte2_ppn %x / cycle %d\n",
    472 __FUNCTION__, CURRENT_THREAD, vpn, pt2[2 * ix2], pt2[2 * ix2 + 1], cycle );
    473 #endif
    474 
    475         return 0;
    476 
    477478} // end of hal_gpt_set_pte()
    478479
    479 
    480 /////////////////////////////////////
    481 void hal_gpt_get_pte( gpt_t    * gpt,
     480////////////////////////////////////////
     481void hal_gpt_get_pte( xptr_t     gpt_xp,
    482482                      vpn_t      vpn,
    483483                      uint32_t * attr,
     
    490490    ppn_t      pt2_ppn;
    491491
     492    // get cluster and local pointer on GPT
     493    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
     494    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
     495
     496    // compute indexes in PT1 and PT2
    492497    uint32_t   ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    493498    uint32_t   ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    494499
    495     // get PTE1 value
    496         pt1  = gpt->ptr;
    497     pte1 = pt1[ix1];
    498 
     500    // get PT1 base
     501    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
     502   
     503    // get pte1
     504    pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
     505
     506    // check PTE1 mapped
    499507        if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
    500508        {
    501509                *attr = 0;
    502510                *ppn  = 0;
     511        return;
    503512        }
    504513
     514    // access GPT
    505515        if( (pte1 & TSAR_MMU_SMALL) == 0 )     // it's a PTE1
    506516        {
     517        // get PPN & ATTR from PT1
    507518                *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
    508519        *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
    509520        }
    510     else                              // it's a PTD1
     521    else                                  // it's a PTD1
    511522    {
    512523        // compute PT2 base address
    513524        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    514         pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
    515 
    516             *ppn  = pt2[2*ix2+1] & ((1<<TSAR_MMU_PPN_WIDTH)-1);
    517             *attr = tsar2gpt( pt2[2*ix2] );
     525        pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     526
     527        // get PPN & ATTR from PT2
     528        *ppn  = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2+1] ) ) & ((1<<TSAR_MMU_PPN_WIDTH)-1);
     529        *attr = tsar2gpt( hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) ) );
    518530    }
    519531} // end hal_gpt_get_pte()
     
    528540    ppn_t      pt2_ppn;     // PPN of PT2
    529541    uint32_t * pt2;         // PT2 base address
    530 
    531     ppn_t      ppn;         // PPN of page to be released
    532542
    533543    // get ix1 & ix2 indexes
     
    546556        if( (pte1 & TSAR_MMU_SMALL) == 0 )      // it's a PTE1
    547557        {
    548         // get PPN
    549         ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
    550 
    551558        // unmap the big page
    552559        pt1[ix1] = 0;
     
    559566        // compute PT2 base address
    560567        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    561         pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     568        pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    562569       
    563         // get PPN
    564             ppn = TSAR_MMU_PPN_FROM_PTE2( pt2[2*ix2+1] );
    565 
    566570        // unmap the small page
    567             pt2[2*ix2]   = 0;            // only attr is reset
     571            pt2[2*ix2]   = 0;         
    568572            hal_fence();       
    569573
     
    623627        page_xp = XPTR( local_cxy , page );
    624628        pt2_ppn = ppm_page2ppn( page_xp );
    625         pt2     = (uint32_t *)GET_PTR( ppm_page2base( page_xp ) );
     629        pt2     = GET_PTR( ppm_page2base( page_xp ) );
    626630
    627631        // try to set the PT1 entry
     
    643647            // get the PT2 base address
    644648                        pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
    645                         pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     649                        pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    646650                }
    647651        }
     
    660664
    661665        // compute pointer on PT2 base
    662             pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     666            pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    663667    }
    664668   
     
    720724    // get pointer on PT2 base
    721725    pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
    722     pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     726    pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    723727 
    724728    // get pointer on PTE2
     
    762766    uint32_t   * dst_pt2;   // local pointer on DST PT2
    763767
    764         kmem_req_t   req;       // for dynamic PT2 allocation
     768        kmem_req_t   req;       // for PT2 allocation
    765769
    766770    uint32_t     src_pte1;
     
    776780    ppn_t        dst_pt2_ppn;
    777781
    778 #if DEBUG_HAL_GPT_ACCESS
    779 uint32_t cycle = (uint32_t)hal_get_cycles;
    780 if( DEBUG_HAL_GPT_ACCESS < cycle )
    781 printk("\n[DBG] %s : thread %x enter / vpn %x / cycle %d\n",
    782 __FUNCTION__, CURRENT_THREAD, vpn, cycle );
    783 #endif
    784 
    785782    // get remote src_gpt cluster and local pointer
    786783    src_cxy = GET_CXY( src_gpt_xp );
    787     src_gpt = (gpt_t *)GET_PTR( src_gpt_xp );
     784    src_gpt = GET_PTR( src_gpt_xp );
     785
     786#if DEBUG_HAL_GPT_COPY
     787uint32_t   cycle = (uint32_t)hal_get_cycles();
     788thread_t * this  = CURRENT_THREAD;
     789if( DEBUG_HAL_GPT_COPY < cycle )
     790printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
     791__FUNCTION__, this->process->pid, this->trdid, vpn, src_cxy, local_cxy, cycle );
     792#endif
     793
     794    // get remote src_gpt cluster and local pointer
     795    src_cxy = GET_CXY( src_gpt_xp );
     796    src_gpt = GET_PTR( src_gpt_xp );
    788797
    789798    // get remote src_pt1 and local dst_pt1
     
    837846        // get pointer on src_pt2
    838847        src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
    839         src_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
     848        src_pt2     = GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
    840849
    841850        // get pointer on dst_pt2
    842851        dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
    843         dst_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
     852        dst_pt2     = GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
    844853
    845854        // get attr and ppn from SRC_PT2
     
    867876            *ppn    = src_pte2_ppn;
    868877       
    869 #if DEBUG_HAL_GPT_ACCESS
     878#if DEBUG_HAL_GPT_COPY
    870879cycle = (uint32_t)hal_get_cycles;
    871 if( DEBUG_HAL_GPT_ACCESS < cycle )
    872 printk("\n[DBG] %s : thread %x exit / copy done for vpn %x / cycle %d\n",
    873 __FUNCTION__, CURRENT_THREAD, vpn, cycle );
     880if( DEBUG_HAL_GPT_COPY < cycle )
     881printk("\n[DBG] %s : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
     882__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
    874883#endif
    875884
     
    884893    *ppn    = 0;
    885894   
    886 #if DEBUG_HAL_GPT_ACCESS
     895#if DEBUG_HAL_GPT_COPY
    887896cycle = (uint32_t)hal_get_cycles;
    888 if( DEBUG_HAL_GPT_ACCESS < cycle )
    889 printk("\n[DBG] %s : thread %x exit / nothing done for vpn %x / cycle %d\n",
    890 __FUNCTION__, CURRENT_THREAD, vpn, cycle );
     897if( DEBUG_HAL_GPT_COPY < cycle )
     898printk("\n[DBG] %s : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
     899__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
    891900#endif
    892901
     
    921930    // compute PT2 base address
    922931    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    923     pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     932    pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    924933
    925934    // get pte2_attr
     
    955964    // compute PT2 base address
    956965    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    957     pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     966    pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    958967
    959968    // get pte2_attr
     
    989998    // get GPT cluster and local pointer
    990999    gpt_cxy = GET_CXY( gpt_xp );
    991     gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
     1000    gpt_ptr = GET_PTR( gpt_xp );
    9921001
    9931002    // get local PT1 pointer
     
    10081017            // compute PT2 base address
    10091018            pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    1010             pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
     1019            pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    10111020
    10121021            assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ),
     
    10501059    // get cluster and local pointer on remote GPT
    10511060    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
    1052     gpt_t * gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
     1061    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
    10531062
    10541063    // compute indexes in PT1 and PT2
     
    10701079    // get PT2 base from PTE1
    10711080    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    1072     pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
    1073 
    1074     // reset PTE2
    1075         hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2]     ) , 0 );
    1076         hal_fence();
     1081    pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    10771082
    10781083    // set PTE2 in this order
  • trunk/hal/tsar_mips32/core/hal_vmm.c

    r579 r587  
    11/*
    2  * hal_vmm.c - Generic Virtual Memory Manager Initialisation for TSAR
     2 * hal_vmm.c - Virtual Memory Manager Initialisation for TSAR
    33 *
    44 * Authors  Alain Greiner (2016,2017)
     
    3838// identity mapped. The following function is called by the generic vmm_init() function
    3939// and identity map all pages of the "kentry" vseg.
     40// We don't take the locks protecting the VSL and the GPT, because there is no concurrent
     41// accesses to VMM during VMM initialization.
    4042//////////////////////////////////////////////////////////////////////////////////////////
    4143
     
    4446{
    4547    error_t error;
    46 
    47     // get pointer on GPT
    48     gpt_t * gpt = &vmm->gpt;
    4948
    5049    // map all pages of "kentry" vseg
     
    5554         vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE); vpn++ )
    5655    {
    57         error = hal_gpt_set_pte( gpt,
     56        error = hal_gpt_set_pte( XPTR( local_cxy , &vmm->gpt ),
    5857                                 vpn,
    5958                                 attr,
     
    6261        if( error ) return error;
    6362    }
    64 
    65     // get extended pointer on lock protecting the VSL
    66     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    67 
    68     // get VSL lock
    69     remote_rwlock_wr_acquire( lock_xp );
    7063
    7164    // scan the VSL to found the "kentry" vseg
     
    9083    }
    9184
    92     // release the VSL lock
    93     remote_rwlock_wr_release( lock_xp );
    94 
    95     if( found == false ) return error;
     85    if( found == false ) return 0XFFFFFFFF;
    9686
    9787    return 0;
  • trunk/hal/tsar_mips32/drivers/soclib_mty.c

    r570 r587  
    546546    // get extended pointers on MTY_WRITE & MTY_STATUS registers
    547547    xptr_t write_xp  = XPTR( mty_cxy , mty_ptr + MTY_WRITE );
    548     xptr_t status_xp = XPTR( mty_cxy , mty_ptr + MTY_STATUS );
    549548
    550549    // loop on characters (two bytes per character)
Note: See TracChangeset for help on using the changeset viewer.