/*
 * hal_gpt.c - implementation of the Generic Page Table API for TSAR-MIPS32
 *
 * Author   Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_types.h>
#include <hal_gpt.h>
#include <hal_special.h>
#include <printk.h>
#include <bits.h>
#include <process.h>
#include <kmem.h>
#include <thread.h>
#include <cluster.h>
#include <ppm.h>
#include <page.h>

////////////////////////////////////////////////////////////////////////////////////////
// This defines the masks for the TSAR MMU PTE attributes (from the TSAR MMU specification)
////////////////////////////////////////////////////////////////////////////////////////

#define TSAR_MMU_MAPPED         0x80000000
#define TSAR_MMU_SMALL          0x40000000
#define TSAR_MMU_LOCAL          0x20000000
#define TSAR_MMU_REMOTE         0x10000000
#define TSAR_MMU_CACHABLE       0x08000000
#define TSAR_MMU_WRITABLE       0x04000000
#define TSAR_MMU_EXECUTABLE     0x02000000
#define TSAR_MMU_USER           0x01000000
#define TSAR_MMU_GLOBAL         0x00800000
#define TSAR_MMU_DIRTY          0x00400000

#define TSAR_MMU_COW            0x00000001       // only for small pages
#define TSAR_MMU_SWAP           0x00000004       // only for small pages
#define TSAR_MMU_LOCKED         0x00000008       // only for small pages

////////////////////////////////////////////////////////////////////////////////////////
// TSAR MMU related macros (from the TSAR MMU specification)
// - IX1 on 11 bits
// - IX2 on  9 bits
// - PPN on 28 bits
////////////////////////////////////////////////////////////////////////////////////////

#define TSAR_MMU_IX1_WIDTH                 11
#define TSAR_MMU_IX2_WIDTH                 9
#define TSAR_MMU_PPN_WIDTH                 28

#define TSAR_MMU_PTE1_ATTR_MASK            0xFFC00000
#define TSAR_MMU_PTE1_PPN_MASK             0x0007FFFF

#define TSAR_MMU_IX1_FROM_VPN( vpn )       ((vpn >> 9) & 0x7FF)
#define TSAR_MMU_IX2_FROM_VPN( vpn )       (vpn & 0x1FF)

#define TSAR_MMU_PTBA_FROM_PTE1( pte1 )    (pte1 & 0x0FFFFFFF)
#define TSAR_MMU_PPN_FROM_PTE1( pte1 )     ((pte1 & 0x0007FFFF)<<9)
#define TSAR_MMU_ATTR_FROM_PTE1( pte1 )    (pte1 & 0xFFC00000)

#define TSAR_MMU_PPN_FROM_PTE2( pte2 )     (pte2 & 0x0FFFFFFF)
#define TSAR_MMU_ATTR_FROM_PTE2( pte2 )    (pte2 & 0xFFC000FF)
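
////////////////////////////////////////////////////////////////////////////////////////
// Worked example (illustration only, the numeric values below are arbitrary) showing
// how the macros above decompose a VPN and a PTE1 :
//
//   vpn  = 0x12345;                               // virtual page number
//   ix1  = TSAR_MMU_IX1_FROM_VPN( vpn );          // (0x12345 >> 9) & 0x7FF = 0x091
//   ix2  = TSAR_MMU_IX2_FROM_VPN( vpn );          //  0x12345 & 0x1FF       = 0x145
//
//   pte1 = 0xC0001234;                            // MAPPED | SMALL | PTBA = 0x1234
//   ptba = TSAR_MMU_PTBA_FROM_PTE1( pte1 );       // 0x00001234 : PPN of the PT2
////////////////////////////////////////////////////////////////////////////////////////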
///////////////////////////////////////////////////////////////////////////////////////
// This static function translates the GPT attributes to the TSAR attributes
///////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t gpt2tsar( uint32_t gpt_attr )
{
    uint32_t tsar_attr = 0;

    if( gpt_attr & GPT_MAPPED     ) tsar_attr |= TSAR_MMU_MAPPED;
    if( gpt_attr & GPT_SMALL      ) tsar_attr |= TSAR_MMU_SMALL;
    if( gpt_attr & GPT_WRITABLE   ) tsar_attr |= TSAR_MMU_WRITABLE;
    if( gpt_attr & GPT_EXECUTABLE ) tsar_attr |= TSAR_MMU_EXECUTABLE;
    if( gpt_attr & GPT_CACHABLE   ) tsar_attr |= TSAR_MMU_CACHABLE;
    if( gpt_attr & GPT_USER       ) tsar_attr |= TSAR_MMU_USER;
    if( gpt_attr & GPT_DIRTY      ) tsar_attr |= TSAR_MMU_DIRTY;
    if( gpt_attr & GPT_ACCESSED   ) tsar_attr |= TSAR_MMU_LOCAL;
    if( gpt_attr & GPT_GLOBAL     ) tsar_attr |= TSAR_MMU_GLOBAL;
    if( gpt_attr & GPT_COW        ) tsar_attr |= TSAR_MMU_COW;
    if( gpt_attr & GPT_SWAP       ) tsar_attr |= TSAR_MMU_SWAP;
    if( gpt_attr & GPT_LOCKED     ) tsar_attr |= TSAR_MMU_LOCKED;

    return tsar_attr;
}

///////////////////////////////////////////////////////////////////////////////////////
// This static function translates the TSAR attributes to the GPT attributes
///////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t tsar2gpt( uint32_t tsar_attr )
{
    uint32_t gpt_attr = 0;

    if( tsar_attr & TSAR_MMU_MAPPED     ) gpt_attr |= GPT_MAPPED;
    if( tsar_attr & TSAR_MMU_MAPPED     ) gpt_attr |= GPT_READABLE;
    if( tsar_attr & TSAR_MMU_SMALL      ) gpt_attr |= GPT_SMALL;
    if( tsar_attr & TSAR_MMU_WRITABLE   ) gpt_attr |= GPT_WRITABLE;
    if( tsar_attr & TSAR_MMU_EXECUTABLE ) gpt_attr |= GPT_EXECUTABLE;
    if( tsar_attr & TSAR_MMU_CACHABLE   ) gpt_attr |= GPT_CACHABLE;
    if( tsar_attr & TSAR_MMU_USER       ) gpt_attr |= GPT_USER;
    if( tsar_attr & TSAR_MMU_DIRTY      ) gpt_attr |= GPT_DIRTY;
    if( tsar_attr & TSAR_MMU_LOCAL      ) gpt_attr |= GPT_ACCESSED;
    if( tsar_attr & TSAR_MMU_REMOTE     ) gpt_attr |= GPT_ACCESSED;
    if( tsar_attr & TSAR_MMU_GLOBAL     ) gpt_attr |= GPT_GLOBAL;
    if( tsar_attr & TSAR_MMU_COW        ) gpt_attr |= GPT_COW;
    if( tsar_attr & TSAR_MMU_SWAP       ) gpt_attr |= GPT_SWAP;
    if( tsar_attr & TSAR_MMU_LOCKED     ) gpt_attr |= GPT_LOCKED;

    return gpt_attr;
}

/////////////////////////////////////
error_t hal_gpt_create( gpt_t * gpt )
{
    page_t * page;
    xptr_t   page_xp;

    gpt_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );

    // check page size
    assert( (CONFIG_PPM_PAGE_SIZE == 4096) , __FUNCTION__ ,
            "for TSAR, the page must be 4 Kbytes\n" );

    // allocate 2 physical pages for the PT1 (2048 entries of 4 bytes)
    kmem_req_t req;
    req.type  = KMEM_PAGE;
    req.size  = 1;                     // 2 small pages
    req.flags = AF_KERNEL | AF_ZERO;
    page = (page_t *)kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate memory for PT1\n", __FUNCTION__ );
        return ENOMEM;
    }

    // initialize generic page table descriptor
    page_xp  = XPTR( local_cxy , page );
    gpt->ptr = GET_PTR( ppm_page2base( page_xp ) );
    gpt->ppn = ppm_page2ppn( page_xp );

    gpt_dmsg("\n[DBG] %s : core[%x,%d] exit\n",
             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );

    return 0;
}  // end hal_gpt_create()

///////////////////////////////////
void hal_gpt_destroy( gpt_t * gpt )
{
    uint32_t     ix1;
    uint32_t     ix2;
    uint32_t   * pt1;
    uint32_t     pte1;
    ppn_t        pt2_ppn;
    uint32_t   * pt2;
    uint32_t     attr;
    vpn_t        vpn;
    kmem_req_t   req;
    bool_t       is_ref;

    // get pointer on calling process
    process_t * process = CURRENT_THREAD->process;

    // compute is_ref
    is_ref = ( GET_CXY( process->ref_xp ) == local_cxy );

    // get pointer on PT1
    pt1 = (uint32_t *)gpt->ptr;

    // scan the PT1
    for( ix1 = 0 ; ix1 < 2048 ; ix1++ )
    {
        pte1 = pt1[ix1];

        if( (pte1 & TSAR_MMU_MAPPED) != 0 )      // PTE1 valid
        {
            if( (pte1 & TSAR_MMU_SMALL) == 0 )   // BIG page
            {
                if( (pte1 & TSAR_MMU_USER) != 0 )
                {
                    // warning message
                    printk("\n[WARNING] in %s : found an USER BIG page / ix1 = %d\n",
                           __FUNCTION__ , ix1 );

                    // release the big physical page if reference cluster
                    if( is_ref )
                    {
                        vpn = (vpn_t)(ix1 << TSAR_MMU_IX2_WIDTH);
                        hal_gpt_reset_pte( gpt , vpn );
                    }
                }
            }
            else                                 // SMALL page
            {
                // get local pointer on PT2
                pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
                xptr_t base_xp = ppm_ppn2base( pt2_ppn );
                pt2 = (uint32_t *)GET_PTR( base_xp );

                // scan the PT2 to release all entries VALID and USER if reference cluster
                if( is_ref )
                {
                    for( ix2 = 0 ; ix2 < 512 ; ix2++ )
                    {
                        attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
                        if( ((attr & TSAR_MMU_MAPPED) != 0 ) && ((attr & TSAR_MMU_USER) != 0) )
                        {
                            // release the physical page
                            vpn = (vpn_t)((ix1 << TSAR_MMU_IX2_WIDTH) | ix2);
                            hal_gpt_reset_pte( gpt , vpn );
                        }
                    }
                }

                // release the PT2
                req.type = KMEM_PAGE;
                req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt2 ) ) );
                kmem_free( &req );
            }
        }
    }

    // release the PT1
    req.type = KMEM_PAGE;
    req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt1 ) ) );
    kmem_free( &req );

}  // end hal_gpt_destroy()
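
////////////////////////////////////////////////////////////////////////////////////////
// Minimal usage sketch (illustration only, not called anywhere in this file; the vpn
// and ppn values are hypothetical) showing the expected lifecycle of a GPT :
//
//   gpt_t     gpt;
//   uint32_t  attr;
//   vpn_t     vpn = 0x300;                                 // hypothetical VPN
//   ppn_t     ppn = 0x12345;                               // hypothetical PPN
//
//   if( hal_gpt_create( &gpt ) ) return ENOMEM;            // allocate and zero the PT1
//   hal_gpt_set_pte( &gpt , vpn ,                          // map one small page
//                    GPT_MAPPED | GPT_SMALL | GPT_WRITABLE , ppn );
//   hal_gpt_get_pte( &gpt , vpn , &attr , &ppn );          // read the mapping back
//   hal_gpt_destroy( &gpt );                               // release all PT2s and the PT1
////////////////////////////////////////////////////////////////////////////////////////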
///////////////////////////////////////////
void hal_gpt_display( process_t * process )
{
    gpt_t    * gpt;
    uint32_t   ix1;
    uint32_t   ix2;
    uint32_t * pt1;
    uint32_t   pte1;
    ppn_t      pt2_ppn;
    uint32_t * pt2;
    uint32_t   pte2_attr;
    ppn_t      pte2_ppn;
    vpn_t      vpn;

    assert( (process != NULL) , __FUNCTION__ , "NULL process pointer\n");

    // get pointer on gpt
    gpt = &(process->vmm.gpt);

    // get pointer on PT1
    pt1 = (uint32_t *)gpt->ptr;

    printk("\n***** Generic Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
           process->pid , gpt , pt1 );

    // scan the PT1
    for( ix1 = 0 ; ix1 < 2048 ; ix1++ )
    {
        pte1 = pt1[ix1];

        if( (pte1 & TSAR_MMU_MAPPED) != 0 )
        {
            if( (pte1 & TSAR_MMU_SMALL) == 0 )  // BIG page
            {
                vpn = ix1 << 9;
                printk(" - BIG   : vpn = %x / pt1[%d] = %X\n", vpn , ix1 , pte1 );
            }
            else                                // SMALL pages
            {
                pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
                xptr_t base_xp = ppm_ppn2base ( pt2_ppn );
                pt2 = (uint32_t *)GET_PTR( base_xp );

                // scan the PT2
                for( ix2 = 0 ; ix2 < 512 ; ix2++ )
                {
                    pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
                    pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );

                    if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )
                    {
                        vpn = (ix1 << 9) | ix2;
                        printk(" - SMALL : vpn %X / ppn %X / attr %X\n",
                               vpn , pte2_ppn , tsar2gpt(pte2_attr) );
                    }
                }
            }
        }
    }
}  // end hal_gpt_display()
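
////////////////////////////////////////////////////////////////////////////////////////
// For reference : a PT2 is an array of 512 PTE2 entries, each entry being a pair of
// 32-bit words. The functions below all rely on this interleaved layout :
//
//   pt2[2*ix2]     : attributes word (TSAR_MMU_MAPPED, TSAR_MMU_WRITABLE, ...)
//   pt2[2*ix2 + 1] : PPN word (28 significant bits)
//
// The attributes word is always written last (and reset first), so that a concurrent
// reader never observes a MAPPED entry associated with a stale PPN.
////////////////////////////////////////////////////////////////////////////////////////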
///////////////////////////////////////
error_t hal_gpt_set_pte( gpt_t    * gpt,
                         vpn_t      vpn,
                         uint32_t   attr,     // generic GPT attributes
                         ppn_t      ppn )
{
    uint32_t  * pt1;        // PT1 base address
    uint32_t  * pte1_ptr;   // pointer on PT1 entry
    uint32_t    pte1;       // PT1 entry value
    ppn_t       pt2_ppn;    // PPN of PT2
    uint32_t  * pt2;        // PT2 base address
    uint32_t    small;      // requested PTE is for a small page
    bool_t      success;    // exit condition for while loop below
    page_t    * page;       // pointer on new physical page descriptor
    xptr_t      page_xp;    // extended pointer on new page descriptor
    uint32_t    ix1;        // index in PT1
    uint32_t    ix2;        // index in PT2
    uint32_t    tsar_attr;  // PTE attributes for TSAR MMU

    gpt_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / ppn = %x / gpt_attr = %x\n",
             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , ppn , attr );

    // compute indexes in PT1 and PT2
    ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    pt1   = gpt->ptr;
    small = attr & GPT_SMALL;

    // compute tsar_attr from generic attributes
    tsar_attr = gpt2tsar( attr );

    gpt_dmsg("\n[DBG] %s : core[%x,%d] / vpn = %x / &pt1 = %x / tsar_attr = %x\n",
             __FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pt1 , tsar_attr );

    // get pointer on PT1[ix1]
    pte1_ptr = &pt1[ix1];

    // A PTE1 (big page) is only set for kernel vsegs, during the kernel init phase.
    // There is no risk of concurrent access.
    if( small == 0 )
    {
        // get current pte1 value
        pte1 = *pte1_ptr;

        assert( (pte1 == 0) , __FUNCTION__ ,
                "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );

        // set the PTE1
        *pte1_ptr = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) |
                    ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
        hal_fence();
        return 0;
    }

    // From this point, the requested PTE is a PTE2 (small page)

    // loop to access PTE1 and get pointer on PT2
    success = false;
    do
    {
        // get current pte1 value
        pte1 = *pte1_ptr;

        gpt_dmsg("\n[DBG] %s : core[%x,%d] / vpn = %x / current_pte1 = %x\n",
                 __FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pte1 );

        // allocate a PT2 if PT1 entry not valid
        if( (pte1 & TSAR_MMU_MAPPED) == 0 )     // PT1 entry not valid
        {
            // allocate one physical page for the PT2
            kmem_req_t req;
            req.type  = KMEM_PAGE;
            req.size  = 0;                      // 1 small page
            req.flags = AF_KERNEL | AF_ZERO;
            page = (page_t *)kmem_alloc( &req );
            if( page == NULL )
            {
                printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
                return ENOMEM;
            }

            // get the PT2 PPN
            page_xp = XPTR( local_cxy , page );
            pt2_ppn = ppm_page2ppn( page_xp );

            // try to atomically set the PT1 entry
            pte1    = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn;
            success = hal_atomic_cas( pte1_ptr , 0 , pte1 );

            // release allocated PT2 if PT1 entry modified by another thread
            if( success == false ) ppm_free_pages( page );
        }
        else                                    // PT1 entry is valid
        {
            // This valid entry must be a PTD1
            assert( (pte1 & TSAR_MMU_SMALL) , __FUNCTION__ ,
                    "try to set a small page in a big PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );

            success = true;
        }

        // get PT2 base from pte1
        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
        pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );

        gpt_dmsg("\n[DBG] %s : core[%x,%d] / vpn = %x / pte1 = %x / &pt2 = %x\n",
                 __FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pte1 , pt2 );
    }
    while (success == false);

    // set PTE2 in this order : PPN first, then attributes
    pt2[2 * ix2 + 1] = ppn;
    hal_fence();
    pt2[2 * ix2]     = tsar_attr;
    hal_fence();

    gpt_dmsg("\n[DBG] %s : core[%x,%d] exit / vpn = %x / pte2_attr = %x / pte2_ppn = %x\n",
             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ,
             pt2[2 * ix2] , pt2[2 * ix2 + 1] );

    return 0;
}  // end of hal_gpt_set_pte()

/////////////////////////////////////
void hal_gpt_get_pte( gpt_t    * gpt,
                      vpn_t      vpn,
                      uint32_t * attr,
                      ppn_t    * ppn )
{
    uint32_t * pt1;
    uint32_t   pte1;
    uint32_t * pt2;
    ppn_t      pt2_ppn;

    uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    // get PTE1 value
    pt1  = gpt->ptr;
    pte1 = pt1[ix1];

    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
    {
        *attr = 0;
        *ppn  = 0;
        return;
    }

    if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
    {
        *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
        *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1 << TSAR_MMU_IX2_WIDTH) - 1));
    }
    else                                  // it's a PTD1
    {
        // compute PT2 base address
        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
        pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );

        *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] ) );
        *ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
    }
}  // end hal_gpt_get_pte()

///////////////////////////////////////
void hal_gpt_reset_pte( gpt_t * gpt,
                        vpn_t   vpn )
{
    uint32_t * pt1;        // PT1 base address
    uint32_t   pte1;       // PT1 entry value
    ppn_t      pt2_ppn;    // PPN of PT2
    uint32_t * pt2;        // PT2 base address
    ppn_t      ppn;        // PPN of the unmapped page

    uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    // get PTE1 value
    pt1  = (uint32_t *)gpt->ptr;
    pte1 = pt1[ix1];

    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
    {
        return;
    }

    if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
    {
        // get PPN
        ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );

        // unmap the big page
        pt1[ix1] = 0;
        hal_fence();

        return;
    }
    else                                  // it's a PTD1
    {
        // compute PT2 base address
        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
        pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );

        // get PPN
        ppn = TSAR_MMU_PPN_FROM_PTE2( pt2[2*ix2+1] );

        // unmap the small page
        pt2[2*ix2] = 0;                   // only attr is reset
        hal_fence();

        return;
    }
}  // end hal_gpt_reset_pte()
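
////////////////////////////////////////////////////////////////////////////////////////
// Worked example (illustration only, hypothetical values) of the big-page encoding used
// by hal_gpt_set_pte() and hal_gpt_get_pte() above :
//
//   A big page covers 512 small pages, so its base PPN is 512-aligned.
//   For ppn = 0x00052200, hal_gpt_set_pte() stores (ppn >> 9) = 0x291 in the PTE1,
//   and hal_gpt_get_pte() rebuilds the small-page PPN as
//   TSAR_MMU_PPN_FROM_PTE1(pte1) | (vpn & 0x1FF), i.e. 0x00052200 | ix2.
////////////////////////////////////////////////////////////////////////////////////////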
//////////////////////////////////////
error_t hal_gpt_lock_pte( gpt_t * gpt,
                          vpn_t   vpn )
{
    uint32_t          * pt1;        // PT1 base address
    volatile uint32_t * pte1_ptr;   // address of PT1 entry
    uint32_t            pte1;       // value of PT1 entry

    uint32_t          * pt2;        // PT2 base address
    ppn_t               pt2_ppn;    // PPN of PT2 page if missing PT2
    volatile uint32_t * pte2_ptr;   // address of PT2 entry

    uint32_t            attr;
    bool_t              atomic;
    page_t            * page;
    xptr_t              page_xp;

    uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );  // index in PT1
    uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );  // index in PT2

    // get the PTE1 value
    pt1      = gpt->ptr;
    pte1_ptr = &pt1[ix1];
    pte1     = *pte1_ptr;

    // If present, the page must be small
    if( ((pte1 & TSAR_MMU_MAPPED) != 0) && ((pte1 & TSAR_MMU_SMALL) == 0) )
    {
        printk("\n[ERROR] in %s : try to lock a big page / PT1[%d] = %x\n",
               __FUNCTION__ , ix1 , pte1 );
        return EINVAL;
    }

    if( (pte1 & TSAR_MMU_MAPPED) == 0 )  // missing PT1 entry
    {
        // allocate one physical page for PT2
        kmem_req_t req;
        req.type  = KMEM_PAGE;
        req.size  = 0;                   // 1 small page
        req.flags = AF_KERNEL | AF_ZERO;
        page = (page_t *)kmem_alloc( &req );

        if( page == NULL )
        {
            printk("\n[ERROR] in %s : try to set a small page but cannot allocate PT2\n",
                   __FUNCTION__ );
            return ENOMEM;
        }

        page_xp = XPTR( local_cxy , page );
        pt2_ppn = ppm_page2ppn( page_xp );
        pt2     = (uint32_t *)GET_PTR( ppm_page2base( page_xp ) );

        // try to set the PT1 entry
        do
        {
            atomic = hal_atomic_cas( (void*)pte1_ptr , 0 ,
                                     TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn );
        }
        while( (atomic == false) && (*pte1_ptr == 0) );

        if( atomic == false )  // missing PT2 has been allocated by another core
        {
            // release the allocated page
            ppm_free_pages( page );

            // read again the PTE1
            pte1 = *pte1_ptr;

            // get the PT2 base address
            pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
            pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
        }
    }
    else
    {
        // This valid entry must be a PTD1
        if( (pte1 & TSAR_MMU_SMALL) == 0 )
        {
            printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
                   __FUNCTION__ , ix1 , pte1 );
            return EINVAL;
        }

        // compute PPN of PT2 base
        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );

        // compute pointer on PT2 base
        pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
    }

    // from here we have the PT2 pointer

    // compute pointer on PTE2
    pte2_ptr = &pt2[2 * ix2];

    // try to atomically lock the PTE2 until success
    do
    {
        // busy waiting until TSAR_MMU_LOCKED == 0
        do
        {
            attr = *pte2_ptr;
            hal_rdbar();
        }
        while( (attr & TSAR_MMU_LOCKED) != 0 );

        atomic = hal_atomic_cas( (void*)pte2_ptr, attr , (attr | TSAR_MMU_LOCKED) );
    }
    while( atomic == 0 );

    return 0;
}  // end hal_gpt_lock_pte()
////////////////////////////////////////
error_t hal_gpt_unlock_pte( gpt_t * gpt,
                            vpn_t   vpn )
{
    uint32_t * pt1;        // PT1 base address
    uint32_t   pte1;       // value of PT1 entry
    uint32_t * pt2;        // PT2 base address
    ppn_t      pt2_ppn;    // PPN of PT2 page if missing PT2
    uint32_t * pte2_ptr;   // address of PT2 entry
    uint32_t   attr;       // PTE2 attribute

    // compute indexes in PT1 and PT2
    uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );  // index in PT1
    uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );  // index in PT2

    // get pointer on PT1 base
    pt1 = (uint32_t*)gpt->ptr;

    // get PTE1
    pte1 = pt1[ix1];

    // check PTE1 present and small page
    if( ((pte1 & TSAR_MMU_MAPPED) == 0) || ((pte1 & TSAR_MMU_SMALL) == 0) )
    {
        printk("\n[ERROR] in %s : try to unlock a big or undefined page / PT1[%d] = %x\n",
               __FUNCTION__ , ix1 , pte1 );
        return EINVAL;
    }

    // get pointer on PT2 base
    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );

    // get pointer on PTE2
    pte2_ptr = &pt2[2 * ix2];

    // get PTE2_ATTR
    attr = *pte2_ptr;

    // check PTE2 present and locked
    if( ((attr & TSAR_MMU_MAPPED) == 0) || ((attr & TSAR_MMU_LOCKED) == 0) )
    {
        printk("\n[ERROR] in %s : unlock an unlocked/unmapped page / PT1[%d] = %x\n",
               __FUNCTION__ , ix1 , pte1 );
        return EINVAL;
    }

    // reset GPT_LOCK
    *pte2_ptr = attr & ~TSAR_MMU_LOCKED;

    return 0;
}  // end hal_gpt_unlock_pte()
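
////////////////////////////////////////////////////////////////////////////////////////
// Typical lock/unlock usage (illustration only, assuming the caller handles errors) :
// the LOCKED bit is a software flag used to serialize updates of one PTE2.
//
//   if( hal_gpt_lock_pte( gpt , vpn ) ) return EINVAL;   // spin until the PTE2 is locked
//   // ... inspect and/or modify the PTE2 (e.g. hal_gpt_set_pte) ...
//   hal_gpt_unlock_pte( gpt , vpn );                     // release the LOCKED bit
////////////////////////////////////////////////////////////////////////////////////////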
///////////////////////////////////////////
error_t hal_gpt_pte_copy( gpt_t  * dst_gpt,
                          xptr_t   src_gpt_xp,
                          vpn_t    vpn,
                          bool_t   cow,
                          ppn_t  * ppn,
                          bool_t * mapped )
{
    uint32_t     ix1;            // index in PT1
    uint32_t     ix2;            // index in PT2

    cxy_t        src_cxy;        // SRC GPT cluster
    gpt_t      * src_gpt;        // SRC GPT local pointer

    uint32_t   * src_pt1;        // local pointer on SRC PT1
    uint32_t   * dst_pt1;        // local pointer on DST PT1
    uint32_t   * src_pt2;        // local pointer on SRC PT2
    uint32_t   * dst_pt2;        // local pointer on DST PT2

    kmem_req_t   req;            // for dynamic PT2 allocation

    uint32_t     src_pte1;
    uint32_t     dst_pte1;

    uint32_t     src_pte2_attr;
    uint32_t     src_pte2_ppn;

    page_t     * page;
    xptr_t       page_xp;

    ppn_t        src_pt2_ppn;
    ppn_t        dst_pt2_ppn;

    gpt_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn %x\n",
             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );

    // get remote src_gpt cluster and local pointer
    src_cxy = GET_CXY( src_gpt_xp );
    src_gpt = (gpt_t *)GET_PTR( src_gpt_xp );

    // get remote src_pt1 and local dst_pt1
    src_pt1 = (uint32_t *)hal_remote_lpt( XPTR( src_cxy , &src_gpt->ptr ) );
    dst_pt1 = (uint32_t *)dst_gpt->ptr;

    // check src_pt1 and dst_pt1 existence
    assert( (src_pt1 != NULL) , __FUNCTION__ , "src_pt1 does not exist\n");
    assert( (dst_pt1 != NULL) , __FUNCTION__ , "dst_pt1 does not exist\n");

    ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    // get src_pte1
    src_pte1 = hal_remote_lw( XPTR( src_cxy , &src_pt1[ix1] ) );

    // do nothing if src_pte1 not MAPPED or not SMALL
    if( (src_pte1 & TSAR_MMU_MAPPED) && (src_pte1 & TSAR_MMU_SMALL) )
    {
        // get dst_pt1 entry
        dst_pte1 = dst_pt1[ix1];

        // map dst_pte1 if required
        if( (dst_pte1 & TSAR_MMU_MAPPED) == 0 )
        {
            // allocate one physical page for a new PT2
            req.type  = KMEM_PAGE;
            req.size  = 0;                    // 1 small page
            req.flags = AF_KERNEL | AF_ZERO;
            page = (page_t *)kmem_alloc( &req );
            if( page == NULL )
            {
                printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
                return -1;
            }

            // build extended pointer on page descriptor
            page_xp = XPTR( local_cxy , page );

            // get PPN for this new PT2
            dst_pt2_ppn = (ppn_t)ppm_page2ppn( page_xp );

            // build the new dst_pte1
            dst_pte1 = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | dst_pt2_ppn;

            // register it in DST_GPT
            dst_pt1[ix1] = dst_pte1;
        }

        // get pointer on src_pt2
        src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
        src_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );

        // get pointer on dst_pt2
        dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
        dst_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );

        // get attr and ppn from SRC_PT2
        src_pte2_attr = hal_remote_lw( XPTR( src_cxy , &src_pt2[2 * ix2]     ) );
        src_pte2_ppn  = hal_remote_lw( XPTR( src_cxy , &src_pt2[2 * ix2 + 1] ) );

        // do nothing if src_pte2 not MAPPED
        if( (src_pte2_attr & TSAR_MMU_MAPPED) != 0 )
        {
            // set PPN in DST PTE2
            dst_pt2[2*ix2+1] = src_pte2_ppn;

            // set attributes in DST PTE2
            if( cow && (src_pte2_attr & TSAR_MMU_WRITABLE) )
            {
                dst_pt2[2*ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
            }
            else
            {
                dst_pt2[2*ix2] = src_pte2_attr;
            }

            // return "successfully copied"
            *mapped = true;
            *ppn    = src_pte2_ppn;

            gpt_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn %x / copy done\n",
                     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );

            hal_fence();
            return 0;
        }   // end if PTE2 mapped
    }   // end if PTE1 mapped

    // return "nothing done"
    *mapped = false;
    *ppn    = 0;

    gpt_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn %x / nothing done\n",
             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );

    hal_fence();
    return 0;
}  // end hal_gpt_pte_copy()

//////////////////////////////////////////
bool_t hal_gpt_pte_is_mapped( gpt_t * gpt,
                              vpn_t   vpn )
{
    uint32_t * pt1;
    uint32_t   pte1;
    uint32_t   pte2_attr;
    uint32_t * pt2;
    ppn_t      pt2_ppn;

    uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    // get PTE1 value
    pt1  = gpt->ptr;
    pte1 = pt1[ix1];

    if( (pte1 & TSAR_MMU_MAPPED) == 0 ) return false;

    if( (pte1 & TSAR_MMU_SMALL) == 0 ) return false;

    // compute PT2 base address
    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );

    // get pte2_attr
    pte2_attr = pt2[2*ix2];

    if( (pte2_attr & TSAR_MMU_MAPPED) == 0 ) return false;
    else                                     return true;
}  // end hal_gpt_pte_is_mapped()

///////////////////////////////////////
bool_t hal_gpt_pte_is_cow( gpt_t * gpt,
                           vpn_t   vpn )
{
    uint32_t * pt1;
    uint32_t   pte1;
    uint32_t   pte2_attr;
    uint32_t * pt2;
    ppn_t      pt2_ppn;

    uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    // get PTE1 value
    pt1  = gpt->ptr;
    pte1 = pt1[ix1];

    if( (pte1 & TSAR_MMU_MAPPED) == 0 ) return false;

    if( (pte1 & TSAR_MMU_SMALL) == 0 ) return false;

    // compute PT2 base address
    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );

    // get pte2_attr
    pte2_attr = pt2[2*ix2];

    if( (pte2_attr & TSAR_MMU_MAPPED) == 0 ) return false;

    if( (pte2_attr & TSAR_MMU_COW) == 0 ) return false;
    else                                  return true;
}  // end hal_gpt_pte_is_cow()

/////////////////////////////////////////
void hal_gpt_flip_cow( bool_t  set_cow,
                       xptr_t  gpt_xp,
                       vpn_t   vpn_base,
                       vpn_t   vpn_size )
{
    cxy_t      gpt_cxy;
    gpt_t    * gpt_ptr;

    vpn_t      vpn;

    uint32_t   ix1;
    uint32_t   ix2;

    uint32_t * pt1;
    uint32_t   pte1;

    uint32_t * pt2;
    ppn_t      pt2_ppn;

    uint32_t   old_attr;
    uint32_t   new_attr;

    // get GPT cluster and local pointer
    gpt_cxy = GET_CXY( gpt_xp );
    gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );

    // get local PT1 pointer
    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

    // loop on pages
    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
    {
        ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
        ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

        // get PTE1 value
        pte1 = hal_remote_lw( XPTR( gpt_cxy , &pt1[ix1] ) );

        // only MAPPED & SMALL PTEs are modified
        if( (pte1 & TSAR_MMU_MAPPED) && (pte1 & TSAR_MMU_SMALL) )
        {
            // compute PT2 base address
            pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
            pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );

            assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ), __FUNCTION__,
                    "PT2 and PT1 must be in the same cluster\n");

            // get current PTE2 attributes
            old_attr = hal_remote_lw( XPTR( gpt_cxy , &pt2[2*ix2] ) );

            // only MAPPED PTEs are modified
            if( old_attr & TSAR_MMU_MAPPED )
            {
                if( (set_cow != 0) && (old_attr & TSAR_MMU_WRITABLE) )
                {
                    new_attr = (old_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
                    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] ) , new_attr );
                }
                if( (set_cow == 0) && (old_attr & TSAR_MMU_COW ) )
                {
                    new_attr = (old_attr | TSAR_MMU_WRITABLE) & (~TSAR_MMU_COW);
                    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] ) , new_attr );
                }
            }   // end if PTE2 mapped
        }   // end if PTE1 mapped
    }   // end loop on pages
}  // end hal_gpt_flip_cow()
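
////////////////////////////////////////////////////////////////////////////////////////
// Worked example (illustration only, hypothetical attribute value) of the COW flipping
// performed by hal_gpt_pte_copy() and hal_gpt_flip_cow() above :
//
//   old_attr = MAPPED | CACHABLE | WRITABLE | USER;                    // 0x8D000000
//   set cow  : (old_attr | TSAR_MMU_COW) & ~TSAR_MMU_WRITABLE          // 0x89000001
//   clear    : (0x89000001 | TSAR_MMU_WRITABLE) & ~TSAR_MMU_COW        // 0x8D000000
//
// A write to a COW page therefore faults, and the fault handler can restore WRITABLE
// after duplicating the physical page.
////////////////////////////////////////////////////////////////////////////////////////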
//////////////////////////////////////////
void hal_gpt_update_pte( xptr_t    gpt_xp,
                         vpn_t     vpn,
                         uint32_t  attr,     // generic GPT attributes
                         ppn_t     ppn )
{
    uint32_t * pt1;        // PT1 base address
    uint32_t   pte1;       // PT1 entry value
    ppn_t      pt2_ppn;    // PPN of PT2
    uint32_t * pt2;        // PT2 base address

    uint32_t   ix1;        // index in PT1
    uint32_t   ix2;        // index in PT2

    uint32_t   tsar_attr;  // PTE attributes for TSAR MMU

    // check attr argument MAPPED and SMALL
    if( (attr & GPT_MAPPED) == 0 ) return;
    if( (attr & GPT_SMALL ) == 0 ) return;

    // get cluster and local pointer on remote GPT
    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
    gpt_t * gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );

    // compute indexes in PT1 and PT2
    ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

    // get PT1 base
    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

    // compute tsar_attr from generic attributes
    tsar_attr = gpt2tsar( attr );

    // get PTE1 value
    pte1 = hal_remote_lw( XPTR( gpt_cxy , &pt1[ix1] ) );

    if( (pte1 & TSAR_MMU_MAPPED) == 0 ) return;
    if( (pte1 & TSAR_MMU_SMALL ) == 0 ) return;

    // get PT2 base from PTE1
    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );

    // reset the PTE2 attributes first, so the entry is never seen MAPPED with a stale PPN
    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2] ) , 0 );
    hal_fence();

    // set the new PTE2 in this order : PPN first, then attributes
    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2 + 1] ) , ppn );
    hal_fence();
    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2] ) , tsar_attr );
    hal_fence();

}  // end hal_gpt_update_pte()
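
////////////////////////////////////////////////////////////////////////////////////////
// Minimal usage sketch for a remote update (illustration only; remote_cxy, gpt, vpn and
// new_ppn are hypothetical names standing for the target cluster, its GPT descriptor,
// the virtual page and the new physical page) :
//
//   xptr_t gpt_xp = XPTR( remote_cxy , gpt );
//   hal_gpt_update_pte( gpt_xp , vpn ,
//                       GPT_MAPPED | GPT_SMALL | GPT_WRITABLE | GPT_USER , new_ppn );
////////////////////////////////////////////////////////////////////////////////////////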