Changeset 635
- Timestamp: Jun 26, 2019, 11:42:37 AM (4 years ago)
- Location: trunk
- Files: 81 edited
trunk/hal/generic/hal_gpt.h
r632 → r635

@@ lines 181-185 @@
   * @ src_gpt_xp  : [in]  extended pointer on remote source GPT.
   * @ src_vpn     : [in]  vpn defining the PTE in the source GPT.
 - * @ cow         : [in]  activate the COPY-On-Write mechanism if true.
 + * @ cow         : [in]  set COW flag & reset WRITABLE flag if true.
   * @ ppn         : [out] PPN value (only if mapped is true).
   * @ mapped      : [out] true if src_gpt[vpn] actually copied to dst_gpt[vpn].
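The new wording describes what the TSAR implementation does to the copied PTE when <cow> is true: the COW bit is raised and the WRITABLE bit is cleared, so the first write through either mapping faults and triggers the actual copy. A minimal sketch of that attribute manipulation, reusing the TSAR_PTE_COW / TSAR_PTE_WRITABLE flags that appear in hal_gpt.c below; the surrounding variable names are assumptions, not the actual hal_gpt_pte_copy() code:

    // sketch (assumed context): make a copied PTE2 copy-on-write
    uint32_t attr = hal_remote_l32( src_pte2_xp );   // read source PTE2 attributes
    if( cow && (attr & TSAR_PTE_WRITABLE) )
    {
        attr |=  TSAR_PTE_COW;                       // mark the mapping copy-on-write
        attr &= ~TSAR_PTE_WRITABLE;                  // next write raises a page fault
    }
    hal_remote_s32( dst_pte2_xp , attr );            // install attributes in destination GPT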
trunk/hal/generic/hal_vmm.h
r625 → r635

@@ lines 64-72 @@
   * It displays all valit GPT entries when the <mapping> argument is true.
   ****************************************************************************************
 - * @ process    : local pointer on user process descriptor.
 - * @ return 0 if success / return ENOMEM if failure.
 + * @ process_xp : extended pointer on process descriptor.
 + * @ mapping    : display all mapped GPT entries when true.
   ***************************************************************************************/
 - void hal_vmm_display( struct process_s * process,
 -                       bool_t             mapping );
 + void hal_vmm_display( xptr_t  process_xp,
 +                       bool_t  mapping );
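Because the function now takes an extended pointer, a thread in any cluster can dump the VSL and GPT of a process whose descriptor lives in another cluster. A short usage sketch built only from the declared signature (the process variable and cluster value are illustrative):

    // sketch: displaying the VMM of a possibly remote process
    process_t * process = ...;                  // local pointer on some process descriptor
    cxy_t       cxy     = local_cxy;            // cluster where that descriptor lives

    hal_vmm_display( XPTR( cxy , process ),     // extended pointer instead of local pointer
                     true );                    // also dump the mapped GPT entries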
trunk/hal/tsar_mips32/core/hal_context.c
r625 → r635

@@ lines 2-6 @@
   * hal_context.c - implementation of Thread Context API for TSAR-MIPS32
   *
 - * Author  Alain Greiner (2016)
 + * Author  Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites

@@ lines 30-34 @@
   #include <printk.h>
   #include <vmm.h>
 + #include <bits.h>
   #include <core.h>
   #include <cluster.h>

@@ lines 37-55 @@
 - // Define various SR initialisation values for TSAR-MIPS32
 + // Define various SR initialisation values for the TSAR-MIPS32 architecture.
   …
 - // This structure defines the CPU context for TSAR MIPS32.
 + // This structure defines the CPU context for the TSAR-MIPS32 architecture.
   // The following registers are saved/restored at each context switch:
   // - GPR : all, but (zero, k0, k1), plus (hi, lo)
   …
   // WARNING : check the two CONFIG_CPU_CTX_SIZE & CONFIG_FPU_CTX_SIZE configuration
 - //           parameters swhen modifying this structure.
 + //           parameters when modifying this structure.

@@ lines 102-105 @@
 - // This structure defines the fpu_context for TSAR MIPS32.
 + // This structure defines the fpu_context for the TSAR MIPS32 architecture.

@@ lines 125-134 @@ (CPU context allocation)
   // allocate memory for cpu_context
   kmem_req_t req;
 - req.type  = KMEM_CPU_CTX;
 + req.type  = KMEM_KCM;
 + req.order = bits_log2( sizeof(hal_cpu_context_t) );
   req.flags = AF_KERNEL | AF_ZERO;

 - hal_cpu_context_t * context = (hal_cpu_context_t *)kmem_alloc( &req );
 + hal_cpu_context_t * context = kmem_alloc( &req );

   if( context == NULL ) return -1;

@@ hal_cpu_context_fork() @@
   void hal_cpu_context_fork( xptr_t child_xp )
   {
 -     // get pointer on calling thread
 -     thread_t * this = CURRENT_THREAD;
 +     cxy_t               parent_cxy;      // parent thread cluster
 +     thread_t          * parent_ptr;      // local pointer on parent thread
 +     hal_cpu_context_t * parent_context;  // local pointer on parent cpu_context
 +     uint32_t          * parent_uzone;    // local pointer on parent uzone (in kernel stack)
 +     char              * parent_ksp;      // kernel stack pointer on parent kernel stack
 +     uint32_t            parent_us_base;  // parent user stack base value
 +
 +     cxy_t               child_cxy;       // child thread cluster
 +     thread_t          * child_ptr;       // local pointer on child thread
 +     hal_cpu_context_t * child_context;   // local pointer on child cpu_context
 +     uint32_t          * child_uzone;     // local pointer on child uzone (in kernel stack)
 +     char              * child_ksp;       // kernel stack pointer on child kernel stack
 +     uint32_t            child_us_base;   // child user stack base value
 +
 +     process_t         * child_process;   // local pointer on child process
 +     uint32_t            child_pt_ppn;    // PPN of child process PT1
 +     vseg_t            * child_us_vseg;   // local pointer on child user stack vseg

       // allocate a local CPU context in parent kernel stack
       hal_cpu_context_t context;

 -     // get local parent thread cluster and local pointer
 -     cxy_t      parent_cxy = local_cxy;
 -     thread_t * parent_ptr = CURRENT_THREAD;
 -
 -     // get remote child thread cluster and local pointer
 -     cxy_t      child_cxy = GET_CXY( child_xp );
 -     thread_t * child_ptr = GET_PTR( child_xp );
 -
 -     // get local pointer on remote child cpu context
 -     char * child_context_ptr = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );
 +     // get (local) parent thread cluster and local pointer
 +     parent_cxy = local_cxy;
 +     parent_ptr = CURRENT_THREAD;
 +
 +     // get (remote) child thread cluster and local pointer
 +     child_cxy = GET_CXY( child_xp );
 +     child_ptr = GET_PTR( child_xp );
 +
 +     // get local pointer on (local) parent CPU context
 +     parent_context = parent_ptr->cpu_context;
 +
 +     // get local pointer on (remote) child CPU context
 +     child_context = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );

       // get local pointer on remote child process
 -     process_t * process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
 +     child_process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );

       // get ppn of remote child process page table
 -     uint32_t pt_ppn = hal_remote_l32( XPTR(child_cxy , &process->vmm.gpt.ppn) );
 -
 -     // get local pointer on parent uzone from parent thread descriptor
 -     uint32_t * parent_uzone = parent_ptr->uzone_current;
 -
 -     // compute local pointer on child uzone
 -     uint32_t * child_uzone = (uint32_t *)( (intptr_t)parent_uzone +
 -                                            (intptr_t)child_ptr -
 -                                            (intptr_t)parent_ptr );
 +     child_pt_ppn = hal_remote_l32( XPTR(child_cxy , &child_process->vmm.gpt.ppn) );
 +
 +     // get local pointer on local parent uzone (in parent kernel stack)
 +     parent_uzone = parent_ptr->uzone_current;
 +
 +     // compute local pointer on remote child uzone (in child kernel stack)
 +     child_uzone = (uint32_t *)( (intptr_t)parent_uzone +
 +                                 (intptr_t)child_ptr -
 +                                 (intptr_t)parent_ptr );

       // update the uzone pointer in child thread descriptor
       …
 +     // get user stack base for parent thread
 +     parent_us_base = parent_ptr->user_stack_vseg->min;
 +
 +     // get user stack base for child thread
 +     child_us_vseg = hal_remote_lpt( XPTR( child_cxy , &child_ptr->user_stack_vseg ) );
 +     child_us_base = hal_remote_l32( XPTR( child_cxy , &child_us_vseg->min ) );
 +
 +     // get current value of kernel stack pointer in parent kernel stack
 +     parent_ksp = (char *)hal_get_sp();
 +
 +     // compute value of kernel stack pointer in child kernel stack
 +     child_ksp = (char *)((intptr_t)parent_ksp +
 +                          (intptr_t)child_ptr -
 +                          (intptr_t)parent_ptr );
 +
 +     // compute number of bytes to be copied, depending on current value of parent_ksp
 +     uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_ksp;

       // copy parent kernel stack content to child thread descriptor
       // (this includes the uzone, that is allocated in the kernel stack)
 -     char * parent_ksp = (char *)hal_get_sp();
 -     char * child_ksp  = (char *)((intptr_t)parent_ksp +
 -                                  (intptr_t)child_ptr -
 -                                  (intptr_t)parent_ptr );
 -
 -     uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_ksp;
 -
       hal_remote_memcpy( XPTR( child_cxy , child_ksp ),
                          XPTR( local_cxy , parent_ksp ),
                          size );

 -     // patch the user stack pointer slot in the child uzone[UZ_SP]
 -     // because parent and child use the same offset to access the user stack,
 -     // but parent and child do not have the same user stack base address.
 -     uint32_t parent_us_base = parent_ptr->user_stack_vseg->min;
 -     vseg_t * child_us_vseg  = hal_remote_lpt( XPTR( child_cxy , &child_ptr->user_stack_vseg ) );
 -     uint32_t child_us_base  = hal_remote_l32( XPTR( child_cxy , &child_us_vseg->min ) );
 -     uint32_t parent_usp     = parent_uzone[UZ_SP];
 -     uint32_t child_usp      = parent_usp + child_us_base - parent_us_base;
 -
 -     hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_SP] ) , child_usp );
 -
 -     // save current values of CPU registers to local CPU context
 +     // save current values of CPU registers to local copy of CPU context
       hal_do_cpu_save( &context );

 +     // update three slots in this local CPU context
 +     context.sp_29   = (uint32_t)child_ksp;
 +     context.c0_th   = (uint32_t)child_ptr;
 +     context.c2_ptpr = (uint32_t)child_pt_ppn >> 1;
 +
 -     // From this point, both parent and child can execute the following code,
 +     // From this point, both parent and child execute the following code,
       // but child thread will only execute it after being unblocked by parent thread.
       // They can be distinguished by the (CURRENT_THREAD,local_cxy) values,
       // and we must re-initialise the calling thread pointer from c0_th register

 -     this = CURRENT_THREAD;
 +     thread_t * this = CURRENT_THREAD;

       if( (this == parent_ptr) && (local_cxy == parent_cxy) )   // parent thread
       {
 -         // patch 4 slots in the local CPU context: the sp_29 / c0_th / C0_sr / c2_ptpr
 -         // slots are not identical in parent and child
 -         context.sp_29   = context.sp_29 + (intptr_t)child_ptr - (intptr_t)parent_ptr;
 -         context.c0_th   = (uint32_t)child_ptr;
 -         context.c0_sr   = SR_SYS_MODE;
 -         context.c2_ptpr = pt_ppn >> 1;
 -
 -         // copy this patched context to remote child context
 -         hal_remote_memcpy( XPTR( child_cxy , child_context_ptr ),
 +         // parent thread must update four slots in child uzone
 +         // - UZ_TH   : parent and child have different threads descriptors
 +         // - UZ_SP   : parent and child have different user stack base addresses.
 +         // - UZ_PTPR : parent and child use different Generic Page Tables
 +
 +         // parent thread computes values for child thread
 +         uint32_t child_sp   = parent_uzone[UZ_SP] + child_us_base - parent_us_base;
 +         uint32_t child_th   = (uint32_t)child_ptr;
 +         uint32_t child_ptpr = (uint32_t)child_pt_ppn >> 1;
 +
 +         // parent thread updates the child uzone
 +         hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_SP] )   , child_sp );
 +         hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_TH] )   , child_th );
 +         hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_PTPR] ) , child_ptpr );
 +
 +         // parent thread copies the local context to remote child context
 +         hal_remote_memcpy( XPTR( child_cxy , child_context ),
                              XPTR( local_cxy , &context ) ,
                              sizeof( hal_cpu_context_t ) );

 -         // parent thread unblock child thread
 +         // parent thread unblocks child thread
           thread_unblock( XPTR( child_cxy , child_ptr ) , THREAD_BLOCKED_GLOBAL );

 [ the DEBUG_HAL_CONTEXT traces in this function are also reworked: messages use
   parent_ptr->process->pid / parent_ptr->trdid instead of this->..., new traces report the
   parent/child user stack bases, kernel stack pointers and child uzone[UZ_SP] value, and the
   final trace prints the child (pid,trdid) read with hal_remote_l32() from the child cluster ]

@@ CPU context release @@
   if( ctx != NULL )
   {
 -     req.type = KMEM_CPU_CTX;
 +     req.type = KMEM_KCM;
       req.ptr  = ctx;
       kmem_free( &req );

@@ FPU context allocation @@
   // allocate memory for fpu_context
   kmem_req_t req;
 - req.type  = KMEM_FPU_CTX;
 + req.type  = KMEM_KCM;
   req.flags = AF_KERNEL | AF_ZERO;
 + req.order = bits_log2( sizeof(hal_fpu_context_t) );

 - hal_fpu_context_t * context = (hal_fpu_context_t *)kmem_alloc( &req );
 + hal_fpu_context_t * context = kmem_alloc( &req );

   if( context == NULL ) return -1;

@@ FPU context release @@
   if( context != NULL )
   {
 -     req.type = KMEM_FPU_CTX;
 +     req.type = KMEM_KCM;
       req.ptr  = context;
       kmem_free( &req );
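The fork code relies on one trick that is worth spelling out: the kernel stack and the uzone are embedded in the thread descriptor, and parent and child descriptors have identical layouts, so any address inside the parent stack maps onto the child stack by adding the difference of the two descriptor base addresses. A hedged helper restating that arithmetic (the helper name is illustrative, not part of the source):

    // sketch: address translation used by hal_cpu_context_fork() for ksp and uzone pointers
    static inline void * child_addr_from_parent( void     * parent_addr,
                                                 thread_t * parent_ptr,
                                                 thread_t * child_ptr )
    {
        // same offset inside the thread descriptor, different descriptor base
        return (void *)( (intptr_t)parent_addr +
                         (intptr_t)child_ptr   -
                         (intptr_t)parent_ptr );
    }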
trunk/hal/tsar_mips32/core/hal_exception.c
r632 → r635

@@ lines 189-201 @@
   uint32_t   excp_code;

 - // check thread type
 - if( CURRENT_THREAD->type != THREAD_USER )
 - {
 -     printk("\n[PANIC] in %s : illegal thread type %s\n",
 -     __FUNCTION__, thread_type_str(CURRENT_THREAD->type) );
 -
 -     return EXCP_KERNEL_PANIC;
 - }
 -
   // get faulty thread process
   process = this->process;

@@ lines 438-490 @@ (user fatal errors now report the cycle)
   else   // undefined coprocessor
   {
 -     printk("\n[USER_ERROR] in %s for thread[%x,%x]\n"
 +     printk("\n[USER_ERROR] in %s for thread[%x,%x] / cycle %d\n"
              "  undefined coprocessor / epc %x\n",
 -            __FUNCTION__, this->process->pid, this->trdid, excPC );
 +            __FUNCTION__, this->process->pid, this->trdid,
 +            (uint32_t)hal_get_cycles() , excPC );

       error = EXCP_USER_ERROR;
   }
   …
 [ the same change — "/ cycle %d" added to the format string and (uint32_t)hal_get_cycles()
   added to the argument list — is applied to the XCODE_OVR (arithmetic overflow),
   XCODE_RI (reserved instruction), XCODE_ADEL (illegal data load address) and
   XCODE_ADES (illegal data store address) user fatal error cases ]
trunk/hal/tsar_mips32/core/hal_gpt.c
r633 → r635

@@ lines 2-6 @@
   * hal_gpt.c - implementation of the Generic Page Table API for TSAR-MIPS32
   *
 - * Author  Alain Greiner (2016,2017,2018)
 + * Author  Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites

@@ lines 70-75 @@
   #define TSAR_MMU_IX2_FROM_VPN( vpn )     (vpn & 0x1FF)

 - #define TSAR_MMU_PTBA_FROM_PTE1( pte1 )  (pte1 & 0x0FFFFFFF)
 - #define TSAR_MMU_PPN_FROM_PTE1( pte1 )   ((pte1 & 0x0007FFFF)<<9)
 + #define TSAR_MMU_PPN2_FROM_PTE1( pte1 )  (pte1 & 0x0FFFFFFF)
 + #define TSAR_MMU_PPN1_FROM_PTE1( pte1 )  ((pte1 & 0x0007FFFF)<<9)
   #define TSAR_MMU_ATTR_FROM_PTE1( pte1 )  (pte1 & 0xFFC00000)

@@ hal_gpt_create() @@
   error_t hal_gpt_create( gpt_t * gpt )
   {
 -     page_t * page;
 -     xptr_t   page_xp;
 +     void * base;

       thread_t * this = CURRENT_THREAD;
       …
       // check page size
 -     assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "for TSAR, the page size must be 4 Kbytes\n" );
 +     assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "the TSAR page size must be 4 Kbytes\n" );

       // allocates 2 physical pages for PT1
       kmem_req_t req;
 -     req.type  = KMEM_PAGE;
 -     req.size  = 1;                  // 2 small pages
 +     req.type  = KMEM_PPM;
 +     req.order = 1;                  // 2 small pages
       req.flags = AF_KERNEL | AF_ZERO;
 -     page = (page_t *)kmem_alloc( &req );
 -
 -     if( page == NULL )
 +     base = kmem_alloc( &req );
 +
 +     if( base == NULL )
       {
           printk("\n[PANIC] in %s : no memory for PT1 / process %x / cluster %x\n", …
       }

 -     // initialize generic page table descriptor
 -     page_xp  = XPTR( local_cxy , page );
 -     gpt->ptr = GET_PTR( ppm_page2base( page_xp ) );
 -     gpt->ppn = ppm_page2ppn( page_xp );
 +     gpt->ptr = base;
 +     gpt->ppn = ppm_base2ppn( XPTR( local_cxy , base ) );

   #if DEBUG_HAL_GPT_CREATE
 -     printk("\n[%s] : thread[%x,%x] exit / cycle %d\n",
 -     __FUNCTION__, this->process->pid, this->trdid, cycle );
 +     printk("\n[%s] thread[%x,%x] exit / pt1_base %x / pt1_ppn %x / cycle %d\n",
 +     __FUNCTION__, this->process->pid, this->trdid, gpt->ptr, gpt->ppn, cycle );
   #endif

@@ hal_gpt_destroy() @@
       {
           // get local pointer on PT2
 -         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
 -         xptr_t base_xp = ppm_ppn2base( pt2_ppn );
 -         pt2 = GET_PTR( base_xp );
 +         pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
 +         pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );

           // scan the PT2
           …
           // release the page allocated for the PT2
 -         req.type = KMEM_PAGE;
 -         req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt2 ) ) );
 +         req.type = KMEM_PPM;
 +         req.ptr  = pt2;
           kmem_free( &req );
       }
       …
       // release the PT1
 -     req.type = KMEM_PAGE;
 -     req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt1 ) ) );
 +     req.type = KMEM_PPM;
 +     req.ptr  = pt1;
       kmem_free( &req );

 [ the commented-out static hal_gpt_display() debug function (old lines 261-327, scanning PT1
   and PT2 and printing BIG / SMALL entries) is removed from the file ]

@@ PTE locking (DEBUG_HAL_GPT_LOCK_PTE) @@
 -     // If PTD1 is unmapped and unlocked, try to atomically lock this PT1 entry.
 -     // This PTD1 lock prevent multiple concurrent PT2 allocations
 +     // If PTE1 is unmapped and unlocked, try to atomically lock this PT1 entry.
 +     // This PTE1 locking prevent multiple concurrent PT2 allocations
       // - only the thread that successfully locked the PTE1 allocates a new PT2
       //   and updates the PTE1
       // - all other threads simply wait until the missing PTE1 is mapped.

       if( pte1 == 0 )
       {
           // try to atomically lock the PTE1 to prevent concurrent PT2 allocations
           atomic = hal_remote_atomic_cas( pte1_xp,
                                           pte1,
                                           pte1 | TSAR_PTE_LOCKED );
           if( atomic )
           {
               // allocate one 4 Kbytes physical page for PT2
 -             page_xp = ppm_remote_alloc_pages( gpt_cxy , 0 );
 -
 -             if( page_xp == XPTR_NULL )
 +             req.type  = KMEM_PPM;
 +             req.order = 0;
 +             req.flags = AF_ZERO | AF_KERNEL;
 +             pt2 = kmem_remote_alloc( gpt_cxy , &req );
 +
 +             if( pt2 == NULL )
               {
 -                 printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
 +                 printk("\n[ERROR] in %s : cannot allocate memory for PT2 in cluster %d\n",
 +                        __FUNCTION__, gpt_cxy );
                   return -1;
               }

               // get the PT2 PPN
 -             pt2_ppn = ppm_page2ppn( page_xp );
 +             pt2_ppn = ppm_base2ppn( XPTR( gpt_cxy , pt2 ) );

               // build PTE1
               pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;

               // set the PTE1 value in PT1 : this unlocks the PTE1
               hal_remote_s32( pte1_xp , pte1 );
               hal_fence();

@@ hal_gpt_get_pte() @@
       if( (pte1 & TSAR_PTE_SMALL) == 0 )         // it's a PTE1
       {
 -         // get PPN & ATTR from PT1
 +         // get PPN & ATTR
           *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
 -         *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
 +         *ppn  = TSAR_MMU_PPN1_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
       }
       else                                       // it's a PTE2
       {
           // compute PT2 base address
 -         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
 +         pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
           pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );

@@ PTE copy (DEBUG_HAL_GPT_COPY) @@
 -     // get remote src_gpt cluster and local pointer
 -     src_cxy = GET_CXY( src_gpt_xp );
 -     src_gpt = GET_PTR( src_gpt_xp );
 -
       // get remote src_pt1 and local dst_pt1
       …
 -     // map dst_pte1 if required
 +     // map dst_pte1 when this entry is not mapped
       if( (dst_pte1 & TSAR_PTE_MAPPED) == 0 )
       {
           // allocate one physical page for a new PT2
 -         req.type  = KMEM_PAGE;
 -         req.size  = 0;                // 1 small page
 +         req.type  = KMEM_PPM;
 +         req.order = 0;                // 1 small page
           req.flags = AF_KERNEL | AF_ZERO;
 -         page = (page_t *)kmem_alloc( &req );
 -
 -         if( page == NULL )
 +         dst_pt2 = kmem_alloc( &req );
 +
 +         if( dst_pt2 == NULL )
           {
               printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
               …
           }

           // get PPN for this new PT2
 -         dst_pt2_ppn = (ppn_t)ppm_page2ppn( page_xp );
 -
 -         // build the new dst_pte1
 +         dst_pt2_ppn = ppm_base2ppn( XPTR( local_cxy , dst_pt2 ) );
 +
 +         // build new dst_pte1
           dst_pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | dst_pt2_ppn;
           …
       }

       // get pointer on src_pt2
 -     src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
 +     src_pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( src_pte1 );
       src_pt2     = GET_PTR( ppm_ppn2base( src_pt2_ppn ) );

       // get pointer on dst_pt2
 -     dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
 +     dst_pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( dst_pte1 );
       dst_pt2     = GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );

@@ hal_gpt_set_cow() @@
 -     // loop on pages
 -     for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
 -     {
 -         ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
 -         ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
 -
 -         // get PTE1 value
 -         pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
 -
 -         // only MAPPED & SMALL PTEs are modified
 -         if( (pte1 & TSAR_PTE_MAPPED) && (pte1 & TSAR_PTE_SMALL) )
 -         {
 -             // compute PT2 base address
 -             pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
 -             pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 -
 -             assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ),
 -             "PT2 and PT1 must be in the same cluster\n");
 -
 -             // get current PTE2 attributes
 -             attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) );
 -
 -             // only MAPPED PTEs are modified
 -             if( attr & TSAR_PTE_MAPPED )
 -             {
 -                 attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
 -                 hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
 -             }
 -         }
 -     }  // end loop on pages
 +     vpn_min = vpn_base;
 +     vpn_max = vpn_base + vpn_size - 1;
 +
 +     ix1_min = TSAR_MMU_IX1_FROM_VPN( vpn_base );
 +     ix1_max = TSAR_MMU_IX1_FROM_VPN( vpn_max );
 +
 +     for( ix1 = ix1_min ; ix1 <= ix1_max ; ix1++ )
 +     {
 +         // get PTE1 value
 +         pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
 +
 +         // only MAPPED & SMALL PTEs are modified
 +         if( (pte1 & TSAR_PTE_MAPPED) && (pte1 & TSAR_PTE_SMALL) )
 +         {
 +             // get PT2 pointer
 +             pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
 +             pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 +
 +             ix2_min = (ix1 == ix1_min) ? TSAR_MMU_IX2_FROM_VPN(vpn_min) : 0;
 +             ix2_max = (ix1 == ix1_max) ? TSAR_MMU_IX2_FROM_VPN(vpn_max) : 511;
 +
 +             for( ix2 = ix2_min ; ix2 <= ix2_max ; ix2++ )
 +             {
 +                 // get current PTE2 attributes
 +                 attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) );
 +
 +                 // only MAPPED PTEs are modified
 +                 if( attr & TSAR_PTE_MAPPED )
 +                 {
 +                     attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
 +                     hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
 +                 }
 +             }  // end loop on ix2
 +         }
 +     }  // end loop on ix1

@@ PTE2 update (lines 1067-1116) @@
 -     xptr_t     pte2_xp;        // exended pointer on PTE2
 +     xptr_t     pte2_attr_xp;   // exended pointer on pte2.attr
 +     xptr_t     pte2_ppn_xp;    // exended pointer on pte2.ppn
       uint32_t   ix1;            // index in PT1
       uint32_t   ix2;            // index in PT2
 -     uint32_t   tsar_attr;      // PTE attributes for TSAR MMU
       …
 -     // compute tsar_attr from generic attributes
 -     tsar_attr = gpt2tsar( attr );
 -
       // get PTE1 value
       pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );

       // check MAPPED and SMALL in target PTE1
 -     assert( ((pte1 & GPT_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
 -     assert( ((pte1 & GPT_SMALL ) != 0), "attribute SMALL must be set in target PTE1\n" );
 -
 -     // get PT2 base from PTE1
 -     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
 +     assert( ((pte1 & TSAR_PTE_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
 +     assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "attribute SMALL must be set in target PTE1\n" );
 +
 +     // get PT2 base
 +     pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
       pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );

 -     // get extended pointer on PTE2
 -     pte2_xp = XPTR( gpt_cxy , &pt2[2*ix2] );
 +     // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn
 +     pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
 +     pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );

       // check MAPPED in target PTE2
 -     assert( ((hal_remote_l32(pte2_xp) & GPT_MAPPED) != 0),
 +     assert( ((hal_remote_l32(pte2_attr_xp) & TSAR_PTE_MAPPED) != 0),
       "attribute MAPPED must be set in target PTE2\n" );

       // set PTE2 in this order
 -     hal_remote_s32( pte2_xp , ppn );
 +     hal_remote_s32( pte2_ppn_xp , ppn );
       hal_fence();
 -     hal_remote_s32( pte2_xp + 4 , tsar_attr );
 +     hal_remote_s32( pte2_attr_xp , gpt2tsar( attr ) );
       hal_fence();

 [ throughout the file the local variables ptd1 / ptd1_xp / pt1_ptr / pt2_ptr are renamed
   pte1 / pte1_xp / pt1 / pt2, PTD1 becomes PTE1 in comments and panic messages, the debug
   printk() headers lose the ":" after "[%s]", and new DEBUG_HAL_GPT_SET_PTE /
   DEBUG_HAL_GPT_SET_COW traces are added; the PTE unlock, set and reset functions only
   contain these renamings plus the TSAR_MMU_PPN2_FROM_PTE1() change ]
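The macro renaming makes the two readings of a PTE1 explicit: when the SMALL bit is set, the low 28 bits are the PPN of the page holding the second-level table (PPN2); for a big-page PTE1, bits 0-18 shifted left by 9 give the base PPN of the big page (PPN1). A small worked example under that reading, using only the macros defined above (the input value is illustrative, not taken from a real page table):

    // sketch: extracting both interpretations of a TSAR PTE1 (illustrative value)
    uint32_t pte1 = 0x4A012345;                         // example PTE1 bit pattern

    ppn_t    pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 ); // 0x0A012345 : PPN of the PT2 page
    ppn_t    big_ppn = TSAR_MMU_PPN1_FROM_PTE1( pte1 ); // 0x12345 << 9 = 0x2468A00 : big page base
    uint32_t attr    = TSAR_MMU_ATTR_FROM_PTE1( pte1 ); // 0x4A000000 : attribute bits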
trunk/hal/tsar_mips32/core/hal_vmm.c
r633 → r635

@@ lines 44-54 @@
   extern process_t          process_zero;
   extern chdev_directory_t  chdev_dir;
 + extern char *             lock_type_str[];

   //////////////////////////////////////////////////////////////////////////////////////////
   // This function is called by the process_zero_init() function during kernel_init.
   // It initializes the VMM of the kernel proces_zero (containing all kernel threads)
 - // in the local cluster: For TSAR, it registers one "kcode" vseg in kernel VSL,
 - // and registers one big page in slot[0] of kernel GPT.
 + // in the local cluster.
 + // For TSAR, it registers one "kcode" vseg in kernel VSL, and registers one big page
 + // in slot[0] of kernel GPT.
   //////////////////////////////////////////////////////////////////////////////////////////

@@ hal_vmm_kernel_init() @@
       gpt_t * gpt = &process_zero.vmm.gpt;

 -     // get cluster identifier
 -     cxy_t cxy = local_cxy;
 +     #if DEBUG_HAL_VMM
 +     thread_t * this = CURRENT_THREAD;
 +     printk("\n[%s] thread[%x,%x] enter in cluster %x\n",
 +     __FUNCTION__, this->process->pid, this->trdid, local_cxy );
 +     #endif

       // allocate memory for kernel GPT
       …
 -     __FUNCTION__ , cxy );
 +     __FUNCTION__ , local_cxy );
       hal_core_sleep();
       …
       // compute attr and ppn for one PTE1
       uint32_t attr = GPT_MAPPED | GPT_READABLE | GPT_CACHABLE | GPT_EXECUTABLE | GPT_GLOBAL;
 -     uint32_t ppn  = cxy << 20;
 +     uint32_t ppn  = local_cxy << 20;

       // set PT1[0]
 -     hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn );
 +     hal_gpt_set_pte( XPTR( local_cxy , gpt ) , 0 , attr , ppn );
       …
       vseg_t * vseg = vmm_create_vseg( …
                                        info->kcode_base,
                                        info->kcode_size,
 -                                      0, 0,
 -                                      XPTR_NULL,
 +                                      0, 0,              // file ofset and file size (unused)
 +                                      XPTR_NULL,         // no mapper
                                        local_cxy );

 [ the DEBUG_HAL_VMM traces in this function are reworded ("created GPT PT1",
   "mapped PT1[0] in cluster %d", "registered kcode vseg[%x,%x] in cluster %x") and every
   remaining use of the removed cxy variable becomes local_cxy ]

@@ hal_vmm_display() @@
 - void hal_vmm_display( process_t * process,
 -                       bool_t      mapping )
 + void hal_vmm_display( xptr_t  process_xp,
 +                       bool_t  mapping )
   {
 -     // get pointer on process VMM
 -     vmm_t * vmm = &process->vmm;
 +     // get target process cluster and local pointer
 +     process_t * process_ptr = GET_PTR( process_xp );
 +     cxy_t       process_cxy = GET_CXY( process_xp );
 +
 +     // get local pointer on target process VMM
 +     vmm_t * vmm = &process_ptr->vmm;

       // get pointers on TXT0 chdev
       …
 -     // build extended pointer on TXT0 lock and VSL lock
 +     // build extended pointer on TXT0 lock
       xptr_t txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
 -     xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
 -
 -     // get root of vsegs list
 -     xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
 -
 -     // get the locks protecting TXT0 , VSL, and GPT
 +
 +     // build extended pointers on VSL lock and VSL root
 +     xptr_t vsl_root_xp = XPTR( process_cxy , &vmm->vsegs_root );
 +     xptr_t vsl_lock_xp = XPTR( process_cxy , &vmm->vsl_lock );
 +
 +     // get the locks protecting TXT0 and VSL
       remote_rwlock_rd_acquire( vsl_lock_xp );
       remote_busylock_acquire( txt_lock_xp );

 -     nolock_printk("\n***** VSL and GPT for process %x in cluster %x / PT1 = %x\n",
 -     process->pid , local_cxy , vmm->gpt.ptr );
 -
 -     if( xlist_is_empty( root_xp ) )
 +     // get PID and PT1 values
 +     pid_t      pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
 +     uint32_t * pt1 = hal_remote_lpt( XPTR( process_cxy , &vmm->gpt.ptr ) );
 +
 +     nolock_printk("\n***** VSL and GPT / process %x / cluster %x / PT1 %x / cycle %d\n",
 +     pid , process_cxy , pt1 , (uint32_t)hal_get_cycles() );
 +
 +     if( xlist_is_empty( vsl_root_xp ) )
       {
           nolock_printk("     ... no vsegs registered\n");
       …
 -         XLIST_FOREACH( root_xp , iter_xp )
 +         XLIST_FOREACH( vsl_root_xp , iter_xp )
           {
 -             vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
 -             vseg    = GET_PTR( vseg_xp );
 +             vseg_xp  = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
 +             vseg_ptr = GET_PTR( vseg_xp );
 +             vseg_cxy = GET_CXY( vseg_xp );
 +
 +             type     = hal_remote_l32( XPTR( vseg_cxy , &vseg_ptr->type ) );
 +             min      = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->min ) );
 +             max      = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->max ) );
 +             vpn_size = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->vpn_size ) );
 +             vpn_base = (intptr_t)hal_remote_lpt( XPTR( vseg_cxy , &vseg_ptr->vpn_base ) );

               nolock_printk(" - %s : base = %X / size = %X / npages = %d\n",
 -             vseg_type_str( vseg->type ), vseg->min, vseg->max - vseg->min, vseg->vpn_size );
 +             vseg_type_str(type), min, max - min, vpn_size );

               if( mapping )
               {
 -                 vpn_t vpn     = vseg->vpn_base;
 -                 vpn_t vpn_max = vpn + vseg->vpn_size;
 +                 vpn_t vpn     = vpn_base;
 +                 vpn_t vpn_max = vpn_base + vpn_size;
                   ppn_t ppn;
                   …
                   while( vpn < vpn_max )   // scan the PTEs
                   {
 -                     hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
 +                     hal_gpt_get_pte( XPTR( process_cxy , &vmm->gpt ) , vpn , &attr , &ppn );

                       if( attr & GPT_MAPPED )
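The display function now works on a process in any cluster because every field access goes through an extended pointer and the hal_remote_* accessors instead of a direct dereference. A condensed restatement of that pattern, using only names that appear in the diff above (anything else would be an assumption):

    // sketch: the remote-access pattern used by the new hal_vmm_display()
    cxy_t       process_cxy = GET_CXY( process_xp );     // cluster holding the descriptor
    process_t * process_ptr = GET_PTR( process_xp );     // pointer valid in that cluster

    pid_t      pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    uint32_t * pt1 = hal_remote_lpt( XPTR( process_cxy , &process_ptr->vmm.gpt.ptr ) );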
trunk/hal/tsar_mips32/drivers/soclib_nic.c
r570 → r635

@@ lines 2-6 @@
   * soclib_nic.c - SOCLIB_NIC (Network Interface Controler) driver implementation.
   *
 - * Author  Alain Greiner (2016)
 + * Author  Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites

@@ lines 55-84 @@
       "chbuf descriptor exceeds one page" );

 - req.type  = KMEM_PAGE;
 - req.size  = 0;
 + req.type  = KMEM_PPM;
 + req.order = 0;
   req.flags = AF_KERNEL;

 - nic_chbuf_t * chbuf = (nic_chbuf_t *)kmem_alloc( &req );
 -
 - assert( (chbuf != NULL) ,
 -         "cannot allocate chbuf descriptor" );
 + nic_chbuf_t * chbuf = kmem_alloc( &req );
 +
 + if( chbuf == NULL )
 + {
 +     printk("\n[PANIC] in %s : cannot allocate chbuf descriptor\n",
 +     __FUNCTION__ );
 + }

   // initialise chbuf state
   …
   for( i = 0 ; i < CONFIG_NIC_CHBUF_DEPTH ; i++ )
   {
 -     uint32_t * container = (uint32_t *)kmem_alloc( &req );
 +     uint32_t * container = kmem_alloc( &req );

       assert( (container != NULL) ,
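The chbuf descriptor and its containers are now obtained with the generic KMEM_PPM request, where the order field gives the number of contiguous small pages as a power of two. A hedged sketch of the allocate/release pairing implied by this changeset (the buffer name and the error path are illustrative; kmem_free() with type and ptr is the pattern visible in hal_gpt.c and devfs.c):

    // sketch: KMEM_PPM request for one 4 Kbytes physical page
    kmem_req_t req;
    req.type  = KMEM_PPM;               // physical pages from the PPM allocator
    req.order = 0;                      // 2^0 = 1 small page
    req.flags = AF_KERNEL;              // kernel memory, not zeroed here

    void * buf = kmem_alloc( &req );
    if( buf == NULL ) { /* handle allocation failure */ }

    // ... use buf ...

    req.type = KMEM_PPM;                // release with the same type
    req.ptr  = buf;
    kmem_free( &req );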
trunk/hal/tsar_mips32/drivers/soclib_pic.c
r629 → r635

@@ lines 2-6 @@
   * soclib_pic.c - soclib PIC driver implementation.
   *
 - * Author  Alain Greiner (2016,2017)
 + * Author  Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites

@@ lines 27-31 @@
   #include <errno.h>
   #include <string.h>
 + #include <bits.h>
   #include <vfs.h>
   #include <rpc.h>

@@ lines 288-322 @@
   {
       // allocate memory for core extension
 -     req.type  = KMEM_GENERIC;
 -     req.size  = sizeof(soclib_pic_core_t);
 +     req.type  = KMEM_KCM;
 +     req.order = bits_log2( sizeof(soclib_pic_core_t) );
       req.flags = AF_KERNEL;
       core_ext_ptr = kmem_alloc( &req );

 -     assert( (core_ext_ptr != NULL) ,
 -             "cannot allocate memory for core extension\n");
 +     if( core_ext_ptr == NULL )
 +     {
 +         printk("\n[PANIC] in %s : cannot allocate memory for core extension\n",
 +         __FUNCTION__ );
 +     }

       // reset the HWI / WTI interrupt vectors
       …
       // allocate memory for cluster extension
 -     req.type  = KMEM_GENERIC;
 -     req.size  = sizeof(soclib_pic_cluster_t);
 +     req.type  = KMEM_KCM;
 +     req.order = bits_log2( sizeof(soclib_pic_cluster_t) );
       req.flags = AF_KERNEL;
       cluster_ext_ptr = kmem_alloc( &req );

 -     assert( (cluster_ext_ptr != NULL) ,
 -             "cannot allocate memory for cluster extension\n");
 +     if( cluster_ext_ptr == NULL )
 +     {
 +         printk("\n[PANIC] in %s : cannot allocate memory for cluster extension\n",
 +         __FUNCTION__ );
 +     }
 +
 +     assert( (cluster_ext_ptr != NULL) , "cannot allocate memory for cluster extension");

       // get XCU characteristics from the XCU config register
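Small kernel objects now go through the KCM allocator, with the order field derived from the object size via bits_log2() instead of passing a byte count in req.size. A minimal sketch of this sized-KCM pattern, taken from the request fields shown above (the error path is simplified and assumes it runs inside a function, for __FUNCTION__):

    // sketch: sized KCM allocation as used throughout this changeset
    kmem_req_t req;
    req.type  = KMEM_KCM;                                  // kernel cache of small blocks
    req.order = bits_log2( sizeof(soclib_pic_core_t) );    // order derived from the object size
    req.flags = AF_KERNEL;

    soclib_pic_core_t * ext = kmem_alloc( &req );
    if( ext == NULL )
    {
        printk("\n[PANIC] in %s : cannot allocate core extension\n", __FUNCTION__ );
    }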
trunk/hal/x86_64/core/hal_gpt.c
r482 r635 282 282 /* allocate a physical page for L4 */ 283 283 kmem_req_t req; 284 req.type = KMEM_P AGE;284 req.type = KMEM_PPM; 285 285 req.size = 1; 286 286 req.flags = AF_KERNEL | AF_ZERO; … … 330 330 xptr_t page_xp; 331 331 332 req.type = KMEM_P AGE;332 req.type = KMEM_PPM; 333 333 req.size = 0; 334 334 req.flags = AF_KERNEL | AF_ZERO; -
trunk/kernel/fs/devfs.c
r624 r635 58 58 kmem_req_t req; 59 59 60 req.type = KMEM_ DEVFS_CTX;61 req. size = sizeof(devfs_ctx_t);60 req.type = KMEM_KCM; 61 req.order = bits_log2( sizeof(devfs_ctx_t) ); 62 62 req.flags = AF_KERNEL | AF_ZERO; 63 63 64 return (devfs_ctx_t *)kmem_alloc( &req );64 return kmem_alloc( &req ); 65 65 } 66 66 … … 81 81 kmem_req_t req; 82 82 83 req.type = KMEM_ DEVFS_CTX;83 req.type = KMEM_KCM; 84 84 req.ptr = devfs_ctx; 85 85 kmem_free( &req ); -
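Note on the change above: small kernel objects are now allocated through the generic KMEM_KCM request type; the caller passes a size order computed with bits_log2() instead of a dedicated per-object KMEM_* type, and the same request structure is reused for the release. A minimal sketch of the new sequence, using devfs_ctx_t as in the diff:

    kmem_req_t req;

    // allocation : order is the smallest power of 2 covering the object size
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(devfs_ctx_t) );
    req.flags = AF_KERNEL | AF_ZERO;
    devfs_ctx_t * ctx = kmem_alloc( &req );
    if( ctx == NULL ) return NULL;      // allocation can fail and must be checked

    // release : only the type and the base pointer are required
    req.type = KMEM_KCM;
    req.ptr  = ctx;
    kmem_free( &req );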
trunk/kernel/fs/fatfs.c
r633 r635 1069 1069 { 1070 1070 kmem_req_t req; 1071 req.type = KMEM_ FATFS_CTX;1072 req. size = sizeof(fatfs_ctx_t);1071 req.type = KMEM_KCM; 1072 req.order = bits_log2( sizeof(fatfs_ctx_t) ); 1073 1073 req.flags = AF_KERNEL | AF_ZERO; 1074 1074 1075 return (fatfs_ctx_t *)kmem_alloc( &req );1075 return kmem_alloc( &req ); 1076 1076 } 1077 1077 … … 1101 1101 // - temporarily the BOOT sector 1102 1102 // - permanently the FS_INFO sector 1103 req.type = KMEM_512_BYTES; 1103 req.type = KMEM_KCM; 1104 req.order = 9; // 512 bytes 1104 1105 req.flags = AF_KERNEL | AF_ZERO; 1105 buffer = (uint8_t *)kmem_alloc( &req ); 1106 buffer_xp = XPTR( local_cxy , buffer ); 1106 buffer = kmem_alloc( &req ); 1107 1107 1108 1108 if( buffer == NULL ) … … 1112 1112 } 1113 1113 1114 buffer_xp = XPTR( local_cxy , buffer ); 1115 1114 1116 // load the BOOT record from device 1115 1117 error = dev_ioc_sync_read( buffer_xp , 0 , 1 ); … … 1242 1244 { 1243 1245 kmem_req_t req; 1244 req.type = KMEM_ FATFS_CTX;1246 req.type = KMEM_KCM; 1245 1247 req.ptr = fatfs_ctx; 1246 1248 kmem_free( &req ); -
trunk/kernel/fs/vfs.c
r634 r635 150 150 mapper_t * mapper; // associated mapper( to be allocated) 151 151 vfs_inode_t * inode; // inode descriptor (to be allocated) 152 152 153 uint32_t inum; // inode identifier (to be allocated) 153 154 vfs_ctx_t * ctx; // file system context … … 155 156 error_t error; 156 157 157 #if DEBUG_VFS_INODE_CREATE158 char name[CONFIG_VFS_MAX_NAME_LENGTH];159 uint32_t cycle = (uint32_t)hal_get_cycles();160 cxy_t dentry_cxy = GET_CXY( dentry_xp );161 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );162 thread_t * this = CURRENT_THREAD;163 if( dentry_xp != XPTR_NULL ) hal_remote_strcpy( XPTR( local_cxy , name ),164 XPTR( dentry_cxy , dentry_ptr->name ) );165 else strcpy( name , "/" );166 if( DEBUG_VFS_INODE_CREATE < cycle )167 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",168 __FUNCTION__, this->process->pid, this->trdid, name, cycle );169 #endif170 171 158 // check fs type and get pointer on context 172 159 if ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS]; … … 198 185 } 199 186 200 // allocate memory for VFS inode descriptor 201 req.type = KMEM_VFS_INODE; 202 req.size = sizeof(vfs_inode_t); 187 // check inode descriptor contained in one page 188 assert( (sizeof(vfs_inode_t) <= CONFIG_PPM_PAGE_SIZE), 189 "inode descriptor must fit in one page" ); 190 191 // allocate one page for VFS inode descriptor 192 // because the embedded "children xhtab footprint 193 req.type = KMEM_PPM; 194 req.order = 0; 203 195 req.flags = AF_KERNEL | AF_ZERO; 204 inode = (vfs_inode_t *)kmem_alloc( &req );196 inode = kmem_alloc( &req ); 205 197 206 198 if( inode == NULL ) … … 243 235 244 236 #if DEBUG_VFS_INODE_CREATE 245 cycle = (uint32_t)hal_get_cycles(); 237 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 238 uint32_t cycle = (uint32_t)hal_get_cycles(); 239 thread_t * this = CURRENT_THREAD; 240 vfs_inode_get_name( *inode_xp , name ); 246 241 if( DEBUG_VFS_INODE_CREATE < cycle ) 247 printk("\n[%s] thread[%x,%x] exit for<%s> / inode [%x,%x] / cycle %d\n",242 printk("\n[%s] thread[%x,%x] created <%s> / inode [%x,%x] / cycle %d\n", 248 243 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, inode, cycle ); 249 244 #endif … … 261 256 // release memory allocate for inode descriptor 262 257 kmem_req_t req; 258 req.type = KMEM_PPM; 263 259 req.ptr = inode; 264 req.type = KMEM_VFS_INODE;265 260 kmem_free( &req ); 266 261 … … 477 472 kmem_req_t req; // request to kernel memory allocator 478 473 479 #if DEBUG_VFS_DENTRY_CREATE480 thread_t * this = CURRENT_THREAD;481 uint32_t cycle = (uint32_t)hal_get_cycles();482 if( DEBUG_VFS_DENTRY_CREATE < cycle )483 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",484 __FUNCTION__, this->process->pid, this->trdid, name, cycle );485 #endif486 487 474 // get pointer on context 488 475 if ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS]; … … 501 488 502 489 // allocate memory for dentry descriptor 503 req.type = KMEM_ VFS_DENTRY;504 req. 
size = sizeof(vfs_dentry_t);490 req.type = KMEM_KCM; 491 req.order = bits_log2( sizeof(vfs_dentry_t) ); 505 492 req.flags = AF_KERNEL | AF_ZERO; 506 dentry = (vfs_dentry_t *)kmem_alloc( &req );493 dentry = kmem_alloc( &req ); 507 494 508 495 if( dentry == NULL ) … … 523 510 524 511 #if DEBUG_VFS_DENTRY_CREATE 525 cycle = (uint32_t)hal_get_cycles(); 512 thread_t * this = CURRENT_THREAD; 513 uint32_t cycle = (uint32_t)hal_get_cycles(); 526 514 if( DEBUG_VFS_DENTRY_CREATE < cycle ) 527 printk("\n[%s] thread[%x,%x] exit for<%s> / dentry [%x,%x] / cycle %d\n",515 printk("\n[%s] thread[%x,%x] created <%s> / dentry [%x,%x] / cycle %d\n", 528 516 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, dentry, cycle ); 529 517 #endif … … 538 526 // release memory allocated to dentry 539 527 kmem_req_t req; 528 req.type = KMEM_KCM; 540 529 req.ptr = dentry; 541 req.type = KMEM_VFS_DENTRY;542 530 kmem_free( &req ); 543 531 … … 566 554 567 555 // allocate memory for new file descriptor 568 req.type = KMEM_ VFS_FILE;569 req. size = sizeof(vfs_file_t);556 req.type = KMEM_KCM; 557 req.order = bits_log2( sizeof(vfs_file_t) ); 570 558 req.flags = AF_KERNEL | AF_ZERO; 571 file = (vfs_file_t *)kmem_alloc( &req );559 file = kmem_alloc( &req ); 572 560 573 561 if( file == NULL ) return ENOMEM; … … 602 590 { 603 591 kmem_req_t req; 592 req.type = KMEM_KCM; 604 593 req.ptr = file; 605 req.type = KMEM_VFS_FILE;606 594 kmem_free( &req ); 607 595 … … 3347 3335 #endif 3348 3336 3337 3349 3338 // 3. register new_dentry in new_inode xlist of parents 3350 3339 parents_root_xp = XPTR( child_cxy , &new_inode_ptr->parents ); -
trunk/kernel/fs/vfs.h
r633 r635 306 306 /****************************************************************************************** 307 307 * This function allocates memory from local cluster for an inode descriptor and the 308 * associated mapper. It initialise these descriptors from arguments values. 308 * associated mapper, and partially initialise this inode from arguments values. 309 * It does NOT link it to the Inode Tree, as this is done by add_child_in_parent(). 309 310 * It must called by a local thread. Use the RPC_INODE_CREATE if client thread is remote. 310 311 ****************************************************************************************** -
trunk/kernel/kern/chdev.c
r625 r635 90 90 91 91 // allocate memory for chdev 92 req.type = KMEM_DEVICE; 93 req.flags = AF_ZERO; 94 chdev = (chdev_t *)kmem_alloc( &req ); 92 req.type = KMEM_KCM; 93 req.order = bits_log2( sizeof(chdev_t) ); 94 req.flags = AF_ZERO | AF_KERNEL; 95 chdev = kmem_alloc( &req ); 95 96 96 97 if( chdev == NULL ) return NULL; -
trunk/kernel/kern/cluster.c
r627 r635 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018 )6 * Alain Greiner (2016,2017,2018,2019) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 109 109 cluster_t * cluster = LOCAL_CLUSTER; 110 110 111 // initialize the lock protecting the embedded kcm allocator112 busylock_init( &cluster->kcm_lock , LOCK_CLUSTER_KCM );113 114 111 #if DEBUG_CLUSTER_INIT 115 112 uint32_t cycle = (uint32_t)hal_get_cycles(); … … 148 145 149 146 // initialises embedded KCM 150 kcm_init( &cluster->kcm , KMEM_KCM ); 147 uint32_t i; 148 for( i = 0 ; i < 6 ; i++ ) kcm_init( &cluster->kcm[i] , i+6 ); 151 149 152 150 #if( DEBUG_CLUSTER_INIT & 1 ) 153 151 cycle = (uint32_t)hal_get_cycles(); 154 152 if( DEBUG_CLUSTER_INIT < cycle ) 155 printk("\n[%s] KCM initialized in cluster %x at cycle %d\n",153 printk("\n[%s] KCM[6:11] initialized in cluster %x at cycle %d\n", 156 154 __FUNCTION__ , local_cxy , hal_get_cycles() ); 157 155 #endif -
trunk/kernel/kern/cluster.h
r611 r635 126 126 ppm_t ppm; /*! embedded kernel page manager */ 127 127 khm_t khm; /*! embedded kernel heap manager */ 128 kcm_t kcm; /*! embedded kernel KCMs manager */ 129 130 kcm_t * kcm_tbl[KMEM_TYPES_NR]; /*! pointers on allocated KCMs */ 131 busylock_t kcm_lock; /*! protect kcm_tbl[] updates */ 128 kcm_t kcm[6]; /*! embedded kernel cache managers [6:11] */ 132 129 133 130 // RPC -
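Note on the change above: a cluster now embeds six KCM allocators instead of a single one plus a kcm_tbl[] array; cluster.c initialises kcm[i] with size order i+6, so the array covers orders 6 to 11 (64 bytes to 2 Kbytes). The dispatch from a request order to one of these allocators is not shown in this changeset; the sketch below is only an assumption of how that mapping is expected to look:

    // assumed mapping from a KMEM_KCM request order to the per-cluster allocator
    uint32_t order = req->order;
    if( order < 6 ) order = 6;          // smallest managed block is 64 bytes

    assert( (order <= 11) , "order > 11 : use a KMEM_PPM request instead" );

    kcm_t * kcm = &LOCAL_CLUSTER->kcm[ order - 6 ];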
trunk/kernel/kern/kernel_init.c
r633 r635 251 251 "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n" 252 252 "\n\n\t\t %s / %d cluster(s) / %d core(s) per cluster\n\n", 253 CONFIG_ ALMOS_VERSION , nclusters , ncores );253 CONFIG_VERSION , nclusters , ncores ); 254 254 } 255 255 … … 1428 1428 } 1429 1429 1430 #if ( DEBUG_KERNEL_INIT & 1 )1430 #if CONFIG_INSTRUMENTATION_FOOTPRINT 1431 1431 if( (core_lid == 0) & (local_cxy == 0) ) 1432 1432 printk("\n\n***** memory fooprint for main kernel objects\n\n" … … 1439 1439 " - rpc fifo : %d bytes\n" 1440 1440 " - page descriptor : %d bytes\n" 1441 " - mapper root : %d bytes\n" 1441 " - mapper descriptor : %d bytes\n" 1442 " - vseg descriptor : %d bytes\n" 1442 1443 " - ppm manager : %d bytes\n" 1443 1444 " - kcm manager : %d bytes\n" … … 1445 1446 " - vmm manager : %d bytes\n" 1446 1447 " - gpt root : %d bytes\n" 1448 " - vfs inode : %d bytes\n" 1449 " - vfs dentry : %d bytes\n" 1450 " - vfs file : %d bytes\n" 1451 " - vfs context : %d bytes\n" 1452 " - xhtab root : %d bytes\n" 1447 1453 " - list item : %d bytes\n" 1448 1454 " - xlist item : %d bytes\n" … … 1462 1468 sizeof( page_t ), 1463 1469 sizeof( mapper_t ), 1470 sizeof( vseg_t ), 1464 1471 sizeof( ppm_t ), 1465 1472 sizeof( kcm_t ), … … 1467 1474 sizeof( vmm_t ), 1468 1475 sizeof( gpt_t ), 1476 sizeof( vfs_inode_t ), 1477 sizeof( vfs_dentry_t ), 1478 sizeof( vfs_file_t ), 1479 sizeof( vfs_ctx_t ), 1480 sizeof( xhtab_t ), 1469 1481 sizeof( list_entry_t ), 1470 1482 sizeof( xlist_entry_t ), … … 1486 1498 ///////////////////////////////////////////////////////////////////////////////// 1487 1499 1488 #if ( DEBUG_KERNEL_INIT & 1 )1500 #if DEBUG_KERNEL_INIT 1489 1501 thread_t * this = CURRENT_THREAD; 1490 1502 printk("\n[%s] : thread[%x,%x] on core[%x,%d] jumps to thread_idle_func() / cycle %d\n", -
trunk/kernel/kern/process.c
r633 r635 72 72 process_t * process_alloc( void ) 73 73 { 74 kmem_req_t 75 76 req.type = KMEM_ PROCESS;77 req. size = sizeof(process_t);74 kmem_req_t req; 75 76 req.type = KMEM_KCM; 77 req.order = bits_log2( sizeof(process_t) ); 78 78 req.flags = AF_KERNEL; 79 79 80 return (process_t *)kmem_alloc( &req );80 return kmem_alloc( &req ); 81 81 } 82 82 … … 86 86 kmem_req_t req; 87 87 88 req.type = KMEM_ PROCESS;88 req.type = KMEM_KCM; 89 89 req.ptr = process; 90 90 kmem_free( &req ); … … 166 166 #endif 167 167 168 // initialize VSL lock s168 // initialize VSL lock 169 169 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 170 170 171 // register kernel vsegs in VMM as required by the architecture171 // register kernel vsegs in user process VMM as required by the architecture 172 172 error = hal_vmm_kernel_update( process ); 173 173 if( error ) … … 179 179 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 180 180 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 181 printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n",181 printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n", 182 182 __FUNCTION__, parent_pid, this->trdid, pid ); 183 183 #endif … … 374 374 printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 375 375 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 376 #endif 377 378 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 379 hal_vmm_display( parent_xp , false ); 380 hal_vmm_display( XPTR( local_cxy , process ) , false ); 376 381 #endif 377 382 … … 1088 1093 } 1089 1094 } 1095 1090 1096 //////////////////////////////////////////////////// 1091 1097 error_t process_fd_register( xptr_t process_xp, … … 1356 1362 1357 1363 #if DEBUG_PROCESS_MAKE_FORK 1358 uint32_t cycle = (uint32_t)hal_get_cycles();1364 uint32_t cycle; 1359 1365 thread_t * this = CURRENT_THREAD; 1360 1366 trdid_t trdid = this->trdid; 1361 1367 pid_t pid = this->process->pid; 1368 #endif 1369 1370 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1371 cycle = (uint32_t)hal_get_cycles(); 1362 1372 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1363 1373 printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n", … … 1367 1377 // allocate a process descriptor 1368 1378 process = process_alloc(); 1379 1369 1380 if( process == NULL ) 1370 1381 { … … 1427 1438 printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n", 1428 1439 __FUNCTION__, pid, trdid, cycle ); 1440 hal_vmm_display( XPTR( local_cxy , process ) , true ); 1429 1441 #endif 1430 1442 … … 1438 1450 cycle = (uint32_t)hal_get_cycles(); 1439 1451 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1440 printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",1441 __FUNCTION__ , pid, trdid, cycle );1452 printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n", 1453 __FUNCTION__ , pid, trdid, new_pid, cycle ); 1442 1454 #endif 1443 1455 … … 1471 1483 #endif 1472 1484 1473 // set COW flag in DATA, ANON, REMOTE vsegs forparent process VMM1485 // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM 1474 1486 // this includes all parent process copies in all clusters 1475 1487 if( parent_process_cxy == local_cxy ) // reference is local … … 1489 1501 cycle = (uint32_t)hal_get_cycles(); 1490 1502 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1491 printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",1503 printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n", 1492 1504 __FUNCTION__, pid, trdid, cycle ); 1493 1505 #endif … … 1546 1558 #if 
DEBUG_PROCESS_MAKE_EXEC 1547 1559 uint32_t cycle = (uint32_t)hal_get_cycles(); 1548 if( local_cxy == 0x11)1560 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1549 1561 printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n", 1550 1562 __FUNCTION__, pid, thread->trdid, path, cycle ); … … 1569 1581 #if (DEBUG_PROCESS_MAKE_EXEC & 1) 1570 1582 cycle = (uint32_t)hal_get_cycles(); 1571 if( local_cxy == 0x11)1583 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1572 1584 printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n", 1573 1585 __FUNCTION__, pid, thread->trdid, path, cycle ); … … 1579 1591 #if (DEBUG_PROCESS_MAKE_EXEC & 1) 1580 1592 cycle = (uint32_t)hal_get_cycles(); 1581 if( local_cxy == 0x11)1593 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1582 1594 printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n", 1583 1595 __FUNCTION__, pid, thread->trdid, cycle ); … … 1589 1601 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1590 1602 cycle = (uint32_t)hal_get_cycles(); 1591 if( local_cxy == 0x11)1603 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1592 1604 printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n", 1593 1605 __FUNCTION__, pid, thread->trdid, cycle ); … … 1606 1618 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1607 1619 cycle = (uint32_t)hal_get_cycles(); 1608 if( local_cxy == 0x11)1620 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1609 1621 printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n", 1610 1622 __FUNCTION__, pid, thread->trdid, cycle ); … … 1624 1636 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1625 1637 cycle = (uint32_t)hal_get_cycles(); 1626 if( local_cxy == 0x11)1638 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1627 1639 printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n", 1628 1640 __FUNCTION__, pid, thread->trdid, cycle ); … … 1674 1686 hal_core_sleep(); 1675 1687 } 1688 1689 #if (DEBUG_PROCESS_ZERO_CREATE & 1) 1690 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1691 printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy ); 1692 #endif 1676 1693 1677 1694 // initialize PID, REF_XP, PARENT_XP, and STATE … … 1684 1701 process->term_state = 0; 1685 1702 1686 // initi lise VSL as empty1703 // initialize VSL as empty 1687 1704 vmm->vsegs_nr = 0; 1688 1705 xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); 1689 1706 1690 // initialise GPT as empty 1707 #if (DEBUG_PROCESS_ZERO_CREATE & 1) 1708 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1709 printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy ); 1710 #endif 1711 1712 // initialize GPT as empty 1691 1713 error = hal_gpt_create( &vmm->gpt ); 1714 1692 1715 if( error ) 1693 1716 { … … 1695 1718 hal_core_sleep(); 1696 1719 } 1720 1721 #if (DEBUG_PROCESS_ZERO_CREATE & 1) 1722 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1723 printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy ); 1724 #endif 1697 1725 1698 1726 // initialize VSL and GPT locks … … 1701 1729 // create kernel vsegs in GPT and VSL, as required by the hardware architecture 1702 1730 error = hal_vmm_kernel_init( info ); 1731 1703 1732 if( error ) 1704 1733 { … … 1706 1735 hal_core_sleep(); 1707 1736 } 1737 1738 #if (DEBUG_PROCESS_ZERO_CREATE & 1) 1739 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1740 printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy ); 1741 #endif 1708 1742 1709 1743 // reset th_tbl[] array and associated fields … … 1716 1750 rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL ); 1717 1751 1752 #if (DEBUG_PROCESS_ZERO_CREATE & 1) 1753 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 
1754 printk("\n[%s] initialized th_tbl[] in cluster%x\n", __FUNCTION__, local_cxy ); 1755 #endif 1718 1756 1719 1757 // reset children list as empty … … 1722 1760 remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), 1723 1761 LOCK_PROCESS_CHILDREN ); 1762 1763 #if (DEBUG_PROCESS_ZERO_CREATE & 1) 1764 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1765 printk("\n[%s] initialized children list in cluster%x\n", __FUNCTION__, local_cxy ); 1766 #endif 1724 1767 1725 1768 // register kernel process in cluster manager local_list … … 1759 1802 // allocates memory for process descriptor from local cluster 1760 1803 process = process_alloc(); 1804 1805 1761 1806 if( process == NULL ) 1762 1807 { … … 1840 1885 1841 1886 #if (DEBUG_PROCESS_INIT_CREATE & 1) 1842 hal_vmm_display( process, true );1887 hal_vmm_display( XPTR( local_cxy , process ) , true ); 1843 1888 #endif 1844 1889 -
trunk/kernel/kern/process.h
r625 r635 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018 )6 * Alain Greiner (2016,2017,2018,2019) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 231 231 * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments 232 232 * are previously allocated by the caller. This function can be called by two functions: 233 * 1)process_init_create() : process is the INIT process, and parent is process-zero.234 * 2) process_make_fork(): the parent process descriptor is generally remote.233 * - process_init_create() : process is the INIT process, and parent is process-zero. 234 * - process_make_fork() : the parent process descriptor is generally remote. 235 235 * The following fields are initialised : 236 236 * - It set the pid / ppid / ref_xp / parent_xp / state fields. -
trunk/kernel/kern/rpc.c
r632 r635 75 75 &rpc_vmm_get_vseg_server, // 20 76 76 &rpc_vmm_global_update_pte_server, // 21 77 &rpc_ kcm_alloc_server,// 2278 &rpc_ kcm_free_server,// 2377 &rpc_undefined, // 22 78 &rpc_undefined, // 23 79 79 &rpc_mapper_sync_server, // 24 80 &rpc_ mapper_handle_miss_server,// 2580 &rpc_undefined, // 25 81 81 &rpc_vmm_delete_vseg_server, // 26 82 82 &rpc_vmm_create_vseg_server, // 27 83 83 &rpc_vmm_set_cow_server, // 28 84 &rpc_ hal_vmm_display_server,// 2984 &rpc_undefined, // 29 85 85 }; 86 86 … … 111 111 "GET_VSEG", // 20 112 112 "GLOBAL_UPDATE_PTE", // 21 113 " KCM_ALLOC",// 22114 " KCM_FREE",// 23113 "undefined_22", // 22 114 "undefined_23", // 23 115 115 "MAPPER_SYNC", // 24 116 " MAPPER_HANDLE_MISS",// 25116 "undefined_25", // 25 117 117 "VMM_DELETE_VSEG", // 26 118 118 "VMM_CREATE_VSEG", // 27 119 119 "VMM_SET_COW", // 28 120 " VMM_DISPLAY",// 29120 "undefined_29", // 29 121 121 }; 122 122 … … 557 557 // release memory to local pmem 558 558 kmem_req_t req; 559 req.type = KMEM_P AGE;559 req.type = KMEM_PPM; 560 560 req.ptr = page; 561 561 kmem_free( &req ); … … 2231 2231 ///////////////////////////////////////////////////////////////////////////////////////// 2232 2232 2233 /* 2233 2234 ////////////////////////////////////////// 2234 2235 void rpc_kcm_alloc_client( cxy_t cxy, … … 2304 2305 #endif 2305 2306 } 2307 */ 2306 2308 2307 2309 ///////////////////////////////////////////////////////////////////////////////////////// … … 2309 2311 ///////////////////////////////////////////////////////////////////////////////////////// 2310 2312 2313 /* 2311 2314 ///////////////////////////////////////// 2312 2315 void rpc_kcm_free_client( cxy_t cxy, … … 2377 2380 #endif 2378 2381 } 2379 2380 ///////////////////////////////////////////////////////////////////////////////////////// 2381 // [25] Marshaling functions attached to RPC_MAPPER_SYNC 2382 */ 2383 2384 ///////////////////////////////////////////////////////////////////////////////////////// 2385 // [24] Marshaling functions attached to RPC_MAPPER_SYNC 2382 2386 ///////////////////////////////////////////////////////////////////////////////////////// 2383 2387 … … 2459 2463 ///////////////////////////////////////////////////////////////////////////////////////// 2460 2464 2465 /* 2461 2466 ////////////////////////////////////////////////////////// 2462 2467 void rpc_mapper_handle_miss_client( cxy_t cxy, … … 2541 2546 #endif 2542 2547 } 2548 */ 2543 2549 2544 2550 ///////////////////////////////////////////////////////////////////////////////////////// … … 2784 2790 2785 2791 ///////////////////////////////////////////////////////////////////////////////////////// 2786 // [29] Marshaling functions attached to RPC_VMM_DISPLAY 2787 ///////////////////////////////////////////////////////////////////////////////////////// 2788 2792 // [29] RPC_VMM_DISPLAY deprecated [AG] June 2019 2793 ///////////////////////////////////////////////////////////////////////////////////////// 2794 2795 /* 2789 2796 ///////////////////////////////////////////// 2790 2797 void rpc_hal_vmm_display_client( cxy_t cxy, … … 2856 2863 } 2857 2864 2858 2865 */ -
trunk/kernel/kern/rpc.h
r632 r635 60 60 typedef enum 61 61 { 62 RPC_UNDEFINED_0 = 0, // RPC_PMEM_GET_PAGES deprecated [AG]63 RPC_UNDEFINED_1 = 1, // RPC_PMEM_RELEASE_PAGES deprecated [AG]64 RPC_UNDEFINED_2 = 2, // RPC_PMEM_DISPLAY deprecated [AG]62 RPC_UNDEFINED_0 = 0, // RPC_PMEM_GET_PAGES deprecated [AG] 63 RPC_UNDEFINED_1 = 1, // RPC_PMEM_RELEASE_PAGES deprecated [AG] 64 RPC_UNDEFINED_2 = 2, // RPC_PMEM_DISPLAY deprecated [AG] 65 65 RPC_PROCESS_MAKE_FORK = 3, 66 66 RPC_USER_DIR_CREATE = 4, … … 84 84 RPC_VMM_GET_VSEG = 20, 85 85 RPC_VMM_GLOBAL_UPDATE_PTE = 21, 86 RPC_ KCM_ALLOC = 22,87 RPC_ KCM_FREE = 23,86 RPC_UNDEFINED_22 = 22, // RPC_KCM_ALLOC deprecated [AG] 87 RPC_UNDEFINED_23 = 23, // RPC_KCM_FREE deprecated [AG] 88 88 RPC_MAPPER_SYNC = 24, 89 RPC_ MAPPER_HANDLE_MISS = 25,89 RPC_UNDEFUNED_25 = 25, // RPC_MAPPER_HANDLE_MISS deprecated [AG] 90 90 RPC_VMM_DELETE_VSEG = 26, 91 91 RPC_VMM_CREATE_VSEG = 27, 92 92 RPC_VMM_SET_COW = 28, 93 RPC_ VMM_DISPLAY = 29,93 RPC_UNDEFINED_29 = 29, // RPC_VMM_DISPLAY deprecated [AG] 94 94 95 95 RPC_MAX_INDEX = 30, … … 574 574 * @ buf_xp : [out] buffer for extended pointer on allocated buffer. 575 575 **********************************************************************************/ 576 577 /* 576 578 void rpc_kcm_alloc_client( cxy_t cxy, 577 579 uint32_t kmem_type, … … 579 581 580 582 void rpc_kcm_alloc_server( xptr_t xp ); 583 */ 581 584 582 585 /*********************************************************************************** … … 588 591 * @ kmem_type : [in] KCM object type (as defined in kmem.h). 589 592 **********************************************************************************/ 593 594 /* 590 595 void rpc_kcm_free_client( cxy_t cxy, 591 596 void * buf, … … 593 598 594 599 void rpc_kcm_free_server( xptr_t xp ); 600 */ 595 601 596 602 /*********************************************************************************** … … 621 627 * @ error : [out] error status (0 if success). 622 628 **********************************************************************************/ 629 /* 623 630 void rpc_mapper_handle_miss_client( cxy_t cxy, 624 631 struct mapper_s * mapper, … … 628 635 629 636 void rpc_mapper_handle_miss_server( xptr_t xp ); 630 637 */ 631 638 /*********************************************************************************** 632 639 * [26] The RPC_VMM_DELETE_VSEG allows any client thread to request a remote … … 699 706 * @ detailed : [in] detailed display if true. 700 707 **********************************************************************************/ 708 709 /* 701 710 void rpc_hal_vmm_display_client( cxy_t cxy, 702 711 struct process_s * process, … … 704 713 705 714 void rpc_hal_vmm_display_server( xptr_t xp ); 706 715 */ 707 716 708 717 #endif -
trunk/kernel/kern/scheduler.c
r630 r635 180 180 sched = &core->scheduler; 181 181 182 ////////////////// scan user threads to handle bothACK and DELETE requests182 ////////////////// scan user threads to handle ACK and DELETE requests 183 183 root = &sched->u_root; 184 184 iter = root->next; … … 195 195 { 196 196 197 // check t hread blocked197 // check target thread blocked 198 198 assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" ); 199 199 … … 206 206 207 207 // handle REQ_DELETE only if target thread != calling thread 208 if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) ) 209 { 208 if( thread->flags & THREAD_FLAG_REQ_DELETE ) 209 { 210 211 // check calling thread != target thread 212 assert( (thread != CURRENT_THREAD) , "calling thread cannot delete itself" ); 213 210 214 // get thread process descriptor 211 215 process = thread->process; … … 497 501 remote_fifo_t * fifo = &LOCAL_CLUSTER->rpc_fifo[lid]; 498 502 503 #if DEBUG_SCHED_YIELD 504 uint32_t cycle = (uint32_t)hal_get_cycles(); 505 #endif 506 499 507 #if (DEBUG_SCHED_YIELD & 0x1) 500 if( sched->trace )508 if( sched->trace || (cycle > DEBUG_SCHED_YIELD) ) 501 509 sched_display( lid ); 502 510 #endif … … 551 559 552 560 #if DEBUG_SCHED_YIELD 553 if( sched->trace )561 if( sched->trace || (cycle > DEBUG_SCHED_YIELD) ) 554 562 printk("\n[%s] core[%x,%d] / cause = %s\n" 555 563 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 556 564 __FUNCTION__, local_cxy, lid, cause, 557 565 current, thread_type_str(current->type), current->process->pid, current->trdid,next , 558 thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles());566 thread_type_str(next->type) , next->process->pid , next->trdid , cycle ); 559 567 #endif 560 568 … … 567 575 busylock_release( &sched->lock ); 568 576 569 #if (DEBUG_SCHED_YIELD & 1)570 if( sched->trace )577 #if DEBUG_SCHED_YIELD 578 if( sched->trace || (cycle > DEBUG_SCHED_YIELD) ) 571 579 printk("\n[%s] core[%x,%d] / cause = %s\n" 572 580 " thread %x (%s) (%x,%x) continue / cycle %d\n", -
trunk/kernel/kern/thread.c
r633 r635 78 78 static thread_t * thread_alloc( void ) 79 79 { 80 page_t * page; // pointer on page descriptor containing thread descriptor81 80 kmem_req_t req; // kmem request 82 81 83 82 // allocates memory for thread descriptor + kernel stack 84 req.type = KMEM_P AGE;85 req. size= CONFIG_THREAD_DESC_ORDER;83 req.type = KMEM_PPM; 84 req.order = CONFIG_THREAD_DESC_ORDER; 86 85 req.flags = AF_KERNEL | AF_ZERO; 87 page = kmem_alloc( &req ); 88 89 if( page == NULL ) return NULL; 90 91 // return pointer on new thread descriptor 92 xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) ); 93 return GET_PTR( base_xp ); 86 87 return kmem_alloc( &req ); 94 88 95 89 } // end thread_alloc() … … 125 119 { 126 120 127 // check type and trdid fields initialized121 // check type and trdid fields are initialized 128 122 assert( (thread->type == type) , "bad type argument" ); 129 123 assert( (thread->trdid == trdid) , "bad trdid argument" ); … … 133 127 thread_t * this = CURRENT_THREAD; 134 128 if( DEBUG_THREAD_INIT < cycle ) 135 printk("\n[%s] thread[%x,%x] enter for thread %x in process %x/ cycle %d\n",136 __FUNCTION__, this->process->pid, this->trdid, thread->trdid, process->pid, cycle );129 printk("\n[%s] thread[%x,%x] enter for thread[%x,%x] / cycle %d\n", 130 __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle ); 137 131 #endif 138 132 … … 192 186 cycle = (uint32_t)hal_get_cycles(); 193 187 if( DEBUG_THREAD_INIT < cycle ) 194 printk("\n[%s] thread[%x,%x] exit for thread %x in process %x/ cycle %d\n",195 __FUNCTION__, this->process->pid, this->trdid, thread, process->pid, cycle );188 printk("\n[%s] thread[%x,%x] exit for thread[%x,%x] / cycle %d\n", 189 __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle ); 196 190 #endif 197 191 … … 580 574 vpn_t parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) ); 581 575 vpn_t child_vpn_base = child_us_vseg->vpn_base; 576 582 577 for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ; 583 578 parent_vpn < (parent_vpn_base + parent_vpn_size) ; … … 625 620 #if (DEBUG_THREAD_USER_FORK & 1) 626 621 if( DEBUG_THREAD_USER_FORK < cycle ) 627 printk("\n[%s] thread[%x,%x] copied all stack vseg PTEs tochild GPT\n",622 printk("\n[%s] thread[%x,%x] copied STACK vseg PTEs & set COW in child GPT\n", 628 623 __FUNCTION__, this->process->pid, this->trdid ); 629 624 #endif … … 636 631 #if (DEBUG_THREAD_USER_FORK & 1) 637 632 if( DEBUG_THREAD_USER_FORK < cycle ) 638 printk("\n[%s] thread[%x,%x] set the COW flag for stackvseg in parent GPT\n",633 printk("\n[%s] thread[%x,%x] set COW for STACK vseg in parent GPT\n", 639 634 __FUNCTION__, this->process->pid, this->trdid ); 640 635 #endif … … 906 901 thread_assert_can_yield( thread , __FUNCTION__ ); 907 902 908 // update target process instrumentation counter 909 // process->vmm.pgfault_nr += thread->info.pgfault_nr; 903 #if CONFIG_INSTRUMENTATION_PGFAULTS 904 process->vmm.false_pgfault_nr += thread->info.false_pgfault_nr; 905 process->vmm.local_pgfault_nr += thread->info.local_pgfault_nr; 906 process->vmm.global_pgfault_nr += thread->info.global_pgfault_nr; 907 process->vmm.false_pgfault_cost += thread->info.false_pgfault_cost; 908 process->vmm.local_pgfault_cost += thread->info.local_pgfault_cost; 909 process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost; 910 #endif 910 911 911 912 // remove thread from process th_tbl[] 912 913 count = process_remove_thread( thread ); 913 914 914 // release memory allocated for CPU 
context and FPU context if required915 // release memory allocated for CPU context and FPU context 915 916 hal_cpu_context_destroy( thread ); 916 917 hal_fpu_context_destroy( thread ); … … 933 934 // release memory for thread descriptor (including kernel stack) 934 935 kmem_req_t req; 935 xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) ); 936 937 req.type = KMEM_PAGE; 938 req.ptr = GET_PTR( base_xp ); 936 req.type = KMEM_PPM; 937 req.ptr = thread; 939 938 kmem_free( &req ); 940 939 -
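Note on the change above: larger allocations keep the same request structure but use KMEM_PPM with an order expressed in pages, and kmem_alloc() now returns the base address directly, which removes the page_t / ppm_page2base() conversions that thread_alloc() and thread_destroy() previously needed. A minimal sketch of the new thread descriptor allocation and release, as in the diff:

    kmem_req_t req;

    // allocate 2^CONFIG_THREAD_DESC_ORDER contiguous zeroed pages
    // (thread descriptor + kernel stack)
    req.type  = KMEM_PPM;
    req.order = CONFIG_THREAD_DESC_ORDER;
    req.flags = AF_KERNEL | AF_ZERO;
    thread_t * thread = kmem_alloc( &req );

    // release : the base pointer is enough, no page descriptor lookup
    req.type = KMEM_PPM;
    req.ptr  = thread;
    kmem_free( &req );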
trunk/kernel/kern/thread.h
r629 r635 101 101 { 102 102 uint32_t false_pgfault_nr; /*! number of local page fault */ 103 uint32_t local_pgfault_nr; /*! number of local page fault */ 104 uint32_t global_pgfault_nr; /*! number of global page fault */ 103 105 uint32_t false_pgfault_cost; /*! cumulated cost */ 104 uint32_t local_pgfault_nr; /*! number of local page fault */105 106 uint32_t local_pgfault_cost; /*! cumulated cost */ 106 uint32_t global_pgfault_nr; /*! number of global page fault */107 107 uint32_t global_pgfault_cost; /*! cumulated cost */ 108 108 … … 339 339 * this. This includes the thread descriptor itself, the associated CPU and FPU context, 340 340 * and the physical memory allocated for an user thread stack. 341 * This function does not remove the thread from the scheduler, as this is done by 342 * the scheduler itself. 341 343 *************************************************************************************** 342 344 * @ thread : pointer on the thread descriptor to release. … … 394 396 * The calling thread can run in any cluster, as it uses remote accesses. 395 397 * This function makes a kernel panic if the target thread is the main thread, 396 * because *the main thread deletion will cause the process deletion, and a process398 * because the main thread deletion will cause the process deletion, and a process 397 399 * must be deleted by the parent process, running the wait function. 398 400 * If the target thread is running in "attached" mode, and the <is_forced> argument -
trunk/kernel/kernel_config.h
r634 r635 25 25 #define _KERNEL_CONFIG_H_ 26 26 27 #define CONFIG_ALMOS_VERSION "Version 2.1 / May 2019"28 29 27 //////////////////////////////////////////////////////////////////////////////////////////// 30 28 // KERNEL DEBUG … … 98 96 #define DEBUG_HAL_GPT_DESTROY 0 99 97 #define DEBUG_HAL_GPT_LOCK_PTE 0 98 #define DEBUG_HAL_GPT_SET_COW 0 100 99 #define DEBUG_HAL_GPT_SET_PTE 0 101 100 #define DEBUG_HAL_IOC_RX 0 … … 109 108 110 109 #define DEBUG_KCM 0 110 #define DEBUG_KCM_REMOTE 0 111 111 112 #define DEBUG_KMEM 0 113 #define DEBUG_KMEM_REMOTE 0 112 114 113 115 #define DEBUG_KERNEL_INIT 0 … … 145 147 #define DEBUG_RPC_SERVER_GENERIC 0 146 148 147 #define DEBUG_RPC_KCM_ALLOC 0148 #define DEBUG_RPC_KCM_FREE 0149 #define DEBUG_RPC_MAPPER_HANDLE_MISS 0150 149 #define DEBUG_RPC_MAPPER_MOVE_USER 0 151 150 #define DEBUG_RPC_PROCESS_MAKE_FORK 0 … … 233 232 #define DEBUG_VFS_CHDIR 0 234 233 #define DEBUG_VFS_CLOSE 0 235 #define DEBUG_VFS_DENTRY_CREATE 0 234 #define DEBUG_VFS_DENTRY_CREATE 0 236 235 #define DEBUG_VFS_FILE_CREATE 0 237 236 #define DEBUG_VFS_GET_PATH 0 … … 256 255 #define DEBUG_VMM_GET_ONE_PPN 0 257 256 #define DEBUG_VMM_GET_PTE 0 258 #define DEBUG_VMM_HANDLE_PAGE_FAULT 19000000257 #define DEBUG_VMM_HANDLE_PAGE_FAULT 0 259 258 #define DEBUG_VMM_HANDLE_COW 0 260 259 #define DEBUG_VMM_MMAP_ALLOC 0 … … 316 315 #define LOCK_FATFS_FAT 36 // remote (RW) protect exclusive access to the FATFS FAT 317 316 317 //////////////////////////////////////////////////////////////////////////////////////////// 318 // GENERAL CONFIGURATION 319 //////////////////////////////////////////////////////////////////////////////////////////// 320 321 #define CONFIG_VERSION "Version 2.2 / June 2019" 318 322 319 323 //////////////////////////////////////////////////////////////////////////////////////////// … … 376 380 #define CONFIG_VFS_ROOT_IS_EX2FS 0 // root FS is EX2FS if non zero 377 381 378 #define CONFIG_MAPPER_GRDXT_W1 7// number of bits for RADIX_TREE_IX1379 #define CONFIG_MAPPER_GRDXT_W2 7 // number of bits for RADIX_TREE_IX2380 #define CONFIG_MAPPER_GRDXT_W3 6// number of bits for RADIX_TREE_IX3382 #define CONFIG_MAPPER_GRDXT_W1 6 // number of bits for RADIX_TREE_IX1 383 #define CONFIG_MAPPER_GRDXT_W2 7 // number of bits for RADIX_TREE_IX2 384 #define CONFIG_MAPPER_GRDXT_W3 7 // number of bits for RADIX_TREE_IX3 381 385 382 386 //////////////////////////////////////////////////////////////////////////////////////////// … … 445 449 #define CONFIG_PPM_MAX_RSVD 32 // max reserved zones on the machine 446 450 447 #define CONFIG_KCM_SLOT_SIZE 64 // smallest allocated block (bytes)448 449 451 #define CONFIG_PPM_PAGE_ALIGNED __attribute__((aligned(CONFIG_PPM_PAGE_SIZE))) 450 452 451 452 453 //////////////////////////////////////////////////////////////////////////////////////////// 453 454 // INSTRUMENTATION 454 455 //////////////////////////////////////////////////////////////////////////////////////////// 455 456 456 #define CONFIG_INSTRUMENTATION_SYSCALLS 0457 #define CONFIG_INSTRUMENTATION_PGFAULTS 1458 457 #define CONFIG_INSTRUMENTATION_SYSCALLS 0 458 #define CONFIG_INSTRUMENTATION_PGFAULTS 1 459 #define CONFIG_INSTRUMENTATION_FOOTPRINT 1 459 460 460 461 -
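Note on the CONFIG_MAPPER_GRDXT_* change above: the three widths still add up to 20 bits, enough to index every page of a 4 Gbytes file if pages are 4 Kbytes (an assumption about CONFIG_PPM_PAGE_SIZE, which is not shown in this excerpt); moving one bit from the first level to the third level halves the first-level array that grdxt_init() allocates up front. A short arithmetic check, assuming 4-byte pointers on TSAR-MIPS32:

    // total key width   : 6 + 7 + 7 = 20 bits  ->  2^20 pages per file
    // first level array : (1 << 6) = 64 pointers  * 4 bytes = 256 bytes (was 512 with W1 = 7)
    // second level array: (1 << 7) = 128 pointers * 4 bytes = 512 bytes
    // third level array : (1 << 7) = 128 pointers * 4 bytes = 512 bytes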
trunk/kernel/libk/bits.c
r473 r635 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/libk/bits.h
r457 r635 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 189 189 190 190 /********************************************************************************************* 191 * This function returns the number of bits to code a non-zero unsigned integer value. 192 ********************************************************************************************* 193 * @ val : value to analyse 194 * @ returns number of bits 195 ********************************************************************************************/ 196 static inline uint32_t bits_nr( uint32_t val ) 197 { 198 register uint32_t i; 199 200 for( i=0 ; val > 0 ; i++ ) 201 val = val >> 1; 202 203 return i; 204 } 205 206 /********************************************************************************************* 207 * This function takes an unsigned integer value as input argument, and returns another 208 * unsigned integer, that is the (base 2) logarithm of the smallest power of 2 contained 209 * in the input value. 191 * This function takes a positive integer <val> as input argument, and returns the smallest 192 * integer <order> such as : 1<<order >= val. 193 * In other words, <order> is the min number of bits to encode <val> values. 210 194 ********************************************************************************************* 211 195 * @ val : value to analyse … … 214 198 static inline uint32_t bits_log2( uint32_t val ) 215 199 { 216 return (val == 0) ? 1 : bits_nr( val ) - 1; 200 uint32_t i; 201 202 if( val > 0 ) 203 { 204 val--; 205 for( i=0 ; val > 0 ; i++ ) val = val >> 1; 206 return i; 207 } 208 return 0; 217 209 } 218 210 -
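Note on the change above: the rewritten bits_log2() rounds up instead of truncating; it returns the smallest order such that (1 << order) >= val, and returns 0 for val == 0, which is exactly the semantics the KMEM_KCM requests above rely on. A few values worked out from the code in the diff:

    bits_log2( 1 )   == 0
    bits_log2( 64 )  == 6    // a 64-byte object fits in a size-order-6 block
    bits_log2( 65 )  == 7    // rounded up to the next power of 2
    bits_log2( 512 ) == 9    // e.g. the 512-byte boot sector buffer in fatfs.c above
    bits_log2( 0 )   == 0    // degenerate case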
trunk/kernel/libk/elf.c
r625 r635 2 2 * elf.c - elf parser: find and map process CODE and DATA segments 3 3 * 4 * Authors Alain Greiner (2016 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 201 201 printk("\n[%s] thread[%x,%x] found %s vseg / base %x / size %x\n" 202 202 " file_size %x / file_offset %x / mapper_xp %l / cycle %d\n", 203 __FUNCTION__ , this->process _pid, this->trdid,203 __FUNCTION__ , this->process->pid, this->trdid, 204 204 vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min , 205 vseg->file_size , vseg->file_offset , vseg->mapper_xp );205 vseg->file_size , vseg->file_offset , vseg->mapper_xp, cycle ); 206 206 #endif 207 207 … … 262 262 263 263 // allocate memory for segment descriptors array 264 req.type = KMEM_ GENERIC;265 req. size = segs_size;264 req.type = KMEM_KCM; 265 req.order = bits_log2(segs_size); 266 266 req.flags = AF_KERNEL; 267 267 segs_base = kmem_alloc( &req ); -
trunk/kernel/libk/grdxt.c
r626 r635 30 30 #include <grdxt.h> 31 31 32 //////////////////////////////////////////////////////////////////////////////////////// 33 // Local access functions 34 //////////////////////////////////////////////////////////////////////////////////////// 35 32 36 ///////////////////////////////// 33 37 error_t grdxt_init( grdxt_t * rt, … … 44 48 45 49 // allocates first level array 46 req.type = KMEM_ GENERIC;47 req. size = sizeof(void *) << ix1_width;50 req.type = KMEM_KCM; 51 req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 ); 48 52 req.flags = AF_KERNEL | AF_ZERO; 49 53 root = kmem_alloc( &req ); 50 if( root == NULL ) return ENOMEM; 54 55 if( root == NULL ) 56 { 57 printk("\n[ERROR] in %s : cannot allocate first level array\n", __FUNCTION__); 58 return -1; 59 } 51 60 52 61 rt->root = root; … … 71 80 uint32_t ix1; 72 81 uint32_t ix2; 73 74 // check rt 82 uint32_t ix3; 83 75 84 assert( (rt != NULL) , "pointer on radix tree is NULL\n" ); 76 77 req.type = KMEM_GENERIC;78 85 79 86 for( ix1=0 ; ix1 < (uint32_t)(1 << w1) ; ix1++ ) … … 89 96 if( ptr3 == NULL ) continue; 90 97 98 for( ix3=0 ; ix3 < (uint32_t)(1 << w3) ; ix3++ ) 99 { 100 if( ptr3[ix3] != NULL ) 101 { 102 printk("\n[WARNING] in %s : ptr3[%d][%d][%d] non empty\n", 103 __FUNCTION__, ix1, ix2, ix3 ); 104 } 105 } 106 91 107 // release level 3 array 108 req.type = KMEM_KCM; 92 109 req.ptr = ptr3; 93 req.type = KMEM_GENERIC;94 req.size = sizeof(void *) * (1 << w3);95 110 kmem_free( &req ); 96 111 } 97 112 98 113 // release level 2 array 114 req.type = KMEM_KCM; 99 115 req.ptr = ptr2; 100 req.type = KMEM_GENERIC;101 req.size = sizeof(void *) * (1 << w2);102 116 kmem_free( &req ); 103 117 } 104 118 105 119 // release level 1 array 120 req.type = KMEM_KCM; 106 121 req.ptr = ptr1; 107 req.type = KMEM_GENERIC;108 req.size = sizeof(void *) * (1 << w1);109 122 kmem_free( &req ); 110 123 111 124 } // end grdxt_destroy() 112 113 ////////////////////////////////////114 void grdxt_display( xptr_t rt_xp,115 char * name )116 {117 uint32_t ix1;118 uint32_t ix2;119 uint32_t ix3;120 121 // check rt_xp122 assert( (rt_xp != XPTR_NULL) , "pointer on radix tree is NULL\n" );123 124 // get cluster and local pointer on remote rt descriptor125 grdxt_t * rt_ptr = GET_PTR( rt_xp );126 cxy_t rt_cxy = GET_CXY( rt_xp );127 128 // get widths129 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) );130 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) );131 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) );132 133 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) );134 135 printk("\n***** Generic Radix Tree for <%s>\n", name );136 137 for( ix1=0 ; ix1 < (uint32_t)(1<<w1) ; ix1++ )138 {139 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) );140 if( ptr2 == NULL ) continue;141 142 for( ix2=0 ; ix2 < (uint32_t)(1<<w2) ; ix2++ )143 {144 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) );145 if( ptr3 == NULL ) continue;146 147 for( ix3=0 ; ix3 < (uint32_t)(1<<w3) ; ix3++ )148 {149 void * value = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) );150 if( value == NULL ) continue;151 152 uint32_t key = (ix1<<(w2+w3)) + (ix2<<w3) + ix3;153 printk(" - key = %x / value = %x\n", key , (intptr_t)value );154 }155 }156 }157 158 } // end grdxt_display()159 125 160 126 //////////////////////////////////// … … 177 143 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array 178 144 179 void ** ptr1 = rt->root; // pointer on level 1 array 180 void ** ptr2; // pointer on level 2 array 181 void ** 
ptr3; // pointer on level 3 array 182 183 // If required, we must allocate memory for the selected level 2 array, 184 // and update the level 1 array. 185 if( ptr1[ix1] == NULL ) 145 // get ptr1 146 void ** ptr1 = rt->root; 147 148 if( ptr1 == NULL ) return -1; 149 150 // get ptr2 151 void ** ptr2 = ptr1[ix1]; 152 153 // If required, allocate memory for the missing level 2 array 154 if( ptr2 == NULL ) 186 155 { 187 156 // allocate memory for level 2 array 188 req.type = KMEM_GENERIC;189 req. size = sizeof(void *) << w2;157 req.type = KMEM_KCM; 158 req.order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 ); 190 159 req.flags = AF_KERNEL | AF_ZERO; 191 160 ptr2 = kmem_alloc( &req ); 192 if( ptr2 == NULL) return ENOMEM; 161 162 if( ptr2 == NULL) return -1; 193 163 194 164 // update level 1 array 195 165 ptr1[ix1] = ptr2; 196 166 } 197 else // get pointer on selected level 2 array. 198 { 199 ptr2 = ptr1[ix1]; 200 } 201 202 // If required, we must allocate memory for the selected level 3 array, 203 // and update the level 2 array. 204 if( ptr2[ix2] == NULL ) 167 168 // get ptr3 169 void ** ptr3 = ptr2[ix2]; 170 171 // If required, allocate memory for the missing level 3 array 172 if( ptr3 == NULL ) 205 173 { 206 174 // allocate memory for level 3 array 207 req.type = KMEM_ GENERIC;208 req. size = sizeof(void *) << w3;175 req.type = KMEM_KCM; 176 req.order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 ); 209 177 req.flags = AF_KERNEL | AF_ZERO; 210 178 ptr3 = kmem_alloc( &req ); 211 if( ptr3 == NULL) return ENOMEM; 179 180 if( ptr3 == NULL) return -1; 212 181 213 182 // update level 3 array 214 183 ptr2[ix2] = ptr3; 215 184 } 216 else // get pointer on selected level 3 array.217 {218 ptr3 = ptr2[ix2];219 }220 221 // selected slot in level 3 array must be empty222 if( ptr3[ix3] != NULL ) return EEXIST;223 185 224 186 // register the value 225 187 ptr3[ix3] = value; 188 226 189 hal_fence(); 227 190 … … 246 209 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array 247 210 248 void ** ptr1 = rt->root; // pointer on level 1 array 249 void ** ptr2; // pointer on level 2 array 250 void ** ptr3; // pointer on level 3 array 211 // get ptr1 212 void ** ptr1 = rt->root; 213 214 if( ptr1 == NULL ) return NULL; 251 215 252 216 // get ptr2 253 ptr2 = ptr1[ix1]; 217 void ** ptr2 = ptr1[ix1]; 218 254 219 if( ptr2 == NULL ) return NULL; 255 220 256 221 // get ptr3 257 ptr3 = ptr2[ix2]; 222 void ** ptr3 = ptr2[ix2]; 223 258 224 if( ptr3 == NULL ) return NULL; 259 225 … … 303 269 304 270 } // end grdxt_lookup() 305 306 ////////////////////////////////////////////307 xptr_t grdxt_remote_lookup( xptr_t rt_xp,308 uint32_t key )309 {310 // get cluster and local pointer on remote rt descriptor311 grdxt_t * rt_ptr = GET_PTR( rt_xp );312 cxy_t rt_cxy = GET_CXY( rt_xp );313 314 // get widths315 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) );316 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) );317 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) );318 319 // Check key value320 assert( ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );321 322 // compute indexes323 uint32_t ix1 = key >> (w2 + w3); // index in level 1 array324 uint32_t ix2 = (key >> w3) & ((1 << w2) -1); // index in level 2 array325 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array326 327 // get ptr1328 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) );329 330 // get ptr2331 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) );332 if( ptr2 == NULL ) return 
XPTR_NULL;333 334 // get ptr3335 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) );336 if( ptr3 == NULL ) return XPTR_NULL;337 338 // get pointer on registered item339 void * item_ptr = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) );340 341 // return extended pointer on registered item342 if ( item_ptr == NULL ) return XPTR_NULL;343 else return XPTR( rt_cxy , item_ptr );344 345 } // end grdxt_remote_lookup()346 271 347 272 ////////////////////////////////////// … … 400 325 401 326 } // end grdxt_get_first() 327 328 329 330 //////////////////////////////////////////////////////////////////////////////////////// 331 // Remote access functions 332 //////////////////////////////////////////////////////////////////////////////////////// 333 334 ////////////////////////////////////////////// 335 error_t grdxt_remote_insert( xptr_t rt_xp, 336 uint32_t key, 337 void * value ) 338 { 339 kmem_req_t req; 340 341 // get cluster and local pointer on remote rt descriptor 342 cxy_t rt_cxy = GET_CXY( rt_xp ); 343 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 344 345 // get widths 346 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) ); 347 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) ); 348 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) ); 349 350 // Check key value 351 assert( ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key ); 352 353 // compute indexes 354 uint32_t ix1 = key >> (w2 + w3); // index in level 1 array 355 uint32_t ix2 = (key >> w3) & ((1 << w2) -1); // index in level 2 array 356 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array 357 358 // get ptr1 359 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 360 361 if( ptr1 == NULL ) return -1; 362 363 // get ptr2 364 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 365 366 // allocate memory for the missing level_2 array if required 367 if( ptr2 == NULL ) 368 { 369 // allocate memory in remote cluster 370 req.type = KMEM_KCM; 371 req.order = w2 + ((sizeof(void*) == 4) ? 2 : 3 ); 372 req.flags = AF_ZERO | AF_KERNEL; 373 ptr2 = kmem_remote_alloc( rt_cxy , &req ); 374 375 if( ptr2 == NULL ) return -1; 376 377 // update level_1 entry 378 hal_remote_spt( XPTR( rt_cxy , &ptr1[ix1] ) , ptr2 ); 379 } 380 381 // get ptr3 382 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 383 384 // allocate memory for the missing level_3 array if required 385 if( ptr3 == NULL ) 386 { 387 // allocate memory in remote cluster 388 req.type = KMEM_KCM; 389 req.order = w3 + ((sizeof(void*) == 4) ? 
2 : 3 ); 390 req.flags = AF_ZERO | AF_KERNEL; 391 ptr3 = kmem_remote_alloc( rt_cxy , &req ); 392 393 if( ptr3 == NULL ) return -1; 394 395 // update level_2 entry 396 hal_remote_spt( XPTR( rt_cxy , &ptr2[ix2] ) , ptr3 ); 397 } 398 399 // register value in level_3 array 400 hal_remote_spt( XPTR( rt_cxy , &ptr3[ix3] ) , value ); 401 402 hal_fence(); 403 404 return 0; 405 406 } // end grdxt_remote_insert() 407 408 //////////////////////////////////////////// 409 void * grdxt_remote_remove( xptr_t rt_xp, 410 uint32_t key ) 411 { 412 // get cluster and local pointer on remote rt descriptor 413 cxy_t rt_cxy = GET_CXY( rt_xp ); 414 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 415 416 // get widths 417 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) ); 418 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) ); 419 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) ); 420 421 // Check key value 422 assert( ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key ); 423 424 // compute indexes 425 uint32_t ix1 = key >> (w2 + w3); // index in level 1 array 426 uint32_t ix2 = (key >> w3) & ((1 << w2) -1); // index in level 2 array 427 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array 428 429 // get ptr1 430 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 431 432 // get ptr2 433 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 434 if( ptr2 == NULL ) return NULL; 435 436 // get ptr3 437 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 438 if( ptr3 == NULL ) return NULL; 439 440 // get value 441 void * value = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) ); 442 443 // reset selected slot 444 hal_remote_spt( XPTR( rt_cxy, &ptr3[ix3] ) , NULL ); 445 hal_fence(); 446 447 return value; 448 449 } // end grdxt_remote_remove() 450 451 //////////////////////////////////////////// 452 xptr_t grdxt_remote_lookup( xptr_t rt_xp, 453 uint32_t key ) 454 { 455 // get cluster and local pointer on remote rt descriptor 456 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 457 cxy_t rt_cxy = GET_CXY( rt_xp ); 458 459 // get widths 460 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) ); 461 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) ); 462 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) ); 463 464 // Check key value 465 assert( ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key ); 466 467 // compute indexes 468 uint32_t ix1 = key >> (w2 + w3); // index in level 1 array 469 uint32_t ix2 = (key >> w3) & ((1 << w2) -1); // index in level 2 array 470 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array 471 472 // get ptr1 473 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 474 475 // get ptr2 476 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 477 if( ptr2 == NULL ) return XPTR_NULL; 478 479 // get ptr3 480 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 481 if( ptr3 == NULL ) return XPTR_NULL; 482 483 // get pointer on registered item 484 void * item_ptr = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) ); 485 486 // return extended pointer on registered item 487 if ( item_ptr == NULL ) return XPTR_NULL; 488 else return XPTR( rt_cxy , item_ptr ); 489 490 } // end grdxt_remote_lookup() 491 492 /////////////////////////i///////////////// 493 void grdxt_remote_display( xptr_t rt_xp, 494 char * name ) 495 { 496 uint32_t ix1; 497 uint32_t ix2; 498 uint32_t ix3; 499 500 // check rt_xp 501 assert( (rt_xp != XPTR_NULL) , "pointer 
on radix tree is NULL\n" ); 502 503 // get cluster and local pointer on remote rt descriptor 504 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 505 cxy_t rt_cxy = GET_CXY( rt_xp ); 506 507 // get widths 508 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) ); 509 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) ); 510 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) ); 511 512 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 513 514 printk("\n***** Generic Radix Tree for <%s>\n", name ); 515 516 for( ix1=0 ; ix1 < (uint32_t)(1<<w1) ; ix1++ ) 517 { 518 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 519 if( ptr2 == NULL ) continue; 520 521 for( ix2=0 ; ix2 < (uint32_t)(1<<w2) ; ix2++ ) 522 { 523 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 524 if( ptr3 == NULL ) continue; 525 526 for( ix3=0 ; ix3 < (uint32_t)(1<<w3) ; ix3++ ) 527 { 528 void * value = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) ); 529 if( value == NULL ) continue; 530 531 uint32_t key = (ix1<<(w2+w3)) + (ix2<<w3) + ix3; 532 printk(" - key = %x / value = %x\n", key , (intptr_t)value ); 533 } 534 } 535 } 536 537 } // end grdxt_remote_display() 538 539 -
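Note on the change above: local and remote insertions split the key the same way, and each missing intermediate array is allocated with a KMEM_KCM order equal to its index width plus 2 (4-byte pointers) or plus 3 (8-byte pointers), so that it holds one pointer per possible index. A worked example with the mapper configuration above (w1 = 6, w2 = 7, w3 = 7):

    // key layout : | ix1 : 6 bits | ix2 : 7 bits | ix3 : 7 bits |
    uint32_t ix1 =  key >> (w2 + w3);                // bits [19:14] : level 1 index
    uint32_t ix2 = (key >> w3) & ((1 << w2) - 1);    // bits [13:7]  : level 2 index
    uint32_t ix3 =  key        & ((1 << w3) - 1);    // bits [6:0]   : level 3 index

    // a level 2 array holds (1 << w2) pointers :
    // with 4-byte pointers its KCM order is w2 + 2 = 9, i.e. a 512-byte block
    req.order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );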
trunk/kernel/libk/grdxt.h
r626 r635 36 36 * Memory for the second and third levels arrays is dynamically allocated by the 37 37 * grdxt_insert() function and is only released by grdxt_destroy(). 38 * - This structure is entirely contained in one single cluster. 39 * - All modifications (insert / remove) must be done by a thread running in local cluster. 40 * - Lookup can be done by a thread running in any cluster (local or remote). 38 * This structure is entirely contained in one single cluster, but to allow any thread 39 * to access it, two sets of access functions are defined: 40 * - local threads can use access function using local pointers. 41 * - remote threads must use the access functions using extended pointers. 41 42 ****************************************************************************************** 42 43 * When it is used by the mapper implementing the file cache: … … 54 55 grdxt_t; 55 56 57 //////////////////////////////////////////////////////////////////////////////////////////// 58 // Local access functions 59 //////////////////////////////////////////////////////////////////////////////////////////// 60 56 61 /******************************************************************************************* 57 62 * This function initialises the radix-tree descriptor, 63 * It must be called by a local thread. 58 64 * and allocates memory for the first level array of pointers. 59 65 ******************************************************************************************* … … 71 77 /******************************************************************************************* 72 78 * This function releases all memory allocated to the radix-tree infrastructure. 73 * The radix-tree is supposed to be empty, but this is NOT checked by this function. 79 * It must be called by a local thread. 80 * A warning message is printed on the kernel TXT0 if the radix tree is not empty. 74 81 ******************************************************************************************* 75 82 * @ rt : pointer on the radix-tree descriptor. … … 79 86 /******************************************************************************************* 80 87 * This function insert a new item in the radix-tree. 88 * It must be called by a local thread. 81 89 * It dynamically allocates memory for new second and third level arrays if required. 82 90 ******************************************************************************************* … … 84 92 * @ key : key value. 85 93 * @ value : pointer on item to be registered in radix-tree. 86 * @ returns 0 if success / returns ENOMEM if no memory, or EINVAL ifillegal key.94 * @ returns 0 if success / returns -1 if no memory, or illegal key. 87 95 ******************************************************************************************/ 88 96 error_t grdxt_insert( grdxt_t * rt, … … 91 99 92 100 /******************************************************************************************* 93 * This function removes an item identified by its key, and returns a pointer 94 * on the removed item. No memory is released. 101 * This function removes an item identified by its key from the radix tree, 102 * It must be called by a local thread. 103 * and returns a pointer on the removed item. No memory is released. 95 104 ******************************************************************************************* 96 105 * @ rt : pointer on the radix-tree descriptor. 
… … 103 112 /******************************************************************************************* 104 113 * This function returns to a local client, a local pointer on the item identified 114 * It must be called by a local thread. 105 115 * by the <key> argument, from the radix tree identified by the <rt> local pointer. 106 116 ******************************************************************************************* … … 113 123 114 124 /******************************************************************************************* 115 * This function returns to a - possibly remote - remote client, an extended pointer116 * on the item identified by the <key> argument, from the radix tree identified by117 * the <rt_xp> remote pointer.118 *******************************************************************************************119 * @ rt_xp : extended pointer on the radix-tree descriptor.120 * @ key : key value.121 * @ returns an extended pointer on found item if success / returns XPTR_NULL if failure.122 ******************************************************************************************/123 xptr_t grdxt_remote_lookup( xptr_t rt_xp,124 uint32_t key );125 126 /*******************************************************************************************127 125 * This function scan all radix-tree entries in increasing key order, starting from 126 * It must be called by a local thread. 128 127 * the value defined by the <key> argument, and return a pointer on the first valid 129 128 * registered item, and the found item key value. … … 138 137 uint32_t * found_key ); 139 138 139 //////////////////////////////////////////////////////////////////////////////////////////// 140 // Remote access functions 141 //////////////////////////////////////////////////////////////////////////////////////////// 142 143 /******************************************************************************************* 144 * This function insert a new item in a - possibly remote - radix tree. 145 * It dynamically allocates memory for new second and third level arrays if required. 146 ******************************************************************************************* 147 * @ rt_xp : extended pointer on the radix-tree descriptor. 148 * @ key : key value. 149 * @ value : pointer on item to be registered in radix-tree. 150 * @ returns 0 if success / returns -1 if no memory, or illegal key. 151 ******************************************************************************************/ 152 error_t grdxt_remote_insert( xptr_t rt_xp, 153 uint32_t key, 154 void * value ); 155 156 /******************************************************************************************* 157 * This function removes an item identified by its key from a - possibly remote - radix 158 * tree, and returns a local pointer on the removed item. No memory is released. 159 ******************************************************************************************* 160 * @ rt_xp : pointer on the radix-tree descriptor. 161 * @ key : key value. 162 * @ returns local pointer on removed item if success / returns NULL if failure. 
163 ******************************************************************************************/ 164 void * grdxt_remote_remove( xptr_t rt_xp, 165 uint32_t key ); 166 167 /******************************************************************************************* 168 * This function returns to a - possibly remote - client, an extended pointer 169 * on the item identified by the <key> argument, from the radix tree identified by 170 * the <rt_xp> remote pointer. 171 ******************************************************************************************* 172 * @ rt_xp : extended pointer on the radix-tree descriptor. 173 * @ key : key value. 174 * @ returns an extended pointer on found item if success / returns XPTR_NULL if failure. 175 ******************************************************************************************/ 176 xptr_t grdxt_remote_lookup( xptr_t rt_xp, 177 uint32_t key ); 178 140 179 /******************************************************************************************* 141 180 * This function displays the current content of a possibly remote radix_tree. … … 144 183 * @ string : radix tree identifier. 145 184 ******************************************************************************************/ 146 void grdxt_display( xptr_t rt_xp, 147 char * string ); 148 185 void grdxt_remote_display( xptr_t rt_xp, 186 char * string ); 149 187 150 188 #endif /* _GRDXT_H_ */ -
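A short usage sketch of the split API declared above: the local functions are used by threads running in the cluster that owns the tree, the remote functions by threads running anywhere. The owner_cxy variable and the 0x25 key are placeholders, and the grdxt_init() call that sets the three index widths is omitted:

    grdxt_t         rt;           // radix tree stored in the local cluster (grdxt_init() omitted)
    static uint32_t dummy;        // stands for any registered item
    error_t         error;

    // local thread running in the owner cluster : insert an item
    error = grdxt_insert( &rt , 0x25 , &dummy );
    if( error ) { /* no memory, or illegal key */ }

    // thread running in any cluster : lookup through an extended pointer
    cxy_t  owner_cxy = local_cxy;                    // placeholder for the tree cluster
    xptr_t rt_xp     = XPTR( owner_cxy , &rt );
    xptr_t item_xp   = grdxt_remote_lookup( rt_xp , 0x25 );
    if( item_xp == XPTR_NULL ) { /* key not registered */ }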
trunk/kernel/libk/list.h
r632 r635 304 304 **************************************************************************/ 305 305 306 #define LIST_REMOTE_FIRST( cxy , root , type , member ) 307 ({ list_entry_t * __first = hal_remote_lpt( XPTR( cxy , &root->next ) );\308 LIST_ELEMENT( __first , type , member ); })306 #define LIST_REMOTE_FIRST( cxy , root , type , member ) \ 307 LIST_ELEMENT( hal_remote_lpt( XPTR( (cxy) , &(root)->next ) ), \ 308 type , member ) 309 309 310 310 /*************************************************************************** … … 314 314 * item(s) from the traversed list. 315 315 *************************************************************************** 316 * @ cxy : remote listcluster identifier316 * @ cxy : remote cluster identifier 317 317 * @ root : pointer on the root list_entry 318 318 * @ iter : pointer on the current list_entry -
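The rewrite of LIST_REMOTE_FIRST above turns the macro into a single expression: it no longer relies on the GCC statement-expression extension or on the __first temporary, and the added parentheses around cxy and root make it safe for composite arguments. A hedged usage sketch, reading the first kcm_page_t of a KCM active list located in another cluster (type and field names taken from the kcm.c diff further down; the caller is assumed to hold the lock protecting the remote list, and both variables are placeholders):

    cxy_t        cxy = local_cxy;     // placeholder for the remote cluster identifier
    kcm_t      * kcm;                 // pointer on a KCM allocator valid in cluster <cxy>
    kcm_page_t * first;               // local pointer on the first registered kcm_page

    first = LIST_REMOTE_FIRST( cxy , &kcm->active_root , kcm_page_t , list );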
trunk/kernel/libk/remote_barrier.c
r632 r635 83 83 pthread_barrierattr_t * attr ) 84 84 { 85 xptr_t gen_barrier_xp; // extended pointer on generic barrier descriptor86 85 generic_barrier_t * gen_barrier_ptr; // local pointer on generic barrier descriptor 87 86 void * barrier; // local pointer on implementation barrier descriptor … … 97 96 98 97 // allocate memory for generic barrier descriptor 99 if( ref_cxy == local_cxy ) // reference cluster is local 100 { 101 req.type = KMEM_GEN_BARRIER; 102 req.flags = AF_ZERO; 103 gen_barrier_ptr = kmem_alloc( &req ); 104 gen_barrier_xp = XPTR( local_cxy , gen_barrier_ptr ); 105 } 106 else // reference cluster is remote 107 { 108 rpc_kcm_alloc_client( ref_cxy, 109 KMEM_GEN_BARRIER, 110 &gen_barrier_xp ); 111 gen_barrier_ptr = GET_PTR( gen_barrier_xp ); 112 } 98 req.type = KMEM_KCM; 99 req.order = bits_log2( sizeof(generic_barrier_t) ); 100 req.flags = AF_ZERO | AF_KERNEL; 101 gen_barrier_ptr = kmem_remote_alloc( ref_cxy , &req ); 113 102 114 103 if( gen_barrier_ptr == NULL ) … … 124 113 barrier = simple_barrier_create( count ); 125 114 126 if( barrier == NULL ) 127 { 128 printk("\n[ERROR] in %s : cannot create simple barrier\n", __FUNCTION__); 129 return -1; 130 } 115 if( barrier == NULL ) return -1; 131 116 } 132 117 else // QDT barrier implementation … … 147 132 barrier = dqt_barrier_create( x_size , y_size , nthreads ); 148 133 149 if( barrier == NULL ) 150 { 151 printk("\n[ERROR] in %s : cannot create DQT barrier descriptor\n", __FUNCTION__); 152 return -1; 153 } 134 if( barrier == NULL ) return -1; 154 135 } 155 136 … … 211 192 212 193 // release memory allocated to barrier descriptor 213 if( gen_barrier_cxy == local_cxy ) 214 { 215 req.type = KMEM_GEN_BARRIER; 216 req.ptr = gen_barrier_ptr; 217 kmem_free( &req ); 218 } 219 else 220 { 221 rpc_kcm_free_client( gen_barrier_cxy, 222 gen_barrier_ptr, 223 KMEM_GEN_BARRIER ); 224 } 194 req.type = KMEM_KCM; 195 req.ptr = gen_barrier_ptr; 196 kmem_remote_free( ref_cxy , &req ); 197 225 198 } // end generic_barrier_destroy() 226 199 … … 273 246 simple_barrier_t * simple_barrier_create( uint32_t count ) 274 247 { 275 xptr_t barrier_xp;248 kmem_req_t req; 276 249 simple_barrier_t * barrier; 277 250 … … 285 258 286 259 // allocate memory for simple barrier descriptor 287 if( ref_cxy == local_cxy ) // reference is local 288 { 289 kmem_req_t req; 290 req.type = KMEM_SMP_BARRIER; 291 req.flags = AF_ZERO; 292 barrier = kmem_alloc( &req ); 293 barrier_xp = XPTR( local_cxy , barrier ); 294 } 295 else // reference is remote 296 { 297 rpc_kcm_alloc_client( ref_cxy, 298 KMEM_SMP_BARRIER, 299 &barrier_xp ); 300 barrier = GET_PTR( barrier_xp ); 301 } 302 303 if( barrier == NULL ) return NULL; 260 req.type = KMEM_KCM; 261 req.order = bits_log2( sizeof(simple_barrier_t) ); 262 req.flags = AF_ZERO | AF_KERNEL; 263 barrier = kmem_remote_alloc( ref_cxy , &req ); 264 265 if( barrier == NULL ) 266 { 267 printk("\n[ERROR] in %s : cannot create simple barrier\n", __FUNCTION__ ); 268 return NULL; 269 } 304 270 305 271 // initialise simple barrier descriptor … … 325 291 void simple_barrier_destroy( xptr_t barrier_xp ) 326 292 { 293 kmem_req_t req; 294 327 295 // get barrier cluster and local pointer 328 296 cxy_t barrier_cxy = GET_CXY( barrier_xp ); … … 330 298 331 299 // release memory allocated for barrier descriptor 332 if( barrier_cxy == local_cxy ) 333 { 334 kmem_req_t req; 335 req.type = KMEM_SMP_BARRIER; 336 req.ptr = barrier_ptr; 337 kmem_free( &req ); 338 } 339 else 340 { 341 rpc_kcm_free_client( barrier_cxy, 342 barrier_ptr, 343 KMEM_SMP_BARRIER ); 344 } 
300 req.type = KMEM_KCM; 301 req.ptr = barrier_ptr; 302 kmem_remote_free( barrier_cxy , &req ); 345 303 346 304 #if DEBUG_BARRIER_DESTROY … … 498 456 499 457 #if DEBUG_BARRIER_CREATE 500 staticvoid dqt_barrier_display( xptr_t barrier_xp );458 void dqt_barrier_display( xptr_t barrier_xp ); 501 459 #endif 502 460 … … 506 464 uint32_t nthreads ) 507 465 { 508 xptr_t dqt_page_xp;509 page_t * rpc_page;510 xptr_t rpc_page_xp;511 466 dqt_barrier_t * barrier; // local pointer on DQT barrier descriptor 512 467 xptr_t barrier_xp; // extended pointer on DQT barrier descriptor 513 468 uint32_t z; // actual DQT size == max(x_size,y_size) 514 469 uint32_t levels; // actual number of DQT levels 515 xptr_t rpc_xp; // extended pointer on RPC descriptors array516 rpc_desc_t * rpc; // pointer on RPC descriptors array517 uint32_t responses; // responses counter for parallel RPCs518 reg_t save_sr; // for critical section519 470 uint32_t x; // X coordinate in QDT mesh 520 471 uint32_t y; // Y coordinate in QDT mesh … … 522 473 kmem_req_t req; // kmem request 523 474 524 // compute size and number of DQT levels475 // compute number of DQT levels, depending on the mesh size 525 476 z = (x_size > y_size) ? x_size : y_size; 526 477 levels = (z < 2) ? 1 : (z < 3) ? 2 : (z < 5) ? 3 : (z < 9) ? 4 : 5; … … 529 480 assert( (z <= 16) , "DQT mesh size larger than (16*16)\n"); 530 481 531 // check RPC descriptor size532 assert( (sizeof(rpc_desc_t) <= 128), "RPC descriptor larger than 128 bytes\n");533 534 482 // check size of an array of 5 DQT nodes 535 483 assert( (sizeof(dqt_node_t) * 5 <= 512 ), "array of DQT nodes larger than 512 bytes\n"); … … 538 486 assert( (sizeof(dqt_barrier_t) <= 0x4000 ), "DQT barrier descriptor larger than 4 pages\n"); 539 487 540 // get pointer on local client process descriptor488 // get pointer on client thread and process descriptors 541 489 thread_t * this = CURRENT_THREAD; 542 490 process_t * process = this->process; … … 553 501 cxy_t ref_cxy = GET_CXY( ref_xp ); 554 502 555 // 1. allocate 4 4 Kbytes pages for DQT barrier descriptor in reference cluster 556 dqt_page_xp = ppm_remote_alloc_pages( ref_cxy , 2 ); 557 558 if( dqt_page_xp == XPTR_NULL ) return NULL; 559 560 // get pointers on DQT barrier descriptor 561 barrier_xp = ppm_page2base( dqt_page_xp ); 562 barrier = GET_PTR( barrier_xp ); 503 // 1. allocate 4 small pages for the DQT barrier descriptor in reference cluster 504 req.type = KMEM_PPM; 505 req.order = 2; // 4 small pages == 16 Kbytes 506 req.flags = AF_ZERO | AF_KERNEL; 507 barrier = kmem_remote_alloc( ref_cxy , &req ); 508 509 if( barrier == NULL ) 510 { 511 printk("\n[ERROR] in %s : cannot create DQT barrier\n", __FUNCTION__ ); 512 return NULL; 513 } 514 515 // get pointers on DQT barrier descriptor in reference cluster 516 barrier_xp = XPTR( ref_cxy , barrier ); 563 517 564 518 // initialize global parameters in DQT barrier descriptor … … 569 523 #if DEBUG_BARRIER_CREATE 570 524 if( cycle > DEBUG_BARRIER_CREATE ) 571 printk("\n[%s] thread[%x,%x] created DQT barrier descriptor at(%x,%x)\n",525 printk("\n[%s] thread[%x,%x] created DQT barrier descriptor(%x,%x)\n", 572 526 __FUNCTION__, process->pid, this->trdid, ref_cxy, barrier ); 573 527 #endif 574 528 575 // 2. 
allocate memory from local cluster for an array of 256 RPCs descriptors 576 // cannot share the RPC descriptor, because the returned argument is not shared 577 req.type = KMEM_PAGE; 578 req.size = 3; // 8 pages == 32 Kbytes 579 req.flags = AF_ZERO; 580 rpc_page = kmem_alloc( &req ); 581 rpc_page_xp = XPTR( local_cxy , rpc_page ); 582 583 // get pointers on RPC descriptors array 584 rpc_xp = ppm_page2base( rpc_page_xp ); 585 rpc = GET_PTR( rpc_xp ); 586 587 #if DEBUG_BARRIER_CREATE 588 if( cycle > DEBUG_BARRIER_CREATE ) 589 printk("\n[%s] thread[%x,%x] created RPC descriptors array at (%x,%s)\n", 590 __FUNCTION__, process->pid, this->trdid, local_cxy, rpc ); 591 #endif 592 593 // 3. send parallel RPCs to all existing clusters covered by the DQT 594 // to allocate memory for an array of 5 DQT nodes in each cluster 529 // 2. allocate memory for an array of 5 DQT nodes 530 // in all existing clusters covered by the DQDT 595 531 // (5 nodes per cluster <= 512 bytes per cluster) 596 597 responses = 0; // initialize RPC responses counter 598 599 // mask IRQs 600 hal_disable_irq( &save_sr); 601 602 // client thread blocks itself 603 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); 604 532 // and complete barrier descriptor initialisation. 605 533 for ( x = 0 ; x < x_size ; x++ ) 606 534 { 607 535 for ( y = 0 ; y < y_size ; y++ ) 608 536 { 609 // send RPC to existing clusters only 537 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 538 xptr_t local_array_xp; // xptr of nodes array in cluster cxy 539 540 // allocate memory in existing clusters only 610 541 if( LOCAL_CLUSTER->cluster_info[x][y] ) 611 542 { 612 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 613 614 // build a specific RPC descriptor for each target cluster 615 rpc[cxy].rsp = &responses; 616 rpc[cxy].blocking = false; 617 rpc[cxy].index = RPC_KCM_ALLOC; 618 rpc[cxy].thread = this; 619 rpc[cxy].lid = this->core->lid; 620 rpc[cxy].args[0] = (uint64_t)KMEM_512_BYTES; 621 622 // atomically increment expected responses counter 623 hal_atomic_add( &responses , 1 ); 624 625 // send a non-blocking RPC to allocate 512 bytes in target cluster 626 rpc_send( cxy , &rpc[cxy] ); 627 } 628 } 629 } 630 631 #if DEBUG_BARRIER_CREATE 632 if( cycle > DEBUG_BARRIER_CREATE ) 633 printk("\n[%s] thread[%x,%x] sent all RPC requests to allocate dqt_nodes array\n", 634 __FUNCTION__, process->pid, this->trdid ); 635 #endif 636 637 // client thread deschedule 638 sched_yield("blocked on parallel rpc_kcm_alloc"); 639 640 // restore IRQs 641 hal_restore_irq( save_sr); 642 643 // 4. 
initialize the node_xp[x][y][l] array in DQT barrier descriptor 644 // the node_xp[x][y][0] value is available in rpc.args[1] 645 646 #if DEBUG_BARRIER_CREATE 647 if( cycle > DEBUG_BARRIER_CREATE ) 648 printk("\n[%s] thread[%x,%x] initialises array of pointers on dqt_nodes\n", 649 __FUNCTION__, process->pid, this->trdid ); 650 #endif 651 652 for ( x = 0 ; x < x_size ; x++ ) 653 { 654 for ( y = 0 ; y < y_size ; y++ ) 655 { 656 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 657 xptr_t array_xp = (xptr_t)rpc[cxy].args[1]; // x_pointer on node array 658 uint32_t offset = sizeof( dqt_node_t ); // size of a DQT node 659 660 // set values into the node_xp[x][y][l] array 661 for ( l = 0 ; l < levels ; l++ ) 662 { 663 xptr_t node_xp = array_xp + (offset * l); 664 hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), node_xp ); 665 666 #if DEBUG_BARRIER_CREATE 543 req.type = KMEM_KCM; 544 req.order = 9; // 512 bytes 545 req.flags = AF_ZERO | AF_KERNEL; 546 547 void * ptr = kmem_remote_alloc( cxy , &req ); 548 549 if( ptr == NULL ) 550 { 551 printk("\n[ERROR] in %s : cannot allocate DQT in cluster %x\n", 552 __FUNCTION__, cxy ); 553 return NULL; 554 } 555 556 // build extended pointer on local node array in cluster cxy 557 local_array_xp = XPTR( cxy , ptr ); 558 559 // initialize the node_xp[x][y][l] array in barrier descriptor 560 for ( l = 0 ; l < levels ; l++ ) 561 { 562 xptr_t node_xp = local_array_xp + ( l * sizeof(dqt_node_t) ); 563 hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), node_xp ); 564 565 #if (DEBUG_BARRIER_CREATE & 1) 667 566 if( cycle > DEBUG_BARRIER_CREATE ) 668 567 printk(" - dqt_node_xp[%d,%d,%d] = (%x,%x) / &dqt_node_xp = %x\n", 669 568 x , y , l , GET_CXY( node_xp ), GET_PTR( node_xp ), &barrier->node_xp[x][y][l] ); 670 569 #endif 570 } 671 571 } 672 } 673 } 674 675 // 5. release memory locally allocated for the RPCs array 676 req.type = KMEM_PAGE; 677 req.ptr = rpc_page; 678 kmem_free( &req ); 572 else // register XPTR_NULL for all non-existing entries 573 { 574 for ( l = 0 ; l < levels ; l++ ) 575 { 576 hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), XPTR_NULL ); 577 } 578 } 579 } // end for y 580 } // end for x 679 581 680 582 #if DEBUG_BARRIER_CREATE 681 583 if( cycle > DEBUG_BARRIER_CREATE ) 682 printk("\n[%s] thread[%x,%x] released memory for RPC descriptors array\n",584 printk("\n[%s] thread[%x,%x] initialized array of pointers in DQT barrier\n", 683 585 __FUNCTION__, process->pid, this->trdid ); 684 586 #endif 685 587 686 // 6. initialise all distributed DQT nodes using remote accesses588 // 3. initialise all distributed DQT nodes using remote accesses 687 589 // and the pointers stored in the node_xp[x][y][l] array 688 590 for ( x = 0 ; x < x_size ; x++ ) … … 827 729 void dqt_barrier_destroy( xptr_t barrier_xp ) 828 730 { 829 page_t * rpc_page;830 xptr_t rpc_page_xp;831 rpc_desc_t * rpc; // local pointer on RPC descriptors array832 xptr_t rpc_xp; // extended pointer on RPC descriptor array833 reg_t save_sr; // for critical section834 731 kmem_req_t req; // kmem request 835 836 thread_t * this = CURRENT_THREAD; 732 uint32_t x; 733 uint32_t y; 734 837 735 838 736 // get DQT barrier descriptor cluster and local pointer … … 841 739 842 740 #if DEBUG_BARRIER_DESTROY 741 thread_t * this = CURRENT_THREAD; 843 742 uint32_t cycle = (uint32_t)hal_get_cycles(); 844 743 if( cycle > DEBUG_BARRIER_DESTROY ) … … 851 750 uint32_t y_size = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->y_size ) ); 852 751 853 // 1. 
allocate memory from local cluster for an array of 256 RPCs descriptors 854 // cannot share the RPC descriptor, because the "buf" argument is not shared 855 req.type = KMEM_PAGE; 856 req.size = 3; // 8 pages == 32 Kbytes 857 req.flags = AF_ZERO; 858 rpc_page = kmem_alloc( &req ); 859 rpc_page_xp = XPTR( local_cxy , rpc_page ); 860 861 // get pointers on RPC descriptors array 862 rpc_xp = ppm_page2base( rpc_page_xp ); 863 rpc = GET_PTR( rpc_xp ); 864 865 // 2. send parallel RPCs to all existing clusters covered by the DQT 866 // to release memory allocated for the arrays of DQT nodes in each cluster 867 868 uint32_t responses = 0; // initialize RPC responses counter 869 870 // mask IRQs 871 hal_disable_irq( &save_sr); 872 873 // client thread blocks itself 874 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); 875 876 uint32_t x , y; 877 878 #if DEBUG_BARRIER_DESTROY 879 if( cycle > DEBUG_BARRIER_DESTROY ) 880 printk("\n[%s] thread[%x,%x] send RPCs to release the distributed dqt_node array\n", 881 __FUNCTION__, this->process->pid, this->trdid ); 882 #endif 883 752 // 1. release memory allocated for the DQT nodes 753 // in all clusters covered by the QDT mesh 884 754 for ( x = 0 ; x < x_size ; x++ ) 885 755 { 886 756 for ( y = 0 ; y < y_size ; y++ ) 887 757 { 888 // send RPC to existing cluster only 758 // compute target cluster identifier 759 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); 760 761 // existing cluster only 889 762 if( LOCAL_CLUSTER->cluster_info[x][y] ) 890 763 { 891 // compute target cluster identifier892 cxy_t cxy = HAL_CXY_FROM_XY( x , y );893 894 764 // get local pointer on dqt_nodes array in target cluster 895 765 xptr_t buf_xp_xp = XPTR( barrier_cxy , &barrier_ptr->node_xp[x][y][0] ); … … 899 769 assert( (cxy == GET_CXY(buf_xp)) , "bad extended pointer on dqt_nodes array\n" ); 900 770 901 // build a specific RPC descriptor 902 rpc[cxy].rsp = &responses; 903 rpc[cxy].blocking = false; 904 rpc[cxy].index = RPC_KCM_FREE; 905 rpc[cxy].thread = this; 906 rpc[cxy].lid = this->core->lid; 907 rpc[cxy].args[0] = (uint64_t)(intptr_t)buf; 908 rpc[cxy].args[1] = (uint64_t)KMEM_512_BYTES; 909 910 // atomically increment expected responses counter 911 hal_atomic_add( &responses , 1 ); 912 771 req.type = KMEM_KCM; 772 req.ptr = buf; 773 kmem_remote_free( cxy , &req ); 774 913 775 #if DEBUG_BARRIER_DESTROY 776 thread_t * this = CURRENT_THREAD; 777 uint32_t cycle = (uint32_t)hal_get_cycles(); 914 778 if( cycle > DEBUG_BARRIER_DESTROY ) 915 printk(" - target cluster(%d,%d) / buffer %x\n", x, y, buf ); 916 #endif 917 // send a non-blocking RPC to release 512 bytes in target cluster 918 rpc_send( cxy , &rpc[cxy] ); 779 printk("\n[%s] thread[%x,%x] released node array %x in cluster %x / cycle %d\n", 780 __FUNCTION__, this->process->pid, this->trdid, buf, cxy, cycle ); 781 #endif 919 782 } 920 783 } 921 784 } 922 785 923 // client thread deschedule 924 sched_yield("blocked on parallel rpc_kcm_free"); 925 926 // restore IRQs 927 hal_restore_irq( save_sr); 928 929 // 3. release memory locally allocated for the RPC descriptors array 930 req.type = KMEM_PAGE; 931 req.ptr = rpc_page; 932 kmem_free( &req ); 933 934 // 4. release memory allocated for barrier descriptor 935 xptr_t page_xp = ppm_base2page( barrier_xp ); 936 cxy_t page_cxy = GET_CXY( page_xp ); 937 page_t * page_ptr = GET_PTR( page_xp ); 938 939 ppm_remote_free_pages( page_cxy , page_ptr ); 786 // 2. 
release memory allocated for barrier descriptor in ref cluster 787 req.type = KMEM_PPM; 788 req.ptr = barrier_ptr; 789 kmem_remote_free( barrier_cxy , &req ); 940 790 941 791 #if DEBUG_BARRIER_DESTROY 942 792 cycle = (uint32_t)hal_get_cycles(); 943 793 if( cycle > DEBUG_BARRIER_DESTROY ) 944 printk("\n[%s] thread[%x,%x] exit for barrier (%x,%x) / cycle %d\n",794 printk("\n[%s] thread[%x,%x] release barrier descriptor (%x,%x) / cycle %d\n", 945 795 __FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle ); 946 796 #endif … … 1022 872 { 1023 873 uint32_t level = hal_remote_l32( XPTR( node_cxy , &node_ptr->level )); 1024 uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity ));1025 uint32_t count = hal_remote_l32( XPTR( node_cxy , &node_ptr->current ));1026 874 xptr_t pa_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->parent_xp )); 1027 875 xptr_t c0_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[0] )); … … 1030 878 xptr_t c3_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[3] )); 1031 879 1032 printk(" . level %d : (%x,%x) / %d on %d /P(%x,%x) / C0(%x,%x)"880 printk(" . level %d : (%x,%x) / P(%x,%x) / C0(%x,%x)" 1033 881 " C1(%x,%x) / C2(%x,%x) / C3(%x,%x)\n", 1034 level, node_cxy, node_ptr, count, arity,882 level, node_cxy, node_ptr, 1035 883 GET_CXY(pa_xp), GET_PTR(pa_xp), 1036 884 GET_CXY(c0_xp), GET_PTR(c0_xp), -
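For reference, dqt_barrier_create() above derives the number of DQT levels from the largest mesh dimension only, and gives each existing cluster one 512-byte KCM block (order 9) holding its array of five dqt_node_t slots. A small sketch of that sizing, under the same assumptions as the code above (mesh at most 16x16; x_size, y_size and cxy are placeholders):

    // number of levels, as computed in dqt_barrier_create()
    uint32_t z      = (x_size > y_size) ? x_size : y_size;
    uint32_t levels = (z < 2) ? 1 : (z < 3) ? 2 : (z < 5) ? 3 : (z < 9) ? 4 : 5;

    // per-cluster storage for the DQT nodes : five dqt_node_t slots,
    // asserted to fit in 512 bytes, hence the KMEM_KCM request of order 9
    kmem_req_t req;
    req.type  = KMEM_KCM;
    req.order = 9;                                   // 512 bytes
    req.flags = AF_ZERO | AF_KERNEL;
    void * nodes = kmem_remote_alloc( cxy , &req );

For a 4x4 mesh, z = 4 and levels = 3, so each existing cluster only fills the node_xp[x][y][0..2] entries of the barrier descriptor.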
trunk/kernel/libk/remote_condvar.c
r581 r635 2 2 * remote_condvar.c - remote kernel condition variable implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 86 86 { 87 87 remote_condvar_t * condvar_ptr; 88 xptr_t condvar_xp;88 kmem_req_t req; 89 89 90 90 // get pointer on local process descriptor … … 98 98 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 99 99 100 // allocate memory for new condvar in reference cluster 101 if( ref_cxy == local_cxy ) // local cluster is the reference 102 { 103 kmem_req_t req; 104 req.type = KMEM_CONDVAR; 105 req.flags = AF_ZERO; 106 condvar_ptr = kmem_alloc( &req ); 107 condvar_xp = XPTR( local_cxy , condvar_ptr ); 108 } 109 else // reference cluster is remote 110 { 111 rpc_kcm_alloc_client( ref_cxy , KMEM_CONDVAR , &condvar_xp ); 112 condvar_ptr = GET_PTR( condvar_xp ); 113 } 114 115 if( condvar_xp == XPTR_NULL ) return 0xFFFFFFFF; 100 req.type = KMEM_KCM; 101 req.order = bits_log2( sizeof(remote_condvar_t) ); 102 req.flags = AF_ZERO | AF_KERNEL; 103 condvar_ptr = kmem_alloc( &req ); 104 105 if( condvar_ptr == NULL ) 106 { 107 printk("\n[ERROR] in %s : cannot create condvar\n", __FUNCTION__ ); 108 return -1; 109 } 116 110 117 111 // initialise condvar … … 136 130 void remote_condvar_destroy( xptr_t condvar_xp ) 137 131 { 132 kmem_req_t req; 133 138 134 // get pointer on local process descriptor 139 135 process_t * process = CURRENT_THREAD->process; … … 166 162 167 163 // release memory allocated for condvar descriptor 168 if( condvar_cxy == local_cxy ) // reference is local 169 { 170 kmem_req_t req; 171 req.type = KMEM_SEM; 172 req.ptr = condvar_ptr; 173 kmem_free( &req ); 174 } 175 else // reference is remote 176 { 177 rpc_kcm_free_client( condvar_cxy , condvar_ptr , KMEM_CONDVAR ); 178 } 164 req.type = KMEM_KCM; 165 req.ptr = condvar_ptr; 166 kmem_remote_free( ref_cxy , &req ); 179 167 180 168 } // end remote_convar_destroy() -
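The replacement pattern is the same in all the synchronisation objects touched by this changeset: the rpc_kcm_alloc_client() / rpc_kcm_free_client() pairs are replaced by direct KMEM_KCM requests on the reference cluster. Note that remote_condvar_create() above still calls the local kmem_alloc(), while remote_condvar_destroy() releases the descriptor with kmem_remote_free( ref_cxy , ... ); the barrier code above and the mutex / semaphore code below use kmem_remote_alloc( ref_cxy , ... ) on both sides, which is the pattern condensed here (shown for remote_condvar_t, ref_cxy being the reference cluster):

    kmem_req_t         req;
    remote_condvar_t * ptr;

    // allocation in the reference cluster
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(remote_condvar_t) );
    req.flags = AF_ZERO | AF_KERNEL;
    ptr = kmem_remote_alloc( ref_cxy , &req );
    if( ptr == NULL ) { /* allocation failure */ }

    // ... object used through hal_remote_* accesses and remote locks ...

    // release in the same cluster
    req.type = KMEM_KCM;
    req.ptr  = ptr;
    kmem_remote_free( ref_cxy , &req );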
trunk/kernel/libk/remote_condvar.h
r581 r635 2 2 * remote_condvar.h: POSIX condition variable definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 78 78 * This function implements the CONDVAR_INIT operation. 79 79 * This function creates and initializes a remote_condvar, identified by its virtual 80 * address <vaddr> in the client process reference cluster, using RPC if required.80 * address <vaddr> in the client process reference cluster, using remote access. 81 81 * It registers this user condvar in the reference process descriptor. 82 82 ******************************************************************************************* -
trunk/kernel/libk/remote_mutex.c
r619 r635 2 2 * remote_mutex.c - POSIX mutex implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 84 84 error_t remote_mutex_create( intptr_t ident ) 85 85 { 86 xptr_t mutex_xp;87 86 remote_mutex_t * mutex_ptr; 87 kmem_req_t req; 88 88 89 89 // get pointer on local process descriptor … … 97 97 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 98 98 99 // allocate memory for mutex descriptor 100 if( ref_cxy == local_cxy ) // local cluster is the reference 101 { 102 kmem_req_t req; 103 req.type = KMEM_MUTEX; 104 req.flags = AF_ZERO; 105 mutex_ptr = kmem_alloc( &req ); 106 mutex_xp = XPTR( local_cxy , mutex_ptr ); 107 } 108 else // reference is remote 109 { 110 rpc_kcm_alloc_client( ref_cxy , KMEM_MUTEX , &mutex_xp ); 111 mutex_ptr = GET_PTR( mutex_xp ); 112 } 113 114 if( mutex_ptr == NULL ) return 0xFFFFFFFF; 99 // allocate memory for mutex descriptor in reference cluster 100 req.type = KMEM_KCM; 101 req.order = bits_log2( sizeof(remote_mutex_t) ); 102 req.flags = AF_ZERO | AF_KERNEL; 103 mutex_ptr = kmem_remote_alloc( ref_cxy , &req ); 104 105 if( mutex_ptr == NULL ) 106 { 107 printk("\n[ERROR] in %s : cannot create mutex\n", __FUNCTION__); 108 return -1; 109 } 115 110 116 111 // initialise mutex … … 150 145 void remote_mutex_destroy( xptr_t mutex_xp ) 151 146 { 147 kmem_req_t req; 148 152 149 // get pointer on local process descriptor 153 150 process_t * process = CURRENT_THREAD->process; … … 174 171 175 172 // release memory allocated for mutex descriptor 176 if( mutex_cxy == local_cxy ) // reference is local 177 { 178 kmem_req_t req; 179 req.type = KMEM_MUTEX; 180 req.ptr = mutex_ptr; 181 kmem_free( &req ); 182 } 183 else // reference is remote 184 { 185 rpc_kcm_free_client( mutex_cxy , mutex_ptr , KMEM_MUTEX ); 186 } 173 req.type = KMEM_KCM; 174 req.ptr = mutex_ptr; 175 kmem_remote_free( mutex_cxy , &req ); 187 176 188 177 } // end remote_mutex_destroy() -
trunk/kernel/libk/remote_sem.c
r563 r635 2 2 * remote_sem.c - POSIX unnamed semaphore implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 86 86 uint32_t value ) 87 87 { 88 kmem_req_t req; 88 89 remote_sem_t * sem_ptr; 89 xptr_t sem_xp;90 90 91 91 // get pointer on local process descriptor … … 100 100 101 101 // allocate memory for new semaphore in reference cluster 102 if( ref_cxy == local_cxy ) // local cluster is the reference 103 { 104 kmem_req_t req; 105 req.type = KMEM_SEM; 106 req.flags = AF_ZERO; 107 sem_ptr = kmem_alloc( &req ); 108 sem_xp = XPTR( local_cxy , sem_ptr ); 102 req.type = KMEM_KCM; 103 req.order = bits_log2( sizeof(remote_sem_t) ); 104 req.flags = AF_ZERO | AF_KERNEL; 105 sem_ptr = kmem_remote_alloc( ref_cxy, &req ); 106 107 if( sem_ptr == NULL ) 108 { 109 printk("\n[ERROR] in %s : cannot create semaphore\n", __FUNCTION__ ); 110 return -1; 109 111 } 110 else // reference is remote111 {112 rpc_kcm_alloc_client( ref_cxy , KMEM_SEM , &sem_xp );113 sem_ptr = GET_PTR( sem_xp );114 }115 116 if( sem_xp == XPTR_NULL ) return 0xFFFFFFFF;117 112 118 113 // initialise semaphore … … 149 144 void remote_sem_destroy( xptr_t sem_xp ) 150 145 { 146 kmem_req_t req; 147 151 148 // get pointer on local process descriptor 152 149 process_t * process = CURRENT_THREAD->process; … … 179 176 180 177 // release memory allocated for semaphore descriptor 181 if( sem_cxy == local_cxy ) // reference is local 182 { 183 kmem_req_t req; 184 req.type = KMEM_SEM; 185 req.ptr = sem_ptr; 186 kmem_free( &req ); 187 } 188 else // reference is remote 189 { 190 rpc_kcm_free_client( sem_cxy , sem_ptr , KMEM_SEM ); 191 } 178 req.type = KMEM_KCM; 179 req.ptr = sem_ptr; 180 kmem_remote_free( sem_cxy , &req ); 192 181 193 182 } // end remote_sem_destroy() -
trunk/kernel/libk/user_dir.c
r633 r635 93 93 uint32_t attr; // attributes for all GPT entries 94 94 uint32_t dirents_per_page; // number of dirent descriptors per page 95 xptr_t page_xp; // extended pointer on page descriptor96 95 page_t * page; // local pointer on page descriptor 97 xptr_t base_xp; // extended pointer on physical page base98 96 struct dirent * base; // local pointer on physical page base 99 97 uint32_t total_dirents; // total number of dirents in dirent array … … 126 124 127 125 // check dirent size 128 assert( ( sizeof(struct dirent) == 64), "sizeof(dirent) !=64\n");126 assert( ( sizeof(struct dirent) == 64), "sizeof(dirent) must be 64\n"); 129 127 130 128 // compute number of dirent per page … … 135 133 136 134 // allocate memory for a local user_dir descriptor 137 req.type = KMEM_DIR; 138 req.flags = AF_ZERO; 135 req.type = KMEM_KCM; 136 req.order = bits_log2( sizeof(user_dir_t) ); 137 req.flags = AF_ZERO | AF_KERNEL; 139 138 dir = kmem_alloc( &req ); 140 139 … … 146 145 } 147 146 148 // Build an initialize the dirent array as a list of p hysical pages.147 // Build an initialize the dirent array as a list of pages. 149 148 // For each iteration in this while loop: 150 149 // - allocate one physical 4 Kbytes (64 dirent slots) … … 163 162 { 164 163 // allocate one physical page 165 req.type = KMEM_P AGE;166 req. size= 0;164 req.type = KMEM_PPM; 165 req.order = 0; 167 166 req.flags = AF_ZERO; 168 page = kmem_alloc( &req );169 170 if( page == NULL )167 base = kmem_alloc( &req ); 168 169 if( base == NULL ) 171 170 { 172 171 printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n", … … 174 173 goto user_dir_create_failure; 175 174 } 176 177 // get pointer on page base (array of dirents)178 page_xp = XPTR( local_cxy , page );179 base_xp = ppm_page2base( page_xp );180 base = GET_PTR( base_xp );181 175 182 176 // call the relevant FS specific function to copy up to 64 dirents in page … … 198 192 total_dirents += entries; 199 193 194 // get page descriptor pointer from base 195 page = GET_PTR( ppm_base2page( XPTR( local_cxy , base ) ) ); 196 200 197 // register page in temporary list 201 198 list_add_last( &root , &page->list ); … … 303 300 304 301 // release the user_dir descriptor 305 req.type = KMEM_ DIR;302 req.type = KMEM_KCM; 306 303 req.ptr = dir; 307 304 kmem_free( &req ); … … 364 361 365 362 // release local user_dir_t structure 366 req.type = KMEM_ DIR;363 req.type = KMEM_KCM; 367 364 req.ptr = dir; 368 365 kmem_free( &req ); … … 372 369 { 373 370 page = LIST_FIRST( &root , page_t , list ); 374 req.type = KMEM_PAGE; 375 req.ptr = page; 371 372 // get base from page descriptor pointer 373 base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) ); 374 375 req.type = KMEM_PPM; 376 req.ptr = base; 376 377 kmem_free( &req ); 377 378 } … … 492 493 // release local user_dir_t structure 493 494 kmem_req_t req; 494 req.type = KMEM_ DIR;495 req.type = KMEM_KCM; 495 496 req.ptr = dir; 496 497 kmem_free( &req ); -
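The dirent array built above is sized from two facts stated in the code: sizeof(struct dirent) is asserted to be 64 bytes, and each KMEM_PPM request of order 0 returns one 4 Kbytes page, so every page holds 64 dirent slots. A small sketch of the resulting arithmetic (min_dirents is a hypothetical target number of entries, not a variable of user_dir.c):

    // number of dirent slots per physical page : 4096 / 64 = 64
    uint32_t dirents_per_page = CONFIG_PPM_PAGE_SIZE / sizeof(struct dirent);

    // number of order-0 pages needed to store <min_dirents> entries
    uint32_t min_dirents = 1000;
    uint32_t npages = ( min_dirents + dirents_per_page - 1 ) / dirents_per_page;   // 16 pages

This also explains the conversions shown above: kmem_alloc() with KMEM_PPM now returns the page base address, and ppm_base2page() / ppm_page2base() are used to go back and forth between this base and the page_t descriptor registered in the temporary list.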
trunk/kernel/libk/user_dir.h
r629 r635 78 78 * This function allocates memory and initializes a user_dir_t structure in the cluster 79 79 * containing the directory inode identified by the <inode> argument and map the 80 * user accessible dirent array in the reference user process V MM, identified by the80 * user accessible dirent array in the reference user process VSL, identified by the 81 81 * <ref_xp> argument. 82 82 * It must be executed by a thread running in the cluster containing the target inode. -
trunk/kernel/libk/xhtab.c
r614 r635 2 2 * xhtab.c - Remote access embedded hash table implementation. 3 3 * 4 * Author Alain Greiner (2016,2017)4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 134 134 uint32_t i; 135 135 136 // initialize readlock136 // initialize lock 137 137 remote_busylock_init( XPTR( local_cxy , &xhtab->lock), LOCK_XHTAB_STATE ); 138 138 … … 153 153 } 154 154 155 for( i=0 ; i < XHASHTAB_SIZE ; i++ ) 156 { 157 xlist_root_init( XPTR( local_cxy , &xhtab->roots[i] ) ); 158 } 159 160 #if DEBUG_XHTAB 161 printk("\n@@@ %s for xhtab (%x,%x)\n" 155 #if DEBUG_XHTAB 156 printk("\n[%s] for xhtab (%x,%x)\n" 162 157 " - index_from_key = %x (@ %x)\n" 163 158 " - item_match_key = %x (@ %x)\n" … … 169 164 #endif 170 165 166 for( i=0 ; i < XHASHTAB_SIZE ; i++ ) 167 { 168 xlist_root_init( XPTR( local_cxy , &xhtab->roots[i] ) ); 169 170 #if (DEBUG_XHTAB & 1) 171 printk("\n - initialize root[%d] / %x\n", i , &xhtab->roots[i] ); 172 #endif 173 174 } 175 171 176 } // end xhtab_init() 172 177 173 ////////////////////////////////////// 174 xptr_t xhtab_scan( xptr_t xhtab_xp, 175 uint32_t index, 176 void * key ) 178 ///////////////////////////////////////////////////////////////////////////////////////////// 179 // This static function traverse the subset identified by the <index> argument 180 // to find an item identified by the <key> argument. 181 ///////////////////////////////////////////////////////////////////////////////////////////// 182 // @ xhtab_xp : extended pointer on the xhtab descriptor. 183 // @ index : subset index. 184 // @ key : searched key value. 185 // @ return extended pointer on the found item if success / return XPTR_NULL if not found. 186 ///////////////////////////////////////////////////////////////////////////////////////////// 187 static xptr_t xhtab_scan( xptr_t xhtab_xp, 188 uint32_t index, 189 void * key ) 177 190 { 178 191 xptr_t xlist_xp; // xlist_entry_t (iterator) … … 220 233 index_from_key_t * index_from_key; // function pointer 221 234 222 #if DEBUG_XHTAB 223 printk("\n[%s] enter / key %s\n", __FUNCTION__, key ); 224 #endif 225 226 // get xhtab cluster and local pointer 227 xhtab_cxy = GET_CXY( xhtab_xp ); 228 xhtab_ptr = GET_PTR( xhtab_xp ); 235 // get xhtab cluster and local pointer 236 xhtab_cxy = GET_CXY( xhtab_xp ); 237 xhtab_ptr = GET_PTR( xhtab_xp ); 238 239 #if DEBUG_XHTAB 240 printk("\n[%s] enter / xhtab (%x,%x) / key = <%s> / cycle %d\n", 241 __FUNCTION__, xhtab_cxy, xhtab_ptr, key, (uint32_t)hal_get_cycles() ); 242 #endif 243 244 // build extended pointer on xhtab lock 245 xptr_t lock_xp = XPTR( xhtab_cxy , &xhtab_ptr->lock ); 229 246 230 247 // get pointer on "index_from_key" function 231 248 index_from_key = (index_from_key_t *)hal_remote_lpt( XPTR( xhtab_cxy , 232 249 &xhtab_ptr->index_from_key ) ); 250 #if DEBUG_XHTAB 251 printk("\n[%s] remote = %x / direct = %x / @ = %x\n", 252 __FUNCTION__, index_from_key, xhtab_ptr->index_from_key, &xhtab_ptr->index_from_key ); 253 #endif 254 233 255 // compute index from key 234 256 index = index_from_key( key ); 235 257 258 #if DEBUG_XHTAB 259 printk("\n[%s] index = %x\n", __FUNCTION__, index ); 260 #endif 261 236 262 // take the lock protecting hash table 237 remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ));238 239 // search a matching item 263 remote_busylock_acquire( lock_xp ); 264 265 // search a matching item in subset 240 266 item_xp = xhtab_scan( xhtab_xp , index , key ); 241 267 242 if( item_xp != XPTR_NULL ) // error if found268 if( item_xp != XPTR_NULL 
) // error if item already registered 243 269 { 244 270 // release the lock protecting hash table 245 remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ));271 remote_busylock_release( lock_xp ); 246 272 247 273 return -1; … … 256 282 257 283 // release the lock protecting hash table 258 remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ));284 remote_busylock_release( lock_xp ); 259 285 260 286 #if DEBUG_XHTAB 261 printk("\n[%s] success / %s\n", __FUNCTION__, key );287 printk("\n[%s] success / <%s>\n", __FUNCTION__, key ); 262 288 #endif 263 289 -
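The insertion path shown above always follows the same three steps: hash the key with the type-specific index_from_key() function, scan the selected subset while holding the busylock, and register the item only if no match was found. The subset functions are registered by xhtab_init() according to the item type; the hash below is a purely illustrative candidate for char* keys and is not one of the registered kernel functions:

    // Hypothetical index_from_key() for string keys : fold the characters
    // into one of the XHASHTAB_SIZE subsets.
    static uint32_t name_index_from_key( void * key )
    {
        char   * name  = (char *)key;
        uint32_t index = 0;
        while( *name ) index += (uint32_t)(*name++);
        return ( index % XHASHTAB_SIZE );
    }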
trunk/kernel/libk/xhtab.h
r614 r635 2 2 * xhtab.h - Remote access embedded hash table definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 38 38 // The main goal is to speedup search by key in a large number of items of same type. 39 39 // For this purpose the set of all registered items is split in several subsets. 40 // Each subset is organised as an embedded double linked xlist s.40 // Each subset is organised as an embedded double linked xlist. 41 41 // - an item is uniquely identified by a <key>, that is a item specific pointer, 42 42 // that can be a - for example - a char* defining the item "name". … … 64 64 65 65 /****************************************************************************************** 66 * This define the four item_type_specific function prototypes that must be defined66 * Here are the four item_type_specific function prototypes that must be defined 67 67 * for each item type. 68 68 *****************************************************************************************/ … … 74 74 75 75 /****************************************************************************************** 76 * This define the supported item types.76 * This define the currently supported item types. 77 77 * - The XHTAB_DENTRY_TYPE is used to implement the set of directory entries for a 78 78 * directory inode : the "children" inode field is an embedded xhtab. -
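The kcm.c rewrite that follows replaces the old per-page bitmap and the free / active / busy page lists by a single 64-bit status word per kcm_page and only two lists (active and full). Bit 0 of the status word is never used because the first (1 << order) bytes of the page hold the kcm_page descriptor itself; bit i set means that block i is allocated, so a 4 Kbytes page with order 6 offers max_blocks = (4096 >> 6) - 1 = 63 usable 64-byte blocks. The two helpers below are a minimal sketch of this bookkeeping, assuming 4 Kbytes pages as in the diff; they are illustrative and not part of the modified sources:

    // base address of block <index> (1 <= index <= max_blocks) in a kcm_page,
    // as computed by kcm_get_block() / kcm_put_block() below
    static inline void * kcm_block_base( kcm_page_t * kcm_page,
                                         uint32_t     order,
                                         uint32_t     index )
    {
        return (void *)( (intptr_t)kcm_page + ( index << order ) );
    }

    // true if block <index> is not allocated in the 64-bit status word
    static inline bool_t kcm_block_is_free( uint64_t status,
                                            uint32_t index )
    {
        return ( ( status & ( ((uint64_t)1) << index ) ) == 0 );
    }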
trunk/kernel/mm/kcm.c
r619 r635 1 1 /* 2 * kcm.c - Per clusterKernel Cache Manager implementation.2 * kcm.c - Kernel Cache Manager implementation. 3 3 * 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019) 4 * Author Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 38 37 39 38 39 ///////////////////////////////////////////////////////////////////////////////////// 40 // Local access functions 41 ///////////////////////////////////////////////////////////////////////////////////// 42 40 43 ////////////////////////////////////////////////////////////////////////////////////// 41 // This static function returns pointer on an allocated block from an active page. 42 // It returns NULL if no block available in selected page. 43 // It changes the page status if required. 44 // This static function must be called by a local thread. 45 // It returns a pointer on a block allocated from a non-full kcm_page. 46 // It makes a panic if no block is available in selected page. 47 // It changes the page status as required. 44 48 ////////////////////////////////////////////////////////////////////////////////////// 45 // @ kcm : pointer on kcm allocator. 46 // @ kcm_page : pointer on active kcm page to use. 47 ///////////////////////////////////////////////////////////////////////////////////// 48 static void * kcm_get_block( kcm_t * kcm, 49 kcm_page_t * kcm_page ) 50 { 51 52 #if DEBUG_KCM 53 thread_t * this = CURRENT_THREAD; 54 uint32_t cycle = (uint32_t)hal_get_cycles(); 49 // @ kcm : pointer on KCM allocator. 50 // @ kcm_page : pointer on a non-full kcm_page. 51 // @ return pointer on allocated block. 52 ///////////////////////////////////////////////////////////////////////////////////// 53 static void * __attribute__((noinline)) kcm_get_block( kcm_t * kcm, 54 kcm_page_t * kcm_page ) 55 { 56 // initialise variables 57 uint32_t size = 1 << kcm->order; 58 uint32_t max = kcm->max_blocks; 59 uint32_t count = kcm_page->count; 60 uint64_t status = kcm_page->status; 61 62 assert( (count < max) , "kcm_page should not be full" ); 63 64 uint32_t index = 1; 65 uint64_t mask = (uint64_t)0x2; 66 uint32_t found = 0; 67 68 // allocate first free block in kcm_page, update status, 69 // and count , compute index of allocated block in kcm_page 70 while( index <= max ) 71 { 72 if( (status & mask) == 0 ) // block non allocated 73 { 74 kcm_page->status = status | mask; 75 kcm_page->count = count + 1; 76 found = 1; 77 78 break; 79 } 80 81 index++; 82 mask <<= 1; 83 } 84 85 // change the page list if almost full 86 if( count == max-1 ) 87 { 88 list_unlink( &kcm_page->list); 89 kcm->active_pages_nr--; 90 91 list_add_first( &kcm->full_root , &kcm_page->list ); 92 kcm->full_pages_nr ++; 93 } 94 95 // compute return pointer 96 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 97 98 #if (DEBUG_KCM & 1) 99 thread_t * this = CURRENT_THREAD; 100 uint32_t cycle = (uint32_t)hal_get_cycles(); 55 101 if( DEBUG_KCM < cycle ) 56 printk("\n[%s] thread[%x,%x] enters for %s / page %x / count %d / active %d\n", 57 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type), 58 (intptr_t)kcm_page , kcm_page->count , kcm_page->active ); 59 #endif 60 61 assert( kcm_page->active , "kcm_page should be active" ); 62 63 // get first block available 64 int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr ); 65 66 assert( (index != -1) , "kcm_page should not be full" ); 67 68 // allocate block 69 bitmap_clear( kcm_page->bitmap , index ); 70 71 // increase 
kcm_page count 72 kcm_page->count ++; 73 74 // change the kcm_page to busy if no more free block in page 75 if( kcm_page->count >= kcm->blocks_nr ) 76 { 77 kcm_page->active = 0; 78 list_unlink( &kcm_page->list); 79 kcm->active_pages_nr --; 80 81 list_add_first( &kcm->busy_root , &kcm_page->list); 82 kcm->busy_pages_nr ++; 83 kcm_page->busy = 1; 84 } 85 86 // compute return pointer 87 void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE 88 + (index * kcm->block_size) ); 89 90 #if DEBUG_KCM 91 cycle = (uint32_t)hal_get_cycles(); 92 if( DEBUG_KCM < cycle ) 93 printk("\n[%s] thread[%x,%x] exit for %s / ptr %x / page %x / count %d\n", 94 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type), 95 (intptr_t)ptr, (intptr_t)kcm_page, kcm_page->count ); 102 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n", 103 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle ); 96 104 #endif 97 105 98 106 return ptr; 99 } 100 101 ///////////////////////////////////////////////////////////////////////////////////// 102 // This static function releases a previously allocated block. 103 // It changes the kcm_page status if required. 104 ///////////////////////////////////////////////////////////////////////////////////// 105 // @ kcm : pointer on kcm allocator. 106 // @ kcm_page : pointer on kcm_page. 107 // @ ptr : pointer on block to be released. 108 ///////////////////////////////////////////////////////////////////////////////////// 109 static void kcm_put_block ( kcm_t * kcm, 110 kcm_page_t * kcm_page, 111 void * ptr ) 112 { 113 uint32_t index; 114 107 108 } // end kcm_get_block() 109 110 ///////////////////////////////////////////////////////////////////////////////////// 111 // This private static function must be called by a local thread. 112 // It releases a previously allocated block to the relevant kcm_page. 113 // It makes a panic if the released block is not allocated in this page. 114 // It changes the kcm_page status as required. 115 ///////////////////////////////////////////////////////////////////////////////////// 116 // @ kcm : pointer on kcm allocator. 117 // @ kcm_page : pointer on kcm_page. 118 // @ block_ptr : pointer on block to be released. 
119 ///////////////////////////////////////////////////////////////////////////////////// 120 static void __attribute__((noinline)) kcm_put_block ( kcm_t * kcm, 121 kcm_page_t * kcm_page, 122 void * block_ptr ) 123 { 124 // initialise variables 125 uint32_t max = kcm->max_blocks; 126 uint32_t size = 1 << kcm->order; 127 uint32_t count = kcm_page->count; 128 uint64_t status = kcm_page->status; 129 115 130 // compute block index from block pointer 116 index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size; 117 118 assert( !bitmap_state( kcm_page->bitmap , index ) , "page already freed" ); 119 120 assert( (kcm_page->count > 0) , "count already zero" ); 121 122 bitmap_set( kcm_page->bitmap , index ); 123 kcm_page->count --; 124 125 // change the page to active if it was busy 126 if( kcm_page->busy ) 127 { 128 kcm_page->busy = 0; 131 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size; 132 133 // compute mask in bit vector 134 uint64_t mask = ((uint64_t)0x1) << index; 135 136 assert( (status & mask) , "released block not allocated : status (%x,%x) / mask(%x,%x)", 137 GET_CXY(status), GET_PTR(status), GET_CXY(mask ), GET_PTR(mask ) ); 138 139 // update status & count in kcm_page 140 kcm_page->status = status & ~mask; 141 kcm_page->count = count - 1; 142 143 // change the page mode if page was full 144 if( count == max ) 145 { 129 146 list_unlink( &kcm_page->list ); 130 kcm-> busy_pages_nr --;147 kcm->full_pages_nr --; 131 148 132 149 list_add_last( &kcm->active_root, &kcm_page->list ); 133 150 kcm->active_pages_nr ++; 134 kcm_page->active = 1; 135 } 136 137 // change the kcm_page to free if last block in active page 138 if( (kcm_page->active) && (kcm_page->count == 0) ) 139 { 140 kcm_page->active = 0; 141 list_unlink( &kcm_page->list); 142 kcm->active_pages_nr --; 143 144 list_add_first( &kcm->free_root , &kcm_page->list); 145 kcm->free_pages_nr ++; 146 } 147 } 148 149 ///////////////////////////////////////////////////////////////////////////////////// 150 // This static function allocates one page from PPM. It initializes 151 // the kcm_page descriptor, and introduces the new kcm_page into freelist. 152 ///////////////////////////////////////////////////////////////////////////////////// 153 static error_t freelist_populate( kcm_t * kcm ) 154 { 155 page_t * page; 156 kcm_page_t * kcm_page; 157 kmem_req_t req; 158 159 // get one page from local PPM 160 req.type = KMEM_PAGE; 161 req.size = 0; 162 req.flags = AF_KERNEL; 163 page = kmem_alloc( &req ); 164 165 if( page == NULL ) 166 { 167 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 168 __FUNCTION__ , local_cxy ); 169 return ENOMEM; 170 } 171 172 // get page base address 173 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 174 kcm_page = (kcm_page_t *)GET_PTR( base_xp ); 175 176 // initialize KCM-page descriptor 177 bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr ); 178 179 kcm_page->busy = 0; 180 kcm_page->active = 0; 181 kcm_page->count = 0; 182 kcm_page->kcm = kcm; 183 kcm_page->page = page; 184 185 // introduce new page in free-list 186 list_add_first( &kcm->free_root , &kcm_page->list ); 187 kcm->free_pages_nr ++; 188 189 return 0; 190 } 191 192 ///////////////////////////////////////////////////////////////////////////////////// 193 // This private function gets one KCM page from the KCM freelist. 194 // It populates the freelist if required. 
195 ///////////////////////////////////////////////////////////////////////////////////// 196 static kcm_page_t * freelist_get( kcm_t * kcm ) 197 { 198 error_t error; 199 kcm_page_t * kcm_page; 200 201 // get a new page from PPM if freelist empty 202 if( kcm->free_pages_nr == 0 ) 203 { 204 error = freelist_populate( kcm ); 205 if( error ) return NULL; 206 } 207 208 // get first KCM page from freelist and unlink it 209 kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list ); 210 list_unlink( &kcm_page->list ); 211 kcm->free_pages_nr --; 151 } 152 153 #if (DEBUG_KCM & 1) 154 thread_t * this = CURRENT_THREAD; 155 uint32_t cycle = (uint32_t)hal_get_cycles(); 156 if( DEBUG_KCM < cycle ) 157 printk("\n[%s] thread[%x,%x] released block %x in page %x / size %d / count %d / cycle %d\n", 158 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle ); 159 #endif 160 161 } // kcm_put_block() 162 163 ///////////////////////////////////////////////////////////////////////////////////// 164 // This private static function must be called by a local thread. 165 // It returns one non-full kcm_page with te following policy : 166 // - if the "active_list" is non empty, it returns the first "active" page, 167 // without modifying the KCM state. 168 // - if the "active_list" is empty, it allocates a new page fromm PPM, inserts 169 // this page in the active_list, and returns it. 170 ///////////////////////////////////////////////////////////////////////////////////// 171 // @ kcm : local pointer on local KCM allocator. 172 // @ return pointer on a non-full kcm page if success / returns NULL if no memory. 173 ///////////////////////////////////////////////////////////////////////////////////// 174 static kcm_page_t * __attribute__((noinline)) kcm_get_page( kcm_t * kcm ) 175 { 176 kcm_page_t * kcm_page; 177 178 uint32_t active_pages_nr = kcm->active_pages_nr; 179 180 if( active_pages_nr > 0 ) // return first active page 181 { 182 kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 183 } 184 else // allocate a new page from PPM 185 { 186 // get one 4 Kbytes page from local PPM 187 page_t * page = ppm_alloc_pages( 0 ); 188 189 if( page == NULL ) 190 { 191 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 192 __FUNCTION__ , local_cxy ); 193 194 return NULL; 195 } 196 197 // get page base address 198 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 199 200 // get local pointer on kcm_page 201 kcm_page = GET_PTR( base_xp ); 202 203 // initialize kcm_page descriptor 204 kcm_page->status = 0; 205 kcm_page->count = 0; 206 kcm_page->kcm = kcm; 207 kcm_page->page = page; 208 209 // introduce new page in KCM active_list 210 list_add_first( &kcm->active_root , &kcm_page->list ); 211 kcm->active_pages_nr ++; 212 } 212 213 213 214 return kcm_page; 214 } 215 216 } // end kcm_get_page() 215 217 216 218 ////////////////////////////// 217 219 void kcm_init( kcm_t * kcm, 218 uint32_t type ) 219 { 220 221 // the kcm_page descriptor must fit in the KCM slot 222 assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) , "KCM slot too small\n" ); 223 224 // the allocated object must fit in one single page 225 assert( (kmem_type_size(type) <= (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE)), 226 "allocated object requires more than one single page\n" ); 220 uint32_t order) 221 { 222 223 assert( ((order > 5) && (order < 12)) , "order must be in [6,11]" ); 227 224 228 225 // initialize lock 229 busylock_init( &kcm->lock , LOCK_KCM_STATE ); 230 231 // initialize KCM 
type 232 kcm->type = type; 226 remote_busylock_init( XPTR( local_cxy , &kcm->lock ) , LOCK_KCM_STATE ); 233 227 234 228 // initialize KCM page lists 235 kcm->free_pages_nr = 0; 236 kcm->busy_pages_nr = 0; 229 kcm->full_pages_nr = 0; 237 230 kcm->active_pages_nr = 0; 238 list_root_init( &kcm->free_root ); 239 list_root_init( &kcm->busy_root ); 231 list_root_init( &kcm->full_root ); 240 232 list_root_init( &kcm->active_root ); 241 233 242 // initialize block size 243 uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE ); 244 kcm->block_size = block_size; 245 246 // initialize number of blocks per page 247 uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size; 248 kcm->blocks_nr = blocks_nr; 249 234 // initialize order and max_blocks 235 kcm->order = order; 236 kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1; 237 250 238 #if DEBUG_KCM 251 239 thread_t * this = CURRENT_THREAD; 252 240 uint32_t cycle = (uint32_t)hal_get_cycles(); 253 241 if( DEBUG_KCM < cycle ) 254 printk("\n[%s] thread[%x,%x] initialised KCM %s : block_size %d / blocks_nr %d\n", 255 __FUNCTION__, this->process->pid, this->trdid, 256 kmem_type_str( kcm->type ), block_size, blocks_nr ); 257 #endif 258 259 } 242 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n", 243 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks ); 244 #endif 245 246 } // end kcm_init() 260 247 261 248 /////////////////////////////// … … 263 250 { 264 251 kcm_page_t * kcm_page; 265 list_entry_t * iter; 252 253 // build extended pointer on KCM lock 254 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 266 255 267 256 // get KCM lock 268 busylock_acquire( &kcm->lock ); 269 270 // release all free pages 271 LIST_FOREACH( &kcm->free_root , iter ) 272 { 273 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list ); 274 list_unlink( iter ); 275 kcm->free_pages_nr --; 257 remote_busylock_acquire( lock_xp ); 258 259 // release all full pages 260 while( list_is_empty( &kcm->full_root ) == false ) 261 { 262 kcm_page = LIST_FIRST( &kcm->full_root , kcm_page_t , list ); 263 list_unlink( &kcm_page->list ); 276 264 ppm_free_pages( kcm_page->page ); 277 265 } 278 266 279 // release all active pages 280 LIST_FOREACH( &kcm->active_root , iter ) 281 { 282 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list ); 283 list_unlink( iter ); 284 kcm->free_pages_nr --; 267 // release all empty pages 268 while( list_is_empty( &kcm->active_root ) == false ) 269 { 270 kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 271 list_unlink( &kcm_page->list ); 285 272 ppm_free_pages( kcm_page->page ); 286 273 } 287 274 288 // release all busy pages289 LIST_FOREACH( &kcm->busy_root , iter )290 {291 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );292 list_unlink( iter );293 kcm->free_pages_nr --;294 ppm_free_pages( kcm_page->page );295 }296 297 275 // release KCM lock 298 busylock_release( &kcm->lock);276 remote_busylock_release( lock_xp ); 299 277 } 300 278 301 /////////////////////////////// 302 void * kcm_alloc( kcm_t * kcm ) 303 { 279 ////////////////////////////////// 280 void * kcm_alloc( uint32_t order ) 281 { 282 kcm_t * kcm_ptr; 304 283 kcm_page_t * kcm_page; 305 void * ptr = NULL; // pointer on block 284 void * block_ptr; 285 286 // min block size is 64 bytes 287 if( order < 6 ) order = 6; 288 289 assert( (order < 12) , "order = %d / must be less than 12" , order ); 290 291 // get local pointer on relevant KCM allocator 292 kcm_ptr = 
&LOCAL_CLUSTER->kcm[order - 6]; 293 294 // build extended pointer on local KCM lock 295 xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock ); 296 297 // get KCM lock 298 remote_busylock_acquire( lock_xp ); 299 300 // get a non-full kcm_page 301 kcm_page = kcm_get_page( kcm_ptr ); 302 303 if( kcm_page == NULL ) 304 { 305 remote_busylock_release( lock_xp ); 306 return NULL; 307 } 308 309 // get a block from selected active page 310 block_ptr = kcm_get_block( kcm_ptr , kcm_page ); 311 312 // release lock 313 remote_busylock_release( lock_xp ); 314 315 #if DEBUG_KCM 316 thread_t * this = CURRENT_THREAD; 317 uint32_t cycle = (uint32_t)hal_get_cycles(); 318 if( DEBUG_KCM < cycle ) 319 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n", 320 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr, 321 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 322 #endif 323 324 return block_ptr; 325 326 } // end kcm_alloc() 327 328 ///////////////////////////////// 329 void kcm_free( void * block_ptr ) 330 { 331 kcm_t * kcm_ptr; 332 kcm_page_t * kcm_page; 333 334 // check argument 335 assert( (block_ptr != NULL) , "block pointer cannot be NULL" ); 336 337 // get local pointer on KCM page 338 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 339 340 // get local pointer on KCM descriptor 341 kcm_ptr = kcm_page->kcm; 342 343 #if DEBUG_KCM 344 thread_t * this = CURRENT_THREAD; 345 uint32_t cycle = (uint32_t)hal_get_cycles(); 346 if( DEBUG_KCM < cycle ) 347 printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n", 348 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr, 349 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 350 #endif 351 352 // build extended pointer on local KCM lock 353 xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock ); 306 354 307 355 // get lock 308 busylock_acquire( &kcm->lock ); 309 310 // get an active page 311 if( list_is_empty( &kcm->active_root ) ) // no active page => get one 312 { 313 // get a page from free list 314 kcm_page = freelist_get( kcm ); 315 316 if( kcm_page == NULL ) 317 { 318 busylock_release( &kcm->lock ); 319 return NULL; 320 } 321 322 // insert page in active list 323 list_add_first( &kcm->active_root , &kcm_page->list ); 324 kcm->active_pages_nr ++; 325 kcm_page->active = 1; 326 } 327 else // get first page from active list 328 { 329 // get page pointer from active list 330 kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 356 remote_busylock_acquire( lock_xp ); 357 358 // release block 359 kcm_put_block( kcm_ptr , kcm_page , block_ptr ); 360 361 // release lock 362 remote_busylock_release( lock_xp ); 363 } 364 365 ///////////////////////////////////////////////////////////////////////////////////// 366 // Remote access functions 367 ///////////////////////////////////////////////////////////////////////////////////// 368 369 ///////////////////////////////////////////////////////////////////////////////////// 370 // This static function can be called by any thread running in any cluster. 371 // It returns a local pointer on a block allocated from an non-full kcm_page. 372 // It makes a panic if no block available in selected page. 373 // It changes the page status as required. 374 ///////////////////////////////////////////////////////////////////////////////////// 375 // @ kcm_cxy : remote KCM cluster identidfier. 
376 // @ kcm_ptr : local pointer on remote KCM allocator. 377 // @ kcm_page : pointer on active kcm page to use. 378 // @ return a local pointer on the allocated block. 379 ///////////////////////////////////////////////////////////////////////////////////// 380 static void * __attribute__((noinline)) kcm_remote_get_block( cxy_t kcm_cxy, 381 kcm_t * kcm_ptr, 382 kcm_page_t * kcm_page ) 383 { 384 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 385 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) ); 386 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 387 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 388 uint32_t size = 1 << order; 389 390 assert( (count < max) , "kcm_page should not be full" ); 391 392 uint32_t index = 1; 393 uint64_t mask = (uint64_t)0x2; 394 uint32_t found = 0; 395 396 // allocate first free block in kcm_page, update status, 397 // and count , compute index of allocated block in kcm_page 398 while( index <= max ) 399 { 400 if( (status & mask) == 0 ) // block non allocated 401 { 402 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask ); 403 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count ) , count + 1 ); 404 found = 1; 405 break; 406 } 407 408 index++; 409 mask <<= 1; 410 } 411 412 // change the page list if almost full 413 if( count == max-1 ) 414 { 415 list_remote_unlink( kcm_cxy , &kcm_page->list ); 416 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , -1 ); 417 418 list_remote_add_first( kcm_cxy , &kcm_ptr->full_root , &kcm_page->list ); 419 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , 1 ); 420 } 421 422 // compute return pointer 423 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 424 425 #if DEBUG_KCM_REMOTE 426 thread_t * this = CURRENT_THREAD; 427 uint32_t cycle = (uint32_t)hal_get_cycles(); 428 if( DEBUG_KCM_REMOTE < cycle ) 429 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n", 430 __FUNCTION__, this->process->pid, this->trdid, 431 ptr, kcm_page, kcm_cxy, size, count + 1 ); 432 #endif 433 434 return ptr; 435 436 } // end kcm_remote_get_block() 437 438 ///////////////////////////////////////////////////////////////////////////////////// 439 // This private static function can be called by any thread running in any cluster. 440 // It releases a previously allocated block to the relevant kcm_page. 441 // It changes the kcm_page status as required. 442 ///////////////////////////////////////////////////////////////////////////////////// 443 // @ kcm_cxy : remote KCM cluster identifier 444 // @ kcm_ptr : local pointer on remote KCM. 445 // @ kcm_page : local pointer on kcm_page. 446 // @ block_ptr : pointer on block to be released. 447 ///////////////