Changeset 440 for trunk/kernel/kern/rpc.c
- Timestamp: May 3, 2018, 5:51:22 PM (5 years ago)
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/kern/rpc.c
r438 r440 114 114 client_core_lid = this->core->lid; 115 115 116 // select a server_core index: 117 // use client core index if possible / core 0 otherwise 116 // select a server_core : use client core index if possible / core 0 otherwise 118 117 if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) ) 119 118 { … … 133 132 134 133 // get local pointer on rpc_fifo in remote cluster, 135 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo ;134 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 136 135 137 136 // post RPC in remote fifo / deschedule and retry if fifo full … … 231 230 core_t * core = this->core; 232 231 scheduler_t * sched = &core->scheduler; 233 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo ;232 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid]; 234 233 235 234 #if DEBUG_RPC_SERVER_GENERIC … … 243 242 hal_disable_irq( &sr_save ); 244 243 245 // activate (or create) RPC thread if RPC FIFO not empty 244 // activate (or create) RPC thread if RPC FIFO not empty and no acive RPC thread 246 245 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 247 246 { … … 254 253 #endif 255 254 256 // search one IDLE RPC thread 255 // search one IDLE RPC thread associated to the selected core 257 256 list_entry_t * iter; 258 257 LIST_FOREACH( &sched->k_root , iter ) … … 270 269 } 271 270 272 // create new RPC thread if not found271 // create new RPC thread for the selected core if not found 273 272 if( found == false ) 274 273 { … … 277 276 &rpc_thread_func, 278 277 NULL, 279 this->core->lid ); 280 if( error ) 281 { 282 assert( false , __FUNCTION__ , 283 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 284 } 278 core->lid ); 279 280 assert( (error == 0), __FUNCTION__ , 281 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 285 282 286 283 // unblock created RPC thread 287 284 thread->blocked = 0; 288 285 289 // update core descriptorcounter290 
hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );286 // update RRPC threads counter 287 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 ); 291 288 292 289 #if DEBUG_RPC_SERVER_GENERIC … … 325 322 void rpc_thread_func() 326 323 { 327 uint32_t count; // handled RPC requests counter 328 error_t empty; // local RPC fifo state 329 xptr_t desc_xp; // extended pointer on RPC request 330 cxy_t desc_cxy; // RPC request cluster (client) 331 rpc_desc_t * desc_ptr; // RPC request local pointer 332 uint32_t index; // RPC request index 333 thread_t * thread_ptr; // local pointer on client thread 334 lid_t core_lid; // local index of client core 335 bool_t blocking; // blocking RPC when true 324 error_t empty; // local RPC fifo state 325 xptr_t desc_xp; // extended pointer on RPC request 326 cxy_t desc_cxy; // RPC request cluster (client) 327 rpc_desc_t * desc_ptr; // RPC request local pointer 328 uint32_t index; // RPC request index 329 thread_t * client_ptr; // local pointer on client thread 330 thread_t * server_ptr; // local pointer on server thread 331 xptr_t server_xp; // extended pointer on server thread 332 lid_t client_core_lid; // local index of client core 333 lid_t server_core_lid; // local index of server core 334 bool_t blocking; // blocking RPC when true 335 remote_fifo_t * rpc_fifo; // local pointer on RPC fifo 336 336 337 337 // makes RPC thread not preemptable 338 338 hal_disable_irq( NULL ); 339 339 340 thread_t * this = CURRENT_THREAD; 341 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 340 server_ptr = CURRENT_THREAD; 341 server_xp = XPTR( local_cxy , server_ptr ); 342 server_core_lid = server_ptr->core->lid; 343 rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 342 344 343 345 // two embedded loops: 344 346 // - external loop : "infinite" RPC thread 345 // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests347 // - internal loop : handle one RPC request per iteration 346 348 347 349 while(1) // infinite loop 348 350 { 349 
351 // try to take RPC_FIFO ownership 350 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )352 if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 351 353 { 352 354 … … 355 357 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 356 358 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n", 357 __FUNCTION__, this, local_cxy, cycle ); 358 #endif 359 // initializes RPC requests counter 360 count = 0; 361 362 // exit internal loop in three cases: 363 // - RPC fifo is empty 364 // - ownership has been lost (because descheduling) 365 // - max number of RPCs is reached 366 while( 1 ) // internal loop 359 __FUNCTION__, server_ptr, local_cxy, cycle ); 360 #endif 361 while( 1 ) // one RPC request per iteration 367 362 { 368 363 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 369 364 370 if ( empty == 0 ) // one RPC request found 365 // exit when FIFO empty or FIFO ownership lost (in case of descheduling) 366 if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) ) 371 367 { 372 368 // get client cluster and pointer on RPC descriptor … … 381 377 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 382 378 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n", 383 __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );379 __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr ); 384 380 #endif 385 381 // call the relevant server function … … 390 386 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 391 387 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n", 392 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle ); 393 #endif 394 // increment handled RPCs counter 395 count++; 396 388 __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle ); 389 #endif 397 390 // decrement response counter in RPC descriptor if blocking 398 391 if( blocking ) … … 402 395 403 396 // get client thread pointer and client core lid from RPC descriptor 404 
thread_ptr= hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );405 c ore_lid= hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );397 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); 398 client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) ); 406 399 407 400 // unblock client thread 408 thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );401 thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC ); 409 402 410 403 hal_fence(); … … 414 407 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 415 408 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n", 416 __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );409 __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle ); 417 410 #endif 418 411 // send IPI to client core 419 dev_pic_send_ipi( desc_cxy , c ore_lid );412 dev_pic_send_ipi( desc_cxy , client_core_lid ); 420 413 } 421 414 } 422 423 // chek exit condition 424 if( local_fifo_is_empty( rpc_fifo ) || 425 (rpc_fifo->owner != this->trdid) || 426 (count >= CONFIG_RPC_PENDING_MAX) ) break; 415 else 416 { 417 break; 418 } 427 419 } // end internal loop 428 420 429 421 // release rpc_fifo ownership if not lost 430 if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;422 if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0; 431 423 432 424 } // end if RPC fifo 433 425 434 // sucide if too many RPC threads in cluster 435 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) 426 // RPC thread blocks on IDLE 427 thread_block( server_xp , THREAD_BLOCKED_IDLE ); 428 429 // sucide if too many RPC threads / simply deschedule otherwise 430 if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX ) 436 431 { 437 432 … … 440 435 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 441 436 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n", 442 __FUNCTION__, this, local_cxy, cycle );437 __FUNCTION__, server_ptr, local_cxy, cycle 
); 443 438 #endif 444 439 // update RPC threads counter 445 440 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 ); 446 441 447 // suicide 448 thread_kill( XPTR( local_cxy , this ), 449 true, // is_exit 450 true ); // is forced 442 // RPC thread blocks on GLOBAL 443 thread_block( server_xp , THREAD_BLOCKED_GLOBAL ); 444 445 // RPC thread set the REQ_DELETE flag to suicide 446 hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE ); 451 447 } 448 else 449 { 452 450 453 451 #if DEBUG_RPC_SERVER_GENERIC 454 452 uint32_t cycle = (uint32_t)hal_get_cycles(); 455 453 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 456 printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n", 457 __FUNCTION__, this, local_cxy, cycle ); 458 #endif 459 460 // Block and deschedule 461 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE ); 462 sched_yield("RPC fifo empty or too much work"); 463 464 #if DEBUG_RPC_SERVER_GENERIC 465 cycle = (uint32_t)hal_get_cycles(); 466 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 467 printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n", 468 __FUNCTION__, this, local_cxy, cycle ); 469 #endif 454 printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n", 455 __FUNCTION__, server_ptr, local_cxy, cycle ); 456 #endif 457 458 // RPC thread deschedules 459 assert( thread_can_yield( server_ptr ) , __FUNCTION__, "illegal sched_yield\n" ); 460 sched_yield("RPC fifo empty"); 461 } 470 462 471 463 } // end infinite loop … … 646 638 647 639 // set input arguments in RPC descriptor 648 rpc.args[0] = (uint64_t) (intptr_t)ref_process_xp;649 rpc.args[1] = (uint64_t) (intptr_t)parent_thread_xp;640 rpc.args[0] = (uint64_t)ref_process_xp; 641 rpc.args[1] = (uint64_t)parent_thread_xp; 650 642 651 643 // register RPC request in remote RPC fifo … … 903 895 void rpc_process_sigaction_server( xptr_t xp ) 904 896 { 905 pid_t pid; // target process identifier 906 process_t * process; // pointer on local target process descriptor 
907 uint32_t action; // sigaction index 908 thread_t * client_thread; // pointer on client thread in client cluster 909 cxy_t client_cxy; // client cluster identifier 910 rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster 911 xptr_t count_xp; // extended pointer on response counter 912 lid_t client_lid; // client core local index 897 pid_t pid; // target process identifier 898 process_t * process; // pointer on local target process descriptor 899 uint32_t action; // sigaction index 900 thread_t * client_ptr; // pointer on client thread in client cluster 901 xptr_t client_xp; // extended pointer client thread 902 cxy_t client_cxy; // client cluster identifier 903 rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster 904 xptr_t count_xp; // extended pointer on responses counter 905 uint32_t count_value; // responses counter value 906 lid_t client_lid; // client core local index 913 907 914 908 // get client cluster identifier and pointer on RPC descriptor … … 927 921 #endif 928 922 923 // get client thread pointers 924 client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) ); 925 client_xp = XPTR( client_cxy , client_ptr ); 926 929 927 // get local process descriptor 930 928 process = cluster_get_local_process_from_pid( pid ); 931 929 932 930 // call relevant kernel function 933 if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process );934 else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process );931 if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp ); 932 else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp ); 935 933 else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 936 934 … … 939 937 940 938 // decrement the responses counter in RPC descriptor, 939 count_value = hal_remote_atomic_add( count_xp , -1 ); 940 941 941 // unblock the client thread only if it is the last response. 
942 if( hal_remote_atomic_add( count_xp , -1 ) == 1 )942 if( count_value == 1 ) 943 943 { 944 // get client thread pointer and client core lid 945 client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) ); 944 // get client core lid 946 945 client_lid = (lid_t) hal_remote_lw ( XPTR( client_cxy , &rpc->lid ) ); 947 946 948 thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC ); 947 // unblock client thread 948 thread_unblock( client_xp , THREAD_BLOCKED_RPC ); 949 950 // send an IPI to client core 949 951 dev_pic_send_ipi( client_cxy , client_lid ); 950 952 } … … 1192 1194 vfs_dentry_t * dentry ) 1193 1195 { 1196 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1197 uint32_t cycle = (uint32_t)hal_get_cycles(); 1198 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1199 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1200 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1201 #endif 1202 1194 1203 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1195 1204 … … 1206 1215 rpc_send( cxy , &rpc ); 1207 1216 1217 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1218 cycle = (uint32_t)hal_get_cycles(); 1219 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1220 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1221 __FUNCTION__ , CURRENT_THREAD , cycle ); 1222 #endif 1208 1223 } 1209 1224 … … 1211 1226 void rpc_vfs_dentry_destroy_server( xptr_t xp ) 1212 1227 { 1228 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1229 uint32_t cycle = (uint32_t)hal_get_cycles(); 1230 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1231 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1232 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1233 #endif 1234 1213 1235 vfs_dentry_t * dentry; 1214 1236 … … 1223 1245 vfs_dentry_destroy( dentry ); 1224 1246 1247 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1248 cycle = (uint32_t)hal_get_cycles(); 1249 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1250 
printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1251 __FUNCTION__ , CURRENT_THREAD , cycle ); 1252 #endif 1225 1253 } 1226 1254 … … 1319 1347 vfs_file_t * file ) 1320 1348 { 1349 #if DEBUG_RPC_VFS_FILE_DESTROY 1350 uint32_t cycle = (uint32_t)hal_get_cycles(); 1351 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1352 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1353 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1354 #endif 1355 1321 1356 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1322 1357 … … 1333 1368 rpc_send( cxy , &rpc ); 1334 1369 1370 #if DEBUG_RPC_VFS_FILE_DESTROY 1371 cycle = (uint32_t)hal_get_cycles(); 1372 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1373 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1374 __FUNCTION__ , CURRENT_THREAD , cycle ); 1375 #endif 1335 1376 } 1336 1377 … … 1338 1379 void rpc_vfs_file_destroy_server( xptr_t xp ) 1339 1380 { 1381 #if DEBUG_RPC_VFS_FILE_DESTROY 1382 uint32_t cycle = (uint32_t)hal_get_cycles(); 1383 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1384 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1385 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1386 #endif 1387 1340 1388 vfs_file_t * file; 1341 1389 … … 1350 1398 vfs_file_destroy( file ); 1351 1399 1400 #if DEBUG_RPC_VFS_FILE_DESTROY 1401 cycle = (uint32_t)hal_get_cycles(); 1402 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1403 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1404 __FUNCTION__ , CURRENT_THREAD , cycle ); 1405 #endif 1352 1406 } 1353 1407 … … 1536 1590 error_t * error ) // out 1537 1591 { 1592 #if DEBUG_RPC_VMM_GET_VSEG 1593 uint32_t cycle = (uint32_t)hal_get_cycles(); 1594 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1595 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1596 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1597 #endif 1598 1538 1599 assert( (cxy != 
local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1539 1600 … … 1555 1616 *error = (error_t)rpc.args[3]; 1556 1617 1618 #if DEBUG_RPC_VMM_GET_VSEG 1619 cycle = (uint32_t)hal_get_cycles(); 1620 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1621 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1622 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1623 #endif 1557 1624 } 1558 1625 … … 1560 1627 void rpc_vmm_get_vseg_server( xptr_t xp ) 1561 1628 { 1629 #if DEBUG_RPC_VMM_GET_VSEG 1630 uint32_t cycle = (uint32_t)hal_get_cycles(); 1631 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1632 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1633 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1634 #endif 1635 1562 1636 process_t * process; 1563 1637 intptr_t vaddr; … … 1582 1656 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1583 1657 1584 } 1585 1586 1587 ///////////////////////////////////////////////////////////////////////////////////////// 1588 // [21] Marshaling functions attached to RPC_VMM_GET_PTE (blocking) 1658 #if DEBUG_RPC_VMM_GET_VSEG 1659 cycle = (uint32_t)hal_get_cycles(); 1660 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1661 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1662 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1663 #endif 1664 } 1665 1666 1667 ///////////////////////////////////////////////////////////////////////////////////////// 1668 // [21] Marshaling functions attached to RPC_VMM_GET_VSEG (blocking) 1589 1669 ///////////////////////////////////////////////////////////////////////////////////////// 1590 1670 … … 1598 1678 error_t * error ) // out 1599 1679 { 1600 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1601 1602 // initialise RPC descriptor header 1603 rpc_desc_t rpc; 1604 rpc.index = RPC_VMM_GET_PTE; 1680 #if DEBUG_RPC_VMM_GET_PTE 1681 uint32_t 
cycle = (uint32_t)hal_get_cycles(); 1682 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1683 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1684 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1685 #endif 1686 1687 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1688 1689 // initialise RPC descriptor header 1690 rpc_desc_t rpc; 1691 rpc.index = RPC_VMM_GET_VSEG; 1605 1692 rpc.blocking = true; 1606 1693 rpc.responses = 1; … … 1619 1706 *error = (error_t)rpc.args[5]; 1620 1707 1708 #if DEBUG_RPC_VMM_GET_PTE 1709 cycle = (uint32_t)hal_get_cycles(); 1710 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1711 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1712 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1713 #endif 1621 1714 } 1622 1715 … … 1624 1717 void rpc_vmm_get_pte_server( xptr_t xp ) 1625 1718 { 1719 #if DEBUG_RPC_VMM_GET_PTE 1720 uint32_t cycle = (uint32_t)hal_get_cycles(); 1721 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1722 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1723 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1724 #endif 1725 1626 1726 process_t * process; 1627 1727 vpn_t vpn; … … 1648 1748 hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 1649 1749 1750 #if DEBUG_RPC_VMM_GET_PTE 1751 cycle = (uint32_t)hal_get_cycles(); 1752 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1753 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1754 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1755 #endif 1650 1756 } 1651 1757
Note: See TracChangeset for help on using the changeset viewer.