/*
 * rpc.c - RPC operations implementation.
 *
 * Author  Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/////////////////////////////////////////////////////////////////////////////////////////
// Array of function pointers and array of printable strings.
// These arrays must be kept consistent with enum in rpc.h file.
/////////////////////////////////////////////////////////////////////////////////////////

rpc_server_t * rpc_server[RPC_MAX_INDEX] =
{
    &rpc_pmem_get_pages_server,             // 0
    &rpc_pmem_release_pages_server,         // 1
    &rpc_undefined,                         // 2    unused slot
    &rpc_process_make_fork_server,          // 3
    &rpc_undefined,                         // 4    unused slot
    &rpc_undefined,                         // 5    unused slot
    &rpc_thread_user_create_server,         // 6
    &rpc_thread_kernel_create_server,       // 7
    &rpc_undefined,                         // 8    unused slot
    &rpc_process_sigaction_server,          // 9
    &rpc_vfs_inode_create_server,           // 10
    &rpc_vfs_inode_destroy_server,          // 11
    &rpc_vfs_dentry_create_server,          // 12
    &rpc_vfs_dentry_destroy_server,         // 13
    &rpc_vfs_file_create_server,            // 14
    &rpc_vfs_file_destroy_server,           // 15
    &rpc_vfs_fs_child_init_server,          // 16
    &rpc_vfs_fs_add_dentry_server,          // 17
    &rpc_vfs_fs_remove_dentry_server,       // 18
    &rpc_vfs_inode_load_all_pages_server,   // 19
    &rpc_vmm_get_vseg_server,               // 20
    &rpc_vmm_global_update_pte_server,      // 21
    &rpc_kcm_alloc_server,                  // 22
    &rpc_kcm_free_server,                   // 23
    &rpc_undefined,                         // 24   unused slot
    &rpc_mapper_handle_miss_server,         // 25
    &rpc_undefined,                         // 26   unused slot
    &rpc_vmm_create_vseg_server,            // 27
    &rpc_vmm_set_cow_server,                // 28
    &rpc_vmm_display_server,                // 29
};

char * rpc_str[RPC_MAX_INDEX] =
{
    "PMEM_GET_PAGES",            // 0
    "PMEM_RELEASE_PAGES",        // 1
    "undefined",                 // 2
    "PROCESS_MAKE_FORK",         // 3
    "undefined",                 // 4
    "undefined",                 // 5
    "THREAD_USER_CREATE",        // 6
    "THREAD_KERNEL_CREATE",      // 7
    "undefined",                 // 8
    "PROCESS_SIGACTION",         // 9
    "VFS_INODE_CREATE",          // 10
    "VFS_INODE_DESTROY",         // 11
    "VFS_DENTRY_CREATE",         // 12
    "VFS_DENTRY_DESTROY",        // 13
    "VFS_FILE_CREATE",           // 14
    "VFS_FILE_DESTROY",          // 15
    "VFS_FS_CHILD_INIT",         // 16
    "VFS_FS_ADD_DENTRY",         // 17
    "VFS_FS_REMOVE_DENTRY",      // 18
    "VFS_INODE_LOAD_ALL_PAGES",  // 19
    "GET_VSEG",                  // 20
    "GLOBAL_UPDATE_PTE",         // 21
    "KCM_ALLOC",                 // 22
    "KCM_FREE",                  // 23
    "undefined",                 // 24
    "MAPPER_HANDLE_MISS",        // 25
    "undefined",                 // 26
    "VMM_CREATE_VSEG",           // 27
    "VMM_SET_COW",               // 28
    "VMM_DISPLAY",               // 29
};

//////////////////////////////////////////////////////////////////////////////////
void __attribute__((noinline)) rpc_undefined( xptr_t xp __attribute__ ((unused)) )
{
    assert( false , "called in cluster %x", local_cxy );
}
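/*
 * Note on the two arrays above: they are indexed by the RPC index enum defined
 * in rpc.h, so the three definitions must always be updated together. As a
 * hedged, purely illustrative sketch (RPC_FOO and rpc_foo_server are
 * hypothetical names, not part of ALMOS-MKH), reusing an unused slot such as
 * index 24 would require three coordinated edits:
 *
 *   1. in rpc.h : add the RPC_FOO value (= 24) to the RPC index enum,
 *                 and declare  void rpc_foo_server( xptr_t xp );
 *   2. in rpc.c : replace  &rpc_undefined,  by  &rpc_foo_server,  // 24
 *   3. in rpc.c : replace  "undefined",     by  "FOO",            // 24
 *
 * Every server function must follow the  void (*)( xptr_t )  prototype expected
 * by the dispatch performed in rpc_thread_func() below.
 */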
/***************************************************************************************/
/************ Generic function supporting RPCs : client side ***************************/
/***************************************************************************************/

///////////////////////////////////////
void rpc_send( cxy_t        server_cxy,
               rpc_desc_t * rpc )
{
    lid_t              server_core_lid;
    lid_t              client_core_lid;
    volatile error_t   full;
    thread_t         * this;

    full            = 0;
    this            = CURRENT_THREAD;
    client_core_lid = this->core->lid;

    // check calling thread can yield when client thread is not the IDLE thread
    // RPCs executed by the IDLE thread during kernel_init do not deschedule
    if( this->type != THREAD_IDLE ) thread_assert_can_yield( this , __FUNCTION__ );

    // select a server_core : use client core index if possible / core 0 otherwise
    if( client_core_lid < hal_remote_l32( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) )
    {
        server_core_lid = client_core_lid;
    }
    else
    {
        server_core_lid = 0;
    }

    // register client_thread and client_core in RPC descriptor
    rpc->thread = this;
    rpc->lid    = client_core_lid;

    // build extended pointer on the RPC descriptor
    xptr_t desc_xp = XPTR( local_cxy , rpc );

    // get local pointer on rpc_fifo in remote cluster
    remote_fifo_t * rpc_fifo    = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    xptr_t          rpc_fifo_xp = XPTR( server_cxy , rpc_fifo );

    // post RPC in remote fifo / deschedule without blocking if fifo full
    do
    {
        full = remote_fifo_put_item( rpc_fifo_xp , (uint64_t )desc_xp );

        if ( full )
        {
            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
            __FUNCTION__ , local_cxy , server_cxy );

            // deschedule without blocking
            sched_yield("RPC fifo full");
        }
    }
    while( full );

    hal_fence();

#if DEBUG_RPC_CLIENT_GENERIC
uint32_t cycle = (uint32_t)hal_get_cycles();
uint32_t items = remote_fifo_items( rpc_fifo_xp );
if( DEBUG_RPC_CLIENT_GENERIC < cycle )
printk("\n[%s] thread[%x,%x] / rpc %s / server[%x,%d] / items %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rpc_str[rpc->index],
server_cxy, server_core_lid, items, cycle );
#endif

    // send IPI to the selected server core
    dev_pic_send_ipi( server_cxy , server_core_lid );

    // wait for RPC completion before returning if blocking RPC :
    // - deschedule without blocking if the client is the IDLE thread (kernel_init)
    // - block and deschedule policy for any other thread
    if ( rpc->blocking )
    {
        if( this->type == THREAD_IDLE )   // deschedule without blocking policy
        {

#if DEBUG_RPC_CLIENT_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_CLIENT_GENERIC < cycle )
printk("\n[%s] thread[%x,%x] enter waiting loop for rpc %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rpc_str[rpc->index], cycle );
#endif

            while( rpc->responses ) sched_yield( "busy waiting on RPC");

#if DEBUG_RPC_CLIENT_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_CLIENT_GENERIC < cycle )
printk("\n[%s] thread[%x,%x] received response for rpc %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rpc_str[rpc->index], cycle );
#endif

        }
        else                              // block and deschedule policy
        {

#if DEBUG_RPC_CLIENT_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_CLIENT_GENERIC < cycle )
printk("\n[%s] thread[%x,%x] blocks & deschedules for rpc %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rpc_str[rpc->index], cycle );
#endif

            // block client thread
            thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

            // deschedule
            sched_yield("blocked on RPC");

#if DEBUG_RPC_CLIENT_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_CLIENT_GENERIC < cycle )
printk("\n[%s] thread[%x,%x] resumes for rpc %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rpc_str[rpc->index], cycle );
#endif

        }

        // response must be available for a blocking RPC
        assert( (rpc->responses == 0) , "illegal response for RPC %s\n",
        rpc_str[rpc->index] );
    }
    else    // non blocking RPC
    {

#if DEBUG_RPC_CLIENT_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_CLIENT_GENERIC < cycle )
printk("\n[%s] thread[%x,%x] returns for non blocking rpc %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rpc_str[rpc->index], cycle );
#endif

    }
}  // end rpc_send()
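/*
 * Hedged usage sketch, not part of the original file: the non-blocking path of
 * rpc_send() is meant for "multicast" requests such as RPC_PROCESS_SIGACTION,
 * where one shared descriptor is posted to several clusters and the <responses>
 * counter is decremented by each server. The helper below only illustrates that
 * protocol; broadcast_sigaction(), target_cxy[] and nb_targets are hypothetical
 * names, and the client blocks itself *before* posting so that the last
 * server's thread_unblock() cannot be lost.
 */
#if 0   // illustration only
static void broadcast_sigaction( cxy_t    * target_cxy,
                                 uint32_t   nb_targets,
                                 uint32_t   action,
                                 pid_t      pid )
{
    uint32_t   i;
    rpc_desc_t rpc;                      // one shared descriptor for all targets

    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.blocking  = false;               // rpc_send() returns without waiting
    rpc.responses = nb_targets;          // decremented once per server cluster
    rpc.args[0]   = (uint64_t)action;
    rpc.args[1]   = (uint64_t)pid;

    // block this thread before posting, to avoid missing the last unblock
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_RPC );

    // post the same descriptor to all target clusters
    for( i = 0 ; i < nb_targets ; i++ )
    {
        rpc_process_sigaction_client( target_cxy[i] , &rpc );
    }

    // deschedule : the last responding server unblocks this thread
    sched_yield( "waiting multicast RPC responses" );
}
#endif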
/***************************************************************************************/
/************ Generic functions supporting RPCs : server side **************************/
/***************************************************************************************/

////////////////////////////
void rpc_thread_func( void )
{
    error_t         empty;              // local RPC fifo state
    xptr_t          desc_xp;            // extended pointer on RPC request
    cxy_t           desc_cxy;           // RPC request cluster (client)
    rpc_desc_t    * desc_ptr;           // RPC request local pointer
    uint32_t        index;              // RPC request index
    thread_t      * client_ptr;         // local pointer on client thread
    thread_t      * server_ptr;         // local pointer on server thread
    xptr_t          server_xp;          // extended pointer on server thread
    lid_t           client_core_lid;    // local index of client core
    lid_t           server_core_lid;    // local index of server core
    bool_t          blocking;           // blocking RPC when true
    remote_fifo_t * rpc_fifo;           // local pointer on RPC fifo

    // makes the RPC thread non preemptible
    hal_disable_irq( NULL );

    server_ptr      = CURRENT_THREAD;
    server_xp       = XPTR( local_cxy , server_ptr );
    server_core_lid = server_ptr->core->lid;
    rpc_fifo        = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];

    // "infinite" RPC thread loop
    while(1)
    {
        // try to take RPC_FIFO ownership
        if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )
        {

#if DEBUG_RPC_SERVER_GENERIC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_SERVER_GENERIC < cycle )
printk("\n[%s] RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n",
__FUNCTION__, server_ptr->trdid, server_core_lid, cycle );
#endif

            // try to consume one RPC request
            empty = remote_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );

            // release RPC_FIFO ownership
            rpc_fifo->owner = 0;

            // handle RPC request if success
            if ( empty == 0 )
            {
                // get client cluster and pointer on RPC descriptor
                desc_cxy = GET_CXY( desc_xp );
                desc_ptr = GET_PTR( desc_xp );

                index      = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) );
                blocking   = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->blocking ) );
                client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );

#if DEBUG_RPC_SERVER_GENERIC
cycle = (uint32_t)hal_get_cycles();
uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) );
if( DEBUG_RPC_SERVER_GENERIC < cycle )
printk("\n[%s] RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n",
__FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle );
#endif

                // register client thread in RPC thread descriptor
                server_ptr->rpc_client_xp = XPTR( desc_cxy , client_ptr );

                // call the relevant server function
                rpc_server[index]( desc_xp );

#if DEBUG_RPC_SERVER_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_SERVER_GENERIC < cycle )
printk("\n[%s] RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n",
__FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle );
#endif

                // decrement response counter in RPC descriptor if blocking RPC
                if( blocking )
                {
                    // decrement responses counter in RPC descriptor
                    hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );

                    // get client thread pointer and client core lid from RPC descriptor
                    client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
                    client_core_lid = hal_remote_l32 ( XPTR( desc_cxy , &desc_ptr->lid ) );

                    // unblock client thread
                    thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );

                    hal_fence();

#if DEBUG_RPC_SERVER_GENERIC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_SERVER_GENERIC < cycle )
printk("\n[%s] RPC thread %x unblocked client thread %x / cycle %d\n",
__FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle );
#endif

                    // send IPI to client core
                    dev_pic_send_ipi( desc_cxy , client_core_lid );

                }  // end if blocking RPC
            }  // end RPC handling if fifo non empty
        }  // end if RPC_FIFO ownership successfully taken and released

        // suicide if too many RPC threads
        if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
        {

#if DEBUG_RPC_SERVER_GENERIC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_SERVER_GENERIC < cycle )
printk("\n[%s] RPC thread %x suicides / cycle %d\n",
__FUNCTION__, server_ptr->trdid, cycle );
#endif

            // update RPC threads counter
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[server_core_lid] , -1 );

            // RPC thread blocks on GLOBAL
            thread_block( server_xp , THREAD_BLOCKED_GLOBAL );

            // RPC thread sets the REQ_DELETE flag to suicide
            hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );
        }
        // block and deschedule otherwise
        else
        {

#if DEBUG_RPC_SERVER_GENERIC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_RPC_SERVER_GENERIC < cycle )
printk("\n[%s] RPC thread %x blocks IDLE & deschedules / cycle %d\n",
__FUNCTION__, server_ptr->trdid, cycle );
#endif

            // RPC thread blocks on IDLE
            thread_block( server_xp , THREAD_BLOCKED_IDLE );

            // RPC thread deschedules
            sched_yield("RPC_FIFO empty");
        }
    }  // end infinite loop
}  // end rpc_thread_func()
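/*
 * Hedged template, not part of the original file: the skeleton that each
 * rpc_*_server() function below follows. Only GET_CXY / GET_PTR / XPTR and the
 * hal_remote_l64() / hal_remote_s64() accessors are the real primitives used by
 * the marshaling code; rpc_example_server() and kernel_do_work() are
 * hypothetical names.
 */
#if 0   // illustration only
void rpc_example_server( xptr_t xp )
{
    // get client cluster identifier and local pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get input argument(s) from client RPC descriptor
    uint32_t arg = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );

    // call the relevant local kernel function (hypothetical)
    uint32_t result = kernel_do_work( arg );

    // set output argument(s) into client RPC descriptor
    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)result );
}
#endif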
/////////////////////////////////////////////////////////////////////////////////////////
// [0] Marshaling functions attached to RPC_PMEM_GET_PAGES (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////
void rpc_pmem_get_pages_client( cxy_t      cxy,
                                uint32_t   order,    // in
                                page_t  ** page )    // out
{
#if DEBUG_RPC_PMEM_GET_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_PMEM_GET_PAGES;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)order;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

    // get output arguments from RPC descriptor
    *page = (page_t *)(intptr_t)rpc.args[1];

#if DEBUG_RPC_PMEM_GET_PAGES
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}

///////////////////////////////////////////
void rpc_pmem_get_pages_server( xptr_t xp )
{
#if DEBUG_RPC_PMEM_GET_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        cxy  = GET_CXY( xp );
    rpc_desc_t * desc = GET_PTR( xp );

    // get input arguments from client RPC descriptor
    uint32_t order = (uint32_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) );

    // call local pmem allocator
    page_t * page = ppm_alloc_pages( order );

    // set output arguments into client RPC descriptor
    hal_remote_s64( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );

#if DEBUG_RPC_PMEM_GET_PAGES
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}
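/*
 * Hedged usage sketch, not part of the original file: callers normally test the
 * target cluster and only go through the RPC when the request is remote, which
 * is why the client function above asserts (cxy != local_cxy). The helper name
 * alloc_page_in_cluster() is hypothetical; ppm_alloc_pages() and
 * rpc_pmem_get_pages_client() are the functions used above.
 */
#if 0   // illustration only
static page_t * alloc_page_in_cluster( cxy_t    target_cxy,
                                       uint32_t order )
{
    page_t * page;

    if( target_cxy == local_cxy )     // local request : call the allocator directly
    {
        page = ppm_alloc_pages( order );
    }
    else                              // remote request : blocking RPC
    {
        rpc_pmem_get_pages_client( target_cxy , order , &page );
    }
    return page;
}
#endif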
/////////////////////////////////////////////////////////////////////////////////////////
// [1] Marshaling functions attached to RPC_PMEM_RELEASE_PAGES (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////
void rpc_pmem_release_pages_client( cxy_t    cxy,
                                    page_t * page )    // in
{
#if DEBUG_RPC_PMEM_RELEASE_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_PMEM_RELEASE_PAGES;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)page;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

#if DEBUG_RPC_PMEM_RELEASE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}

///////////////////////////////////////////////
void rpc_pmem_release_pages_server( xptr_t xp )
{
#if DEBUG_RPC_PMEM_RELEASE_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        cxy  = GET_CXY( xp );
    rpc_desc_t * desc = GET_PTR( xp );

    // get input arguments from client RPC descriptor
    page_t * page = (page_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) );

    // release memory to local pmem
    kmem_req_t  req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

#if DEBUG_RPC_PMEM_RELEASE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}
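/*
 * Hedged usage sketch, not part of the original file, symmetric to the
 * allocation case: a page must be released in its owner cluster, so a caller
 * holding an extended pointer dispatches on the cluster it contains.
 * release_page_in_cluster() is a hypothetical helper; kmem_free() and
 * rpc_pmem_release_pages_client() are the functions used above.
 */
#if 0   // illustration only
static void release_page_in_cluster( xptr_t page_xp )
{
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    if( page_cxy == local_cxy )       // local release : free directly
    {
        kmem_req_t req;
        req.type = KMEM_PAGE;
        req.ptr  = page_ptr;
        kmem_free( &req );
    }
    else                              // remote release : blocking RPC
    {
        rpc_pmem_release_pages_client( page_cxy , page_ptr );
    }
}
#endif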
/////////////////////////////////////////////////////////////////////////////////////////
// [2] undefined slot
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////
// [3] Marshaling functions attached to RPC_PROCESS_MAKE_FORK (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////
void rpc_process_make_fork_client( cxy_t       cxy,
                                   xptr_t      ref_process_xp,      // in
                                   xptr_t      parent_thread_xp,    // in
                                   pid_t     * child_pid,           // out
                                   thread_t ** child_thread_ptr,    // out
                                   error_t   * error )              // out
{
#if DEBUG_RPC_PROCESS_MAKE_FORK
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_PROCESS_MAKE_FORK;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)ref_process_xp;
    rpc.args[1] = (uint64_t)parent_thread_xp;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

    // get output arguments from RPC descriptor
    *child_pid        = (pid_t)rpc.args[2];
    *child_thread_ptr = (thread_t *)(intptr_t)rpc.args[3];
    *error            = (error_t)rpc.args[4];

#if DEBUG_RPC_PROCESS_MAKE_FORK
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}

//////////////////////////////////////////////
void rpc_process_make_fork_server( xptr_t xp )
{
#if DEBUG_RPC_PROCESS_MAKE_FORK
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif

    xptr_t     ref_process_xp;      // extended pointer on reference parent process
    xptr_t     parent_thread_xp;    // extended pointer on parent thread
    pid_t      child_pid;           // child process identifier
    thread_t * child_thread_ptr;    // local pointer on child thread
    error_t    error;               // local error status

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get input arguments from client RPC descriptor
    ref_process_xp   = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    parent_thread_xp = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );

    // call local kernel function
    error = process_make_fork( ref_process_xp,
                               parent_thread_xp,
                               &child_pid,
                               &child_thread_ptr );

    // set output arguments into client RPC descriptor
    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid );
    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)(intptr_t)child_thread_ptr );
    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );

#if DEBUG_RPC_PROCESS_MAKE_FORK
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}

/////////////////////////////////////////////////////////////////////////////////////////
// [4] undefined slot
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////
// [5] undefined slot
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////
// [6] Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////
void
rpc_thread_user_create_client( cxy_t cxy, pid_t pid, // in void * start_func, // in void * start_arg, // in pthread_attr_t * attr, // in xptr_t * thread_xp, // out error_t * error ) // out { #if DEBUG_RPC_THREAD_USER_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_THREAD_USER_CREATE) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_THREAD_USER_CREATE; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)pid; rpc.args[1] = (uint64_t)(intptr_t)start_func; rpc.args[2] = (uint64_t)(intptr_t)start_arg; rpc.args[3] = (uint64_t)(intptr_t)attr; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output arguments from RPC descriptor *thread_xp = (xptr_t)rpc.args[4]; *error = (error_t)rpc.args[5]; #if DEBUG_RPC_THREAD_USER_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_THREAD_USER_CREATE) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } /////////////////////////////////////////////// void rpc_thread_user_create_server( xptr_t xp ) { #if DEBUG_RPC_THREAD_USER_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_THREAD_USER_CREATE) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif pthread_attr_t * attr_ptr; // pointer on attributes structure in client cluster pthread_attr_t attr_copy; // attributes structure copy in server cluster thread_t * thread_ptr; // local pointer on thread descriptor xptr_t thread_xp; // extended pointer on thread descriptor pid_t pid; // process identifier void * start_func; void * start_arg; error_t error; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get pointer on attributes structure in client cluster from RPC descriptor // get input arguments from RPC descriptor pid = (pid_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); start_func = (void *)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); start_arg = (void *)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); attr_ptr = (pthread_attr_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[3])); // makes a local copy of attributes structure hal_remote_memcpy( XPTR( local_cxy , &attr_copy ), XPTR( client_cxy , attr_ptr ), sizeof(pthread_attr_t) ); // call kernel function error = thread_user_create( pid, start_func, start_arg, &attr_copy, &thread_ptr ); // set output arguments thread_xp = XPTR( local_cxy , thread_ptr ); hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)thread_xp ); hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); #if DEBUG_RPC_THREAD_USER_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_THREAD_USER_CREATE) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [7] Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking) 
/////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////
void rpc_thread_kernel_create_client( cxy_t     cxy,
                                      uint32_t  type,         // in
                                      void    * func,         // in
                                      void    * args,         // in
                                      xptr_t  * thread_xp,    // out
                                      error_t * error )       // out
{
#if DEBUG_RPC_THREAD_KERNEL_CREATE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_THREAD_KERNEL_CREATE;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)type;
    rpc.args[1] = (uint64_t)(intptr_t)func;
    rpc.args[2] = (uint64_t)(intptr_t)args;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

    // get output arguments from RPC descriptor
    *thread_xp = (xptr_t)rpc.args[3];
    *error     = (error_t)rpc.args[4];

#if DEBUG_RPC_THREAD_KERNEL_CREATE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}

/////////////////////////////////////////////////
void rpc_thread_kernel_create_server( xptr_t xp )
{
#if DEBUG_RPC_THREAD_KERNEL_CREATE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif

    thread_t * thread_ptr;    // local pointer on thread descriptor
    xptr_t     thread_xp;     // extended pointer on thread descriptor
    lid_t      core_lid;      // core local index
    error_t    error;

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get attributes from RPC descriptor
    uint32_t type = (uint32_t)        hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    void   * func = (void*)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    void   * args = (void*)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );

    // select one core
    core_lid = cluster_select_local_core();

    // call local kernel function
    error = thread_kernel_create( &thread_ptr , type , func , args , core_lid );

    // set output arguments (indexes must match those read by the client function)
    thread_xp = XPTR( local_cxy , thread_ptr );
    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)thread_xp );
    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );

#if DEBUG_RPC_THREAD_KERNEL_CREATE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
#endif
}

/////////////////////////////////////////////////////////////////////////////////////////
// [8] undefined slot
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////
// [9] Marshaling functions attached to RPC_PROCESS_SIGACTION (multicast / non blocking)
/////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////
void
rpc_process_sigaction_client( cxy_t cxy, rpc_desc_t * rpc ) { #if DEBUG_RPC_PROCESS_SIGACTION uint32_t cycle = (uint32_t)hal_get_cycles(); uint32_t action = rpc->args[0]; pid_t pid = rpc->args[1]; thread_t * this = CURRENT_THREAD; if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) printk("\n[%s] thread[%x,%x] enter to request %s of process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle ); #endif // check some RPC arguments assert( (rpc->blocking == false) , "must be non-blocking\n"); assert( (rpc->index == RPC_PROCESS_SIGACTION ) , "bad RPC index\n" ); // register RPC request in remote RPC fifo and return rpc_send( cxy , rpc ); #if DEBUG_RPC_PROCESS_SIGACTION cycle = (uint32_t)hal_get_cycles(); if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) printk("\n[%s] thread[%x,%x] requested %s of process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle ); #endif } // end rpc_process_sigaction_client() ////////////////////////////////////////////// void rpc_process_sigaction_server( xptr_t xp ) { pid_t pid; // target process identifier process_t * process; // pointer on local target process descriptor uint32_t action; // sigaction index thread_t * client_ptr; // pointer on client thread in client cluster xptr_t client_xp; // extended pointer client thread cxy_t client_cxy; // client cluster identifier rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster xptr_t count_xp; // extended pointer on responses counter uint32_t count_value; // responses counter value lid_t client_lid; // client core local index // get client cluster identifier and pointer on RPC descriptor client_cxy = GET_CXY( xp ); rpc = GET_PTR( xp ); // get arguments from RPC descriptor action = (uint32_t)hal_remote_l64( XPTR(client_cxy , &rpc->args[0]) ); pid = (pid_t) hal_remote_l64( XPTR(client_cxy , &rpc->args[1]) ); #if DEBUG_RPC_PROCESS_SIGACTION uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) printk("\n[%s] thread[%x,%x] enter to %s process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, process_action_str( action ), pid, local_cxy, cycle ); #endif // get client thread pointers client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) ); client_xp = XPTR( client_cxy , client_ptr ); // get local process descriptor process = cluster_get_local_process_from_pid( pid ); // call relevant kernel function if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp ); else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process ); else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); // build extended pointer on response counter in RPC count_xp = XPTR( client_cxy , &rpc->responses ); // decrement the responses counter in RPC descriptor, count_value = hal_remote_atomic_add( count_xp , -1 ); // unblock the client thread only if it is the last response. 
if( count_value == 1 ) { // get client core lid client_lid = (lid_t) hal_remote_l32 ( XPTR( client_cxy , &rpc->lid ) ); // unblock client thread thread_unblock( client_xp , THREAD_BLOCKED_RPC ); // send an IPI to client core // dev_pic_send_ipi( client_cxy , client_lid ); } #if DEBUG_RPC_PROCESS_SIGACTION cycle = (uint32_t)hal_get_cycles(); if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) printk("\n[%s] thread[%x,%x] exit after %s process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, process_action_str( action ), pid, local_cxy, cycle ); #endif } // end rpc_process_sigaction_server() ///////////////////////////////////////////////////////////////////////////////////////// // [10] Marshaling functions attached to RPC_VFS_INODE_CREATE (blocking) ///////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////// void rpc_vfs_inode_create_client( cxy_t cxy, uint32_t fs_type, // in uint32_t inode_type, // in uint32_t attr, // in uint32_t rights, // in uint32_t uid, // in uint32_t gid, // in xptr_t * inode_xp, // out error_t * error ) // out { #if DEBUG_RPC_VFS_INODE_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_INODE_CREATE; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)fs_type; rpc.args[1] = (uint64_t)inode_type; rpc.args[2] = (uint64_t)attr; rpc.args[3] = (uint64_t)rights; rpc.args[4] = (uint64_t)uid; rpc.args[5] = (uint64_t)gid; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor *inode_xp = (xptr_t)rpc.args[6]; *error = (error_t)rpc.args[7]; #if DEBUG_RPC_VFS_INODE_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////// void rpc_vfs_inode_create_server( xptr_t xp ) { #if DEBUG_RPC_VFS_INODE_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif uint32_t fs_type; uint32_t inode_type; uint32_t attr; uint32_t rights; uint32_t uid; uint32_t gid; xptr_t inode_xp; error_t error; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get input arguments from client rpc descriptor fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); inode_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) ); // call local kernel function error = vfs_inode_create( fs_type, inode_type, attr, rights, uid, gid, &inode_xp ); // set 
output arguments hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)inode_xp ); hal_remote_s64( XPTR( client_cxy , &desc->args[7] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_INODE_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [11] Marshaling functions attached to RPC_VFS_INODE_DESTROY (blocking) ///////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////// void rpc_vfs_inode_destroy_client( cxy_t cxy, struct vfs_inode_s * inode ) { #if DEBUG_RPC_VFS_INODE_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_INODE_DESTROY; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)inode; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); #if DEBUG_RPC_VFS_INODE_DESTROY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ////////////////////////////////////////////// void rpc_vfs_inode_destroy_server( xptr_t xp ) { #if DEBUG_RPC_VFS_INODE_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif vfs_inode_t * inode; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get argument "inode" from client RPC descriptor inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); // call local kernel function vfs_inode_destroy( inode ); #if DEBUG_RPC_VFS_INODE_DESTROY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [12] Marshaling functions attached to RPC_VFS_DENTRY_CREATE (blocking) ///////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////// void rpc_vfs_dentry_create_client( cxy_t cxy, uint32_t type, // in char * name, // in xptr_t * dentry_xp, // out error_t * error ) // out { #if DEBUG_RPC_VFS_DENTRY_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; 
rpc.index = RPC_VFS_DENTRY_CREATE; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)type; rpc.args[1] = (uint64_t)(intptr_t)name; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor *dentry_xp = (xptr_t)rpc.args[2]; *error = (error_t)rpc.args[3]; #if DEBUG_RPC_VFS_DENTRY_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ////////////////////////////////////////////// void rpc_vfs_dentry_create_server( xptr_t xp ) { #if DEBUG_RPC_VFS_DENTRY_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif uint32_t type; char * name; xptr_t dentry_xp; error_t error; char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get arguments "name", "type", and "parent" from client RPC descriptor type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); name = (char *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); // makes a local copy of name hal_remote_strcpy( XPTR( local_cxy , name_copy ), XPTR( client_cxy , name ) ); // call local kernel function error = vfs_dentry_create( type, name_copy, &dentry_xp ); // set output arguments hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)dentry_xp ); hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_DENTRY_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [13] Marshaling functions attached to RPC_VFS_DENTRY_DESTROY (blocking) ///////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////// void rpc_vfs_dentry_destroy_client( cxy_t cxy, vfs_dentry_t * dentry ) { #if DEBUG_RPC_VFS_DENTRY_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_DENTRY_DESTROY; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)dentry; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); #if DEBUG_RPC_VFS_DENTRY_DESTROY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } /////////////////////////////////////////////// void rpc_vfs_dentry_destroy_server( xptr_t xp ) { #if DEBUG_RPC_VFS_DENTRY_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = 
(uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif vfs_dentry_t * dentry; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get arguments "dentry" from client RPC descriptor dentry = (vfs_dentry_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); // call local kernel function vfs_dentry_destroy( dentry ); #if DEBUG_RPC_VFS_DENTRY_DESTROY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [14] Marshaling functions attached to RPC_VFS_FILE_CREATE (blocking) ///////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////// void rpc_vfs_file_create_client( cxy_t cxy, struct vfs_inode_s * inode, // in uint32_t file_attr, // in xptr_t * file_xp, // out error_t * error ) // out { #if DEBUG_RPC_VFS_FILE_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_FILE_CREATE; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)inode; rpc.args[1] = (uint64_t)file_attr; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor *file_xp = (xptr_t)rpc.args[2]; *error = (error_t)rpc.args[3]; #if DEBUG_RPC_VFS_FILE_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } //////////////////////////////////////////// void rpc_vfs_file_create_server( xptr_t xp ) { #if DEBUG_RPC_VFS_FILE_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif uint32_t file_attr; vfs_inode_t * inode; xptr_t file_xp; error_t error; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get arguments "file_attr" and "inode" from client RPC descriptor inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); file_attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); // call local kernel function error = vfs_file_create( inode, file_attr, &file_xp ); // set output arguments hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp ); hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_FILE_CREATE cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle 
%d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [15] Marshaling functions attached to RPC_VFS_FILE_DESTROY (blocking) ///////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////// void rpc_vfs_file_destroy_client( cxy_t cxy, vfs_file_t * file ) { #if DEBUG_RPC_VFS_FILE_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_FILE_DESTROY; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)file; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); #if DEBUG_RPC_VFS_FILE_DESTROY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////// void rpc_vfs_file_destroy_server( xptr_t xp ) { #if DEBUG_RPC_VFS_FILE_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif vfs_file_t * file; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get arguments "dentry" from client RPC descriptor file = (vfs_file_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); // call local kernel function vfs_file_destroy( file ); #if DEBUG_RPC_VFS_FILE_DESTROY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [16] Marshaling functions attached to RPC_VFS_FS_CHILD_INIT (blocking) ///////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////// void rpc_vfs_fs_child_init_client( cxy_t cxy, vfs_inode_t * parent_inode, // in char * name, // in xptr_t child_inode_xp, // in error_t * error ) // out { #if DEBUG_RPC_VFS_FS_CHILD_INIT thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_CHILD_INIT ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_FS_CHILD_INIT; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)parent_inode; rpc.args[1] = (uint64_t)(intptr_t)name; rpc.args[2] = (uint64_t)child_inode_xp; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor 
*error = (error_t)rpc.args[3]; #if DEBUG_RPC_VFS_FS_CHILD_INIT cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_CHILD_INIT ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ////////////////////////////////////////////// void rpc_vfs_fs_child_init_server( xptr_t xp ) { #if DEBUG_RPC_VFS_FS_CHILD_INIT thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_CHILD_INIT ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif error_t error; vfs_inode_t * parent; xptr_t child_xp; char * name; char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get arguments "parent", "name", and "child_xp" parent = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); name = (char*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); child_xp = (xptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); // get name local copy hal_remote_strcpy( XPTR( local_cxy , name_copy ) , XPTR( client_cxy , name ) ); // call the kernel function error = vfs_fs_child_init( parent , name_copy , child_xp ); // set output argument hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_FS_CHILD_INIT cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_CHILD_INIT ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [17] Marshaling function attached to RPC_VFS_FS_ADD_DENTRY (blocking) ///////////////////////////////////////////////////////////////////////////////////////// void rpc_vfs_fs_add_dentry_client( cxy_t cxy, vfs_inode_t * parent, // in vfs_dentry_t * dentry, // in error_t * error ) // out { #if DEBUG_RPC_VFS_FS_ADD_DENTRY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_FS_ADD_DENTRY; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)parent; rpc.args[1] = (uint64_t)(intptr_t)dentry; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor *error = (error_t)rpc.args[2]; #if DEBUG_RPC_VFS_FS_ADD_DENTRY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////// void rpc_vfs_fs_add_dentry_server( xptr_t xp ) { #if DEBUG_RPC_VFS_FS_ADD_DENTRY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif error_t error; vfs_inode_t * 
parent; vfs_dentry_t * dentry; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get input arguments parent = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); // call the kernel function error = vfs_fs_add_dentry( parent , dentry ); // set output argument hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_FS_ADD_DENTRY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [18] Marshaling function attached to RPC_VFS_FS_REMOVE_DENTRY (blocking) ///////////////////////////////////////////////////////////////////////////////////////// void rpc_vfs_fs_remove_dentry_client( cxy_t cxy, vfs_inode_t * parent, // in vfs_dentry_t * dentry, // in error_t * error ) // out { #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_FS_REMOVE_DENTRY; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)parent; rpc.args[1] = (uint64_t)(intptr_t)dentry; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor *error = (error_t)rpc.args[2]; #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////// void rpc_vfs_fs_remove_dentry_server( xptr_t xp ) { #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif error_t error; vfs_inode_t * parent; vfs_dentry_t * dentry; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get input arguments parent = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); // call the kernel function error = vfs_fs_remove_dentry( parent , dentry ); // set output argument hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [19] Marshaling functions attached to 
RPC_VFS_INODE_LOAD_ALL_PAGES (blocking) ///////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// void rpc_vfs_inode_load_all_pages_client( cxy_t cxy, vfs_inode_t * inode, // in error_t * error ) // out { #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VFS_INODE_LOAD_ALL_PAGES; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)inode; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output values from RPC descriptor *error = (error_t)rpc.args[1]; #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////////// void rpc_vfs_inode_load_all_pages_server( xptr_t xp ) { #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif error_t error; vfs_inode_t * inode; // get client cluster identifier and pointer on RPC descriptor cxy_t client_cxy = GET_CXY( xp ); rpc_desc_t * desc = GET_PTR( xp ); // get input argument inode = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); // call the kernel function error = vfs_inode_load_all_pages( inode ); // set output argument hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif } ///////////////////////////////////////////////////////////////////////////////////////// // [20] Marshaling functions attached to RPC_VMM_GET_VSEG (blocking) ///////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////// void rpc_vmm_get_vseg_client( cxy_t cxy, process_t * process, // in intptr_t vaddr, // in xptr_t * vseg_xp, // out error_t * error ) // out { #if DEBUG_RPC_VMM_GET_VSEG thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( cycle > DEBUG_RPC_VMM_GET_VSEG ) printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); #endif assert( (cxy != local_cxy) , "target cluster is not remote\n"); // initialise RPC descriptor header rpc_desc_t rpc; rpc.index = RPC_VMM_GET_VSEG; rpc.blocking = true; rpc.responses = 1; // set input arguments in RPC descriptor rpc.args[0] = (uint64_t)(intptr_t)process; rpc.args[1] = (uint64_t)vaddr; // register RPC request in remote RPC fifo rpc_send( cxy , &rpc ); // get output argument from rpc 
    *vseg_xp = rpc.args[2];
    *error   = (error_t)rpc.args[3];

#if DEBUG_RPC_VMM_GET_VSEG
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GET_VSEG )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

/////////////////////////////////////////
void rpc_vmm_get_vseg_server( xptr_t xp )
{
#if DEBUG_RPC_VMM_GET_VSEG
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GET_VSEG )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    process_t * process;
    intptr_t    vaddr;
    vseg_t    * vseg_ptr;
    xptr_t      vseg_xp;
    error_t     error;

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get input argument from client RPC descriptor
    process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    vaddr   = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );

    // call local kernel function
    error = vmm_get_vseg( process , vaddr , &vseg_ptr );

    // set output arguments to client RPC descriptor
    vseg_xp = XPTR( local_cxy , vseg_ptr );
    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp );
    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );

#if DEBUG_RPC_VMM_GET_VSEG
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GET_VSEG )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}
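// Illustrative sketch (not compiled): querying the VMM of a process for the vseg covering
// a given virtual address. The helper name and arguments are hypothetical; <ref_cxy> and
// <ref_ptr> are assumed to identify the process descriptor copy that owns the VMM (e.g.
// the reference cluster). The local function is used when that cluster is local, and the
// blocking RPC defined above otherwise; in both cases the result is an extended pointer
// on the vseg in its owner cluster.
#if 0
static error_t example_get_vseg( cxy_t       ref_cxy,
                                 process_t * ref_ptr,
                                 intptr_t    vaddr,
                                 xptr_t    * vseg_xp )
{
    error_t error;

    if( ref_cxy == local_cxy )      // target process descriptor is local
    {
        vseg_t * vseg_ptr;
        error    = vmm_get_vseg( ref_ptr , vaddr , &vseg_ptr );
        *vseg_xp = XPTR( local_cxy , vseg_ptr );
    }
    else                            // target process descriptor is remote
    {
        rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , vseg_xp , &error );
    }
    return error;
}
#endif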
/////////////////////////////////////////////////////////////////////////////////////////
// [21] Marshaling functions attached to RPC_VMM_GLOBAL_UPDATE_PTE (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////
void rpc_vmm_global_update_pte_client( cxy_t       cxy,
                                       process_t * process,   // in
                                       vpn_t       vpn,       // in
                                       uint32_t    attr,      // in
                                       ppn_t       ppn )      // in
{
#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_VMM_GLOBAL_UPDATE_PTE;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)process;
    rpc.args[1] = (uint64_t)vpn;
    rpc.args[2] = (uint64_t)attr;
    rpc.args[3] = (uint64_t)ppn;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

//////////////////////////////////////////////////
void rpc_vmm_global_update_pte_server( xptr_t xp )
{
#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    process_t * process;
    vpn_t       vpn;
    uint32_t    attr;
    ppn_t       ppn;

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get input arguments "process", "vpn", "attr" & "ppn" from client RPC descriptor
    process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    vpn     = (vpn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    attr    = (uint32_t)             hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    ppn     = (ppn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );

    // call local kernel function
    vmm_global_update_pte( process , vpn , attr , ppn );

#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

/////////////////////////////////////////////////////////////////////////////////////////
// [22] Marshaling functions attached to RPC_KCM_ALLOC (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////
void rpc_kcm_alloc_client( cxy_t     cxy,
                           uint32_t  kmem_type,   // in
                           xptr_t  * buf_xp )     // out
{
#if DEBUG_RPC_KCM_ALLOC
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_ALLOC )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_KCM_ALLOC;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)kmem_type;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

    // get output arguments from RPC descriptor
    *buf_xp = (xptr_t)rpc.args[1];

#if DEBUG_RPC_KCM_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_ALLOC )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

//////////////////////////////////////
void rpc_kcm_alloc_server( xptr_t xp )
{
#if DEBUG_RPC_KCM_ALLOC
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_ALLOC )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get input argument "kmem_type" from client RPC descriptor
    uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );

    // allocate memory from the local KCM
    kmem_req_t req;
    req.type  = kmem_type;
    req.flags = AF_ZERO;
    void * buf_ptr = kmem_alloc( &req );

    // set output argument
    xptr_t buf_xp = XPTR( local_cxy , buf_ptr );
    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp );

#if DEBUG_RPC_KCM_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_ALLOC )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}
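// Illustrative sketch (not compiled): allocating a zeroed kernel object of a given kmem
// type in a possibly remote target cluster. The helper name is hypothetical. The returned
// extended pointer identifies the new object in the target cluster; the symmetric release
// goes through kmem_free() or the RPC_KCM_FREE service defined below.
#if 0
static xptr_t example_remote_kmem_alloc( cxy_t    target_cxy,
                                         uint32_t kmem_type )
{
    xptr_t buf_xp;

    if( target_cxy == local_cxy )   // allocate from the local cluster
    {
        kmem_req_t req;
        req.type  = kmem_type;
        req.flags = AF_ZERO;
        buf_xp    = XPTR( local_cxy , kmem_alloc( &req ) );
    }
    else                            // delegate allocation to the target cluster
    {
        rpc_kcm_alloc_client( target_cxy , kmem_type , &buf_xp );
    }
    return buf_xp;
}
#endif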
/////////////////////////////////////////////////////////////////////////////////////////
// [23] Marshaling functions attached to RPC_KCM_FREE (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////
void rpc_kcm_free_client( cxy_t    cxy,
                          void   * buf,         // in
                          uint32_t kmem_type )  // in
{
#if DEBUG_RPC_KCM_FREE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_FREE )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_KCM_FREE;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)buf;
    rpc.args[1] = (uint64_t)kmem_type;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

#if DEBUG_RPC_KCM_FREE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_FREE )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

/////////////////////////////////////
void rpc_kcm_free_server( xptr_t xp )
{
#if DEBUG_RPC_KCM_FREE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_FREE )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get input arguments "buf" and "kmem_type" from client RPC descriptor
    void   * buf       = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );

    // release memory to the local KCM
    kmem_req_t req;
    req.type = kmem_type;
    req.ptr  = buf;
    kmem_free( &req );

#if DEBUG_RPC_KCM_FREE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_KCM_FREE )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

/////////////////////////////////////////////////////////////////////////////////////////
// [24] undefined slot
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////
// [25] Marshaling functions attached to RPC_MAPPER_HANDLE_MISS (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////
void rpc_mapper_handle_miss_client( cxy_t             cxy,
                                    struct mapper_s * mapper,     // in
                                    uint32_t          page_id,    // in
                                    xptr_t          * page_xp,    // out
                                    error_t         * error )     // out
{
#if DEBUG_RPC_MAPPER_HANDLE_MISS
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_MAPPER_HANDLE_MISS;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)mapper;
    rpc.args[1] = (uint64_t)page_id;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

    // get output values from RPC descriptor
    *page_xp = (xptr_t)rpc.args[2];
    *error   = (error_t)rpc.args[3];
#if DEBUG_RPC_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}

///////////////////////////////////////////////
void rpc_mapper_handle_miss_server( xptr_t xp )
{
#if DEBUG_RPC_MAPPER_HANDLE_MISS
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

    mapper_t * mapper;
    uint32_t   page_id;
    xptr_t     page_xp;
    error_t    error;

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = GET_CXY( xp );
    rpc_desc_t * desc       = GET_PTR( xp );

    // get arguments from client RPC descriptor
    mapper  = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    page_id = (uint32_t)            hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );

    // call local kernel function
    error = mapper_handle_miss( mapper, page_id, &page_xp );

    // set output arguments to client RPC descriptor
    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)page_xp );
    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );

#if DEBUG_RPC_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
#endif

}
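// Illustrative sketch (not compiled): getting the page descriptor for page <page_id> in a
// mapper identified by an extended pointer. The helper name is hypothetical. The returned
// <page_xp> is an extended pointer on the page descriptor in the cluster containing the
// mapper, whether the miss is handled locally or through the RPC defined above.
#if 0
static error_t example_get_mapper_page( xptr_t     mapper_xp,
                                        uint32_t   page_id,
                                        xptr_t   * page_xp )
{
    error_t    error;
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );

    if( mapper_cxy == local_cxy )   // mapper is local
    {
        error = mapper_handle_miss( mapper_ptr , page_id , page_xp );
    }
    else                            // mapper is remote
    {
        rpc_mapper_handle_miss_client( mapper_cxy , mapper_ptr , page_id , page_xp , &error );
    }
    return error;
}
#endif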
/////////////////////////////////////////////////////////////////////////////////////////
// [26] undefined slot
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////
// [27] Marshaling functions attached to RPC_VMM_CREATE_VSEG (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////
void rpc_vmm_create_vseg_client( cxy_t              cxy,
                                 struct process_s * process,
                                 vseg_type_t        type,
                                 intptr_t           base,
                                 uint32_t           size,
                                 uint32_t           file_offset,
                                 uint32_t           file_size,
                                 xptr_t             mapper_xp,
                                 cxy_t              vseg_cxy,
                                 struct vseg_s   ** vseg )
{
    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_VMM_CREATE_VSEG;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)process;
    rpc.args[1] = (uint64_t)type;
    rpc.args[2] = (uint64_t)base;
    rpc.args[3] = (uint64_t)size;
    rpc.args[4] = (uint64_t)file_offset;
    rpc.args[5] = (uint64_t)file_size;
    rpc.args[6] = (uint64_t)mapper_xp;
    rpc.args[7] = (uint64_t)vseg_cxy;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );

    // get output values from RPC descriptor
    *vseg = (vseg_t *)(intptr_t)rpc.args[8];
}

////////////////////////////////////////////
void rpc_vmm_create_vseg_server( xptr_t xp )
{
    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        cxy  = GET_CXY( xp );
    rpc_desc_t * desc = GET_PTR( xp );

    // get input arguments from client RPC descriptor
    process_t * process     = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0]));
    vseg_type_t type        = (vseg_type_t)(uint32_t)hal_remote_l64( XPTR(cxy , &desc->args[1]));
    intptr_t    base        = (intptr_t)             hal_remote_l64( XPTR(cxy , &desc->args[2]));
    uint32_t    size        = (uint32_t)             hal_remote_l64( XPTR(cxy , &desc->args[3]));
    uint32_t    file_offset = (uint32_t)             hal_remote_l64( XPTR(cxy , &desc->args[4]));
    uint32_t    file_size   = (uint32_t)             hal_remote_l64( XPTR(cxy , &desc->args[5]));
    xptr_t      mapper_xp   = (xptr_t)               hal_remote_l64( XPTR(cxy , &desc->args[6]));
    cxy_t       vseg_cxy    = (cxy_t)(uint32_t)      hal_remote_l64( XPTR(cxy , &desc->args[7]));

    // call local kernel function
    vseg_t * vseg = vmm_create_vseg( process,
                                     type,
                                     base,
                                     size,
                                     file_offset,
                                     file_size,
                                     mapper_xp,
                                     vseg_cxy );

    // set output arguments into client RPC descriptor
    hal_remote_s64( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg );
}

/////////////////////////////////////////////////////////////////////////////////////////
// [28] Marshaling functions attached to RPC_VMM_SET_COW (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////
void rpc_vmm_set_cow_client( cxy_t       cxy,
                             process_t * process )
{
    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_VMM_SET_COW;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)process;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );
}

////////////////////////////////////////
void rpc_vmm_set_cow_server( xptr_t xp )
{
    process_t * process;

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        cxy  = GET_CXY( xp );
    rpc_desc_t * desc = GET_PTR( xp );

    // get input arguments from client RPC descriptor
    process = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0]));

    // call local kernel function
    vmm_set_cow( process );
}

/////////////////////////////////////////////////////////////////////////////////////////
// [29] Marshaling functions attached to RPC_VMM_DISPLAY (blocking)
/////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////
void rpc_vmm_display_client( cxy_t       cxy,
                             process_t * process,
                             bool_t      detailed )
{
    assert( (cxy != local_cxy) , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index     = RPC_VMM_DISPLAY;
    rpc.blocking  = true;
    rpc.responses = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)process;
    rpc.args[1] = (uint64_t)detailed;

    // register RPC request in remote RPC fifo
    rpc_send( cxy , &rpc );
}

////////////////////////////////////////
void rpc_vmm_display_server( xptr_t xp )
{
    process_t * process;
    bool_t      detailed;

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        cxy  = GET_CXY( xp );
    rpc_desc_t * desc = GET_PTR( xp );

    // get input arguments from client RPC descriptor
    process  = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0]));
    detailed = (bool_t)               hal_remote_l64( XPTR(cxy , &desc->args[1]));

    // call local kernel function
    vmm_display( process , detailed );
}
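// Illustrative sketch (not compiled): displaying the VMM of a process whose descriptor may
// be located in a remote cluster, as done from debug code paths. The helper name is
// hypothetical; <process_xp> is assumed to be an extended pointer on the process descriptor.
#if 0
static void example_display_vmm( xptr_t process_xp,
                                 bool_t detailed )
{
    cxy_t       process_cxy = GET_CXY( process_xp );
    process_t * process_ptr = GET_PTR( process_xp );

    if( process_cxy == local_cxy ) vmm_display( process_ptr , detailed );
    else                           rpc_vmm_display_client( process_cxy , process_ptr , detailed );
}
#endif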