/*
 * rpc.h - RPC (Remote Procedure Call) operations definition.
 *
 * Authors  Mohamed Karaoui (2015)
 *          Alain Greiner   (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _RPC_H_
#define _RPC_H_

#include
#include
#include
#include
#include
#include

/**** Forward declarations ****/

struct process_s;
struct vseg_s;
struct exec_info_s;
struct pthread_attr_s;
struct remote_sem_s;
struct fragment_s;
struct vfs_inode_s;
struct vfs_dentry_s;
struct thread_s;
struct mapper_s;

/**********************************************************************************/
/**************  structures for Remote Procedure Calls  ***************************/
/**********************************************************************************/

/***********************************************************************************
 * This enum defines all RPC indexes.
 * It must be consistent with the rpc_server[] array defined in the rpc.c file.
 **********************************************************************************/

typedef enum
{
    RPC_PMEM_GET_PAGES       = 0,
    RPC_PROCESS_PID_ALLOC    = 1,
    RPC_PROCESS_EXEC         = 2,
    RPC_PROCESS_KILL         = 3,
    RPC_THREAD_USER_CREATE   = 4,
    RPC_THREAD_KERNEL_CREATE = 5,
    RPC_ICU_WTI_ALLOC        = 6,
    RPC_DEVICE_ALLOC         = 7,

    RPC_VFS_INODE_CREATE     = 10,
    RPC_VFS_INODE_DESTROY    = 11,
    RPC_VFS_DENTRY_CREATE    = 12,
    RPC_VFS_DENTRY_DESTROY   = 13,

    RPC_VMM_GET_REF_VSEG     = 20,
    RPC_VMM_GET_PTE          = 21,
    RPC_SEMAPHORE_ALLOC      = 22,
    RPC_SEMAPHORE_FREE       = 23,
    RPC_MAPPER_MOVE          = 24,

    RPC_FATFS_GET_CLUSTER    = 30,

    RPC_MAX_INDEX            = 31,
}
rpc_index_t;

/***********************************************************************************
 * This defines the prototype of the rpc_server functions,
 * registered in the rpc_server[] array in the rpc.c file.
 **********************************************************************************/

typedef void (rpc_server_t) ( xptr_t xp );

/***********************************************************************************
 * This structure defines the RPC descriptor.
 **********************************************************************************/

typedef struct rpc_desc_s
{
    rpc_index_t        index;     // index of requested RPC service
    volatile uint32_t  response;  // response valid when 0
    uint64_t           args[8];   // input/output arguments buffer
}
rpc_desc_t;
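
/***********************************************************************************
 * Illustrative sketch (not part of the API): the marshalling functions declared
 * below presumably use the rpc_desc_t as follows on the client side: the
 * descriptor is allocated in the caller's stack, the "index" and "args[]" fields
 * are filled, "response" is set to a non-zero value, and rpc_send_sync()
 * (declared below) blocks until the server resets "response" to 0.
 * The exact packing of inputs and outputs in args[] shown here is an assumption
 * based on the comments in this file, not the actual rpc.c convention.
 *
 *   // hypothetical client-side marshalling for RPC_PMEM_GET_PAGES
 *   rpc_desc_t  rpc;                          // descriptor in client stack
 *   rpc.index    = RPC_PMEM_GET_PAGES;        // requested service
 *   rpc.response = 1;                         // will be reset by the server
 *   rpc.args[0]  = (uint64_t)order;           // [in]  ln2( number of pages )
 *   rpc_send_sync( cxy, &rpc );               // blocks until response == 0
 *   *error = (error_t)rpc.args[1];            // [out] error status
 *   *ppn   = (uint32_t)rpc.args[2];           // [out] first physical page number
 **********************************************************************************/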

/***********************************************************************************
 * This structure defines the RPC fifo, containing a remote_fifo, the owner RPC
 * thread TRDID (used as a light lock), and the instrumentation counter.
 *
 * Implementation note: the TRDID is a good owner identifier, because all
 * RPC threads in a given cluster belong to the same process_zero kernel process,
 * and RPC threads cannot have local index LTID = 0.
 **********************************************************************************/

typedef struct rpc_fifo_s
{
    trdid_t        owner;   // owner thread / 0 if no owner
    uint64_t       count;   // total number of received RPCs (instrumentation)
    remote_fifo_t  fifo;    // embedded remote fifo
}
rpc_fifo_t;

/**********************************************************************************/
/******* Generic functions supporting RPCs : client side **************************/
/**********************************************************************************/

/***********************************************************************************
 * This blocking function executes on the client core.
 * It puts one RPC extended pointer in the remote fifo.
 * It sends an IPI if the fifo is empty, and waits until the RPC response is
 * available. The RPC descriptor must be allocated in the caller's stack and
 * initialised by the caller. It exits with a panic message if the remote fifo
 * is still full after CONFIG_RPC_PUT_MAX_ITERATIONS retries.
 ***********************************************************************************
 * @ cxy   : server cluster identifier
 * @ desc  : local pointer on RPC descriptor in client cluster
 **********************************************************************************/
void rpc_send_sync( cxy_t        cxy,
                    rpc_desc_t * desc );

/**********************************************************************************/
/******* Generic functions supporting RPCs : server side **************************/
/**********************************************************************************/

/***********************************************************************************
 * This function initialises the local RPC fifo and the lock protecting readers.
 * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.
 * Each slot contains an extended pointer on the RPC descriptor.
 ***********************************************************************************
 * @ rf : pointer on the local RPC fifo.
 **********************************************************************************/
void rpc_fifo_init( rpc_fifo_t * rf );

/***********************************************************************************
 * This function is the entry point for RPC handling on the server side.
 * It can be executed by any thread running (in kernel mode) on any core.
 * It first checks the core private RPC fifo, and then the cluster shared RPC fifo.
 * It calls the rpc_activate_thread() function to activate a dedicated RPC thread.
 ***********************************************************************************
 * @ returns true if at least one RPC found / false otherwise.
 **********************************************************************************/
bool_t rpc_check();

/***********************************************************************************
 * This function contains the loop to execute all pending RPCs on the server side.
 * It should be called with IRQs disabled and after light lock acquisition.
 ***********************************************************************************
 * @ rpc_fifo : pointer on the local RPC fifo
 * @ returns 0 if success
 **********************************************************************************/
error_t rpc_execute_all( rpc_fifo_t * rpc_fifo );
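
/***********************************************************************************
 * Illustrative sketch (not part of the API): kernel code running on a server core
 * can poll for incoming RPCs with rpc_check() at a convenient point. Whether this
 * check is performed in the scheduler, in the idle loop, or on IRQ return is an
 * implementation choice of rpc.c and is only assumed here.
 *
 *   // hypothetical polling point in kernel code on a server core
 *   if( rpc_check() )    // non-empty private or shared RPC fifo detected
 *   {
 *       // a dedicated RPC thread has been activated by rpc_activate_thread()
 *       // (declared below) and will execute the pending requests
 *   }
 **********************************************************************************/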

/***********************************************************************************
 * This function is called by any thread running on any core in any cluster,
 * that detected a non-empty RPC_FIFO and got the RPC_FIFO ownership.
 * It activates one RPC thread, and immediately switches to the RPC thread.
 * It gets the first free RPC thread from the core free-list, or creates a new one
 * when the core free-list is empty.
 ***********************************************************************************
 * @ rpc_fifo : pointer on the non-empty RPC fifo.
 * @ return 0 if success / return ENOMEM if error.
 **********************************************************************************/
error_t rpc_activate_thread( rpc_fifo_t * rpc_fifo );

/***********************************************************************************
 * This function contains the infinite loop executed by each RPC thread.
 **********************************************************************************/
void rpc_thread_func();

/***********************************************************************************
 * This function is executed in case of illegal RPC index.
 **********************************************************************************/
void __attribute__((noinline)) rpc_undefined();

/**********************************************************************************/
/******* Marshalling functions attached to the various RPCs ***********************/
/**********************************************************************************/

/***********************************************************************************
 * The RPC_PMEM_GET_PAGES allocates one or several pages in a remote cluster,
 * and returns the PPN of the first allocated page.
 ***********************************************************************************
 * @ cxy    : server cluster identifier
 * @ order  : [in]  ln2( number of requested pages )
 * @ error  : [out] error status (0 if success)
 * @ ppn    : [out] first physical page number
 **********************************************************************************/
void rpc_pmem_get_pages_client( cxy_t      cxy,
                                uint32_t   order,
                                error_t  * error,
                                uint32_t * ppn );

void rpc_pmem_get_pages_server( xptr_t xp );
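
/***********************************************************************************
 * Illustrative sketch (not part of the API): allocating four contiguous physical
 * pages (order = 2) in a remote cluster identified by "target_cxy". The variable
 * names are hypothetical; only the rpc_pmem_get_pages_client() prototype above
 * is taken from this file.
 *
 *   error_t   error;
 *   uint32_t  ppn;
 *   rpc_pmem_get_pages_client( target_cxy, 2, &error, &ppn );
 *   if( error )
 *   {
 *       // remote allocation failed : handle the error
 *   }
 *   // ppn is the physical page number of the first allocated page
 **********************************************************************************/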

/***********************************************************************************
 * The RPC_PROCESS_PID_ALLOC allocates one new PID in a remote cluster, registers
 * the new process in the remote cluster, and returns the PID and an error code.
 ***********************************************************************************
 * @ cxy     : server cluster identifier.
 * @ process : [in]  local pointer on process descriptor in client cluster.
 * @ error   : [out] error status (0 if success).
 * @ pid     : [out] new process identifier.
 **********************************************************************************/
void rpc_process_pid_alloc_client( cxy_t              cxy,
                                   struct process_s * process,
                                   error_t          * error,
                                   pid_t            * pid );

void rpc_process_pid_alloc_server( xptr_t xp );

/***********************************************************************************
 * The RPC_PROCESS_EXEC creates a process descriptor copy in a remote cluster,
 * and initializes it from information found in the reference process descriptor.
 * This remote cluster becomes the new reference cluster.
 ***********************************************************************************
 * @ cxy   : server cluster identifier.
 * @ info  : [in]  pointer on local exec_info structure.
 * @ error : [out] error status (0 if success).
 **********************************************************************************/
void rpc_process_exec_client( cxy_t                cxy,
                              struct exec_info_s * info,
                              error_t            * error );

void rpc_process_exec_server( xptr_t xp );

/***********************************************************************************
 * The RPC_PROCESS_KILL is actually a multicast RPC sent by the reference cluster
 * to other clusters containing a process descriptor copy, to destroy these copies.
 ***********************************************************************************
 * @ process : local pointer on target process.
 **********************************************************************************/
void rpc_process_kill_client( struct process_s * process );

void rpc_process_kill_server( xptr_t xp );

/***********************************************************************************
 * The RPC_THREAD_USER_CREATE creates a user thread in the server cluster,
 * as specified by the pthread_attr_t argument. It returns the extended pointer
 * on the thread descriptor in the server cluster, and an error code.
 * It is called by the pthread_create system call.
 ***********************************************************************************
 * @ cxy       : server cluster identifier.
 * @ attr      : [in]  pointer on pthread_attr_t in client cluster.
 * @ thread_xp : [out] pointer on buffer for thread extended pointer.
 * @ error     : [out] error status (0 if success).
 **********************************************************************************/
void rpc_thread_user_create_client( cxy_t                  cxy,
                                    struct pthread_attr_s * attr,
                                    xptr_t                * thread_xp,
                                    error_t               * error );

void rpc_thread_user_create_server( xptr_t xp );

/***********************************************************************************
 * The RPC_THREAD_KERNEL_CREATE creates a kernel thread in the server cluster,
 * as specified by the type, func and args arguments. It returns the extended
 * pointer on the thread descriptor in the server cluster, and an error code.
 * It is used by the dev_init() function to create the device server thread.
 ***********************************************************************************
 * @ cxy       : server cluster identifier.
 * @ type      : [in]  type of kernel thread.
 * @ func      : [in]  local pointer on thread function.
 * @ args      : [in]  local pointer on function arguments.
 * @ thread_xp : [out] pointer on buffer for thread extended pointer.
 * @ error     : [out] error status (0 if success).
 **********************************************************************************/
void rpc_thread_kernel_create_client( cxy_t     cxy,
                                      uint32_t  type,
                                      void    * func,
                                      void    * args,
                                      xptr_t  * thread_xp,
                                      error_t * error );

void rpc_thread_kernel_create_server( xptr_t xp );
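
/***********************************************************************************
 * Illustrative sketch (not part of the API): creating a kernel thread in a remote
 * cluster, as done for instance by dev_init() for a device server thread.
 * The "server_cxy", "thread_type", "dev_server_func" and "dev_ptr" names are
 * hypothetical; only the rpc_thread_kernel_create_client() prototype above is
 * taken from this file.
 *
 *   xptr_t   thread_xp;
 *   error_t  error;
 *   rpc_thread_kernel_create_client( server_cxy,        // server cluster
 *                                    thread_type,       // assumed kernel thread type
 *                                    dev_server_func,   // assumed thread entry function
 *                                    dev_ptr,           // assumed function argument
 *                                    &thread_xp,
 *                                    &error );
 *   if( error == 0 )
 *   {
 *       // thread_xp is the extended pointer on the new thread descriptor
 *   }
 **********************************************************************************/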

/***********************************************************************************
 * The RPC_VFS_INODE_CREATE creates an inode and the associated mapper in a
 * remote cluster. The parent dentry must have been previously created.
 * It returns an extended pointer on the created inode.
 ***********************************************************************************
 * @ cxy       : server cluster identifier
 * @ dentry_xp : [in]  extended pointer on parent dentry.
 * @ type      : [in]  file system type.
 * @ attr      : [in]  TODO ???
 * @ mode      : [in]  access mode.
 * @ uid       : [in]  user ID
 * @ gid       : [in]  group ID
 * @ inode_xp  : [out] buffer for extended pointer on created inode.
 * @ error     : [out] error status (0 if success).
 **********************************************************************************/
void rpc_vfs_inode_create_client( cxy_t     cxy,
                                  xptr_t    dentry_xp,
                                  uint32_t  type,
                                  uint32_t  attr,
                                  uint32_t  mode,
                                  uint32_t  uid,
                                  uint32_t  gid,
                                  xptr_t  * inode_xp,
                                  error_t * error );

void rpc_vfs_inode_create_server( xptr_t xp );

/***********************************************************************************
 * The RPC_VFS_INODE_DESTROY releases memory allocated for an inode descriptor
 * and for the associated mapper in a remote cluster.
 ***********************************************************************************
 * @ cxy   : server cluster identifier
 * @ inode : [in] local pointer on inode.
 **********************************************************************************/
void rpc_vfs_inode_destroy_client( cxy_t                cxy,
                                   struct vfs_inode_s * inode );

void rpc_vfs_inode_destroy_server( xptr_t xp );

/***********************************************************************************
 * The RPC_VFS_DENTRY_CREATE creates a dentry in a remote cluster.
 * It returns an extended pointer on the created dentry.
 ***********************************************************************************
 * @ cxy       : server cluster identifier
 * @ type      : [in]  file system type.
 * @ name      : [in]  directory entry name.
 * @ parent    : [in]  local pointer on parent inode.
 * @ dentry_xp : [out] buffer for extended pointer on created dentry.
 * @ error     : [out] error status (0 if success).
 **********************************************************************************/
void rpc_vfs_dentry_create_client( cxy_t                cxy,
                                   uint32_t             type,
                                   char               * name,
                                   struct vfs_inode_s * parent,
                                   xptr_t             * dentry_xp,
                                   error_t            * error );

void rpc_vfs_dentry_create_server( xptr_t xp );

/***********************************************************************************
 * The RPC_VFS_DENTRY_DESTROY releases memory allocated for a dentry descriptor
 * in a remote cluster.
 ***********************************************************************************
 * @ cxy    : server cluster identifier
 * @ dentry : [in] local pointer on dentry.
 **********************************************************************************/
void rpc_vfs_dentry_destroy_client( cxy_t                 cxy,
                                    struct vfs_dentry_s * dentry );

void rpc_vfs_dentry_destroy_server( xptr_t xp );

/***********************************************************************************
 * The RPC_VMM_GET_REF_VSEG returns an extended pointer
 * on the vseg containing a given virtual address in a given process.
 * The server cluster is supposed to be the reference cluster.
 * It returns a NULL extended pointer if no vseg is found.
 ***********************************************************************************
 * @ cxy     : server cluster identifier.
 * @ process : [in]  pointer on process descriptor in server cluster.
 * @ vaddr   : [in]  virtual address to be searched.
 * @ vseg_xp : [out] buffer for extended pointer on vseg (in client cluster).
 **********************************************************************************/
void rpc_vmm_get_ref_vseg_client( cxy_t              cxy,
                                  struct process_s * process,
                                  intptr_t           vaddr,
                                  xptr_t           * vseg_xp );

void rpc_vmm_get_ref_vseg_server( xptr_t xp );
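
/***********************************************************************************
 * Illustrative sketch (not part of the API): querying the reference cluster for
 * the vseg covering a faulting virtual address, when the local cluster is not the
 * reference cluster. The "ref_cxy", "ref_process_ptr" and "fault_vaddr" variables
 * are assumed to be obtained elsewhere; only the rpc_vmm_get_ref_vseg_client()
 * prototype above is taken from this file.
 *
 *   xptr_t  vseg_xp;
 *   rpc_vmm_get_ref_vseg_client( ref_cxy,           // reference cluster
 *                                ref_process_ptr,   // process copy in reference cluster
 *                                fault_vaddr,       // faulting virtual address
 *                                &vseg_xp );
 *   if( vseg_xp == 0 )   // assuming a null extended pointer means "not found"
 *   {
 *       // no vseg covers this address : illegal access
 *   }
 **********************************************************************************/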

/***********************************************************************************
 * The RPC_VMM_GET_PTE returns in the "ppn" and "attr" arguments the PTE value
 * for a given VPN in a given process.
 * The server cluster is supposed to be the reference cluster, and the vseg
 * containing the VPN must be registered in the reference VMM.
 * It returns an error if physical memory cannot be allocated for the PTE2,
 * or for the missing page itself.
 ***********************************************************************************
 * @ cxy     : server cluster identifier.
 * @ process : [in]  pointer on process descriptor in server cluster.
 * @ vpn     : [in]  virtual page number to be searched.
 * @ attr    : [out] address of buffer for attributes.
 * @ ppn     : [out] address of buffer for PPN.
 * @ error   : [out] address of buffer for error code.
 **********************************************************************************/
void rpc_vmm_get_pte_client( cxy_t              cxy,
                             struct process_s * process,
                             vpn_t              vpn,
                             uint32_t         * attr,
                             ppn_t            * ppn,
                             error_t          * error );

void rpc_vmm_get_pte_server( xptr_t xp );

/***********************************************************************************
 * The RPC_SEMAPHORE_ALLOC allocates memory for a semaphore in a remote cluster,
 * and returns an extended pointer on the created semaphore.
 * It returns a NULL extended pointer if physical memory cannot be allocated.
 ***********************************************************************************
 * @ cxy    : server cluster identifier.
 * @ sem_xp : [out] buffer for extended pointer on semaphore.
 **********************************************************************************/
void rpc_semaphore_alloc_client( cxy_t    cxy,
                                 xptr_t * sem_xp );

void rpc_semaphore_alloc_server( xptr_t xp );

/***********************************************************************************
 * The RPC_SEMAPHORE_FREE releases memory allocated for a semaphore
 * in a remote cluster.
 ***********************************************************************************
 * @ cxy : server cluster identifier.
 * @ sem : [in] local pointer on semaphore.
 **********************************************************************************/
void rpc_semaphore_free_client( cxy_t                 cxy,
                                struct remote_sem_s * sem );

void rpc_semaphore_free_server( xptr_t xp );
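
/***********************************************************************************
 * Illustrative sketch (not part of the API): allocating a semaphore in a remote
 * cluster and releasing it later. The GET_CXY() / GET_PTR() accessors used to
 * split the extended pointer are assumed to be the extended-pointer helpers
 * provided elsewhere in ALMOS-MKH; the intent is simply to recover the cluster
 * identifier and the local pointer from sem_xp.
 *
 *   xptr_t  sem_xp;
 *   rpc_semaphore_alloc_client( target_cxy, &sem_xp );
 *   if( sem_xp != 0 )   // assuming a null extended pointer means allocation failure
 *   {
 *       cxy_t                 sem_cxy = GET_CXY( sem_xp );                        // assumed accessor
 *       struct remote_sem_s * sem_ptr = (struct remote_sem_s *)GET_PTR( sem_xp ); // assumed accessor
 *       // ... use the semaphore ...
 *       rpc_semaphore_free_client( sem_cxy, sem_ptr );
 *   }
 **********************************************************************************/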

/***********************************************************************************
 * The RPC_MAPPER_MOVE can be sent by any thread running in a "client" cluster
 * to the "server" cluster containing the mapper of a given file. The service is
 * to move data between the mapper and a user buffer. This user buffer is described
 * as a set of fragments. Each fragment is contained in one single physical page.
 * It is defined by four parameters : size / file_offset / ppn / page_offset,
 * defined in the mapper.h file. The client thread is in charge of building
 * the fragments array covering the user buffer.
 * As each fragment can be stored in a different cluster, and its data can span
 * two successive pages in the mapper radix tree, each fragment is moved
 * using one or two hal_remote_memcpy() transfers.
 ***********************************************************************************
 * @ cxy      : server cluster identifier.
 * @ mapper   : [in]  local pointer on mapper (in server cluster).
 * @ read     : [in]  mapper to buffer if true / buffer to mapper if false.
 * @ nb_frags : [in]  number of fragments in fragments array.
 * @ frags    : [in]  local pointer on fragments array (in client cluster).
 * @ error    : [out] local pointer on buffer for error code (in client cluster).
 **********************************************************************************/
void rpc_mapper_move_client( cxy_t               cxy,
                             struct mapper_s   * mapper,
                             bool_t              read,
                             uint32_t            nb_frags,
                             struct fragment_s * frags,
                             error_t           * error );

void rpc_mapper_move_server( xptr_t xp );

/***********************************************************************************
 * The RPC_ICU_WTI_ALLOC can be sent by any thread running in a "client" cluster
 * to get a WTI mailbox from the ICU of a "server" cluster.
 * The WTI is allocated from the server ICU, but the WTI is not enabled,
 * and no target core is selected in the remote cluster.
 * It returns wti_id == -1 if there is no free WTI in the server cluster.
 ***********************************************************************************
 * @ cxy    : server cluster identifier.
 * @ wti_id : [out] local pointer on WTI index in client cluster (-1 if error).
 **********************************************************************************/
void rpc_icu_wti_alloc_client( cxy_t      cxy,
                               uint32_t * wti_id );

void rpc_icu_wti_alloc_server( xptr_t xp );

/***********************************************************************************
 * The RPC_DEVICE_ALLOC can be sent by any thread running in a "client" cluster
 * to create a device descriptor in a remote "server" cluster.
 * The associated WTI is allocated from the server ICU, but the WTI is not enabled,
 * and no target core is selected in the remote cluster.
 * An error is returned if there is no free WTI in the server cluster.
 ***********************************************************************************
 * @ cxy    : server cluster identifier.
 * @ dev_xp : [out] buffer for extended pointer on device (in client cluster).
 * @ error  : [out] local pointer on buffer for error code (in client cluster).
 **********************************************************************************/
void rpc_device_alloc_client( cxy_t     cxy,
                              xptr_t  * dev_xp,
                              error_t * error );

void rpc_device_alloc_server( xptr_t xp );

/***********************************************************************************
 * The RPC_FATFS_GET_CLUSTER can be sent by any thread running in a "client"
 * cluster to scan the FAT mapper, stored in a remote "server" cluster, and get
 * the FATFS cluster index of a given page of a given file.
 ***********************************************************************************
 * @ cxy     : server cluster identifier.
 * @ mapper  : [in]  local pointer on FAT mapper.
 * @ first   : [in]  FATFS cluster index allocated to first page of file.
 * @ page    : [in]  page index in file.
 * @ cluster : [out] local pointer on buffer for found FATFS cluster index.
 * @ error   : [out] local pointer on buffer for error code (in client cluster).
 **********************************************************************************/
void rpc_fatfs_get_cluster_client( cxy_t             cxy,
                                   struct mapper_s * mapper,
                                   uint32_t          first,
                                   uint32_t          page,
                                   uint32_t        * cluster,
                                   error_t         * error );

void rpc_fatfs_get_cluster_server( xptr_t xp );

#endif
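
/***********************************************************************************
 * Illustrative sketch (not part of the API): asking the cluster holding the FAT
 * mapper for the FATFS cluster index of page "page_id" of a file whose first
 * FATFS cluster index is "first_cluster". The "fat_cxy" and "fat_mapper_ptr"
 * variables are assumed to be obtained from the FATFS context; only the
 * rpc_fatfs_get_cluster_client() prototype above is taken from this file.
 *
 *   uint32_t  searched_cluster;
 *   error_t   error;
 *   rpc_fatfs_get_cluster_client( fat_cxy,          // cluster holding the FAT mapper
 *                                 fat_mapper_ptr,   // local pointer on the FAT mapper
 *                                 first_cluster,    // first FATFS cluster of the file
 *                                 page_id,          // page index in the file
 *                                 &searched_cluster,
 *                                 &error );
 **********************************************************************************/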