Ignore:
Timestamp:
Nov 7, 2017, 3:08:12 PM (5 years ago)
Author:
alain
Message:

First implementation of fork/exec.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/rpc.h

    r401 r407  
    3030#include <bits.h>
    3131#include <spinlock.h>
     32#include <vseg.h>
    3233#include <remote_fifo.h>
    3334
     
    8283    RPC_MAPPER_MOVE_BUFFER     = 24,
    8384    RPC_MAPPER_GET_PAGE        = 25,
     85    RPC_VMM_CREATE_VSEG        = 26,
     86    RPC_SCHED_DISPLAY          = 27,
    8487
    8588    RPC_MAX_INDEX              = 30,
     
    100103typedef struct rpc_desc_s
    101104{
    102         rpc_index_t       index;       // index of requested RPC service
    103         volatile uint32_t response;    // response valid when 0
    104     uint64_t          args[10];    // input/output arguments buffer
     105        rpc_index_t         index;       /*! index of requested RPC service           */
     106        volatile uint32_t   response;    /*! response valid when 0                    */
     107    struct thread_s   * thread;      /*! local pointer on client thread           */
     108    uint32_t            lid;         /*! index of core running the calling thread */
     109    uint64_t            args[10];    /*! input/output arguments buffer            */
    105110}
    106111rpc_desc_t;
    107 
    108 /***********************************************************************************
    109  * This structure defines the RPC fifo, containing a remote_fifo, the owner RPC
    110  * thread TRDID (used as a light lock), and the instrumentation counter.
    111  *
    112  * Implementation note: the TRDID is a good owner identifier, because all
    113  * RPC threads in a given cluster belong to the same process_zero kernel process,
    114  * and RPC threads cannot have local index LTID = 0.
    115  **********************************************************************************/
    116 
    117 typedef struct rpc_fifo_s
    118 {
    119         trdid_t           owner;       // owner thread / 0 if no owner
    120         uint64_t          count;       // total number of received RPCs (instrumentation)
    121         remote_fifo_t     fifo;        // embedded remote fifo
    122 }
    123 rpc_fifo_t;
    124 
    125112
    126113/**********************************************************************************/
     
    149136
    150137/***********************************************************************************
    151  * This function initialises the local RPC fifo and the lock protecting readers.
    152  * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.
    153  * Each slot contains an extended pointer on the RPC descriptor.
    154  ***********************************************************************************
    155  * @ rf     : pointer on the local RPC fifo.
    156  **********************************************************************************/
    157 void rpc_fifo_init( rpc_fifo_t * rf );
    158 
    159 /***********************************************************************************
    160138 * This function is the entry point for RPC handling on the server side.
    161  * It is executed by a core receiving an IPI.
    162  * It checks the RPC fifo, tries to take the light-lock and activates (or creates)
    163  * an RPC thread in case of success.
    164  ***********************************************************************************
    165  * @ returns true if success / false otherwise.
    166  **********************************************************************************/
    167 bool_t rpc_check();
     139 * It is executed by a core receiving an IPI, and each time the core enters
     140 * or exits the kernel to handle pending RPCs.
     141 * It does nothing and returns if the RPC_FIFO is empty.
     142 * The calling thread checks if there exists at least one non-blocked RPC thread,
     143 * creates a new RPC thread if required, and deschedules to allow the RPC thread to execute.
     144 **********************************************************************************/
     145void rpc_check();
    168146
    169147/***********************************************************************************
    170148 * This function contains the loop to execute all pending RPCs on the server side.
    171  * It should be called with irq disabled and after light lock acquisition.
     149 * It is called by the rpc_thread_func() function with irq disabled, and after
     150 * RPC_FIFO ownership acquisition.
    172151 ***********************************************************************************
    173152 * @ rpc_fifo  : pointer on the local RPC fifo
    174153 **********************************************************************************/
    175 void rpc_execute_all( rpc_fifo_t * rpc_fifo );
    176 
    177 /**********************************************************************************
    178  * This function is called by any thread running on any core in any cluster,
    179  * that detected a non-empty RPC_FIFO and got the RPC_FIFO ownership.
    180  * It activates one RPC thread, and immediately switches to the RPC thread.
    181  * It gets the first free RPC thread from the core free-list, or creates a new one
    182  * when the core free-list is empty.
    183  ***********************************************************************************
    184  * @ rpc_fifo : pointer on the non-empty RPC fifo.
    185  * @ return 0 if success / return ENOMEM if error.
    186  **********************************************************************************/
    187 error_t rpc_activate_thread( rpc_fifo_t * rpc_fifo );
    188 
    189 /***********************************************************************************
    190  * This function contains the infinite loop executed by each RPC thread.
     154void rpc_execute_all( remote_fifo_t * rpc_fifo );
     155
     156/***********************************************************************************
     157 * This function contains the infinite loop executed by an RPC thread.
    191158 **********************************************************************************/
    192159void rpc_thread_func();
     
    266233 ***********************************************************************************
    267234 * @ cxy       : server cluster identifier.
    268  * @ attr      : [in]  pointer on pthread_attr_t in client cluster.
    269  * @ thread_xp : [out] pointer on buffer for thread extended pointer.
     235 * @ attr      : [in]  local pointer on pthread_attr_t in client cluster.
     236 * @ thread_xp : [out] buffer for thread extended pointer.
    270237 * @ error     : [out] error status (0 if success).
    271238 **********************************************************************************/
     
    274241                                    void                  * start_func,
    275242                                    void                  * start_arg,
    276                                     struct pthread_attr_s * attr,
     243                                    pthread_attr_t        * attr,
    277244                                    xptr_t                * thread_xp,
    278245                                    error_t               * error );
     
    499466
    500467/***********************************************************************************
    501  * [21] The RPC_VMM_GET_PTE returns in the "ppn" and "attr" arguments the PTE value
    502  * for a given VPN in a given process.
     468 * [21] The RPC_VMM_GET_PTE returns in the <ppn> and <attr> arguments the PTE value
     469 * for a given <vpn> in a given <process> (page_fault or copy_on_write event).
    503470 * The server cluster is supposed to be the reference cluster, and the vseg
    504471 * containing the VPN must be registered in the reference VMM.
    505  * It returns an error if physical memory cannot be allocated for the PTE2,
     472 * It returns an error if physical memory cannot be allocated for the missing PTE2,
    506473 * or for the missing page itself.
    507474 ***********************************************************************************
     
    509476 * @ process : [in]   pointer on process descriptor in server cluster.
    510477 * @ vaddr   : [in]   virtual address to be searched.
     478 * @ cow     : [in]   "copy_on_write" event if true / "page_fault" event if false.
    511479 * @ attr    : [out]  address of buffer for attributes.
    512480 * @ ppn     : [out]  address of buffer for PPN.
     
    516484                             struct process_s * process,
    517485                             vpn_t              vpn,
     486                             bool_t             cow,
    518487                             uint32_t         * attr,
    519488                             ppn_t            * ppn,
     
    601570void rpc_mapper_get_page_server( xptr_t xp );
    602571
     572/***********************************************************************************
     573 * [26] The RPC_VMM_CREATE_VSEG allows a client thread to request the remote
     574 * reference cluster of a given process to allocate and register in the reference
     575 * process VMM a new vseg descriptor.
     576 * On the server side, this RPC uses the vmm_create_vseg() function, and returns
     577 * to the client the local pointer on the created vseg descriptor.
     578 ***********************************************************************************
     579 * @ cxy         : server cluster identifier.
     580 * @ process     : [in]  local pointer on process descriptor in server.
     581 * @ type        : [in]  vseg type.
     582 * @ base        : [in]  base address (unused for dynamically allocated vsegs).
     583 * @ size        : [in]  number of bytes.
     584 * @ file_offset : [in]  offset in file (for CODE, DATA, FILE types).
     585 * @ file_size   : [in]  can be smaller than size for DATA type.
     586 * @ mapper_xp   : [in]  extended pointer on mapper (for CODE, DATA, FILE types).
     587 * @ vseg_cxy    : [in]  target cluster for mapping (if not data type).
     588 * @ vseg        : [out] local pointer on vseg descriptor / NULL if failure.
     589 **********************************************************************************/
     590void rpc_vmm_create_vseg_client( cxy_t              cxy,
     591                                 struct process_s * process,
     592                                 vseg_type_t        type,
     593                                 intptr_t           base,
     594                                 uint32_t           size,
     595                                 uint32_t           file_offset,
     596                                 uint32_t           file_size,
     597                                 xptr_t             mapper_xp,
     598                                 cxy_t              vseg_cxy,
     599                                 struct vseg_s   ** vseg );
     600
     601void rpc_vmm_create_vseg_server( xptr_t xp );
     602
     603/***********************************************************************************
     604 * [27] The RPC_SCHED_DISPLAY allows a client thread to request the display
     605 * of a remote scheduler, identified by the <lid> argument.
     606 ***********************************************************************************
     607 * @ cxy         : server cluster identifier.
     608 * @ lid         : [in]  local index of target core in client cluster.
     609 **********************************************************************************/
     610void rpc_sched_display_client( cxy_t              cxy,
     611                               lid_t              lid );
     612
     613void rpc_sched_display_server( xptr_t xp );
     614
    603615#endif
Note: See TracChangeset for help on using the changeset viewer.