Changeset 440 for trunk/kernel/kern


Timestamp:
May 3, 2018, 5:51:22 PM (6 years ago)
Author:
alain
Message:

1/ Fix a bug in the multithreaded "sort" application:
The pthread_create() arguments must be declared as global variables.
2/ The exit syscall can be called by any thread of a process.
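
A minimal user-level sketch of the "sort" fix (illustrative code, not the actual application): an argument passed to pthread_create() must outlive the creating function, since the new thread may dereference it after that function has returned. Global (or heap) storage is safe; a stack-local variable is not.

    #include <pthread.h>
    #include <stdio.h>

    #define THREADS  4

    typedef struct { unsigned index; } arg_t;   // illustrative argument type

    // arguments and thread identifiers are global variables, so they
    // remain valid for the whole life of the created threads
    arg_t      arg[THREADS];
    pthread_t  trdid[THREADS];

    void * work( void * ptr )
    {
        printf( "thread %u running\n" , ((arg_t *)ptr)->index );
        return NULL;
    }

    void create_workers( void )
    {
        unsigned i;
        for( i = 0 ; i < THREADS ; i++ )
        {
            arg[i].index = i;                               // safe: global storage
            pthread_create( &trdid[i] , NULL , work , &arg[i] );
        }
        // buggy variant: passing the address of an arg_t local to this
        // function, whose stack slot can be reused before the new thread
        // reads it
    }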

Location:
trunk/kernel/kern
Files:
11 edited

  • trunk/kernel/kern/chdev.c

    r438 r440  
    124124{
    125125    thread_t * server_ptr;    // local pointer on server thread associated to chdev
     126    xptr_t     server_xp;     // extended pointer on server thread
    126127    core_t   * core_ptr;      // local pointer on core running the server thread
    127128    uint32_t   lid;           // core running the server thread local index
     
    140141    thread_t * this = CURRENT_THREAD;
    141142
    142     // get device descriptor cluster and local pointer
     143    // get chdev cluster and local pointer
    143144    cxy_t     chdev_cxy = GET_CXY( chdev_xp );
    144     chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
     145    chdev_t * chdev_ptr = GET_PTR( chdev_xp );
     146
     147    // get local and extended pointers on server thread
     148    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
     149    server_xp  = XPTR( chdev_cxy , server_ptr );
     150
     151    // get local pointer on core running the server thread
     152    core_ptr   = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
     153
     154    // get server core local index
     155    lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
    145156
    146157#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
     
    162173#endif
    163174
    164     // build extended pointers on client thread xlist and device root
    165     xptr_t  list_xp = XPTR( local_cxy , &this->wait_list );
    166     xptr_t  root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );
    167 
    168     // get local pointer on server thread
    169     server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
    170 
    171     // build extended pointer on chdev lock protecting queue
     175    // build extended pointer on client thread xlist
     176    xptr_t  list_xp    = XPTR( local_cxy , &this->wait_list );
     177
     178    // build extended pointer on chdev waiting queue root
     179    xptr_t  root_xp    = XPTR( chdev_cxy , &chdev_ptr->wait_root );
     180
     181    // build extended pointer on server thread blocked state
     182    xptr_t  blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked );
     183
     184    // build extended pointer on lock protecting chdev waiting queue
    172185    lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
    173186
    174     // get local pointer on core running the server thread
    175     core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
    176 
    177     // get core local index
    178     lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
    179 
    180     // compute server core != thread core
    181     different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
    182 
    183     // enter critical section to make atomic :
    184     // (1) client blocking
    185     // (2) client registration in server queue
    186     // (3) IPI to force server scheduling
    187     // (4) descheduling
     187    // critical section for the following sequence:
     188    // (1) take the lock protecting waiting queue
     189    // (2) block the client thread
     190    // (3) unblock the server thread if required
     191    // (4) register client thread in server queue
     192    // (5) send IPI to force server scheduling
     193    // (6) release the lock protecting waiting queue
     194    // (7) deschedule
    188195    // ... in this order
     196
     197    // enter critical section
    189198    hal_disable_irq( &save_sr );
     199
     200    // take the lock
     201    remote_spinlock_lock( lock_xp );
    190202
    191203    // block current thread
    192204    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
    193205
     206    if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
     207    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
     208
    194209    // register client thread in waiting queue
    195     remote_spinlock_lock( lock_xp );
    196210    xlist_add_last( root_xp , list_xp );
    197     remote_spinlock_unlock( lock_xp );
    198 
    199     // send IPI to core running the server thread if required
     211
     212    // send IPI to core running the server thread when server != client
     213    different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
    200214    if( different ) dev_pic_send_ipi( chdev_cxy , lid );
    201215   
     216    // release lock
     217    remote_spinlock_unlock( lock_xp );
     218
    202219    // deschedule
    203220    assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
     
    260277            remote_spinlock_unlock( lock_xp );
    261278
     279            // block
     280            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );
     281
    262282            // deschedule
     283            assert( thread_can_yield( server ) , __FUNCTION__ , "illegal sched_yield\n" );
    263284            sched_yield("I/O queue empty");
    264285        }
    265286        else                            // waiting queue not empty
    266287        {
     288            // get extended pointer on first client thread
     289            client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
     290
     291            // get client thread cluster and local pointer
     292            client_cxy = GET_CXY( client_xp );
     293            client_ptr = GET_PTR( client_xp );
     294
     295            // remove this first client thread from waiting queue
     296            xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
     297
    267298            // release lock
    268299            remote_spinlock_unlock( lock_xp );
    269 
    270             // get extended pointer on first client thread
    271             client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    272 
    273             // get client thread cluster, local pointer, and identifier
    274             client_cxy = GET_CXY( client_xp );
    275             client_ptr = (thread_t *)GET_PTR( client_xp );
    276300
    277301#if DEBUG_CHDEV_SERVER_RX
     
    300324            chdev->cmd( client_xp );
    301325       
    302             // remove the client thread from waiting queue
    303             remote_spinlock_lock( lock_xp );
    304             xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
    305             remote_spinlock_unlock( lock_xp );
    306 
    307326            // unblock client thread
    308327            thread_unblock( client_xp , THREAD_BLOCKED_IO );
     
    343362    chdev_t     * chdev_ptr;
    344363
     364    assert( (file_xp != XPTR_NULL) , __FUNCTION__,
     365    "file_xp == XPTR_NULL\n" );
     366
    345367    // get cluster and local pointer on remote file descriptor
    346368    // associated inode and chdev are stored in same cluster as the file desc.
     
    353375
    354376    assert( (inode_type == INODE_TYPE_DEV) , __FUNCTION__ ,
    355     "inode type %d is not INODE_TYPE_DEV", inode_type );
     377    "inode type %d is not INODE_TYPE_DEV\n", inode_type );
    356378
    357379    // get chdev local pointer from inode extension
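
Taken together, the chdev.c hunks above reorder the client/server handshake so that client blocking, server wake-up, queue registration and IPI are all performed inside one critical section protected by the waiting-queue lock. A condensed sketch of the new client-side sequence, using the primitives visible in the diff (the final hal_restore_irq() is assumed, as it lies outside the displayed context):

    // client side of chdev_register_command() after this changeset (sketch)
    hal_disable_irq( &save_sr );                      // enter critical section
    remote_spinlock_lock( lock_xp );                  // take waiting-queue lock
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO );   // block client

    // wake the server thread if it parked itself on IDLE (empty queue)
    if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
        thread_unblock( server_xp , THREAD_BLOCKED_IDLE );

    xlist_add_last( root_xp , list_xp );              // register the request
    if( different ) dev_pic_send_ipi( chdev_cxy , lid );   // kick the server core
    remote_spinlock_unlock( lock_xp );                // release the lock
    sched_yield( "blocked on I/O" );                  // deschedule
    hal_restore_irq( save_sr );                       // assumed: exit critical section

On the server side, the matching change dequeues the first client while still holding the lock, and blocks the server on THREAD_BLOCKED_IDLE when its queue is empty instead of simply descheduling.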
  • trunk/kernel/kern/chdev.h

    r428 r440  
    4242 * independent) Channel Device descriptor (in brief "chdev").
    4343 * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev
    44  * descriptor for each channel (and for each RX/TX direction for the NIC device).
     44 * descriptor for each channel (and for each RX/TX direction for the NIC and TXT devices).
    4545 * Each chdev contains a waiting queue, registering the "client threads" requests,
    4646 * and an associated "server thread", handling these requests.
  • trunk/kernel/kern/cluster.c

    r438 r440  
    153153#endif
    154154
    155     // initialises RPC fifo
    156         local_fifo_init( &cluster->rpc_fifo );
    157     cluster->rpc_threads = 0;
     155    // initialises RPC FIFOs
     156        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
     157    {
     158            local_fifo_init( &cluster->rpc_fifo[lid] );
     159        cluster->rpc_threads[lid] = 0;
     160    }
    158161
    159162#if( DEBUG_CLUSTER_INIT & 1 )
     
    221224lid_t cluster_select_local_core()
    222225{
    223     uint32_t min = 100;
    224     lid_t    sel = 0;
    225     lid_t    lid;
     226    uint32_t      min = 1000;
     227    lid_t         sel = 0;
     228    uint32_t      nthreads;
     229    lid_t         lid;
     230    scheduler_t * sched;
    226231
    227232    cluster_t * cluster = LOCAL_CLUSTER;
     
    229234    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    230235    {
    231         if( cluster->core_tbl[lid].usage < min )
     236        sched    = &cluster->core_tbl[lid].scheduler;
     237        nthreads = sched->u_threads_nr + sched->k_threads_nr;
     238
     239        if( nthreads < min )
    232240        {
    233             min = cluster->core_tbl[lid].usage;
     241            min = nthreads;
    234242            sel = lid;
    235243        }
     
    323331    bool_t      found;
    324332
     333#if DEBUG_CLUSTER_PID_ALLOC
     334uint32_t cycle = (uint32_t)hal_get_cycles();
     335if( DEBUG_CLUSTER_PID_ALLOC < cycle )
     336printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
     337__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     338#endif
     339
    325340    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
    326341
     
    361376    }
    362377
     378#if DEBUG_CLUSTER_PID_ALLOC
     379cycle = (uint32_t)hal_get_cycles();
     380if( DEBUG_CLUSTER_PID_ALLOC < cycle )
     381printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
     382__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
     383#endif
     384
    363385} // end cluster_pid_alloc()
    364386
     
    366388void cluster_pid_release( pid_t pid )
    367389{
     390
     391#if DEBUG_CLUSTER_PID_RELEASE
     392uint32_t cycle = (uint32_t)hal_get_cycles();
     393if( DEBUG_CLUSTER_PID_RELEASE < cycle )
     394printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
     395__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
     396#endif
     397
    368398    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    369399    lpid_t lpid       = LPID_FROM_PID( pid );
     
    371401    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
    372402
    373     // check pid argument
    374     assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) ,
    375     __FUNCTION__ , "illegal PID" );
     403    // check lpid
     404    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER), __FUNCTION__ ,
     405    "illegal LPID = %d" , lpid );
     406
     407    // check owner cluster
     408    assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
     409    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
    376410
    377411    // get the process manager lock
     
    384418    // release the process manager lock
    385419    spinlock_unlock( &pm->pref_lock );
     420
     421#if DEBUG_CLUSTER_PID_RELEASE
     422cycle = (uint32_t)hal_get_cycles();
     423if( DEBUG_CLUSTER_PID_RELEASE < cycle )
     424printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
     425__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     426#endif
    386427
    387428} // end cluster_pid_release()
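
The DEBUG_CLUSTER_PID_ALLOC / DEBUG_CLUSTER_PID_RELEASE blocks added above follow the instrumentation convention used throughout this changeset: each function has a dedicated DEBUG_* symbol holding an activation cycle, and the trace fires only once the current cycle count exceeds that threshold. Enabling a trace is therefore a one-line configuration change (illustrative values; the actual symbols live in the kernel configuration header):

    // 0 disables the trace; a non-zero value is the cycle after which
    // the [DBG] printk() lines above start firing
    #define DEBUG_CLUSTER_PID_ALLOC    1000000
    #define DEBUG_CLUSTER_PID_RELEASE  1000000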
  • trunk/kernel/kern/cluster.h

    r438 r440  
    9696typedef struct cluster_s
    9797{
    98         spinlock_t        kcm_lock;          /*! local, protect creation of KCM allocators    */
     98        spinlock_t      kcm_lock;          /*! local, protect creation of KCM allocators      */
    9999
    100100    // global parameters
    101         uint32_t          paddr_width;       /*! number of bits in physical address           */
    102     uint32_t          x_width;           /*! number of bits to code x_size  (can be 0)    */
    103     uint32_t          y_width;           /*! number of bits to code y_size  (can be 0)    */
    104         uint32_t          x_size;            /*! number of clusters in a row    (can be 1)    */
    105         uint32_t          y_size;            /*! number of clusters in a column (can be 1)    */
    106         cxy_t             io_cxy;            /*! io cluster identifier                        */
    107     uint32_t          dqdt_root_level;   /*! index of root node in dqdt_tbl[]             */
    108     uint32_t          nb_txt_channels;   /*! number of TXT channels                       */
    109     uint32_t          nb_nic_channels;   /*! number of NIC channels                       */
    110     uint32_t          nb_ioc_channels;   /*! number of IOC channels                       */
    111     uint32_t          nb_fbf_channels;   /*! number of FBF channels                       */
     101        uint32_t        paddr_width;       /*! number of bits in physical address             */
     102    uint32_t        x_width;           /*! number of bits to code x_size  (can be 0)      */
     103    uint32_t        y_width;           /*! number of bits to code y_size  (can be 0)      */
     104        uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
     105        uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
     106        cxy_t           io_cxy;            /*! io cluster identifier                          */
     107    uint32_t        dqdt_root_level;   /*! index of root node in dqdt_tbl[]               */
     108    uint32_t        nb_txt_channels;   /*! number of TXT channels                         */
     109    uint32_t        nb_nic_channels;   /*! number of NIC channels                         */
     110    uint32_t        nb_ioc_channels;   /*! number of IOC channels                         */
     111    uint32_t        nb_fbf_channels;   /*! number of FBF channels                         */
    112112
    113113    // local parameters
    114         uint32_t          cores_nr;          /*! actual number of cores in cluster            */
    115     uint32_t          ram_size;          /*! physical memory size                         */
    116     uint32_t          ram_base;          /*! physical memory base (local address)         */
    117 
    118         core_t            core_tbl[CONFIG_MAX_LOCAL_CORES];         /*! embedded cores        */
    119 
    120         list_entry_t      dev_root;          /*! root of list of devices in cluster           */
     114        uint32_t        cores_nr;          /*! actual number of cores in cluster              */
     115    uint32_t        ram_size;          /*! physical memory size                           */
     116    uint32_t        ram_base;          /*! physical memory base (local address)           */
     117
     118        core_t          core_tbl[CONFIG_MAX_LOCAL_CORES];    /*! embedded cores               */
     119
     120        list_entry_t    dev_root;          /*! root of list of devices in cluster             */
    121121
    122122    // memory allocators
    123         ppm_t             ppm;               /*! embedded kernel page manager                 */
    124         khm_t             khm;               /*! embedded kernel heap manager                 */
    125         kcm_t             kcm;               /*! embedded kernel cache manager (for KCMs)     */
    126 
    127     kcm_t           * kcm_tbl[KMEM_TYPES_NR];         /*! pointers on allocated KCMs      */
     123        ppm_t           ppm;               /*! embedded kernel page manager                   */
     124        khm_t           khm;               /*! embedded kernel heap manager                   */
     125        kcm_t           kcm;               /*! embedded kernel KCMs manager                   */
     126
     127    kcm_t         * kcm_tbl[KMEM_TYPES_NR];              /*! pointers on allocated KCMs   */
    128128
    129129    // RPC
    130         remote_fifo_t     rpc_fifo;          /*! RPC fifo (one per cluster)                   */
    131     uint32_t          rpc_threads;       /*! current number of RPC threads in cluster     */
     130        remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core        */
     131    uint32_t        rpc_threads[CONFIG_MAX_LOCAL_CORES]; /*! RPC threads per core         */
    132132
    133133    // DQDT
    134         dqdt_node_t       dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes in cluster */
     134        dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];     /*! embedded DQDT nodes          */
    135135
    136136    // Local process manager
    137     pmgr_t            pmgr;            /*! embedded process manager                       */
    138 
    139     void            * pic_extend;      /*! PIC implementation specific extension          */
     137    pmgr_t          pmgr;              /*! embedded process manager                       */
     138
     139    void          * pic_extend;        /*! PIC implementation specific extension          */
    140140}
    141141cluster_t;
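
With rpc_fifo and rpc_threads turned into per-core arrays, any cluster-wide quantity must now be accumulated over the cores. A hypothetical helper (not part of the changeset) showing the new access pattern:

    // hypothetical helper: total number of RPC threads in a cluster
    uint32_t cluster_rpc_threads_total( cluster_t * cluster )
    {
        uint32_t total = 0;
        lid_t    lid;
        for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
        {
            total += cluster->rpc_threads[lid];   // one counter per core
        }
        return total;
    }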
  • trunk/kernel/kern/kernel_init.c

    r438 r440  
    12381238    dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );
    12391239
     1240#if DEBUG_KERNEL_INIT
     1241printk("\n[DBG] %s : thread %x on core[%x,%d] jumps to thread_idle_func() / cycle %d\n",
     1242__FUNCTION__ , CURRENT_THREAD , local_cxy , core_lid , (uint32_t)hal_get_cycles() );
     1243#endif
     1244
    12401245    // each core jump to thread_idle_func
    12411246    thread_idle_func();
  • trunk/kernel/kern/process.c

    r438 r440  
    106106    char        rx_path[40];
    107107    char        tx_path[40];
     108    xptr_t      file_xp;
    108109    xptr_t      chdev_xp;
    109110    chdev_t *   chdev_ptr;
     
    179180        assert( (stdin_id == 0) , __FUNCTION__ , "stdin index must be 0" );
    180181
     182#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     183cycle = (uint32_t)hal_get_cycles();
     184if( DEBUG_PROCESS_REFERENCE_INIT )
     185printk("\n[DBG] %s : thread %x / stdin open for process %x / cycle %d\n",
     186__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     187#endif
     188
    181189        // create stdout pseudo file         
    182190        error = vfs_open( process,
     
    190198        assert( (stdout_id == 1) , __FUNCTION__ , "stdout index must be 1" );
    191199
     200#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     201cycle = (uint32_t)hal_get_cycles();
     202if( DEBUG_PROCESS_REFERENCE_INIT )
     203printk("\n[DBG] %s : thread %x / stdout open for process %x / cycle %d\n",
     204__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     205#endif
     206
    192207        // create stderr pseudo file         
    193208        error = vfs_open( process,
     
    201216        assert( (stderr_id == 2) , __FUNCTION__ , "stderr index must be 2" );
    202217
     218#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     219cycle = (uint32_t)hal_get_cycles();
     220if( DEBUG_PROCESS_REFERENCE_INIT )
     221printk("\n[DBG] %s : thread %x / stderr open for process %x / cycle %d\n",
     222__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     223#endif
     224
    203225    }
    204226    else                                            // normal user process
    205227    {
     228        // get extended pointer on stdin pseudo file in model process
     229        file_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy , &model_ptr->fd_array.array[0] ) );
     230
    206231        // get extended pointer on model process TXT chdev
    207         chdev_xp = chdev_from_file( model_ptr->fd_array.array[0] );
     232        chdev_xp = chdev_from_file( file_xp );
    208233 
    209234        // get cluster and local pointer on chdev
     
    374399uint32_t cycle = (uint32_t)hal_get_cycles();
    375400if( DEBUG_PROCESS_DESTROY )
    376 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
    377 __FUNCTION__ , CURRENT_THREAD , process, pid , cycle );
     401printk("\n[DBG] %s : thread %x enter in cluster %x / pid %x / process %x / cycle %d\n",
     402__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , process , cycle );
    378403#endif
    379404
     
    401426    }
    402427
    403     // release the process PID to cluster manager
    404     cluster_pid_release( pid );
     428    // release the process PID to cluster manager if owner cluster
     429    if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );
    405430
    406431    // FIXME close all open files and update dirty [AG]
     
    507532    XLIST_FOREACH( root_xp , iter_xp )
    508533    {
     534        // atomically increment responses counter
     535        hal_atomic_add( (void *)&rpc.responses , 1 );
     536
     537        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     538        process_cxy = GET_CXY( process_xp );
    509539
    510540#if DEBUG_PROCESS_SIGACTION
     
    513543__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
    514544#endif
    515         // atomically increment responses counter
    516         hal_atomic_add( (void *)&rpc.responses , 1 );
    517 
    518         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
    519         process_cxy = GET_CXY( process_xp );
    520 
    521545        // call RPC in target cluster
    522546        rpc_process_sigaction_client( process_cxy , &rpc );
     
    529553    hal_restore_irq( save_sr);
    530554
    531     // client deschedule : will be unblocked by the last RPC server thread
     555    // client thread deschedules : it will be unblocked by the last RPC server thread
    532556    sched_yield("blocked on rpc_process_sigaction");
    533557
     
    542566
    543567/////////////////////////////////////////////////
    544 void process_block_threads( process_t * process )
     568void process_block_threads( process_t * process,
     569                            xptr_t      client_xp )
    545570{
    546571    thread_t          * target;         // pointer on target thread
     
    567592    spinlock_lock( &process->th_lock );
    568593
    569     // loop to block all threads but the main thread
     594    // loop on target process local threads
    570595    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    571596    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
     
    577602            count++;
    578603
    579             // main thread should not be deleted
    580             if( (ltid != 0) || (owner_cxy != local_cxy) )
     604            // main thread and client thread should not be blocked
     605            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
     606                (client_xp) != XPTR( local_cxy , target ) )          // not client thread
    581607            {
    582608                // set the global blocked bit in target thread descriptor.
     
    626652}  // end process_block_threads()
    627653
    628 ///////////////////////////////////////////////////
    629 void process_unblock_threads( process_t * process )
    630 {
    631     thread_t          * target;        // pointer on target thead
     654/////////////////////////////////////////////////
     655void process_delete_threads( process_t * process,
     656                             xptr_t      client_xp )
     657{
    632658    thread_t          * this;          // pointer on calling thread
     659    thread_t          * target;        // local pointer on target thread
     660    xptr_t              target_xp;     // extended pointer on target thread
     661    cxy_t               owner_cxy;     // owner process cluster
    633662    uint32_t            ltid;          // index in process th_tbl
    634     uint32_t            count;         // requests counter
     663    uint32_t            count;         // threads counter
    635664
    636665    // get calling thread pointer
    637666    this = CURRENT_THREAD;
     667
     668    // get target process owner cluster
     669    owner_cxy = CXY_FROM_PID( process->pid );
    638670
    639671#if DEBUG_PROCESS_SIGACTION
     
    647679    spinlock_lock( &process->th_lock );
    648680
    649     // loop on process threads to unblock all threads
     681    // loop on target process local threads                       
    650682    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    651     for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
     683    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
    652684    {
    653685        target = process->th_tbl[ltid];
    654686
    655         if( target != NULL )             // thread found
     687        if( target != NULL )    // valid thread 
    656688        {
    657689            count++;
    658 
    659             // reset the global blocked bit in target thread descriptor.
    660             thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
     690            target_xp = XPTR( local_cxy , target );
     691
     692            // main thread and client thread should not be marked for deletion
     693            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
     694                (client_xp) != target_xp )                           // not client thread
     695            {
     696                // mark target thread for delete and block it
     697                thread_delete( target_xp , process->pid , false );   // not forced
     698            }
    661699        }
    662700    }
     
    672710#endif
    673711
    674 }  // end process_unblock_threads()
    675 
    676 //////////////////////////////////////////////////
    677 void process_delete_threads( process_t * process )
    678 {
    679     thread_t          * target;        // pointer on target thread
     712}  // end process_delete_threads()
     713
     714///////////////////////////////////////////////////
     715void process_unblock_threads( process_t * process )
     716{
     717    thread_t          * target;        // pointer on target thread
     718    thread_t          * this;          // pointer on calling thread
    680719    uint32_t            ltid;          // index in process th_tbl
    681     uint32_t            count;         // threads counter
     720    uint32_t            count;         // requests counter
     721
     722    // get calling thread pointer
     723    this = CURRENT_THREAD;
    682724
    683725#if DEBUG_PROCESS_SIGACTION
     
    685727if( DEBUG_PROCESS_SIGACTION < cycle )
    686728printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    687 __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
     729__FUNCTION__ , this , process->pid , local_cxy , cycle );
    688730#endif
    689731
     
    691733    spinlock_lock( &process->th_lock );
    692734
    693     // loop to set the REQ_DELETE flag on all threads but the main
     735    // loop on process threads to unblock all threads
    694736    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    695     for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
     737    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    696738    {
    697739        target = process->th_tbl[ltid];
    698740
    699         if( target != NULL )
     741        if( target != NULL )             // thread found
    700742        {
    701743            count++;
    702            
    703             thread_kill( XPTR( local_cxy , target ),
    704                          false,                       // is_exit
    705                          true );                      // is_forced
     744
     745            // reset the global blocked bit in target thread descriptor.
     746            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
    706747        }
    707748    }
     
    714755if( DEBUG_PROCESS_SIGACTION < cycle )
    715756printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    716 __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
    717 #endif
    718 
    719 }  // end process_delete_threads()
     757__FUNCTION__ , this , process->pid , local_cxy , cycle );
     758#endif
     759
     760}  // end process_unblock_threads()
    720761
    721762///////////////////////////////////////////////
     
    749790
    750791    // allocate memory for a new local process descriptor
    751     // and initialise it from reference cluster if required
     792    // and initialise it from reference cluster if not found
    752793    if( !found )
    753794    {
     
    765806        if( error ) return NULL;
    766807    }
     808
     809#if DEBUG_PROCESS_GET_LOCAL_COPY
     810uint32_t cycle = (uint32_t)hal_get_cycles();
     811if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
     812printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
     813__FUNCTION__ , local_cxy , pid , process_ptr , cycle );
     814#endif
    767815
    768816    return process_ptr;
     
    10321080    // check parent process is the reference process
    10331081    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
    1034 
    1035 printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",
    1036 __FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );
    10371082
    10381083    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
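
Both process_block_threads() and process_delete_threads() now receive the <client_xp> argument and apply the same skip rule in their scan loop. The rule, condensed from the hunks above:

    // skip rule (sketch): never act on the main thread (ltid 0 in the
    // owner cluster), nor on the client thread that requested the action
    target_xp = XPTR( local_cxy , target );
    if( ((ltid != 0) || (owner_cxy != local_cxy)) &&   // not the main thread
        (client_xp != target_xp) )                     // not the client thread
    {
        thread_delete( target_xp , process->pid , false );   // not forced
    }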
  • trunk/kernel/kern/process.h

    r436 r440  
    101101 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated
    102102 *    <sync_lock>, that are dynamically allocated, are only defined in the reference cluster.
    103  * 5) The <children_root>, <children_nr>, <brothers_list>, and <txt_list> fields are only
     103 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only
    104104 *    defined in the reference cluster, and are undefined in other clusters.
    105105 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
    106106 *    are defined in all process descriptors copies.
    107107 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.
     108 *    The term state format is defined in the shared_syscalls.h file.
    108109 ********************************************************************************************/
    109110
     
    282283 * all threads of a process identified by the <pid> argument, depending on the
    283284 * <action_type> argument.
    284  * WARNING : the DELETE action is NOT executed on the target process main thread
    285  * (thread 0 in process owner cluster).
     285 * WARNING : the DELETE and BLOCK actions are NOT executed on the target process main thread
     286 * (thread 0 in process owner cluster), nor on the calling thread itself.
    286287 * It uses the multicast, non blocking rpc_process_sigaction_client() function to send
    287  * parallel requests to all remote clusters containing a process copy.
     288 * parallel requests to all remote clusters containing process copies.
    288289 * Then it blocks and deschedule to wait completion of these parallel requests.
    289290 *
     
    305306
    306307/*********************************************************************************************
    307  * This function blocks all threads - but the main thread - for a given <process>
    308  * in a given cluster. It sets the THREAD_BLOCKED_GLOBAL bit in the thread descriptor,
    309  * and request the relevant schedulers to acknowledge the blocking, using IPI if required.
     308 * This function blocks all threads for a given <process> in the local cluster.
     309 * It scans the list of local threads, and sets the THREAD_BLOCKED_GLOBAL bit for all
     310 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread
     311 * identified by the <client_xp> argument. It requests the relevant schedulers to acknowledge
     312 * the blocking, using IPI if required, and returns only when all blockable threads
     313 * in cluster are actually blocked.
    310314 * The threads are not detached from the scheduler, and not detached from the local process.
    311  * This function returns only when all blockable threads in cluster are actually blocked.
    312315 *********************************************************************************************
    313316 * @ process     : pointer on the target process descriptor.
    314  ********************************************************************************************/
    315 void process_block_threads( process_t * process );
     317 * @ client_xp   : extended pointer on the client thread that should not be blocked.
     318 ********************************************************************************************/
     319void process_block_threads( process_t * process,
     320                            xptr_t      client_xp );
     321
     322/*********************************************************************************************
     323 * This function marks for deletion all threads for a given <process> in the local cluster.
     324 * It scans the list of local threads, and sets the THREAD_FLAG_REQ_DELETE bit for all
     325 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread
     326 * identified by the <client_xp> argument.
     327 * The actual deletion will be done by the scheduler at the next scheduling point.
     328 *********************************************************************************************
     329 * @ process     : pointer on the process descriptor.
     330 * @ client_xp   : extended pointer on the client thread that should not be marked.
     331 ********************************************************************************************/
     332void process_delete_threads( process_t * process,
     333                            xptr_t       client_xp );
    316334
    317335/*********************************************************************************************
     
    321339 ********************************************************************************************/
    322340void process_unblock_threads( process_t * process );
    323 
    324 /*********************************************************************************************
    325  * This function marks for deletion all threads - but the main thread - for a given <process>
    326  * in a given cluster. It sets the THREAD_FLAG_REQ_DELETE bit. For each marked thread,
    327  * the following actions will be done by the scheduler at the next scheduling point:
    328  * - the thread will be detached from the scheduler.
    329  * - the thread will be detached from the local process descriptor.
    330  * - the thread will be detached from parent if required.
    331  * - the memory allocated to the thread descriptor is released.
    332  * - the memory allocated to the process descriptor is released, if it is the last thread.
    333  *********************************************************************************************
    334  * @ process     : pointer on the process descriptor.
    335  ********************************************************************************************/
    336 void process_delete_threads( process_t * process );
    337341
    338342/*********************************************************************************************
     
    398402                            struct thread_s ** child_thread_ptr );
    399403
    400 
    401404/********************   File Management Operations   ****************************************/
    402405
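
This API change is what allows the exit syscall to be called by any thread of the process (point 2 of the commit message): the calling thread passes itself as <client_xp>, so it is neither blocked nor marked for deletion before the exit protocol completes. A plausible local-cluster sketch (not the actual sys_exit() code):

    // the requesting thread excludes itself by passing its own extended pointer
    xptr_t client_xp = XPTR( local_cxy , CURRENT_THREAD );
    process_block_threads ( process , client_xp );    // block all other threads
    process_delete_threads( process , client_xp );    // then mark them for deletion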
  • trunk/kernel/kern/rpc.c

    r438 r440  
    114114    client_core_lid = this->core->lid;
    115115
    116     // select a server_core index:
    117     // use client core index if possible / core 0 otherwise
     116    // select a server_core : use client core index if possible / core 0 otherwise
    118117    if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) )
    119118    {
     
    133132
    134133    // get local pointer on rpc_fifo in remote cluster,
    135     remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     134    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    136135
    137136        // post RPC in remote fifo / deschedule and retry if fifo full
     
    231230    core_t        * core     = this->core;
    232231    scheduler_t   * sched    = &core->scheduler;
    233         remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     232        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid];
    234233
    235234#if DEBUG_RPC_SERVER_GENERIC
     
    243242        hal_disable_irq( &sr_save );
    244243
    245     // activate (or create) RPC thread if RPC FIFO not empty
     244    // activate (or create) RPC thread if RPC FIFO not empty and no active RPC thread
    246245        if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    247246    {
     
    254253#endif
    255254
    256         // search one IDLE RPC thread  
     255        // search one IDLE RPC thread associated to the selected core  
    257256        list_entry_t * iter;
    258257        LIST_FOREACH( &sched->k_root , iter )
     
    270269        }
    271270
    272         // create new RPC thread if not found   
     271        // create new RPC thread for the selected core if not found   
    273272        if( found == false )                   
    274273        {
     
    277276                                                      &rpc_thread_func,
    278277                                          NULL,
    279                                                       this->core->lid );
    280                 if( error )
    281             {
    282                 assert( false , __FUNCTION__ ,
    283                 "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    284             }
     278                                                      core->lid );
     279                 
     280            assert( (error == 0), __FUNCTION__ ,
     281            "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    285282
    286283            // unblock created RPC thread
    287284            thread->blocked = 0;
    288285
    289             // update core descriptor counter 
    290             hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
     286            // update RPC threads counter
     287            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 );
    291288
    292289#if DEBUG_RPC_SERVER_GENERIC
     
    325322void rpc_thread_func()
    326323{
    327     uint32_t     count;       // handled RPC requests counter
    328     error_t      empty;       // local RPC fifo state
    329     xptr_t       desc_xp;     // extended pointer on RPC request
    330     cxy_t        desc_cxy;    // RPC request cluster (client)
    331     rpc_desc_t * desc_ptr;    // RPC request local pointer
    332     uint32_t     index;       // RPC request index
    333     thread_t   * thread_ptr;  // local pointer on client thread
    334     lid_t        core_lid;    // local index of client core
    335     bool_t       blocking;    // blocking RPC when true
     324    error_t         empty;              // local RPC fifo state
     325    xptr_t          desc_xp;            // extended pointer on RPC request
     326    cxy_t           desc_cxy;           // RPC request cluster (client)
     327    rpc_desc_t    * desc_ptr;           // RPC request local pointer
     328    uint32_t        index;              // RPC request index
     329    thread_t      * client_ptr;         // local pointer on client thread
     330        thread_t      * server_ptr;         // local pointer on server thread
     331    xptr_t          server_xp;          // extended pointer on server thread
     332    lid_t           client_core_lid;    // local index of client core
     333    lid_t           server_core_lid;    // local index of server core
     334    bool_t          blocking;           // blocking RPC when true
     335        remote_fifo_t * rpc_fifo;           // local pointer on RPC fifo
    336336 
    337337    // makes RPC thread not preemptable
    338338        hal_disable_irq( NULL );
    339339 
    340         thread_t      * this     = CURRENT_THREAD;
    341         remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     340        server_ptr      = CURRENT_THREAD;
     341    server_xp       = XPTR( local_cxy , server_ptr );
     342    server_core_lid = server_ptr->core->lid;
     343        rpc_fifo        = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    342344
    343345    // two embedded loops:
    344346    // - external loop : "infinite" RPC thread
    345     // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
     347    // - internal loop : handle one RPC request per iteration
    346348 
    347349        while(1)  // infinite loop
    348350        {
    349351        // try to take RPC_FIFO ownership
    350         if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
     352        if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )
    351353        {
    352354
     
    355357if( DEBUG_RPC_SERVER_GENERIC < cycle )
    356358printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
    357 __FUNCTION__, this, local_cxy, cycle );
    358 #endif
    359             // initializes RPC requests counter
    360             count = 0;
    361 
    362                     // exit internal loop in three cases:
    363             // - RPC fifo is empty
    364             // - ownership has been lost (because descheduling)
    365             // - max number of RPCs is reached
    366                 while( 1 )  // internal loop
     359__FUNCTION__, server_ptr, local_cxy, cycle );
     360#endif
     361                while( 1 )  //  one RPC request per iteration
    367362            {
    368363                    empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
    369364
    370                     if ( empty == 0 ) // one RPC request found
     365                // exit when FIFO empty or FIFO ownership lost (in case of descheduling)
     366                    if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) )
    371367                {
    372368                    // get client cluster and pointer on RPC descriptor
     
    381377if( DEBUG_RPC_SERVER_GENERIC < cycle )
    382378printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
    383 __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );
     379__FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr );
    384380#endif
    385381                    // call the relevant server function
     
    390386if( DEBUG_RPC_SERVER_GENERIC < cycle )
    391387printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
    392 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
    393 #endif
    394                     // increment handled RPCs counter
    395                         count++;
    396 
     388__FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle );
     389#endif
    397390                    // decrement response counter in RPC descriptor if blocking
    398391                    if( blocking )
     
    402395
    403396                        // get client thread pointer and client core lid from RPC descriptor
    404                         thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
    405                         core_lid  = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
     397                        client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     398                        client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    406399
    407400                        // unblock client thread
    408                         thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );
     401                        thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
    409402
    410403                        hal_fence();
     
    414407if( DEBUG_RPC_SERVER_GENERIC < cycle )
    415408printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
    416 __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );
     409__FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle );
    417410#endif
    418411                        // send IPI to client core
    419                             dev_pic_send_ipi( desc_cxy , core_lid );
     412                            dev_pic_send_ipi( desc_cxy , client_core_lid );
    420413                    }
    421414                        }
    422        
    423                 // chek exit condition
    424                         if( local_fifo_is_empty( rpc_fifo )  ||
    425                     (rpc_fifo->owner != this->trdid) ||
    426                     (count >= CONFIG_RPC_PENDING_MAX) ) break;
     415                else
     416                {
     417                    break;
     418                }
    427419                } // end internal loop
    428420
    429421            // release rpc_fifo ownership if not lost
    430             if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
     422            if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0;
    431423
    432424        }  // end if RPC fifo
    433425
    434         // sucide if too many RPC threads in cluster
    435         if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
     426        // RPC thread blocks on IDLE
     427        thread_block( server_xp , THREAD_BLOCKED_IDLE );
     428
     429        // suicide if too many RPC threads / simply deschedule otherwise
     430        if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
    436431            {
    437432
     
    440435if( DEBUG_RPC_SERVER_GENERIC < cycle )
    441436printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
    442 __FUNCTION__, this, local_cxy, cycle );
     437__FUNCTION__, server_ptr, local_cxy, cycle );
    443438#endif
    444439            // update RPC threads counter
    445440                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
    446441
    447             // suicide
    448                 thread_kill( XPTR( local_cxy , this ),
    449                          true,                      // is_exit
    450                          true );                    // is forced
     442            // RPC thread blocks on GLOBAL
     443                thread_block( server_xp , THREAD_BLOCKED_GLOBAL );
     444
     445            // RPC thread sets the REQ_DELETE flag to suicide
     446            hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );
    451447            }
     448        else
     449        {
    452450
    453451#if DEBUG_RPC_SERVER_GENERIC
    454452uint32_t cycle = (uint32_t)hal_get_cycles();
    455453if( DEBUG_RPC_SERVER_GENERIC < cycle )
    456 printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
    457 __FUNCTION__, this, local_cxy, cycle );
    458 #endif
    459 
    460         // Block and deschedule
    461         thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
    462         sched_yield("RPC fifo empty or too much work");
    463 
    464 #if DEBUG_RPC_SERVER_GENERIC
    465 cycle = (uint32_t)hal_get_cycles();
    466 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    467 printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
    468 __FUNCTION__, this, local_cxy, cycle );
    469 #endif
     454printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n",
     455__FUNCTION__, server_ptr, local_cxy, cycle );
     456#endif
     457
     458            // RPC thread deschedules
     459            assert( thread_can_yield( server_ptr ) , __FUNCTION__, "illegal sched_yield\n" );
     460            sched_yield("RPC fifo empty");
     461        }
    470462
    471463        } // end infinite loop
     
    646638
    647639    // set input arguments in RPC descriptor 
    648     rpc.args[0] = (uint64_t)(intptr_t)ref_process_xp;
    649     rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp;
     640    rpc.args[0] = (uint64_t)ref_process_xp;
     641    rpc.args[1] = (uint64_t)parent_thread_xp;
    650642
    651643    // register RPC request in remote RPC fifo
     
    903895void rpc_process_sigaction_server( xptr_t xp )
    904896{
    905     pid_t        pid;              // target process identifier
    906     process_t  * process;          // pointer on local target process descriptor
    907     uint32_t     action;           // sigaction index
    908     thread_t   * client_thread;    // pointer on client thread in client cluster
    909     cxy_t        client_cxy;       // client cluster identifier
    910     rpc_desc_t * rpc;              // pointer on rpc descriptor in client cluster
    911     xptr_t       count_xp;         // extended pointer on response counter
    912     lid_t        client_lid;       // client core local index
     897    pid_t        pid;             // target process identifier
     898    process_t  * process;         // pointer on local target process descriptor
     899    uint32_t     action;          // sigaction index
     900    thread_t   * client_ptr;      // pointer on client thread in client cluster
     901    xptr_t       client_xp;       // extended pointer on client thread
     902    cxy_t        client_cxy;      // client cluster identifier
     903    rpc_desc_t * rpc;             // pointer on rpc descriptor in client cluster
     904    xptr_t       count_xp;        // extended pointer on responses counter
     905    uint32_t     count_value;     // responses counter value
     906    lid_t        client_lid;      // client core local index
    913907
    914908    // get client cluster identifier and pointer on RPC descriptor
     
    927921#endif
    928922
     923    // get client thread pointers
     924    client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
     925    client_xp  = XPTR( client_cxy , client_ptr );
     926
    929927    // get local process descriptor
    930928    process = cluster_get_local_process_from_pid( pid );
    931929
    932930    // call relevant kernel function
    933     if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
    934     else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
     931    if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process , client_xp );
     932    else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process , client_xp );
    935933    else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
    936934
     
    939937
    940938    // decrement the responses counter in RPC descriptor,
     939    count_value = hal_remote_atomic_add( count_xp , -1 );
     940
    941941    // unblock the client thread only if it is the last response.
    942     if( hal_remote_atomic_add( count_xp , -1 ) == 1 )
     942    if( count_value == 1 )
    943943    {
    944         // get client thread pointer and client core lid
    945         client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
     944        // get client core lid
    946945        client_lid    = (lid_t)     hal_remote_lw ( XPTR( client_cxy , &rpc->lid    ) );
    947946
    948         thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC );
     947        // unblock client thread
     948        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
     949
     950        // send an IPI to client core
    949951        dev_pic_send_ipi( client_cxy , client_lid );
    950952    }
     
    11921194                                    vfs_dentry_t * dentry )
    11931195{
     1196#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1197uint32_t cycle = (uint32_t)hal_get_cycles();
     1198if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1199printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1200__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1201#endif
     1202
    11941203    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11951204
     
    12061215    rpc_send( cxy , &rpc );
    12071216
     1217#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1218cycle = (uint32_t)hal_get_cycles();
     1219if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1220printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1221__FUNCTION__ , CURRENT_THREAD , cycle );
     1222#endif
    12081223}
    12091224
     
    12111226void rpc_vfs_dentry_destroy_server( xptr_t xp )
    12121227{
     1228#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1229uint32_t cycle = (uint32_t)hal_get_cycles();
     1230if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1231printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1232__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1233#endif
     1234
    12131235    vfs_dentry_t * dentry;
    12141236
     
    12231245    vfs_dentry_destroy( dentry );
    12241246
     1247#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1248cycle = (uint32_t)hal_get_cycles();
     1249if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1250printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1251__FUNCTION__ , CURRENT_THREAD , cycle );
     1252#endif
    12251253}
    12261254
     
    13191347                                  vfs_file_t * file )
    13201348{
     1349#if DEBUG_RPC_VFS_FILE_DESTROY
     1350uint32_t cycle = (uint32_t)hal_get_cycles();
     1351if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1352printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1353__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1354#endif
     1355
    13211356    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    13221357
     
    13331368    rpc_send( cxy , &rpc );
    13341369
     1370#if DEBUG_RPC_VFS_FILE_DESTROY
     1371cycle = (uint32_t)hal_get_cycles();
     1372if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1373printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1374__FUNCTION__ , CURRENT_THREAD , cycle );
     1375#endif
    13351376}
    13361377
     
    13381379void rpc_vfs_file_destroy_server( xptr_t xp )
    13391380{
     1381#if DEBUG_RPC_VFS_FILE_DESTROY
     1382uint32_t cycle = (uint32_t)hal_get_cycles();
     1383if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1384printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1385__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1386#endif
     1387
    13401388    vfs_file_t * file;
    13411389
     
    13501398    vfs_file_destroy( file );
    13511399
     1400#if DEBUG_RPC_VFS_FILE_DESTROY
     1401cycle = (uint32_t)hal_get_cycles();
     1402if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1403printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1404__FUNCTION__ , CURRENT_THREAD , cycle );
     1405#endif
    13521406}
    13531407
     
    15361590                              error_t   * error )      // out
    15371591{
     1592#if DEBUG_RPC_VMM_GET_VSEG
     1593uint32_t cycle = (uint32_t)hal_get_cycles();
     1594if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1595printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1596__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1597#endif
     1598
    15381599    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    15391600
     
    15551616    *error   = (error_t)rpc.args[3];
    15561617
     1618#if DEBUG_RPC_VMM_GET_VSEG
     1619cycle = (uint32_t)hal_get_cycles();
     1620if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1621printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1622__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1623#endif
    15571624}
    15581625
     
    15601627void rpc_vmm_get_vseg_server( xptr_t xp )
    15611628{
     1629#if DEBUG_RPC_VMM_GET_VSEG
     1630uint32_t cycle = (uint32_t)hal_get_cycles();
     1631if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1632printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1633__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1634#endif
     1635
    15621636    process_t   * process;
    15631637    intptr_t      vaddr;
     
    15821656    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    15831657
    1584 }
    1585 
    1586 
    1587 /////////////////////////////////////////////////////////////////////////////////////////
    1588 // [21]          Marshaling functions attached to RPC_VMM_GET_PTE  (blocking)
     1658#if DEBUG_RPC_VMM_GET_VSEG
     1659cycle = (uint32_t)hal_get_cycles();
     1660if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1661printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1662__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1663#endif
     1664}
     1665
     1666
     1667/////////////////////////////////////////////////////////////////////////////////////////
      1668// [21]          Marshaling functions attached to RPC_VMM_GET_PTE  (blocking)
    15891669/////////////////////////////////////////////////////////////////////////////////////////
    15901670
     
    15981678                             error_t   * error )   // out
    15991679{
    1600     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1601 
    1602     // initialise RPC descriptor header
    1603     rpc_desc_t  rpc;
    1604     rpc.index    = RPC_VMM_GET_PTE;
     1680#if DEBUG_RPC_VMM_GET_PTE
     1681uint32_t cycle = (uint32_t)hal_get_cycles();
     1682if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1683printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1684__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1685#endif
     1686
     1687    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     1688
     1689    // initialise RPC descriptor header
     1690    rpc_desc_t  rpc;
      1691    rpc.index    = RPC_VMM_GET_PTE;
    16051692    rpc.blocking = true;
    16061693    rpc.responses = 1;
     
    16191706    *error = (error_t)rpc.args[5];
    16201707
     1708#if DEBUG_RPC_VMM_GET_PTE
     1709cycle = (uint32_t)hal_get_cycles();
     1710if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1711printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1712__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1713#endif
    16211714}
    16221715
     
    16241717void rpc_vmm_get_pte_server( xptr_t xp )
    16251718{
     1719#if DEBUG_RPC_VMM_GET_PTE
     1720uint32_t cycle = (uint32_t)hal_get_cycles();
     1721if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1722printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1723__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1724#endif
     1725
    16261726    process_t   * process;
    16271727    vpn_t         vpn;
     
    16481748    hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
    16491749
     1750#if DEBUG_RPC_VMM_GET_PTE
     1751cycle = (uint32_t)hal_get_cycles();
     1752if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1753printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1754__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1755#endif
    16501756}
    16511757
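
All the client/server pairs in rpc.c above follow the same marshaling pattern: the client fills a local rpc_desc_t (index, blocking mode, expected responses, input args), calls rpc_send(), and on return reads the output args; the server receives an extended pointer on the client descriptor, fetches the inputs with hal_remote_* accesses, calls the local kernel service, and writes the outputs back into the client cluster. The following is a minimal sketch of that pattern for a hypothetical RPC_FOO service: RPC_FOO, foo() and the args[] layout are illustrative only, not part of this changeset.

    //////////////////////////////////////////////
    void rpc_foo_client( cxy_t     cxy,      // in  : server cluster
                         uint32_t  arg,      // in  : input argument
                         error_t * error )   // out : returned status
    {
        assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

        // initialise RPC descriptor header
        rpc_desc_t  rpc;
        rpc.index     = RPC_FOO;              // hypothetical RPC index
        rpc.blocking  = true;
        rpc.responses = 1;

        // set input argument in RPC descriptor
        rpc.args[0] = (uint64_t)arg;

        // register request in remote RPC fifo, and deschedule
        rpc_send( cxy , &rpc );

        // get output argument from RPC descriptor
        *error = (error_t)rpc.args[1];
    }

    //////////////////////////////////////
    void rpc_foo_server( xptr_t xp )
    {
        // get client cluster identifier and pointer on RPC descriptor
        cxy_t        client_cxy = GET_CXY( xp );
        rpc_desc_t * desc       = GET_PTR( xp );

        // get input argument from client RPC descriptor
        uint32_t arg = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );

        // call the local kernel service (hypothetical)
        error_t error = foo( arg );

        // set output argument in client RPC descriptor
        hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    }
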
  • trunk/kernel/kern/scheduler.c

    r438 r440  
    125125            thread = LIST_ELEMENT( current , thread_t , sched_list );
    126126
    127             // execute RPC thread if non blocked
    128             if( (thread->blocked == 0)  &&
    129                 (thread->type == THREAD_RPC) )
    130             {
    131                 spinlock_unlock( &sched->lock );
    132                 return thread;
    133             }
    134 
    135             // execute DEV thread if non blocked and waiting queue non empty
    136             if( (thread->blocked == 0)  &&
    137                 (thread->type == THREAD_DEV) &&
    138                 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
     127            // select kernel thread if non blocked and non IDLE
     128            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
    139129            {
    140130                spinlock_unlock( &sched->lock );
     
    186176
    187177    list_entry_t * iter;
     178    list_entry_t * root;
    188179    thread_t     * thread;
    189180    process_t    * process;
    190181
     182    // get pointer on scheduler
    191183    scheduler_t  * sched = &core->scheduler;
     184
     185    // get pointer on user threads root
     186    root = &sched->u_root;
    192187
    193188    // take lock protecting threads lists
    194189    spinlock_lock( &sched->lock );
    195190
      191    // We use a while loop to scan the user threads, in order to control the iterator
      192    // increment explicitly: some threads are destroyed during the scan, so LIST_FOREACH() cannot be used safely
     193
     194    // initialise list iterator
     195    iter = root->next;
     196
    196197    // scan all user threads
    197     LIST_FOREACH( &sched->u_root , iter )
    198     {
     198    while( iter != root )
     199    {
     200        // get pointer on thread
    199201        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     202
     203        // increment iterator
     204        iter = iter->next;
    200205
    201206        // handle REQ_ACK
     
    219224            process = thread->process;
    220225
     226                // release FPU if required
     227                if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
     228
     229            // remove thread from scheduler (scheduler lock already taken)
     230            uint32_t threads_nr = sched->u_threads_nr;
     231
     232            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );
     233
     234            sched->u_threads_nr = threads_nr - 1;
     235            list_unlink( &thread->sched_list );
     236            if( threads_nr == 1 ) sched->u_last = NULL;
     237
     238            // delete thread
     239            thread_destroy( thread );
     240
    221241#if DEBUG_SCHED_HANDLE_SIGNALS
    222242uint32_t cycle = (uint32_t)hal_get_cycles();
    223243if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    224 printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n",
    225 __FUNCTION__ , thread , process->pid , cycle );
    226 #endif
    227                 // release FPU if required
    228                 if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
    229 
    230             // detach thread from parent if attached
    231             if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
    232             thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
    233 
    234             // remove thread from scheduler (scheduler lock already taken)
    235             uint32_t threads_nr = sched->u_threads_nr;
    236             assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );
    237             sched->u_threads_nr = threads_nr - 1;
    238             list_unlink( &thread->sched_list );
    239             if( threads_nr == 1 ) sched->u_last = NULL;
    240 
    241             // delete thread
    242             thread_destroy( thread );
    243 
    244 #if DEBUG_SCHED_HANDLE_SIGNALS
    245 cycle = (uint32_t)hal_get_cycles();
    246 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    247 printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
    248 __FUNCTION__ , thread , process->pid , cycle );
      244printk("\n[DBG] %s : thread %x in process %x (%x) deleted / cycle %d\n",
     245__FUNCTION__ , thread , process->pid , process , cycle );
    249246#endif
    250247            // destroy process descriptor if no more threads
     
    314311    {
    315312
      313if( (local_cxy == 0x1) && (core->lid == 1) && ((uint32_t)current == 0xcc000) )
     314printk("\n@@@@@ cc000 exit at cycle %d\n", (uint32_t)hal_get_cycles() );
     315
      316if( (local_cxy == 0x1) && (core->lid == 1) && ((uint32_t)next == 0xcc000) )
     317printk("\n@@@@@ cc000 enter at cycle %d\n", (uint32_t)hal_get_cycles() );
     318
    316319#if DEBUG_SCHED_YIELD
    317320uint32_t cycle = (uint32_t)hal_get_cycles();
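
The rewritten sched_handle_signals() loop above illustrates a general rule for intrusive lists: when the loop body may unlink and destroy the current element, the iterator must be advanced before the destruction, which is why the LIST_FOREACH() macro is replaced by an explicit while loop. A condensed sketch of the pattern follows; request_must_delete() is a hypothetical stand-in for the actual REQ_ACK / REQ_DELETE flag handling, and the u_last bookkeeping is omitted for brevity.

    // get pointer on user threads root
    list_entry_t * root = &sched->u_root;

    // initialise list iterator
    list_entry_t * iter = root->next;

    while( iter != root )
    {
        // get pointer on thread
        thread_t * thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator BEFORE the current element can be unlinked
        iter = iter->next;

        if( request_must_delete( thread ) )     // stand-in for the REQ_DELETE test
        {
            // remove thread from scheduler : iter already points past it
            list_unlink( &thread->sched_list );
            sched->u_threads_nr--;

            // release thread memory
            thread_destroy( thread );
        }
    }
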
  • trunk/kernel/kern/thread.c

    r438 r440  
    184184    thread->blocked         = THREAD_BLOCKED_GLOBAL;
    185185
    186     // reset children list
    187     xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
    188     thread->children_nr = 0;
    189 
    190     // reset sched list and brothers list
     186    // reset sched list
    191187    list_entry_init( &thread->sched_list );
    192     xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );
    193188
    194189    // reset thread info
     
    238233    // get process descriptor local copy
    239234    process = process_get_local_copy( pid );
     235
    240236    if( process == NULL )
    241237    {
     
    604600///////////////////////////////////////////////////////////////////////////////////////
    605601// TODO: check that all memory dynamically allocated during thread execution
    606 // has been released, using a cache of mmap and malloc requests. [AG]
     602// has been released, using a cache of mmap requests. [AG]
    607603///////////////////////////////////////////////////////////////////////////////////////
    608604void thread_destroy( thread_t * thread )
     
    619615__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
    620616#endif
    621 
    622     assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
    623617
    624618    assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" );
     
    663657}   // end thread_destroy()
    664658
    665 /////////////////////////////////////////////////
    666 void thread_child_parent_link( xptr_t  xp_parent,
    667                                xptr_t  xp_child )
    668 {
    669     // get extended pointers on children list root
    670     cxy_t      parent_cxy = GET_CXY( xp_parent );
    671     thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    672     xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );
    673 
    674     // get extended pointer on children list entry
    675     cxy_t      child_cxy  = GET_CXY( xp_child );
    676     thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    677     xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
    678 
    679     // set the link
    680     xlist_add_first( root , entry );
    681     hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
    682 
    683 }  // end thread_child_parent_link()
    684 
    685 ///////////////////////////////////////////////////
    686 void thread_child_parent_unlink( xptr_t  xp_parent,
    687                                  xptr_t  xp_child )
    688 {
    689     // get extended pointer on children list lock
    690     cxy_t      parent_cxy = GET_CXY( xp_parent );
    691     thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    692     xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );
    693 
    694     // get extended pointer on children list entry
    695     cxy_t      child_cxy  = GET_CXY( xp_child );
    696     thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    697     xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
    698 
    699     // get the lock
    700     remote_spinlock_lock( lock );
    701 
    702     // remove the link
    703     xlist_unlink( entry );
    704     hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );
    705 
    706     // release the lock
    707     remote_spinlock_unlock( lock );
    708 
    709 }  // thread_child_parent_unlink()
    710 
    711659//////////////////////////////////////////////////
    712660inline void thread_set_req_ack( thread_t * target,
     
    846794
    847795}  // end thread_unblock()
     796
     797/*
    848798
    849799////////////////////////////////////
     
    875825    process_t * target_process;         // pointer on target thread process
    876826
    877     // get target thread cluster and pointer
     827    // get target thread pointer and cluster
    878828    target_cxy = GET_CXY( target_xp );
    879829    target_ptr = GET_PTR( target_xp );
     
    883833    killer_xp  = XPTR( local_cxy , killer_ptr );
    884834
    885 #if DEBUG_THREAD_KILL
     835#if DEBUG_THREAD_DELETE
     886836uint32_t cycle  = (uint32_t)hal_get_cycles();
    887 if( DEBUG_THREAD_KILL < cycle )
     837if( DEBUG_THREAD_DELETE < cycle )
    888838printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
    889839__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    982932        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
    983933
    984 #if DEBUG_THREAD_KILL
     934#if DEBUG_THREAD_DELETE
     985935cycle  = (uint32_t)hal_get_cycles();
    986 if( DEBUG_THREAD_KILL < cycle )
     936if( DEBUG_THREAD_DELETE < cycle )
    987937printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
    988938__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    995945        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    996946
    997 #if DEBUG_THREAD_KILL
     947#if DEBUG_THREAD_DELETE
     998948cycle  = (uint32_t)hal_get_cycles();
    999 if( DEBUG_THREAD_KILL < cycle )
     949if( DEBUG_THREAD_DELETE < cycle )
    1000950printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
    1001951__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    1005955
    1006956}  // end thread_kill()
     957
     958*/
     959
     960//////////////////////////////////////
     961void thread_delete( xptr_t  target_xp,
     962                    pid_t   pid,
     963                    bool_t  is_forced )
     964{
     965    reg_t       save_sr;                // for critical section
     966    bool_t      target_join_done;       // joining thread arrived first
     967    bool_t      target_attached;        // target thread attached
     968    xptr_t      killer_xp;              // extended pointer on killer thread (this)
     969    thread_t  * killer_ptr;             // pointer on killer thread (this)
     970    cxy_t       target_cxy;             // target thread cluster     
     971    thread_t  * target_ptr;             // pointer on target thread
     972    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
     973    uint32_t    target_flags;           // target thread <flags> value
     974    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
     975    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
     976    trdid_t     target_trdid;           // target thread identifier
     977    ltid_t      target_ltid;            // target thread local index
     978    xptr_t      joining_xp;             // extended pointer on joining thread
     979    thread_t  * joining_ptr;            // pointer on joining thread
     980    cxy_t       joining_cxy;            // joining thread cluster
     981    cxy_t       owner_cxy;              // process owner cluster
     982
     983
     984    // get target thread pointers, identifiers, and flags
     985    target_cxy      = GET_CXY( target_xp );
     986    target_ptr      = GET_PTR( target_xp );
     987    target_trdid    = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
     988    target_ltid     = LTID_FROM_TRDID( target_trdid );
     989    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
     990    target_flags    = hal_remote_lw( target_flags_xp );
     991
     992    // get killer thread pointers
     993    killer_ptr = CURRENT_THREAD;
     994    killer_xp  = XPTR( local_cxy , killer_ptr );
     995
     996#if DEBUG_THREAD_DELETE
      997uint32_t cycle  = (uint32_t)hal_get_cycles();
     998if( DEBUG_THREAD_DELETE < cycle )
     999printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
     1000__FUNCTION__, killer_ptr, target_ptr, cycle );
     1001#endif
     1002
     1003    // target thread cannot be the main thread, because the main thread
     1004    // must be deleted by the parent process sys_wait() function
     1005    owner_cxy = CXY_FROM_PID( pid );
     1006    assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), __FUNCTION__,
     1007    "tharget thread cannot be the main thread\n" );
     1008
     1009    // block the target thread
     1010    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
     1011
     1012    // get attached from target flag descriptor
     1013    target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
     1014
     1015    // synchronize with the joining thread if the target thread is attached
     1016    if( target_attached && (is_forced == false) )
     1017    {
     1018        // build extended pointers on target thread join fields
     1019        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
     1020        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
     1021
     1022        // enter critical section
     1023        hal_disable_irq( &save_sr );
     1024
     1025        // take the join_lock in target thread descriptor
     1026        remote_spinlock_lock( target_join_lock_xp );
     1027
     1028        // get join_done from target thread descriptor
     1029        target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
     1030   
     1031        if( target_join_done )  // joining thread arrived first => unblock the joining thread
     1032        {
     1033            // get extended pointer on joining thread
     1034            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
     1035            joining_ptr = GET_PTR( joining_xp );
     1036            joining_cxy = GET_CXY( joining_xp );
     1037           
     1038            // reset the join_done flag in target thread
     1039            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
     1040
     1041            // unblock the joining thread
     1042            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
     1043
     1044            // release the join_lock in target thread descriptor
     1045            remote_spinlock_unlock( target_join_lock_xp );
     1046
     1047            // restore IRQs
     1048            hal_restore_irq( save_sr );
     1049        }
     1050        else                // this thread arrived first => register flags and deschedule
     1051        {
     1052            // set the kill_done flag in target thread
     1053            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
     1054
     1055            // block this thread on BLOCKED_JOIN
     1056            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
     1057
     1058            // set extended pointer on killer thread in target thread
     1059            hal_remote_swd( target_join_xp_xp , killer_xp );
     1060
     1061            // release the join_lock in target thread descriptor
     1062            remote_spinlock_unlock( target_join_lock_xp );
     1063
     1064            // deschedule
      1065            sched_yield( "killer thread waits for the joining thread" );
     1066
     1067            // restore IRQs
     1068            hal_restore_irq( save_sr );
     1069        }
     1070    }  // end if attached
     1071
     1072    // set the REQ_DELETE flag in target thread descriptor
     1073    hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1074
     1075#if DEBUG_THREAD_DELETE
      1076cycle  = (uint32_t)hal_get_cycles();
     1077if( DEBUG_THREAD_DELETE < cycle )
     1078printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
     1079__FUNCTION__, killer_ptr, target_ptr, cycle );
     1080#endif
     1081
     1082}  // end thread_delete()
     1083
     1084
    10071085
    10081086///////////////////////
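
The join_lock / JOIN_DONE / KILL_DONE protocol used by thread_delete() is symmetric: whichever of the deleting thread or the joining thread reaches the rendezvous first registers its arrival under the lock and blocks; the second arrival completes the handshake and unblocks the first. As a hedged sketch, the joining side (in a sys_thread_join() implementation, which is not part of this changeset) would mirror the sequence as follows, assuming the same target_join_lock_xp, target_flags_xp and target_join_xp_xp extended pointers are built as in thread_delete(); the IRQ critical section is omitted for brevity.

    // build extended pointer on the joining thread (this thread)
    xptr_t joining_xp = XPTR( local_cxy , CURRENT_THREAD );

    // take the join_lock in target thread descriptor
    remote_spinlock_lock( target_join_lock_xp );

    if( hal_remote_lw( target_flags_xp ) & THREAD_FLAG_KILL_DONE )
    {
        // deleting thread arrived first => reset flag and unblock it
        hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_KILL_DONE );
        thread_unblock( (xptr_t)hal_remote_lwd( target_join_xp_xp ) , THREAD_BLOCKED_JOIN );
        remote_spinlock_unlock( target_join_lock_xp );
    }
    else
    {
        // joining thread arrived first => register itself and deschedule
        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_JOIN_DONE );
        thread_block( joining_xp , THREAD_BLOCKED_JOIN );
        hal_remote_swd( target_join_xp_xp , joining_xp );
        remote_spinlock_unlock( target_join_lock_xp );
        sched_yield( "joining thread waits target thread" );
    }
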
  • trunk/kernel/kern/thread.h

    r438 r440  
    171171    cxy_t               fork_cxy;        /*! target cluster  for next fork()          */
    172172
    173         xlist_entry_t       children_root;   /*! root of list of attached children        */
    174     uint32_t            children_nr;     /*! number of attached children threads      */
    175     remote_spinlock_t * children_lock;   /*! lock protecting the children list        */
    176 
    177     xlist_entry_t       brothers_list;   /*! list of attached threads to same parent  */
    178 
    179173        list_entry_t        sched_list;      /*! member of threads attached to same core  */
    180174
     
     222216 * in an existing process. It allocates memory for a user thread descriptor in the
    223217 * local cluster, and initializes it from information contained in the arguments.
    224  * The CPU context is initialized from scratch. If required by the <attr> argument,
    225  * the new thread is attached to the core specified in <attr>.
     218 * The CPU context is initialized from scratch.
    226219 * It is registered in the local process descriptor specified by the <pid> argument.
    227  * The thread descriptor pointer is returned to allow the parent thread to register it
    228  * in its children list.
    229220 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start.
    230221 ***************************************************************************************
     
    325316
    326317/***************************************************************************************
    327  * This function registers a child thread in the global list of attached
    328  * children threads of a parent thread.
    329  * It does NOT take a lock, as this function is always called by the parent thread.
    330  ***************************************************************************************
    331  * @ parent_xp : extended pointer on the parent thread descriptor.
    332  * @ child_xp  : extended pointer on the child thread descriptor.
    333  **************************************************************************************/
    334 void thread_child_parent_link( xptr_t  parent_xp,
    335                                xptr_t  child_xp );
    336 
    337 /***************************************************************************************
    338  * This function removes an user thread from the parent thread global list
    339  * of attached children threads.
    340  ***************************************************************************************
    341  * @ parent_xp : extended pointer on the parent thread descriptor.
    342  * @ child_xp  : extended pointer on the child thread descriptor.
    343  **************************************************************************************/
    344 void thread_child_parent_unlink( xptr_t parent_xp,
    345                                  xptr_t child_xp );
    346 
    347 /***************************************************************************************
    348318 * This function is used by a "blocker" thread running in the same cluster as a "target"
    349319 * thread to request the scheduler of the target thread to acknowledge that the target
     
    386356
    387357/***************************************************************************************
    388  * This function is called to handle the four pthread_cancel(), pthread_exit(),
    389  * kill() and exit() system calls. It kills a "target" thread identified by the
    390  * <thread_xp> argument. The "killer" thread can be the "target" thread, when the
    391  * <is_exit> argument is true. The "killer" thread can run in any cluster,
    392  * as it uses remote accesses.
    393  * If the "target" thread is running in "attached" mode, and the <is_forced> argument
     358 * This function is used by the four sys_thread_cancel(), sys_thread_exit(),
     359 * sys_kill() and sys_exit() system calls to delete a given thread.
      360 * It sets the THREAD_BLOCKED_GLOBAL bit and the THREAD_FLAG_REQ_DELETE bit
     361 * in the thread descriptor identified by the <thread_xp> argument, to ask the scheduler
     362 * to asynchronously delete the target thread, at the next scheduling point.
     363 * The calling thread can run in any cluster, as it uses remote accesses, but
      364 * the target thread cannot be the main thread of the process identified by the <pid>
      365 * argument, because the main thread must be deleted by the parent process sys_wait() function.
     366 * If the target thread is running in "attached" mode, and the <is_forced> argument
     394367 * is false, this function implements the required synchronisation with the joining
    395  * thread, blocking the "killer" thread until the pthread_join() syscall is executed.
    396  * To delete the target thread, this function sets the THREAD_FLAG_REQ_DELETE bit
    397  * and the THREAD BLOCKED_GLOBAL bit in the target thread, and the actual destruction
    398  * is asynchronously done by the scheduler at the next scheduling point.
     368 * thread, blocking the calling thread until the pthread_join() syscall is executed.
    399369 ***************************************************************************************
    400370 * @ thread_xp   : extended pointer on the target thread.
    401  * @ is_exit     : the killer thread is the target thread itself.
    402  * @ is_forced   : the killing does not depends on the attached mode.
    403  **************************************************************************************/
    404 void thread_kill( xptr_t  thread_xp,
    405                   bool_t  is_exit,
    406                   bool_t  is_forced );
     371 * @ pid         : process identifier (to get the owner cluster identifier).
      372 * @ is_forced   : the deletion does not depend on the attached mode.
     373 **************************************************************************************/
     374void thread_delete( xptr_t  thread_xp,
     375                    pid_t   pid,
     376                    bool_t  is_forced );
    407377
    408378/***************************************************************************************
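
With this new interface, a caller such as sys_thread_cancel() only provides the target's extended pointer, the owner process identifier, and the forced flag; the destruction itself is performed later by the scheduler at the next scheduling point. A hedged usage sketch follows; the surrounding syscall context (target_cxy, target_ptr, process) is illustrative, not code from this changeset.

    // build extended pointer on the target thread (illustrative context)
    xptr_t target_xp = XPTR( target_cxy , target_ptr );

    // request an asynchronous deletion that honours the attached/detached mode
    thread_delete( target_xp , process->pid , false );

    // a forced deletion (e.g. from sys_exit()) would skip the join handshake:
    // thread_delete( target_xp , process->pid , true );
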
Note: See TracChangeset for help on using the changeset viewer.