Changeset 436 for trunk/kernel


Ignore:
Timestamp:
Mar 7, 2018, 9:02:03 AM (6 years ago)
Author:
alain
Message:

1) Improve the thread and process destruction mechanism.
2) Introduce FIFOs in the soclib_tty driver.

Location:
trunk/kernel
Files:
40 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/devices/dev_nic.c

    r408 r436  
    130130
    131131        // block on THREAD_BLOCKED_IO condition and deschedule
    132         thread_block( thread_ptr , THREAD_BLOCKED_IO );
     132        thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_IO );
    133133        sched_yield("client blocked on I/O");
    134134
     
    200200
    201201        // block on THREAD_BLOCKED I/O condition and deschedule
    202         thread_block( thread_ptr , THREAD_BLOCKED_IO );
     202        thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_IO );
    203203        sched_yield("client blocked on I/O");
    204204
  • trunk/kernel/devices/dev_txt.c

    r435 r436  
    131131    thread_t * this = CURRENT_THREAD;
    132132
    133 #if (CONFIG_DEBUG_SYS_READ & 1)
    134 enter_txt_read = hal_time_stamp();
    135 #endif
    136 
    137 #if (CONFIG_DEBUG_SYS_WRITE & 1)
    138 enter_txt_write = hal_time_stamp();
    139 #endif
    140 
    141 #if CONFIG_DEBUG_DEV_TXT
    142 uint32_t cycle = (uint32_t)hal_get_cycles();
    143 if( CONFIG_DEBUG_DEV_TXT < cycle )
    144 printk("\n[DBG] %s : thread %x enters / cycle %d\n",
    145 __FUNCTION__, CURRENT_THREAD , cycle );
    146 #endif
    147 
    148133    // check channel argument
    149134    assert( (channel < CONFIG_MAX_TXT_CHANNELS) , __FUNCTION__ , "illegal channel index" );
     
    166151    chdev_register_command( dev_xp );
    167152
    168 #if CONFIG_DEBUG_DEV_TXT
    169 cycle = (uint32_t)hal_get_cycles();
    170 if( CONFIG_DEBUG_DEV_TXT < cycle )
    171 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    172 __FUNCTION__, CURRENT_THREAD , cycle );
    173 #endif
    174 
    175 #if (CONFIG_DEBUG_SYS_READ & 1)
    176 exit_txt_read = hal_time_stamp();
    177 #endif
    178 
    179 #if (CONFIG_DEBUG_SYS_WRITE & 1)
    180 exit_txt_write = hal_time_stamp();
    181 #endif
    182 
    183153    // return I/O operation status from calling thread descriptor
    184154    return this->txt_cmd.error;
     
    190160                       uint32_t   count )
    191161{
     162
     163#if (CONFIG_DEBUG_SYS_WRITE & 1)
     164enter_txt_write = hal_time_stamp();
     165#endif
     166
     167#if CONFIG_DEBUG_DEV_TXT_TX
     168uint32_t cycle = (uint32_t)hal_get_cycles();
     169if( CONFIG_DEBUG_DEV_TXT_TX < cycle )
     170printk("\n[DBG] %s : thread %x enters / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     171#endif
     172
    192173    return dev_txt_access( TXT_WRITE , channel , buffer , count );
     174
     175#if CONFIG_DEBUG_DEV_TXT_TX
     176cycle = (uint32_t)hal_get_cycles();
     177if( CONFIG_DEBUG_DEV_TXT_TX < cycle )
     178printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     179#endif
     180
     181#if (CONFIG_DEBUG_SYS_WRITE & 1)
     182exit_txt_write = hal_time_stamp();
     183#endif
     184
    193185}
    194186
     
    197189                      char     * buffer )
    198190{
     191
     192#if (CONFIG_DEBUG_SYS_READ & 1)
     193enter_txt_read = hal_time_stamp();
     194#endif
     195
     196#if CONFIG_DEBUG_DEV_TXT_RX
     197uint32_t cycle = (uint32_t)hal_get_cycles();
     198if( CONFIG_DEBUG_DEV_TXT_RX < cycle )
     199printk("\n[DBG] %s : thread %x enters / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     200#endif
     201
    199202    return dev_txt_access( TXT_READ , channel , buffer , 1 );
     203
     204#if CONFIG_DEBUG_DEV_TXT_RX
     205cycle = (uint32_t)hal_get_cycles();
     206if( CONFIG_DEBUG_DEV_TXT_RX < cycle )
     207printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     208#endif
     209
     210#if (CONFIG_DEBUG_SYS_READ & 1)
     211exit_txt_read = hal_time_stamp();
     212#endif
     213
    200214}
    201215
  • trunk/kernel/fs/vfs.c

    r433 r436  
    235235
    236236#if CONFIG_DEBUG_VFS_INODE_CREATE
    237 uint32_t cycle = (uint32_t)hal_get_cycles();
     237cycle = (uint32_t)hal_get_cycles();
    238238if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
    239239printk("\n[DBG] %s : thread %x exit / inode = %x in cluster %x / cycle %d\n",
     
    13511351if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
    13521352printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
    1353 __FUNCTION__, CURRENT_THREAD, path, cycle );
     1353__FUNCTION__, CURRENT_THREAD, pathname, cycle );
    13541354#endif
    13551355
     
    15221522if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
    15231523printk("\n[DBG] %s : thread %x exit for <%s> / inode %x in cluster %x / cycle %d\n",
    1524 __FUNCTION__, CURRENT_THREAD, path, GET_PTR(child_xp), GET_CXY(child_xp), cycle );
     1524__FUNCTION__, CURRENT_THREAD, pathname, GET_PTR(child_xp), GET_CXY(child_xp), cycle );
    15251525#endif
    15261526
  • trunk/kernel/kern/chdev.c

    r435 r436  
    179179
    180180    // block current thread
    181     thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );
     181    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
    182182
    183183    // register client thread in waiting queue
  • trunk/kernel/kern/cluster.c

    r433 r436  
    237237{
    238238    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    239     xptr_t      lock_xp;       // xptrr on lock protecting this list
     239    xptr_t      lock_xp;       // xptr on lock protecting this list
    240240    xptr_t      iter_xp;       // iterator
    241241    xptr_t      current_xp;    // xptr on current process descriptor
     
    277277    if( found ) return current_xp;
    278278    else        return XPTR_NULL;
    279 }
     279
     280}  // end cluster_get_owner_process_from_pid()
    280281
    281282//////////////////////////////////////////////////////////
     
    440441void cluster_process_copies_link( process_t * process )
    441442{
    442     uint32_t irq_state;
     443    reg_t    irq_state;
    443444    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
     445
     446#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     447uint32_t cycle = (uint32_t)hal_get_cycles();
     448if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     449printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
     450__FUNCTION__ , local_cxy , process , cycle );
     451#endif
    444452
    445453    // get owner cluster identifier CXY and process LPID
     
    460468    remote_spinlock_lock_busy( copies_lock , &irq_state );
    461469
     470    // add copy to copies_list
    462471    xlist_add_first( copies_root , copies_entry );
    463472    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
     
    465474    // release lock protecting copies_list[lpid]
    466475    remote_spinlock_unlock_busy( copies_lock , irq_state );
    467 }
     476
     477#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     478cycle = (uint32_t)hal_get_cycles();
     479if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     480printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
     481__FUNCTION__ , local_cxy , process , cycle );
     482#endif
     483
     484}  // end cluster_process_copies_link()
    468485
    469486/////////////////////////////////////////////////////////
     
    472489    uint32_t irq_state;
    473490    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
     491
     492#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     493uint32_t cycle = (uint32_t)hal_get_cycles();
     494if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     495printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
     496__FUNCTION__ , local_cxy , process , cycle );
     497#endif
    474498
    475499    // get owner cluster identifier CXY and process LPID
     
    479503
    480504    // get extended pointer on lock protecting copies_list[lpid]
    481     xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );
     505    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
    482506
    483507    // get extended pointer on the local copies_list entry
     
    487511    remote_spinlock_lock_busy( copies_lock , &irq_state );
    488512
     513    // remove copy from copies_list
    489514    xlist_unlink( copies_entry );
    490515    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
     
    492517    // release lock protecting copies_list[lpid]
    493518    remote_spinlock_unlock_busy( copies_lock , irq_state );
    494 }
     519
     520#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     521cycle = (uint32_t)hal_get_cycles();
     522if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     523printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
     524__FUNCTION__ , local_cxy , process , cycle );
     525#endif
     526
     527}  // end cluster_process_copies_unlink()
    495528
    496529///////////////////////////////////////////
  • trunk/kernel/kern/kernel_init.c

    r435 r436  
    125125// these debug variables are used to analyse the sys_read() syscall timing
    126126
    127 #if CONFIG_READ_DEBUG   
     127#if CONFIG_DEBUG_SYS_READ
    128128uint32_t   enter_sys_read;
    129129uint32_t   exit_sys_read;
     
    150150// these debug variables are used to analyse the sys_write() syscall timing
    151151
    152 #if CONFIG_WRITE_DEBUG   
     152#if CONFIG_DEBUG_SYS_WRITE   
    153153uint32_t   enter_sys_write;
    154154uint32_t   exit_sys_write;
  • trunk/kernel/kern/process.c

    r435 r436  
    365365    cxy_t       parent_cxy;
    366366    xptr_t      children_lock_xp;
    367     xptr_t      copies_lock_xp;
    368367
    369368        assert( (process->th_nr == 0) , __FUNCTION__ ,
     
    377376#endif
    378377
    379     // get local process manager pointer
    380     pmgr_t * pmgr = &LOCAL_CLUSTER->pmgr;
    381 
    382     // remove process from local_list in cluster manager
    383     remote_spinlock_lock( XPTR( local_cxy , &pmgr->local_lock ) );
    384     xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    385     remote_spinlock_unlock( XPTR( local_cxy , &pmgr->local_lock ) );
    386 
    387     // get extended pointer on copies_lock in owner cluster manager
    388     cxy_t  owner_cxy = CXY_FROM_PID( process->pid );
    389         lpid_t lpid      = LPID_FROM_PID( process->pid );
    390     copies_lock_xp   = XPTR( owner_cxy , &pmgr->copies_lock[lpid] );
    391 
    392     // remove local process from copies_list
    393     remote_spinlock_lock( copies_lock_xp );
    394     xlist_unlink( XPTR( local_cxy , &process->copies_list ) );
    395     remote_spinlock_unlock( copies_lock_xp );
    396 
    397     // for reference process only
    398     if( XPTR( local_cxy , process ) == process->ref_xp )
    399     {
    400         // remove reference process from txt_list
    401         process_txt_detach( process );
    402 
     378    // remove process from local_list in local cluster manager
     379    cluster_process_local_unlink( process );
     380
     381    // remove process from copies_list in owner cluster manager
     382    cluster_process_copies_unlink( process );
     383
     384    // remove process from children_list if process is in owner cluster
     385    if( CXY_FROM_PID( process->pid ) == local_cxy )
     386    {
    403387        // get pointers on parent process
    404388        parent_xp  = process->parent_xp;
     
    461445    xptr_t             process_xp;        // extended pointer on process copy
    462446    cxy_t              process_cxy;       // process copy cluster identifier
    463     process_t        * process_ptr;       // local pointer on process copy
    464     uint32_t           responses;         // number of remote process copies
    465     uint32_t           rsp_count;         // used to assert number of copies
    466     rpc_desc_t         rpc;               // rpc descriptor allocated in stack
     447    reg_t              save_sr;           // for critical section
     448    rpc_desc_t         rpc;               // shared RPC descriptor
    467449
    468450    thread_t * client = CURRENT_THREAD;
     
    475457#endif
    476458
    477     // get local pointer on local cluster manager
     459    // get pointer on local cluster manager
    478460    cluster = LOCAL_CLUSTER;
    479461
     
    483465
    484466    // get root of list of copies, lock, and number of copies from owner cluster
    485     responses = hal_remote_lw ( XPTR( owner_cxy , &cluster->pmgr.copies_nr[lpid] ) );
    486     root_xp   = hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] ) );
    487     lock_xp   = hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] ) );
    488 
    489     rsp_count = 0;
     467    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
     468    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
    490469
    491470    // check action type
     
    494473             (action_type == UNBLOCK_ALL_THREADS )), __FUNCTION__ , "illegal action type" );
    495474             
    496     // initialise rpc descriptor
    497     rpc.index    = RPC_PROCESS_SIGACTION;
    498     rpc.response = responses;
    499     rpc.blocking = false;
    500     rpc.thread   = client;
     475    // allocate a - shared - RPC descriptor in client thread stack
     476    // it can be shared because all parallel, non-blocking, server threads
     477    // use the same input arguments, and use the shared RPC response field
     478    // but use
     479
     480    // the client thread makes the following sequence:
     481    // 1. mask interrupts
     482    // 2. block itself
     483    // 3. send RPC requests to all copies
     484    // 4. unmask interrupts
     485    // 5. deschedule
     486
     487    // mask IRQs
     488    hal_disable_irq( &save_sr);
     489
     490    // client register blocking condition for itself
     491    thread_block( XPTR( local_cxy , client ) , THREAD_BLOCKED_RPC );
    501492
    502493    // take the lock protecting the copies
    503494    remote_spinlock_lock( lock_xp );
    504495
    505     // send RPCs to remote clusters
     496    // initialize shared RPC descriptor
     497    rpc.response = 0;
     498    rpc.blocking = false;
     499    rpc.index    = RPC_PROCESS_SIGACTION;
     500    rpc.thread   = client;
     501    rpc.lid      = client->core->lid;
     502    rpc.args[0]  = action_type;
     503    rpc.args[1]  = pid;
     504
     505    // send RPCs to all clusters containing process copiess
    506506    XLIST_FOREACH( root_xp , iter_xp )
    507507    {
     508
     509#if CONFIG_DEBUG_PROCESS_SIGACTION
     510if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     511printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n",
     512__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
     513#endif
     514        // atomically increment responses counter
     515        hal_atomic_add( (void *)&rpc.response , 1 );
     516
    508517        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
    509518        process_cxy = GET_CXY( process_xp );
    510         process_ptr = GET_PTR( process_xp );
    511 
    512 #if CONFIG_DEBUG_PROCESS_SIGACTION
    513 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
    514 printk("\n[DBG] %s : send RPC to cluster %x\n", __FUNCTION__ , process_cxy );
    515 #endif
    516 
    517         // check PID
    518         assert( (hal_remote_lw( XPTR( process_cxy , &process_ptr->pid) ) == pid),
    519         __FUNCTION__ , "unconsistent PID value\n" );
    520 
    521         rpc.args[0] = (uint64_t)action_type;
    522         rpc.args[1] = (uint64_t)pid;
     519
     520        // call RPC in target cluster
    523521        rpc_process_sigaction_client( process_cxy , &rpc );
    524         rsp_count++;
    525522    }
    526523   
     
    528525    remote_spinlock_unlock( lock_xp );
    529526
    530     // check number of copies...
    531     assert( (rsp_count == responses) , __FUNCTION__ ,
    532     "unconsistent number of process copies : rsp_count = %d / responses = %d",
    533     rsp_count , responses );
    534 
    535     // block and deschedule to wait RPC responses
    536     thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
    537     sched_yield("BLOCKED on RPC_PROCESS_SIGACTION");
     527    // restore IRQs
     528    hal_restore_irq( save_sr);
     529
     530    // client deschedule : will be unblocked by the last RPC server thread
     531    sched_yield("blocked on rpc_process_sigaction");
    538532
    539533#if CONFIG_DEBUG_PROCESS_SIGACTION
     
    541535if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
    542536printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
    543 __FUNCTION__ , client, process_action_str( action_type ) ,
    544 process->pid , local_cxy , cycle );
     537__FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle );
    545538#endif
    546539
     
    553546    thread_t          * this;           // pointer on calling thread
    554547    uint32_t            ltid;           // index in process th_tbl
     548    cxy_t               owner_cxy;      // target process owner cluster
    555549    uint32_t            count;          // requests counter
    556     volatile uint32_t   rsp_count;      // responses counter
     550    volatile uint32_t   ack_count;      // scheduler acknowledge counter
    557551
    558552    // get calling thread pointer
    559553    this = CURRENT_THREAD;
     554
     555    // get target process owner cluster
     556    owner_cxy = CXY_FROM_PID( process->pid );
    560557
    561558#if CONFIG_DEBUG_PROCESS_SIGACTION
     
    569566    spinlock_lock( &process->th_lock );
    570567
    571     // initialize local responses counter
    572     rsp_count = process->th_nr;
    573 
    574     // loop on process threads to block and deschedule all threads in cluster
     568    // loop to block all threads but the main thread
    575569    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    576     for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
     570    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    577571    {
    578572        target = process->th_tbl[ltid];
    579573
    580         assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
    581 
    582         if( target != NULL )             // thread found
     574        if( target != NULL )                                 // thread exist
    583575        {
    584576            count++;
    585577
    586             // - if the calling thread and the target thread are on the same core,
    587             //   we block the target thread, we don't need confirmation from scheduler,
    588             //   and we simply decrement the responses counter.
    589             // - if the calling thread and the target thread are not running on the same
    590             //   core, we ask the target scheduler to acknowlege the blocking
    591             //   to be sure that the target thread is not running.
    592            
    593             if( this->core->lid == target->core->lid )
     578            // main thread should not be deleted
     579            if( (ltid != 0) || (owner_cxy != local_cxy) )
    594580            {
    595581                // set the global blocked bit in target thread descriptor.
    596                 thread_block( target , THREAD_BLOCKED_GLOBAL );
    597 
    598                 // decrement responses counter
    599                 hal_atomic_add( (void *)&rsp_count , -1 );
    600             }
    601             else
    602             {
    603                 // set the global blocked bit in target thread descriptor.
    604                 thread_block( target , THREAD_BLOCKED_GLOBAL );
    605 
    606                 // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
    607                 thread_set_req_ack( target , (void *)&rsp_count );
    608 
    609                 // force scheduling on target thread
    610                 dev_pic_send_ipi( local_cxy , target->core->lid );
     582                thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
     583 
     584                // - if the calling thread and the target thread are on the same core,
     585                //   we don't need confirmation from scheduler,
     586                // - if the calling thread and the target thread are not running on the same
     587                //   core, we ask the target scheduler to acknowlege the blocking
     588                //   to be sure that the target thread is not running.
     589           
     590                if( this->core->lid != target->core->lid )
     591                {
     592                    // increment responses counter
     593                    hal_atomic_add( (void*)&ack_count , 1 );
     594
     595                    // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
     596                    thread_set_req_ack( target , (uint32_t *)&ack_count );
     597
     598                    // force scheduling on target thread
     599                    dev_pic_send_ipi( local_cxy , target->core->lid );
     600                }
    611601            }
    612602        }
     
    616606    spinlock_unlock( &process->th_lock );
    617607
    618     // wait all responses from schedulers
     608    // wait acknowledges
    619609    while( 1 )
    620610    {
    621         // exit loop when all local responses received
    622         if ( rsp_count == 0 ) break;
     611        // exit when all scheduler acknoledges received
     612        if ( ack_count == 0 ) break;
    623613   
    624614        // wait 1000 cycles before retry
     
    656646    spinlock_lock( &process->th_lock );
    657647
    658     // loop on process threads to unblock all threads in cluster
     648    // loop on process threads to unblock all threads
    659649    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    660650    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    661651    {
    662652        target = process->th_tbl[ltid];
    663 
    664         assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
    665653
    666654        if( target != NULL )             // thread found
     
    689677{
    690678    thread_t          * target;        // pointer on target thread
    691     thread_t          * this;          // pointer on calling thread
    692679    uint32_t            ltid;          // index in process th_tbl
    693     uint32_t            count;         // request counter
    694     cxy_t               owner_cxy;     // owner cluster identifier
    695 
    696     // get calling thread pointer
    697     this = CURRENT_THREAD;
    698     owner_cxy = CXY_FROM_PID( process->pid );
     680    uint32_t            count;         // threads counter
    699681
    700682#if CONFIG_DEBUG_PROCESS_SIGACTION
     
    702684if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
    703685printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    704 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     686__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
    705687#endif
    706688
     
    708690    spinlock_lock( &process->th_lock );
    709691
    710     // loop on threads to set the REQ_DELETE flag
     692    // loop to set the REQ_DELETE flag on all threads but the main
    711693    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    712694    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
     
    714696        target = process->th_tbl[ltid];
    715697
    716         assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
    717 
    718         if( target != NULL )            // thread found
     698        if( target != NULL )
    719699        {
    720700            count++;
    721701           
    722             // the main thread should not be deleted
    723             if( (owner_cxy != local_cxy) || (ltid != 0) )   
    724             {
    725                 hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
    726             }
     702            thread_kill( XPTR( local_cxy , target ),
     703                         false,                       // is_exit
     704                         true );                      // is_forced
    727705        }
    728706    }
     
    735713if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
    736714printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    737 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     715__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
    738716#endif
    739717
     
    790768
    791769}  // end process_get_local_copy()
     770
     771////////////////////////////////////////////
     772pid_t process_get_ppid( xptr_t  process_xp )
     773{
     774    cxy_t       process_cxy;
     775    process_t * process_ptr;
     776    xptr_t      parent_xp;
     777    cxy_t       parent_cxy;
     778    process_t * parent_ptr;
     779
     780    // get process cluster and local pointer
     781    process_cxy = GET_CXY( process_xp );
     782    process_ptr = GET_PTR( process_xp );
     783
     784    // get pointers on parent process
     785    parent_xp  = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
     786    parent_cxy = GET_CXY( parent_xp );
     787    parent_ptr = GET_PTR( parent_xp );
     788
     789    return hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     790}
    792791
    793792//////////////////////////////////////////////////////////////////////////////////////////
     
    10671066                            parent_process_xp );
    10681067
    1069 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1068#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    10701069cycle = (uint32_t)hal_get_cycles();
    10711070if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     
    10861085    }
    10871086
    1088 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1087#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    10891088cycle = (uint32_t)hal_get_cycles();
    10901089if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     
    11121111    assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    11131112
    1114 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1113#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    11151114cycle = (uint32_t)hal_get_cycles();
    11161115if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     
    11341133    vmm_set_cow( process );
    11351134 
    1136 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1135#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    11371136cycle = (uint32_t)hal_get_cycles();
    11381137if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     
    12361235
    12371236    // give TXT ownership to new_process
    1238     process_txt_set_ownership( XPTR( local_cxy , new_process ) );
    1239 
    1240 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1237    process_txt_set_ownership( XPTR( local_cxy , new_process) );
     1238
     1239#if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    12411240cycle = (uint32_t)hal_get_cycles();
    12421241if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     
    12551254        }
    12561255
    1257 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1256#if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    12581257cycle = (uint32_t)hal_get_cycles();
    12591258if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     
    12871286    assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    12881287
    1289 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1288#if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    12901289cycle = (uint32_t)hal_get_cycles();
    12911290if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     
    13121311        thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
    13131312
     1313    // detach old_process from TXT
     1314    process_txt_detach( XPTR( local_cxy , old_process ) );
     1315
    13141316    // request old_thread destruction => old_process destruction
    1315     thread_block( old_thread , THREAD_BLOCKED_GLOBAL );
     1317    thread_block( XPTR( local_cxy , old_thread ) , THREAD_BLOCKED_GLOBAL );
    13161318    hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE );
    13171319
     
    16001602if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
    16011603printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d  / cycle %d\n",
    1602 __FUNCTION__, CURRENT_THREAD, process, txt_id, cycle );
    1603 #endif
    1604 
    1605     // check process is reference
    1606     assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__ ,
    1607     "process is not the reference descriptor" );
     1604__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
     1605#endif
     1606
     1607    // check process is in owner cluster
     1608    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
     1609    "process descriptor not in owner cluster" );
    16081610
    16091611    // check terminal index
     
    16291631if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
    16301632printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
    1631 __FUNCTION__, CURRENT_THREAD, process, txt_id , cycle );
     1633__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
    16321634#endif
    16331635
    16341636} // end process_txt_attach()
    16351637
    1636 //////////////////////////////////////////////
    1637 void process_txt_detach( process_t * process )
    1638 {
     1638/////////////////////////////////////////////
     1639void process_txt_detach( xptr_t  process_xp )
     1640{
     1641    process_t * process_ptr;  // local pointer on process in owner cluster
     1642    cxy_t       process_cxy;  // process owner cluster
     1643    pid_t       process_pid;  // process identifier
     1644    xptr_t      file_xp;      // extended pointer on stdin file
    16391645    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
    16401646    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
     
    16421648    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    16431649
     1650    // get process cluster, local pointer, and PID
     1651    process_cxy = GET_CXY( process_xp );
     1652    process_ptr = GET_PTR( process_xp );
     1653    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1654
     1655    // check process descriptor in owner cluster
     1656    assert( (CXY_FROM_PID( process_pid ) == process_cxy ) , __FUNCTION__ ,
     1657    "process descriptor not in owner cluster" );
     1658
    16441659#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    16451660uint32_t cycle = (uint32_t)hal_get_cycles();
    16461661if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
    16471662printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    1648 __FUNCTION__, CURRENT_THREAD, process, cycle );
    1649 #endif
    1650 
    1651     // check process is reference
    1652     assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__ ,
    1653     "process is not the reference descriptor" );
    1654 
    1655     // get extended pointer on TXT_RX chdev
    1656     chdev_xp  = chdev_from_file( process->fd_array.array[0] );
     1663__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     1664#endif
     1665
     1666    // release TXT ownership (does nothing if not TXT owner)
     1667    process_txt_transfer_ownership( process_xp );
     1668
     1669    // get extended pointer on process stdin file
     1670    file_xp = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1671
     1672    // get pointers on TXT_RX chdev
     1673    chdev_xp  = chdev_from_file( file_xp );
    16571674    chdev_cxy = GET_CXY( chdev_xp );
    16581675    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
    16591676
    1660     // get extended pointer on lock of attached process list
     1677    // get extended pointer on lock protecting attached process list
    16611678    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
    16621679
    16631680    // unlink process from attached process list
    16641681    remote_spinlock_lock( lock_xp );
    1665     xlist_unlink( XPTR( local_cxy , &process->txt_list ) );
     1682    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
    16661683    remote_spinlock_unlock( lock_xp );
    1667    
     1684
     1685#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
     1686if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1687{
     1688    xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
     1689    xptr_t iter_xp;
     1690    XLIST_FOREACH( root_xp , iter_xp )
     1691    {
     1692        xptr_t      current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
     1693        process_t * current_ptr = GET_PTR( current_xp );
     1694
     1695        printk("\n[DBG] %s : attached_process %x (pid = %x)\n",
     1696        __FUNCTION__, current_ptr, current_ptr->pid );
     1697    }
     1698}
     1699#endif
     1700
    16681701#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    16691702cycle = (uint32_t)hal_get_cycles();
    16701703if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
    1671 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
    1672 __FUNCTION__, CURRENT_THREAD, process, cycle );
     1704printk("\n[DBG] %s : thread %x exit / process %x detached from TXT / cycle %d\n",
     1705__FUNCTION__, CURRENT_THREAD, process->pid, cycle );
    16731706#endif
    16741707
     
    16801713    process_t * process_ptr;
    16811714    cxy_t       process_cxy;
     1715    pid_t       process_pid;
    16821716    xptr_t      file_xp;
    16831717    xptr_t      txt_xp;     
     
    16851719    cxy_t       txt_cxy;
    16861720
    1687     // get cluster and local pointer on process
     1721    // get pointers on process in owner cluster
    16881722    process_cxy = GET_CXY( process_xp );
    16891723    process_ptr = GET_PTR( process_xp );
     1724
     1725    // get process PID
     1726    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1727
     1728    // check owner cluster
     1729    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
     1730    "process descriptor not in owner cluster\n" );
     1731
     1732#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1733uint32_t cycle = (uint32_t)hal_get_cycles();
     1734if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1735printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     1736__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     1737#endif
    16901738
    16911739    // get extended pointer on stdin pseudo file
     
    17001748    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
    17011749
     1750#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1751cycle = (uint32_t)hal_get_cycles();
     1752if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1753printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
     1754__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     1755#endif
     1756
    17021757}  // end process_txt_set ownership()
    17031758
    1704 /////////////////////////////////////////////////////
    1705 void process_txt_reset_ownership( xptr_t process_xp )
    1706 {
    1707     process_t * process_ptr;
    1708     cxy_t       process_cxy;
    1709     xptr_t      parent_xp;       // extended pointer on parent process
    1710     process_t * parent_ptr;
    1711     cxy_t       parent_cxy;
     1759////////////////////////////////////////////////////////
     1760void process_txt_transfer_ownership( xptr_t process_xp )
     1761{
     1762    process_t * process_ptr;     // local pointer on process releasing ownership
     1763    cxy_t       process_cxy;     // process cluster
     1764    pid_t       process_pid;     // process identifier
    17121765    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
    17131766    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
     
    17171770    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
    17181771    xptr_t      root_xp;         // extended pointer on root of attached process list
     1772    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
    17191773    xptr_t      iter_xp;         // iterator for xlist
    17201774    xptr_t      current_xp;      // extended pointer on current process
    17211775    process_t * current_ptr;     // local pointer on current process
    17221776    cxy_t       current_cxy;     // cluster for current process
    1723     pid_t       ppid;            // parent process identifier for current process
    1724 
    1725     // get cluster and local pointer on process
     1777
     1778    // get pointers on process in owner cluster
    17261779    process_cxy = GET_CXY( process_xp );
    17271780    process_ptr = GET_PTR( process_xp );
     1781
     1782    // get process PID
     1783    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1784
     1785    // check owner cluster
     1786    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
     1787    "process descriptor not in owner cluster\n" );
     1788
     1789#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1790uint32_t cycle = (uint32_t)hal_get_cycles();
     1791if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1792printk("\n[DBG] %s : thread %x enter / process %x / pid %x / cycle %d\n",
     1793__FUNCTION__, CURRENT_THREAD, process_ptr, process_pid, cycle );
     1794#endif
    17281795
    17291796    // get extended pointer on stdin pseudo file
     
    17391806    txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
    17401807
    1741     // transfer ownership to KSH if required
    1742     if( (owner_xp == process_xp) && (txt_id > 0) )   
    1743     {
    1744         // get extended pointer on root of list of attached processes
    1745         root_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.root ) );
    1746 
    1747         // scan attached process list to find KSH process
    1748         XLIST_FOREACH( root_xp , iter_xp )
     1808#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
     1809if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1810printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n",
     1811__FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) );
     1812#endif
     1813
     1814    // transfer ownership only if process is the TXT owner
     1815    if( (owner_xp == process_xp) && (txt_id > 0) ) 
     1816    {
     1817        // get extended pointers on root and lock of attached processes list
     1818        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
     1819        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
     1820
     1821        // get lock
     1822        remote_spinlock_lock( lock_xp );
     1823
     1824        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
    17491825        {
    1750             current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
    1751             current_cxy = GET_CXY( current_xp );
    1752             current_ptr = GET_PTR( current_xp );
    1753             parent_xp   = hal_remote_lwd( XPTR( current_cxy , &current_ptr->parent_xp ) );
    1754             parent_cxy  = GET_CXY( parent_xp );
    1755             parent_ptr  = GET_PTR( parent_xp );
    1756             ppid        = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
    1757 
    1758 printk("\n@@@ %s : pid = %x / process = %x\n", __FUNCTION__ , current_ptr->pid, current_ptr );
    1759 
    1760             if( ppid == 1 )  // current is KSH
     1826
     1827#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
     1828if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1829printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ );
     1830#endif
     1831            // scan attached process list to find KSH process
     1832            XLIST_FOREACH( root_xp , iter_xp )
    17611833            {
    1762                 // set owner field in TXT chdev
    1763                 hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    1764                 return;
     1834                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
     1835                current_cxy = GET_CXY( current_xp );
     1836                current_ptr = GET_PTR( current_xp );
     1837
     1838                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
     1839                {
     1840                    // release lock
     1841                    remote_spinlock_unlock( lock_xp );
     1842
     1843                    // set owner field in TXT chdev
     1844                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     1845
     1846#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1847cycle = (uint32_t)hal_get_cycles();
     1848if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1849printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n",
     1850__FUNCTION__, CURRENT_THREAD, process_pid,
     1851hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
     1852#endif
     1853                     return;
     1854                }
    17651855            }
     1856 
     1857            // release lock
     1858            remote_spinlock_unlock( lock_xp );
     1859
     1860            // PANIC if KSH not found
     1861            assert( false , __FUNCTION__ , "KSH process not found for TXT %d" );
     1862
     1863            return;
    17661864        }
    1767 
    1768         assert( false , __FUNCTION__ , "KSH process not found" );
    1769     }
    1770 }  // end process_txt_reset_ownership()
    1771 
    1772 
    1773 //////////////////////////////////////////////////////     
    1774 inline pid_t process_get_txt_owner( uint32_t channel )
     1865        else                                               // process is KSH
     1866        {
     1867
     1868#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
     1869if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1870printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ );
     1871#endif
     1872
     1873            // scan attached process list to find another process
     1874            XLIST_FOREACH( root_xp , iter_xp )
     1875            {
     1876                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
     1877                current_cxy = GET_CXY( current_xp );
     1878                current_ptr = GET_PTR( current_xp );
     1879
     1880                if( current_xp != process_xp )            // current is not KSH
     1881                {
     1882                    // release lock
     1883                    remote_spinlock_unlock( lock_xp );
     1884
     1885                    // set owner field in TXT chdev
     1886                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     1887
     1888#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1889cycle = (uint32_t)hal_get_cycles();
     1890if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1891printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n",
     1892__FUNCTION__, CURRENT_THREAD, process_pid,
     1893hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
     1894#endif
     1895                     return;
     1896                }
     1897            }
     1898
     1899            // release lock
     1900            remote_spinlock_unlock( lock_xp );
     1901
     1902            // no more owner for TXT if no other process found
     1903            hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
     1904
     1905#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1906cycle = (uint32_t)hal_get_cycles();
     1907if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1908printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n",
     1909__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     1910#endif
     1911            return;
     1912        }
     1913    }
     1914    else
     1915    {
     1916
     1917#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1918cycle = (uint32_t)hal_get_cycles();
     1919if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1920printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n",
     1921__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     1922#endif
     1923
     1924    }
     1925}  // end process_txt_transfer_ownership()
     1926
     1927
     1928////////////////////////////////////////////////     
     1929xptr_t process_txt_get_owner( uint32_t channel )
    17751930{
    17761931    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
     
    17781933    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
    17791934
    1780     xptr_t process_xp = (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy,
    1781                                                 &txt_rx_ptr->ext.txt.owner_xp ) );
    1782 
    1783     cxy_t       process_cxy = GET_CXY( process_xp );
    1784     process_t * process_ptr = GET_PTR( process_xp );
    1785 
    1786     return (pid_t)hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1935    return (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
    17871936}
    17881937
     
    18171966    remote_spinlock_lock( lock_xp );
    18181967
    1819     // scan attached process list to find KSH process
     1968    // scan attached process list
    18201969    XLIST_FOREACH( root_xp , iter_xp )
    18211970    {
  • trunk/kernel/kern/process.h

    r435 r436  
    5858enum process_sigactions
    5959{
    60     BLOCK_ALL_THREADS    = 11,
    61     UNBLOCK_ALL_THREADS  = 22,
    62     DELETE_ALL_THREADS   = 33,
     60    BLOCK_ALL_THREADS    = 0x11,
     61    UNBLOCK_ALL_THREADS  = 0x22,
     62    DELETE_ALL_THREADS   = 0x33,
    6363};
    6464
     
    281281 * This function allows a client thread running in any cluster to block, unblock or delete
    282282 * all threads of a process identified by the <pid> argument, depending on the
    283  * <action_type> argument.  The scenario is the following:
    284  * - It uses the multicast, non blocking rpc_process_sigaction_client() function to send
    285  *   parallel requests to all remote clusters containing a process copy. Then it blocks
    286  $   and deschedule to wait completion of these parrallel requests.
    287  * - In each remote cluster, the rpc_process_sigaction_server() function, calls directly
    288  *   the relevant process_block(), process_unblock(), or process_delete() function, and
    289  *   decrement the responses counter to signal completion. The last server unblock
    290  *   the client thread.
    291  * - Finally, the client thread calls directly the process_block(), process_unblock(), or
    292  *   process_delete() function in the owner cluster.
     283 * <action_type> argument.
     284 * WARNING : the DELETE action is NOT executed on the target process main thread
     285 * (thread 0 in process owner cluster).
     286 * It uses the multicast, non blocking rpc_process_sigaction_client() function to send
     287 * parallel requests to all remote clusters containing a process copy.
     288 * Then it blocks and deschedule to wait completion of these parallel requests.
     289 *
    293290 * It is used by the sys_kill() & sys_exit() functions to handle the "kill" & "exit" syscalls.
    294291 * It is also used by the process_make_exec() function to handle the "exec" syscall.
    295  * It is also called by the TXT device to execute the ctrl C & ctrl Z commands.
    296  * WARNING : the DELETE action is NOT executed on the main thread (thread 0 in owner cluster).
     292 * It is also called by the TXT device ISR to execute the ctrl C & ctrl Z commands.
     293 *
     294 * Implementation note:
     295 * This function allocates a - shared - RPC descriptor in client thread stack,
     296 * and initializes it. This RPC descriptor can be shared because all parallel,
     297 * non-blocking, RPC server threads use the same input arguments, including the
     298 * RPC responses counter field.
    297299 *********************************************************************************************
    298300 * @ pid         : target process identifier.
     
    303305
    304306/*********************************************************************************************
    305  * This function blocks all threads for a given <process> in a given cluster.
    306  * The calling thread cannot be a target thread.
    307  * It loops on all local threads of the process, set the THREAD_BLOCKED_GLOBAL bit,
     307 * This function blocks all threads - but the main thread - for a given <process>
     308 * in a given cluster. It sets the THREAD_BLOCKED_GLOBAL bit in the thread descriptor,
    308309 * and request the relevant schedulers to acknowledge the blocking, using IPI if required.
    309310 * The threads are not detached from the scheduler, and not detached from the local process.
     
    322323
    323324/*********************************************************************************************
    324  * This function marks for deletion all threads - but one _ for a given <process>
    325  * in a given cluster. The main thread in owner cluster is NOT marked.
    326  * It will be marked for deletion by the parent process sys_wait().
    327  * The calling thread cannot be a target thread.
    328  * It loops on all local threads of the process, and set the THREAD_FLAG_REQ_DELETE bit.
    329  * For each marked thread, the following actions will be done by the scheduler at the next
    330  * scheduling point:
     325 * This function marks for deletion all threads - but the main thread - for a given <process>
     326 * in a given cluster. It sets the THREAD_FLAG_REQ_DELETE bit. For each marked thread,
     327 * the following actions will be done by the scheduler at the next scheduling point:
    331328 * - the thread will be detached from the scheduler.
    332329 * - the thread will be detached from the local process descriptor.
     
    349346 ********************************************************************************************/
    350347process_t * process_get_local_copy( pid_t pid );
     348
     349/*********************************************************************************************
     350 * This function returns the parent process identifier for a remote process descriptor
     351 * identified by an extended pointer.
     352 *********************************************************************************************
     353 * @ process_xp   : extended pointer on remote process descriptor.
     354 * @ returns parent process dentifier.
     355 ********************************************************************************************/
     356pid_t process_get_ppid( xptr_t process_xp );
    351357
    352358/*********************************************************************************************
     
    508514
    509515/*********************************************************************************************
    510  * This function attach a reference process descriptor, identified by the <process>
     516 * This function attach a process descriptor in owner cluster, identified by the <process>
    511517 * argument to a TXT terminal, identified by its <txt_id> channel index argument.
    512518 * It insert the process descriptor in the xlist rooted in the TXT_RX device.
     
    520526
    521527/*********************************************************************************************
    522  * This function detach a reference process descriptor, identified by the <process_xp>
    523  * argument, from the list of process attached to a given TXT terminal.
    524  * It is called when the process is killed.
    525  *********************************************************************************************
    526  * @ process  : local pointer on process descriptor.
    527  ********************************************************************************************/
    528 void process_txt_detach( process_t * process );                     
    529 
    530 /*********************************************************************************************
    531  * This function gives to a process identified by the <process_xp> argument, and attached
     528 * This function detach a process, identified by the <process_xp> argument,
     529 * from the list of process attached to a given TXT terminal.
     530 * The target process descriptor must be in the owner cluster, but the calling thread can
     531 * be running in any cluster.
     532 *********************************************************************************************
     533 * @ process_xp  : extended pointer on process descriptor.
     534 ********************************************************************************************/
     535void process_txt_detach( xptr_t  process_xp );                     
     536
     537/*********************************************************************************************
     538 * This function gives to a process identified by the <owner_xp> argument, and attached
    532539 * to terminal TXT[i] the exclusive ownership of the TXT_RX[i] terminal.
    533  *********************************************************************************************
    534  * @ process_xp  : extended pointer on reference process descriptor.
    535  ********************************************************************************************/
    536 void process_txt_set_ownership( xptr_t process_xp );
    537 
    538 /*********************************************************************************************
    539  * When the process identified by the <process_xp> argument has the exclusive ownership
    540  * of the TXT_RX[i] terminal, this function gives this ownership to the KSH[i] process.
    541  * It does nothing if the process is not the owner.
    542  *********************************************************************************************
    543  * @ process_xp  : extended pointer on reference process descriptor.
    544  ********************************************************************************************/
    545 void process_txt_reset_ownership( xptr_t process_xp );
    546 
    547 /*********************************************************************************************
    548  * This function returns the terminal owner process (foreground process)
     540 * The process descriptor must be in the process owner cluster.
     541 *********************************************************************************************
     542 * @ owner_xp  : extended pointer on process descriptor in owner cluster.
     543 ********************************************************************************************/
     544void process_txt_set_ownership( xptr_t owner_xp );
     545
     546/*********************************************************************************************
     547 * When the process dentified by the <owner_xp> argument has the exclusive ownership of
     548 * the TXT_RX terminal, this function transfer this ownership to another attached process.
     549 * The process descriptor must be in the process owner cluster.
     550 * This function does nothing if the <pid> process is not the owner.
     551 * - If the current owner is not the KSH process, the new owner is the KSH process.
     552 * - If the <pid> process is the the KSH process, the new owner is another attached process.
     553 * - If there is no other attached process, the TXT has no more defined owner.
     554 *********************************************************************************************
     555 * @ owner_xp  : extended pointer on process descriptor in owner cluster.
     556 ********************************************************************************************/
     557void process_txt_transfer_ownership( xptr_t owner_xp );
     558
     559/*********************************************************************************************
     560 * This function returns the TXT owner process (foreground process)
    549561 * for a given TXT terminal identified by its <channel> index.
    550562 *********************************************************************************************
    551563 * @ channel  : TXT terminal channel.
    552  * @ return owner process identifier.
    553  ********************************************************************************************/
    554 pid_t process_get_txt_owner( uint32_t channel );
     564 * @ return extentded pointer on TXT owner process in owner cluster.
     565 ********************************************************************************************/
     566xptr_t process_txt_get_owner( uint32_t channel );
    555567
    556568/*********************************************************************************************
  • trunk/kernel/kern/rpc.c

    r435 r436  
    8282    &rpc_thread_user_create_server,     // 6
    8383    &rpc_thread_kernel_create_server,   // 7
    84     &rpc_thread_kill_server,            // 8                      
     84    &rpc_undefined,                     // 8    unused slot       
    8585    &rpc_process_sigaction_server,      // 9
    8686
     
    122122               rpc_desc_t * rpc )
    123123{
    124     error_t    error;
    125 
    126     thread_t * this = CURRENT_THREAD;
    127     core_t   * core = this->core;
     124    volatile error_t   full = 0;
     125    thread_t         * this = CURRENT_THREAD;
     126    core_t           * core = this->core;
     127
     128#if CONFIG_DEBUG_RPC_SEND
     129uint32_t cycle = (uint32_t)hal_get_cycles();
     130if( CONFIG_DEBUG_RPC_SEND < cycle )
     131printk("\n[DBG] %s : thread %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n",
     132__FUNCTION__, CURRENT_THREAD, rpc->index, rpc, cycle );
     133#endif
    128134
    129135    // register client thread pointer and core lid in RPC descriptor
    130     rpc->thread    = this;
    131     rpc->lid       = core->lid;
     136    rpc->thread = this;
     137    rpc->lid    = core->lid;
    132138
    133139    // build an extended pointer on the RPC descriptor
    134140        xptr_t   desc_xp = XPTR( local_cxy , rpc );
    135141
    136     // get local pointer on rpc_fifo in remote cluster, with the
    137     // assumption that local pointers are identical in all clusters
     142    // get local pointer on rpc_fifo in remote cluster,
    138143    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    139144
    140         // try to post an item in remote fifo
    141     // deschedule and retry if remote fifo full
     145        // post RPC in remote fifo / deschedule and retry if fifo full
    142146    do
    143147    {
    144         error = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ),
    145                                       (uint64_t )desc_xp );
    146             if ( error )
     148        full = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), (uint64_t )desc_xp );
     149            if ( full )
    147150        {
    148151            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
    149152            __FUNCTION__ , local_cxy , server_cxy );
    150153
    151             if( thread_can_yield() ) sched_yield("RPC fifo full");
     154            // deschedule without blocking
     155            sched_yield("RPC fifo full");
    152156        }
    153157    }
    154     while( error );
     158    while( full );
    155159 
    156160    hal_fence();
     
    167171        {
    168172
    169 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n"
    170 "        rpc = %d / server = %x / cycle %d\n",
    171 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
    172 rpc->index , server_cxy , hal_time_stamp() );
     173#if CONFIG_DEBUG_RPC_SEND
     174cycle = (uint32_t)hal_get_cycles();
     175if( CONFIG_DEBUG_RPC_SEND < cycle )
     176printk("\n[DBG] %s : thread %x busy waiting / rpc[%d] / server = %x / cycle %d\n",
     177__FUNCTION__, CURRENT_THREAD, rpc->index , server_cxy , cycle );
     178#endif
    173179
    174180            while( rpc->response ) hal_fixed_delay( 100 );
    175181   
    176 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n",
    177 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );
    178 
     182#if CONFIG_DEBUG_RPC_SEND
     183cycle = (uint32_t)hal_get_cycles();
     184if( CONFIG_DEBUG_RPC_SEND < cycle )
     185printk("\n[DBG] %s : thread % resume / rpc[%d] / cycle %d\n",
     186__FUNCTION__, CURRENT_THREAD, rpc->index, cycle );
     187#endif
    179188        }
    180         else                                                              // block & deschedule
     189        else                                                         // block & deschedule
    181190        {
    182191
    183 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n"
    184 "        rpc = %d / server = %x / cycle %d\n",
    185 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
    186 rpc->index , server_cxy , hal_time_stamp() );
    187 
    188             thread_block( this , THREAD_BLOCKED_RPC );
    189             sched_yield("BLOCKED on RPC");
    190 
    191 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n",
    192 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );
    193 
     192#if CONFIG_DEBUG_RPC_SEND
     193cycle = (uint32_t)hal_get_cycles();
     194if( CONFIG_DEBUG_RPC_SEND < cycle )
     195printk("\n[DBG] %s : thread %x block & deschedule / rpc[%d] / server = %x / cycle %d\n",
     196__FUNCTION__, CURRENT_THREAD, rpc->index , server_cxy , cycle );
     197#endif
     198            thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
     199            sched_yield("blocked on RPC");
     200
     201#if CONFIG_DEBUG_RPC_SEND
     202cycle = (uint32_t)hal_get_cycles();
     203if( CONFIG_DEBUG_RPC_SEND < cycle )
     204printk("\n[DBG] %s : thread % resume / rpcr[%d] / cycle %d\n",
     205__FUNCTION__, CURRENT_THREAD, rpc->index, cycle );
     206#endif
    194207        }
    195208
     
    199212        // acknowledge the IPI sent by the server
    200213        dev_pic_ack_ipi();
     214    }
     215    else
     216    {
     217
     218#if CONFIG_DEBUG_RPC_SEND
     219cycle = (uint32_t)hal_get_cycles();
     220if( CONFIG_DEBUG_RPC_SEND < cycle )
      221printk("\n[DBG] %s : non blocking rpc[%d] => thread %x return / cycle %d\n",
      222__FUNCTION__, rpc->index, CURRENT_THREAD, cycle );
     223#endif
     224
    201225    }
    202226}  // end rpc_send()
     
    220244        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    221245
    222 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n",
    223 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
     246#if CONFIG_DEBUG_RPC_SERVER
     247uint32_t cycle = (uint32_t)hal_get_cycles();
     248if( CONFIG_DEBUG_RPC_SERVER < cycle )
     249printk("\n[DBG] %s : thread %x interrupted in cluster %x / cycle %d\n",
     250__FUNCTION__, this, local_cxy, cycle );
     251#endif
    224252
    225253    // interrupted thread not preemptable during RPC check
     
    262290                    hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    263291
    264 grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / trdid %x / cycle %d\n",
    265 __FUNCTION__ , local_cxy , core->lid , thread , thread->trdid , hal_time_stamp() );
    266 
     292#if CONFIG_DEBUG_RPC_SERVER
     293cycle = (uint32_t)hal_get_cycles();
     294if( CONFIG_DEBUG_RPC_SERVER < cycle )
     295printk("\n[DBG] %s : create a new RPC thread %x in cluster %x / cycle %d\n",
     296__FUNCTION__, thread, local_cxy, cycle );
     297#endif
    267298            }
    268299        }
    269300    }
    270301
    271 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n",
    272 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
     302#if CONFIG_DEBUG_RPC_SERVER
     303cycle = (uint32_t)hal_get_cycles();
     304if( CONFIG_DEBUG_RPC_SERVER < cycle )
     305printk("\n[DBG] %s : interrupted thread %x deschedules in cluster %x / cycle %d\n",
     306__FUNCTION__, this, local_cxy, cycle );
     307#endif
    273308
    274309    // interrupted thread deschedule always           
    275310        sched_yield("IPI received");
    276311
    277 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n",
    278 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
     312#if CONFIG_DEBUG_RPC_SERVER
     313cycle = (uint32_t)hal_get_cycles();
     314if( CONFIG_DEBUG_RPC_SERVER < cycle )
     315printk("\n[DBG] %s : interrupted thread %x resumes in cluster %x / cycle %d\n",
     316__FUNCTION__, this, local_cxy, cycle );
     317#endif
    279318
    280319    // interrupted thread restore IRQs after resume
     
    312351        if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    313352        {
     353
     354#if CONFIG_DEBUG_RPC_SERVER
     355uint32_t cycle = (uint32_t)hal_get_cycles();
     356if( CONFIG_DEBUG_RPC_SERVER < cycle )
     357printk("\n[DBG] %s : RPC thread %x takes RPC fifo ownership / cluster %x / cycle %d\n",
     358__FUNCTION__, this, local_cxy, cycle );
     359#endif
    314360            // initializes RPC requests counter
    315361            count = 0;
     
    324370                while( 1 )  // internal loop
    325371            {
    326 
    327372                    empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
    328373
     
    330375                {
    331376                    // get client cluster and pointer on RPC descriptor
    332                     desc_cxy = (cxy_t)GET_CXY( desc_xp );
    333                     desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp );
    334 
    335                     // get RPC <index> & <blocking> fields from RPC descriptor
    336                         index    = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
    337                     blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );
    338 
    339 grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n",
    340 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , (uint32_t)hal_get_cycles() );
    341 
     377                    desc_cxy = GET_CXY( desc_xp );
     378                    desc_ptr = GET_PTR( desc_xp );
     379
     380                        index    = desc_ptr->index;
     381                    blocking = desc_ptr->blocking;
     382
     383#if CONFIG_DEBUG_RPC_SERVER
     384cycle = (uint32_t)hal_get_cycles();
     385if( CONFIG_DEBUG_RPC_SERVER < cycle )
     386printk("\n[DBG] %s : RPC thread %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
     387__FUNCTION__, this, index, desc_ptr, cycle );
     388#endif
    342389                    // call the relevant server function
    343390                    rpc_server[index]( desc_xp );
    344391
    345 grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n",
    346 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );
    347 
     392#if CONFIG_DEBUG_RPC_SERVER
     393cycle = (uint32_t)hal_get_cycles();
     394if( CONFIG_DEBUG_RPC_SERVER < cycle )
     395printk("\n[DBG] %s : RPC thread %x completes rpc %d in cluster %x / cycle %d\n",
     396__FUNCTION__, this, index, local_cxy, cycle );
     397#endif
    348398                    // increment handled RPCs counter
    349399                        count++;
     
    382432            {
    383433
    384 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n",
    385 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
    386 
     434#if CONFIG_DEBUG_RPC_SERVER
     435uint32_t cycle = (uint32_t)hal_get_cycles();
     436if( CONFIG_DEBUG_RPC_SERVER < cycle )
     437printk("\n[DBG] %s : RPC thread %x suicides in cluster %x / cycle %d\n",
     438__FUNCTION__, this, local_cxy, cycle );
     439#endif
    387440            // update RPC threads counter
    388441                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
    389442
    390443            // suicide
    391                 thread_kill( this );
     444                thread_kill( XPTR( local_cxy , this ),
     445                         true,                      // is_exit
     446                         true );                    // is forced
    392447            }
    393448
    394 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n",
    395 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     449#if CONFIG_DEBUG_RPC_SERVER
     450uint32_t cycle = (uint32_t)hal_get_cycles();
     451if( CONFIG_DEBUG_RPC_SERVER < cycle )
     452printk("\n[DBG] %s : RPC thread %x deschedules in cluster %x / cycle %d\n",
     453__FUNCTION__, this, local_cxy, cycle );
     454#endif
    396455
    397456        // deschedule without blocking
    398457        sched_yield("RPC fifo empty or too much work");
    399458
    400 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n",
    401 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     459#if CONFIG_DEBUG_RPC_SERVER
     460cycle = (uint32_t)hal_get_cycles();
     461if( CONFIG_DEBUG_RPC_SERVER < cycle )
     462printk("\n[DBG] %s : RPC thread %x resumes in cluster %x / cycle %d\n",
     463__FUNCTION__, this, local_cxy, cycle );
     464#endif
    402465
    403466        } // end external loop
     
    430493    rpc.args[0] = (uint64_t)order;
    431494
    432     // register RPC request in remote RPC fifo (blocking function)
     495    // register RPC request in remote RPC fifo
    433496    rpc_send( cxy , &rpc );
    434497
     
    449512
    450513    // get client cluster identifier and pointer on RPC descriptor
    451     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    452     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     514    cxy_t        cxy  = GET_CXY( xp );
     515    rpc_desc_t * desc = GET_PTR( xp );
    453516
    454517    // get input arguments from client RPC descriptor
     
    489552    rpc.args[0] = (uint64_t)(intptr_t)page;
    490553
    491     // register RPC request in remote RPC fifo (blocking function)
     554    // register RPC request in remote RPC fifo
    492555    rpc_send( cxy , &rpc );
    493556
     
    505568
    506569    // get client cluster identifier and pointer on RPC descriptor
    507     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    508     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     570    cxy_t        cxy  = GET_CXY( xp );
     571    rpc_desc_t * desc = GET_PTR( xp );
    509572
    510573    // get input arguments from client RPC descriptor
     
    554617    rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp;
    555618
    556     // register RPC request in remote RPC fifo (blocking function)
     619    // register RPC request in remote RPC fifo
    557620    rpc_send( cxy , &rpc );
    558621
     
    581644
    582645    // get client cluster identifier and pointer on RPC descriptor
    583     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    584     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     646    cxy_t        client_cxy  = GET_CXY( xp );
     647    rpc_desc_t * desc        = GET_PTR( xp );
    585648
    586649    // get input arguments from cient RPC descriptor
     
    613676
    614677/////////////////////////////////////////////////////////////////////////////////////////
    615 // [6]           Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 
     678// [6]      Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 
    616679/////////////////////////////////////////////////////////////////////////////////////////
    617680
     
    633696    // initialise RPC descriptor header
    634697    rpc_desc_t  rpc;
    635     rpc.index     = RPC_THREAD_USER_CREATE;
    636     rpc.response  = 1;
     698    rpc.index    = RPC_THREAD_USER_CREATE;
     699    rpc.response = 1;
    637700    rpc.blocking = true;
    638701
     
    643706    rpc.args[3] = (uint64_t)(intptr_t)attr;
    644707
    645     // register RPC request in remote RPC fifo (blocking function)
     708    // register RPC request in remote RPC fifo
    646709    rpc_send( cxy , &rpc );
    647710
     
    673736
    674737    // get client cluster identifier and pointer on RPC descriptor
    675     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    676     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     738    cxy_t        client_cxy  = GET_CXY( xp );
     739    rpc_desc_t * desc = GET_PTR( xp );
    677740
    678741    // get pointer on attributes structure in client cluster from RPC descriptor
     
    707770
    708771/////////////////////////////////////////////////////////////////////////////////////////
    709 // [7]           Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)
     772// [7]      Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)
    710773/////////////////////////////////////////////////////////////////////////////////////////
    711774
     
    735798    rpc.args[2] = (uint64_t)(intptr_t)args;
    736799   
    737     // register RPC request in remote RPC fifo (blocking function)
     800    // register RPC request in remote RPC fifo
    738801    rpc_send( cxy , &rpc );
    739802
     
    760823
    761824    // get client cluster identifier and pointer on RPC descriptor
    762     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    763     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     825    cxy_t        client_cxy  = GET_CXY( xp );
     826    rpc_desc_t * desc = GET_PTR( xp );
    764827
    765828    // get attributes from RPC descriptor
     
    785848
    786849/////////////////////////////////////////////////////////////////////////////////////////
    787 // [8]           Marshaling functions attached to RPC_THREAD_KILL (blocking)
    788 /////////////////////////////////////////////////////////////////////////////////////////
    789 
    790 /////////////////////////////////////////////
    791 void rpc_thread_kill_client( cxy_t       cxy,
    792                              thread_t  * thread )    // in
    793 {
    794 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    795 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    796 CURRENT_THREAD->core->lid , hal_time_stamp() );
    797 
    798     // this RPC can be called in local cluster
    799 
    800     // initialise RPC descriptor header
    801     rpc_desc_t  rpc;
    802     rpc.index    = RPC_THREAD_KILL;
    803     rpc.response = 1;
    804     rpc.blocking = true;
    805 
    806     // set input arguments in RPC descriptor
    807     rpc.args[0] = (uint64_t)(intptr_t)thread;
    808    
    809     // register RPC request in remote RPC fifo (blocking function)
    810     rpc_send( cxy , &rpc );
    811 
    812 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    813 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    814 CURRENT_THREAD->core->lid , hal_time_stamp() );
    815 }
    816 
    817 ////////////////////////////////////////                             
    818 void rpc_thread_kill_server( xptr_t xp )
    819 {
    820 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    821 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    822 CURRENT_THREAD->core->lid , hal_time_stamp() );
    823 
    824     thread_t  * thread;  // local pointer on process descriptor
    825 
    826     // get client cluster identifier and pointer on RPC descriptor
    827     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    828     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
    829 
    830     // get attributes from RPC descriptor
    831     thread = (thread_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    832 
    833     // call local kernel function
    834     thread_kill( thread );
    835 
    836 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    837 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    838 CURRENT_THREAD->core->lid , hal_time_stamp() );
    839 }
     850// [8]   undefined slot
     851/////////////////////////////////////////////////////////////////////////////////////////
    840852
    841853
     
    846858////////////////////////////////////////////////////
    847859void rpc_process_sigaction_client( cxy_t        cxy,
    848                                    rpc_desc_t * rpc_ptr )
    849 {
    850 rpc_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
    851 __FUNCTION__ , process_action_str( (uint32_t)rpc_ptr->args[0] ) ,
    852 ((process_t *)(intptr_t)rpc_ptr->args[1])->pid , cxy , (uint32_t)hal_get_cycles() );
    853 
    854     // register RPC request in remote RPC fifo
    855     rpc_send( cxy , rpc_ptr );
    856 
    857 rpc_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n",
    858 __FUNCTION__ , process_action_str( (uint32_t)rpc_ptr->args[0] ) ,
    859 ((process_t *)(intptr_t)rpc_ptr->args[1])->pid , cxy , (uint32_t)hal_get_cycles() );
    860 
     860                                   rpc_desc_t * rpc )
     861{
     862
     863#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     864uint32_t  cycle  = (uint32_t)hal_get_cycles();
     865uint32_t  action = rpc->args[0];
     866pid_t     pid    = rpc->args[1];
     867if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     868printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
     869__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
     870#endif
     871
     872    // check some RPC arguments
     873    assert( (rpc->blocking == false) , __FUNCTION__ , "must be non-blocking\n");
     874    assert( (rpc->index == RPC_PROCESS_SIGACTION ) , __FUNCTION__ , "bad RPC index\n" );
     875
     876    // register RPC request in remote RPC fifo and return
     877    rpc_send( cxy , rpc );
     878
     879#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     880cycle = (uint32_t)hal_get_cycles();
     881if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     882printk("\n[DBG] %s : exit after requesting to %s process %x in cluster %x / cycle %d\n",
     883__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
     884#endif
     885
     886}  // end rpc_process_sigaction_client()
    861887
    862888//////////////////////////////////////////////
     
    864890{
    865891    pid_t        pid;              // target process identifier
    866     process_t  * process;          // pointer on local process descriptor
     892    process_t  * process;          // pointer on local target process descriptor
    867893    uint32_t     action;           // sigaction index
    868     thread_t   * client_ptr;       // local pointer on client thread in client cluster
     894    thread_t   * client_thread;    // pointer on client thread in client cluster
    869895    cxy_t        client_cxy;       // client cluster identifier
    870     xptr_t       client_xp;        // extended pointer on client thread
    871     core_t     * client_core;      // local pointer on core running the client thread
    872     rpc_desc_t * rpc;              // local pointer on rpc descriptor in client cluster
    873 
    874     // get client cluster identifier and pointer on RPC descriptor
    875     client_cxy = (cxy_t)GET_CXY( xp );
    876     rpc        = (rpc_desc_t *)GET_PTR( xp );
     896    rpc_desc_t * rpc;              // pointer on rpc descriptor in client cluster
     897    xptr_t       count_xp;         // extended pointer on response counter
     898    lid_t        client_lid;       // client core local index
     899
     900    // get client cluster identifier and pointer on RPC descriptor
     901    client_cxy = GET_CXY( xp );
     902    rpc        = GET_PTR( xp );
    877903
    878904    // get arguments from RPC descriptor
    879     action      = (uint32_t)  hal_remote_lwd( XPTR( client_cxy , &rpc->args[0] ) );
    880     pid         = (pid_t)     hal_remote_lwd( XPTR( client_cxy , &rpc->args[1] ) );
    881     client_ptr  = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
    882 
    883 rpc_dmsg("\n[DBG] %s : enter to %s process %x / cycle %d\n",
    884 __FUNCTION__ , process_action_str( action ) , pid , (uint32_t)hal_get_cycles() );
     905    action   = (uint32_t)hal_remote_lwd( XPTR(client_cxy , &rpc->args[0]) );
     906    pid      = (pid_t)   hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) );
     907
     908#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     909uint32_t cycle = (uint32_t)hal_get_cycles();
     910if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     911printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
     912__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
     913#endif
    885914
    886915    // get local process descriptor
    887     process = process_get_local_copy( pid );
    888 
    889     // build extended pointer on client thread
    890     client_xp = XPTR( client_cxy , client_ptr );
     916    process = cluster_get_local_process_from_pid( pid );
    891917
    892918    // call relevant kernel function
    893     if      (action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
    894     else if (action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
    895     else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
     919    if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
     920    else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
     921    else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
     922
     923    // build extended pointer on response counter in RPC
     924    count_xp = XPTR( client_cxy , &rpc->response );
    896925
    897926    // decrement the responses counter in RPC descriptor,
    898927    // unblock the client thread only if it is the last response.
    899     if( hal_remote_atomic_add( XPTR( client_cxy , &rpc->response ) , -1 ) == 1 )
     928    if( hal_remote_atomic_add( count_xp , -1 ) == 1 )
    900929    {
    901         client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
    902         thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    903         dev_pic_send_ipi( client_cxy , client_core->lid );
     930        // get client thread pointer and client core lid
     931        client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
     932        client_lid    = (lid_t)     hal_remote_lw ( XPTR( client_cxy , &rpc->lid    ) );
     933
     934        thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC );
     935        dev_pic_send_ipi( client_cxy , client_lid );
    904936    }
    905937
    906 rpc_dmsg("\n[DBG] %s : exit after %s process %x / cycle %d\n",
    907 __FUNCTION__ , process_action_str( action ) , pid , (uint32_t)hal_get_cycles() );
    908 }
    909 
    910 /////////////////////////////////////////////////////////////////////////////////////////
    911 // [10]          Marshaling functions attached to RPC_VFS_INODE_CREATE  (blocking)
     938#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     939cycle = (uint32_t)hal_get_cycles();
     940if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     941printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n",
     942__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
     943#endif
     944
     945} // end rpc_process_sigaction_server()
     946
     947/////////////////////////////////////////////////////////////////////////////////////////
     948// [10]     Marshaling functions attached to RPC_VFS_INODE_CREATE  (blocking)
    912949/////////////////////////////////////////////////////////////////////////////////////////
    913950
     
    947984    rpc.args[7] = (uint64_t)gid;
    948985
    949     // register RPC request in remote RPC fifo (blocking function)
     986    // register RPC request in remote RPC fifo
    950987    rpc_send( cxy , &rpc );
    951988
     
    9781015
    9791016    // get client cluster identifier and pointer on RPC descriptor
    980     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    981     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1017    cxy_t        client_cxy  = GET_CXY( xp );
     1018    rpc_desc_t * desc        = GET_PTR( xp );
    9821019
    9831020    // get input arguments from client rpc descriptor
     
    10341071    rpc.args[0] = (uint64_t)(intptr_t)inode;
    10351072   
    1036     // register RPC request in remote RPC fifo (blocking function)
     1073    // register RPC request in remote RPC fifo
    10371074    rpc_send( cxy , &rpc );
    10381075
     
    10521089
    10531090    // get client cluster identifier and pointer on RPC descriptor
    1054     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1055     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1091    cxy_t        client_cxy  = GET_CXY( xp );
     1092    rpc_desc_t * desc        = GET_PTR( xp );
    10561093
    10571094    // get arguments "inode" from client RPC descriptor
     
    10951132    rpc.args[2] = (uint64_t)(intptr_t)parent;
    10961133
    1097     // register RPC request in remote RPC fifo (blocking function)
     1134    // register RPC request in remote RPC fifo
    10981135    rpc_send( cxy , &rpc );
    10991136
     
    11231160
    11241161    // get client cluster identifier and pointer on RPC descriptor
    1125     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1126     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1162    cxy_t        client_cxy  = GET_CXY( xp );
     1163    rpc_desc_t * desc        = GET_PTR( xp );
    11271164
    11281165    // get arguments "name", "type", and "parent" from client RPC descriptor
     
    11731210    rpc.args[0] = (uint64_t)(intptr_t)dentry;
    11741211   
    1175     // register RPC request in remote RPC fifo (blocking function)
     1212    // register RPC request in remote RPC fifo
    11761213    rpc_send( cxy , &rpc );
    11771214
     
    11911228
    11921229    // get client cluster identifier and pointer on RPC descriptor
    1193     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1194     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1230    cxy_t        client_cxy  = GET_CXY( xp );
     1231    rpc_desc_t * desc        = GET_PTR( xp );
    11951232
    11961233    // get arguments "dentry" from client RPC descriptor
     
    12331270    rpc.args[1] = (uint64_t)file_attr;
    12341271
    1235     // register RPC request in remote RPC fifo (blocking function)
     1272    // register RPC request in remote RPC fifo
    12361273    rpc_send( cxy , &rpc );
    12371274
     
    12581295
    12591296    // get client cluster identifier and pointer on RPC descriptor
    1260     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1261     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1297    cxy_t        client_cxy  = GET_CXY( xp );
     1298    rpc_desc_t * desc        = GET_PTR( xp );
    12621299
    12631300    // get arguments "file_attr" and "inode" from client RPC descriptor
     
    13021339    rpc.args[0] = (uint64_t)(intptr_t)file;
    13031340   
    1304     // register RPC request in remote RPC fifo (blocking function)
     1341    // register RPC request in remote RPC fifo
    13051342    rpc_send( cxy , &rpc );
    13061343
     
    13201357
    13211358    // get client cluster identifier and pointer on RPC descriptor
    1322     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1323     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1359    cxy_t        client_cxy  = GET_CXY( xp );
     1360    rpc_desc_t * desc        = GET_PTR( xp );
    13241361
    13251362    // get arguments "dentry" from client RPC descriptor
     
    13621399    rpc.args[2] = (uint64_t)child_inode_xp;
    13631400
    1364     // register RPC request in remote RPC fifo (blocking function)
     1401    // register RPC request in remote RPC fifo
    13651402    rpc_send( cxy , &rpc );
    13661403
     
    13881425
    13891426    // get client cluster identifier and pointer on RPC descriptor
    1390     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1391     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1427    cxy_t        client_cxy  = GET_CXY( xp );
     1428    rpc_desc_t * desc        = GET_PTR( xp );
    13921429
    13931430    // get arguments "parent", "name", and "child_xp"
     
    14351472    rpc.args[0] = (uint64_t)(intptr_t)inode;
    14361473
    1437     // register RPC request in remote RPC fifo (blocking function)
     1474    // register RPC request in remote RPC fifo
    14381475    rpc_send( cxy , &rpc );
    14391476
     
    14571494
    14581495    // get client cluster identifier and pointer on RPC descriptor
    1459     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1460     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1496    cxy_t        client_cxy  = GET_CXY( xp );
     1497    rpc_desc_t * desc        = GET_PTR( xp );
    14611498
    14621499    // get arguments "parent", "name", and "child_xp"
     
    15291566
    15301567    // get client cluster identifier and pointer on RPC descriptor
    1531     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1532     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     1568    cxy_t        client_cxy  = GET_CXY( xp );
     1569    rpc_desc_t * desc = GET_PTR( xp );
    15331570
    15341571    // get input arguments
     
    15761613    rpc.args[1] = (uint64_t)vaddr;
    15771614
    1578     // register RPC request in remote RPC fifo (blocking function)
     1615    // register RPC request in remote RPC fifo
    15791616    rpc_send( cxy , &rpc );
    15801617
     
    16021639
    16031640    // get client cluster identifier and pointer on RPC descriptor
    1604     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1605     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1641    cxy_t        client_cxy  = GET_CXY( xp );
     1642    rpc_desc_t * desc        = GET_PTR( xp );
    16061643
    16071644    // get input argument from client RPC descriptor
     
    16531690    rpc.args[2] = (uint64_t)cow;
    16541691
    1655     // register RPC request in remote RPC fifo (blocking function)
     1692    // register RPC request in remote RPC fifo
    16561693    rpc_send( cxy , &rpc );
    16571694
     
    16811718
    16821719    // get client cluster identifier and pointer on RPC descriptor
    1683     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1684     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1720    cxy_t        client_cxy  = GET_CXY( xp );
     1721    rpc_desc_t * desc        = GET_PTR( xp );
    16851722
    16861723    // get input argument "process" & "vpn" from client RPC descriptor
     
    17261763    rpc.args[0] = (uint64_t)kmem_type;
    17271764
    1728     // register RPC request in remote RPC fifo (blocking function)
     1765    // register RPC request in remote RPC fifo
    17291766    rpc_send( cxy , &rpc );
    17301767
     
    17451782
    17461783    // get client cluster identifier and pointer on RPC descriptor
    1747     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1748     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     1784    cxy_t        client_cxy  = GET_CXY( xp );
     1785    rpc_desc_t * desc = GET_PTR( xp );
    17491786
    17501787    // get input argument "kmem_type" from client RPC descriptor
     
    17911828    rpc.args[1] = (uint64_t)kmem_type;
    17921829
    1793     // register RPC request in remote RPC fifo (blocking function)
     1830    // register RPC request in remote RPC fifo
    17941831    rpc_send( cxy , &rpc );
    17951832
     
    18071844
    18081845    // get client cluster identifier and pointer on RPC descriptor
    1809     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1810     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     1846    cxy_t        client_cxy  = GET_CXY( xp );
     1847    rpc_desc_t * desc = GET_PTR( xp );
    18111848
    18121849    // get input arguments "buf" and "kmem_type" from client RPC descriptor
     
    18591896    rpc.args[5] = (uint64_t)size;
    18601897
    1861     // register RPC request in remote RPC fifo (blocking function)
     1898    // register RPC request in remote RPC fifo
    18621899    rpc_send( cxy , &rpc );
    18631900
     
    18871924
    18881925    // get client cluster identifier and pointer on RPC descriptor
    1889     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    1890     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1926    cxy_t        client_cxy  = GET_CXY( xp );
     1927    rpc_desc_t * desc        = GET_PTR( xp );
    18911928
    18921929    // get arguments from client RPC descriptor
     
    19531990    rpc.args[1] = (uint64_t)index;
    19541991
    1955     // register RPC request in remote RPC fifo (blocking function)
     1992    // register RPC request in remote RPC fifo
    19561993    rpc_send( cxy , &rpc );
    19571994
     
    19722009
    19732010    // get client cluster identifier and pointer on RPC descriptor
    1974     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    1975     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     2011    cxy_t        cxy  = GET_CXY( xp );
     2012    rpc_desc_t * desc = GET_PTR( xp );
    19762013
    19772014    // get input arguments from client RPC descriptor
     
    20282065    rpc.args[7] = (uint64_t)vseg_cxy;
    20292066
    2030     // register RPC request in remote RPC fifo (blocking function)
     2067    // register RPC request in remote RPC fifo
    20312068    rpc_send( cxy , &rpc );
    20322069
     
    20472084
    20482085    // get client cluster identifier and pointer on RPC descriptor
    2049     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    2050     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     2086    cxy_t        cxy  = GET_CXY( xp );
     2087    rpc_desc_t * desc = GET_PTR( xp );
    20512088
    20522089    // get input arguments from client RPC descriptor
     
    21012138    rpc.args[0] = (uint64_t)lid;
    21022139
    2103     // register RPC request in remote RPC fifo (blocking function)
     2140    // register RPC request in remote RPC fifo
    21042141    rpc_send( cxy , &rpc );
    21052142
     
    21172154
    21182155    // get client cluster identifier and pointer on RPC descriptor
    2119     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    2120     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     2156    cxy_t        cxy  = GET_CXY( xp );
     2157    rpc_desc_t * desc = GET_PTR( xp );
    21212158
    21222159    // get input arguments from client RPC descriptor
     
    21542191    rpc.args[0] = (uint64_t)(intptr_t)process;
    21552192
    2156     // register RPC request in remote RPC fifo (blocking function)
     2193    // register RPC request in remote RPC fifo
    21572194    rpc_send( cxy , &rpc );
    21582195
     
    21722209
    21732210    // get client cluster identifier and pointer on RPC descriptor
    2174     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    2175     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     2211    cxy_t        cxy  = GET_CXY( xp );
     2212    rpc_desc_t * desc = GET_PTR( xp );
    21762213
    21772214    // get input arguments from client RPC descriptor
     
    22112248    rpc.args[1] = (uint64_t)detailed;
    22122249
    2213     // register RPC request in remote RPC fifo (blocking function)
     2250    // register RPC request in remote RPC fifo
    22142251    rpc_send( cxy , &rpc );
    22152252
     
    22302267
    22312268    // get client cluster identifier and pointer on RPC descriptor
    2232     cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    2233     rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     2269    cxy_t        cxy  = GET_CXY( xp );
     2270    rpc_desc_t * desc = GET_PTR( xp );
    22342271
    22352272    // get input arguments from client RPC descriptor
  • trunk/kernel/kern/rpc.h

    r435 r436  
    4848struct mapper_s;
    4949
     50
    5051/**********************************************************************************/
    5152/**************  structures for Remote Procedure Calls ****************************/
     
    6768    RPC_THREAD_USER_CREATE     = 6,
    6869    RPC_THREAD_KERNEL_CREATE   = 7,
    69     RPC_THREAD_KILL            = 8,
     70    RPC_UNDEFINED_8            = 8,
    7071    RPC_PROCESS_SIGACTION      = 9,
    7172
     
    288289
    289290/***********************************************************************************
    290  * [8] The RPC_THREAD_KILL ask a target cluster to kill a given thread descriptor.
    291  * It is called by the sys_thread_cancel() function for a remote thread.
    292  ***********************************************************************************
    293  * @ cxy       : server cluster identifier.
    294  * @ thread   : [in]  local pointer on target process descriptor in server.
    295  **********************************************************************************/
    296 void rpc_thread_kill_client( cxy_t              cxy,
    297                              struct thread_s  * thread );
    298                              
    299 void rpc_thread_kill_server( xptr_t xp );
    300 
    301 /***********************************************************************************
    302  * [9] The RPC_PROCESS_SIGACTION allows the owner cluster to request any other
    303  * cluster to execute a given sigaction (BLOCK / UNBLOCK / DELETE) for all
    304  * threads of a given process.
     291 * [8] undefined slot
     292 **********************************************************************************/
     293
     294/***********************************************************************************
     295 * [9] The RPC_PROCESS_SIGACTION allows a thread running in any cluster
     296 * to request a cluster identified by the <cxy> argument (local or remote)
     297 * to execute a given sigaction for a given cluster. The <action_type> and
     298 * the <pid> arguments are defined in the shared RPC descriptor, that must be
     299 * initialised by the client thread.
    305300 *
    306301 * WARNING : It is implemented as a NON BLOCKING multicast RPC, that can be sent
    307  * in parallel to all process copies. The rpc descriptor is allocated in the client
    308  * thread stack by the process_sigaction() function. The various server threads
    309  * must decrement the responses counter defined in the rsp descriptor, and the last
    310  * server thread unblock the client thread that blocked (after sending all RPC
    311  * requests) in the process_sigaction() function.
    312  * - The first RPC argument is the sigaction type (BLOCK / UNBLOCK / DELETE).
    313  * - The second RPC argument is the local pointer on target process.
    314  ***********************************************************************************
    315  * @ cxy       : server cluster identifier.
    316  * @ rpc_ptr   : [in]  local pointer on rpc descriptor in client cluster.
    317  **********************************************************************************/
    318 void rpc_process_sigaction_client( cxy_t        cxy,
    319                                    rpc_desc_t * rpc_ptr );
     302 * in parallel to all process copies. The various RPC server threads atomically
     303 * decrement the <response> field in the shared RPC descriptor.
     304 * The last server thread unblock the client thread that blocked (after sending
     305 * all RPC requests) in the process_sigaction() function.
     306 ***********************************************************************************
     307 * @ cxy     : server cluster identifier.
     308 * @ rpc     : pointer on ishared RPC descriptor initialized by the client thread.
     309 **********************************************************************************/
     310void rpc_process_sigaction_client( cxy_t               cxy,
     311                                   struct rpc_desc_s * rpc );
    320312                             
    321313void rpc_process_sigaction_server( xptr_t xp );
  • trunk/kernel/kern/scheduler.c

    r435 r436  
    286286    next = sched_select( sched );
    287287
     288    // check next thread kernel_stack overflow
     289    assert( (next->signature == THREAD_SIGNATURE),
     290    __FUNCTION__ , "kernel stack overflow for thread %x\n", next );
     291
    288292    // check next thread attached to same core as the calling thread
    289     assert( (next->core == current->core), __FUNCTION__ ,
    290     "next core != current core\n");
     293    assert( (next->core == current->core),
     294    __FUNCTION__ , "next core %x != current core %x\n", next->core, current->core );
    291295
    292296    // check next thread not blocked when type != IDLE
     
    327331    {
    328332
    329 #if( CONFIG_DEBUG_SCHED_YIELD & 0x1 )
     333#if (CONFIG_DEBUG_SCHED_YIELD & 1)
    330334uint32_t cycle = (uint32_t)hal_get_cycles();
    331335if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     
    354358    uint32_t       save_sr;
    355359
    356     if( lid >= LOCAL_CLUSTER->cores_nr )
    357     {
    358         printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
    359         __FUNCTION__ , lid , local_cxy );
    360         return;
    361     }
     360    assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid);
    362361
    363362    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
  • trunk/kernel/kern/scheduler.h

    r433 r436  
    4949    struct thread_s * idle;            /*! pointer on idle thread                           */
    5050    struct thread_s * current;         /*! pointer on current running thread                */
    51     bool_t            req_ack_pending; /*! signal_handller must be called when true         */
     51    volatile bool_t   req_ack_pending; /*! sequencialize ack requests when true             */
    5252}
    5353scheduler_t;
  • trunk/kernel/kern/thread.c

    r433 r436  
    799799}  // end thread_check_sched()
    800800
    801 /////////////////////////////////////
    802 void thread_block( thread_t * thread,
    803                    uint32_t   cause )
    804 {
     801//////////////////////////////////////
     802void thread_block( xptr_t   thread_xp,
     803                   uint32_t cause )
     804{
     805    // get thread cluster and local pointer
     806    cxy_t      cxy = GET_CXY( thread_xp );
     807    thread_t * ptr = GET_PTR( thread_xp );
     808
    805809    // set blocking cause
    806     hal_atomic_or( &thread->blocked , cause );
     810    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
    807811    hal_fence();
    808812
     
    810814uint32_t cycle = (uint32_t)hal_get_cycles();
    811815if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
    812 printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / state %x / cycle %d\n",
    813 __FUNCTION__ , CURRENT_THREAD , thread , cause , thread->blocked , cycle );
     816printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
     817__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
     818#endif
     819
     820#if (CONFIG_DEBUG_THREAD_BLOCK & 1)
     821if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     822sched_display( ptr->core->lid );
    814823#endif
    815824
     
    831840uint32_t cycle = (uint32_t)hal_get_cycles();
    832841if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
    833 printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / state %x / cycle %d\n",
    834 __FUNCTION__ , CURRENT_THREAD , ptr , cause , ptr->blocked , cycle );
     842printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
     843__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
     844#endif
     845
     846#if (CONFIG_DEBUG_THREAD_BLOCK & 1)
     847if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     848sched_display( ptr->core->lid );
    835849#endif
    836850
     
    840854}  // end thread_unblock()
    841855
    842 /////////////////////////////////////
    843 void thread_kill( thread_t * target )
    844 {
    845     volatile uint32_t  rsp_count = 1;     // responses counter
    846 
    847     thread_t * killer = CURRENT_THREAD;
     856////////////////////////////////////
     857void thread_kill( xptr_t  target_xp,
     858                  bool_t  is_exit,
     859                  bool_t  is_forced )
     860{
     861    reg_t       save_sr;                // for critical section
     862    bool_t      attached;               // target thread in attached mode
     863    bool_t      join_done;              // joining thread arrived first
     864    xptr_t      killer_xp;              // extended pointer on killer thread (this)
     865    thread_t  * killer_ptr;             // pointer on killer thread (this)
     866    cxy_t       target_cxy;             // target thread cluster     
     867    thread_t  * target_ptr;             // pointer on target thread
     868    xptr_t      joining_xp;             // extended pointer on joining thread
     869    thread_t  * joining_ptr;            // pointer on joining thread
     870    cxy_t       joining_cxy;            // joining thread cluster
     871    pid_t       target_pid;             // target process PID
     872    cxy_t       owner_cxy;              // target process owner cluster
     873    trdid_t     target_trdid;           // target thread identifier
     874    ltid_t      target_ltid;            // target thread local index
     875    xptr_t      process_state_xp;       // extended pointer on <term_state> in process
     876
     877    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
     878    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
     879    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
     880    xptr_t      target_process_xp;      // extended pointer on target thread <process>
     881
     882    process_t * target_process;         // pointer on target thread process
     883
     884    // get target thread cluster and pointer
     885    target_cxy = GET_CXY( target_xp );
     886    target_ptr = GET_PTR( target_xp );
     887
     888    // get killer thread pointers
     889    killer_ptr = CURRENT_THREAD;
     890    killer_xp  = XPTR( local_cxy , killer_ptr );
    848891
    849892#if CONFIG_DEBUG_THREAD_KILL
     
    851894if( CONFIG_DEBUG_THREAD_KILL < cycle )
    852895printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
    853 __FUNCTION__, killer, target, cycle );
    854 #endif
    855 
    856     // set the global blocked bit in target thread descriptor.
    857     thread_block( target , THREAD_BLOCKED_GLOBAL );
    858 
    859     // request target scheduler to deschedule the target thread
    860     // when killer thread is not running on same core as target thread
    861     if( killer->core->lid != target->core->lid )
    862     {
    863         // set signal in target thread descriptor and in target scheduler
    864         thread_set_req_ack( target , (void *)(&rsp_count) );
    865 
    866         // send an IPI to the target thread core.
    867         dev_pic_send_ipi( local_cxy , target->core->lid );
    868 
    869         // poll the response
    870         while( 1 )
     896__FUNCTION__, killer_ptr, target_ptr, cycle );
     897#endif
     898
     899    // block the target thread
     900    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
     901
     902    // get target thread attached mode
     903    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
     904    attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0);
     905
     906    // synchronize with the joining thread
     907    // if the target thread is attached && not forced
     908
     909    if( attached  && (is_forced == false) )
     910    {
     911        // build extended pointers on target thread join fields
     912        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
     913        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
     914
     915        // enter critical section
     916        hal_disable_irq( &save_sr );
     917
     918        // take the join_lock in target thread descriptor
     919        remote_spinlock_lock( target_join_lock_xp );
     920
     921        // get join_done from target thread descriptor
     922        join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
     923   
     924        if( join_done )     // joining thread arrived first
    871925        {
    872             // exit when response received from scheduler
    873             if( rsp_count == 0 )  break;
    874 
    875             // deschedule without blocking
    876             hal_fixed_delay( 1000 );
     926            // get extended pointer on joining thread
     927            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
     928            joining_ptr = GET_PTR( joining_xp );
     929            joining_cxy = GET_CXY( joining_xp );
     930           
     931            // reset the join_done flag in target thread
     932            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
     933
     934            // unblock the joining thread
     935            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
     936
     937            // release the join_lock in target thread descriptor
     938            remote_spinlock_unlock( target_join_lock_xp );
     939
     940            // restore IRQs
     941            hal_restore_irq( save_sr );
    877942        }
    878     }
    879 
    880         // set REQ_DELETE flag
    881         hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
     943        else                // this thread arrived first
     944        {
     945            // set the kill_done flag in target thread
     946            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
     947
     948            // block this thread on BLOCKED_JOIN
     949            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
     950
     951            // set extended pointer on killer thread in target thread
     952            hal_remote_swd( target_join_xp_xp , killer_xp );
     953
     954            // release the join_lock in target thread descriptor
     955            remote_spinlock_unlock( target_join_lock_xp );
     956
     957            // deschedule
     958            sched_yield( "killer thread wait joining thread" );
     959
     960            // restore IRQs
     961            hal_restore_irq( save_sr );
     962        }
     963    }  // end if attached
     964
     965    // - if the target thread is the main thread
     966    //   => synchronize with the parent process main thread
     967    // - if the target thread is not the main thread
     968    //   => simply mark the target thread for delete
     969
     970    // get pointer on target thread process
     971    target_process_xp  = XPTR( target_cxy , &target_ptr->process );
     972    target_process     = (process_t *)hal_remote_lpt( target_process_xp );
     973
     974        // get target process owner cluster
     975        target_pid = hal_remote_lw( XPTR( target_cxy , &target_process->pid ) );
     976    owner_cxy = CXY_FROM_PID( target_pid );
     977
     978    // get target thread local index
     979    target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
     980    target_ltid  = LTID_FROM_TRDID( target_trdid );
     981
     982    if( (owner_cxy == target_cxy) && (target_ltid == 0) )     // main thread
     983    {
     984        // get extended pointer on term_state in target process owner cluster
     985        process_state_xp = XPTR( owner_cxy , &target_process->term_state );
     986
     987        // set termination info in target process owner 
     988        if( is_exit ) hal_remote_atomic_or( process_state_xp , PROCESS_TERM_EXIT );
     989        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
    882990
    883991#if CONFIG_DEBUG_THREAD_KILL
    884992cycle  = (uint32_t)hal_get_cycles;
    885993if( CONFIG_DEBUG_THREAD_KILL < cycle )
    886 printk("\n[DBG] %s : thread %x exit for target thread %x / cycle %d\n",
    887 __FUNCTION__, killer, target, cycle );
    888 #endif
     994printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
     995__FUNCTION__, killer_ptr, target_ptr, cycle );
     996#endif
     997
     998    }
     999    else                                                      // main thread
     1000    {
     1001        // set the REQ_DELETE flag in target thread descriptor
     1002        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1003
     1004#if CONFIG_DEBUG_THREAD_KILL
     1005cycle  = (uint32_t)hal_get_cycles;
     1006if( CONFIG_DEBUG_THREAD_KILL < cycle )
     1007printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
     1008__FUNCTION__, killer_ptr, target_ptr, cycle );
     1009#endif
     1010
     1011    }
    8891012
    8901013}  // end thread_kill()
     
    9581081    target_thread_ltid = LTID_FROM_TRDID( trdid );
    9591082
     1083    // check trdid argument
     1084        if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) ||
     1085        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
     1086
    9601087    // get root of list of process descriptors in target cluster
    9611088    hal_remote_memcpy( XPTR( local_cxy  , &root ),
     
    9871114    remote_spinlock_unlock( lock_xp );
    9881115
    989     // check target thread found
    990     if( found == false )
    991     {
    992         return XPTR_NULL;
    993     }
     1116    // check PID found
     1117    if( found == false ) return XPTR_NULL;
    9941118
    9951119    // get target thread local pointer
     
    9971121    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
    9981122
    999     if( target_thread_ptr == NULL )
    1000     {
    1001         return XPTR_NULL;
    1002     }
     1123    if( target_thread_ptr == NULL )  return XPTR_NULL;
    10031124
    10041125    return XPTR( target_cxy , target_thread_ptr );
  • trunk/kernel/kern/thread.h

    r428 r436  
    7070
    7171#define THREAD_FLAG_DETACHED     0x0001  /*! This thread is detached from parent      */
    72 #define THREAD_FLAG_JOIN_DONE    0x0002  /*! Parent thread made a join                */
    73 #define THREAD_FLAG_SCHED        0x0004  /*! Scheduling required for this thread      */
    74 #define THREAD_FLAG_REQ_ACK      0x0008  /*! Acknowledge required from scheduler      */
    75 #define THREAD_FLAG_REQ_DELETE   0x0010  /*! Destruction required from scheduler      */
     72#define THREAD_FLAG_JOIN_DONE    0x0002  /*! Parent thread made a join request        */
     73#define THREAD_FLAG_KILL_DONE    0x0004  /*! This thread received a kill request      */
     74#define THREAD_FLAG_SCHED        0x0008  /*! Scheduling required for this thread      */
     75#define THREAD_FLAG_REQ_ACK      0x0010  /*! Acknowledge required from scheduler      */
     76#define THREAD_FLAG_REQ_DELETE   0x0020  /*! Destruction required from scheduler      */
    7677
    7778/***************************************************************************************
     
    8889#define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait (cond/mutex/barrier)         */
    8990#define THREAD_BLOCKED_RPC       0x0200  /*! thread wait RPC completion               */
    90 #define THREAD_BLOCKED_DEV_ISR   0x0400  /*! thread DEV wait ISR                      */
     91#define THREAD_BLOCKED_ISR       0x0400  /*! thread DEV wait ISR                      */
    9192#define THREAD_BLOCKED_WAIT      0x0800  /*! thread parent wait child termination     */
    9293
     
    153154
    154155    remote_spinlock_t   join_lock;       /*! lock protecting the join/exit            */
    155     void              * join_value;      /*! exit_value used in case of join          */
    156     xptr_t              join_xp;         /*! extended pointer on joining thread       */
     156    xptr_t              join_xp;         /*! joining/killer thread extended pointer   */
    157157
    158158    uint32_t          * ack_rsp_count;   /*! pointer on acknowledge response counter  */
     
    386386
    387387/***************************************************************************************
    388  * This function is called to handle the "pthread_cancel" system call.
    389  * It allows a killer thread to kill one single target thread.
    390  * The killer thread must be running in the same cluster as the target thread.
    391  * If not, the client thread must use the RPC_THREAD_KILL.
    392  * - When the killer thread is running on the same core as the target thread,
    393  *   this function simply set the BLOCKED_ GLOBAL bit and the REQ_DELETE flag
    394  *   in the target thread descriptor and return.
    395  * - When the killer thread is running on a different core than the target thread,
    396  *   the killer set the BLOCKED_GLOBAL bit and the REQ_ACK flag in target  thread,
    397  *   to ask the scheduler to confirm that the target is blocked and not running.
    398  *   Then, it set the REQ_DELETE flag in the target thread and return.
    399  * In both cases, the actual target thread destruction is done by the scheduler
    400  * at the next scheduling point.
    401  ***************************************************************************************
    402  * @ thread   : local pointer on the target thread.
    403  **************************************************************************************/
    404 void thread_kill( thread_t * thread );
    405 
    406 /***************************************************************************************
    407  * This function registers a blocking cause in the target thread "blocked" bit vector.
    408  * Warning : this function does not deschedule the calling thread, and the descheduling
     388 * This function is called to handle the four pthread_cancel(), pthread_exit(),
     389 * kill() and exit() system calls. It kills a "target" thread identified by the
     390 * <thread_xp> argument. The "killer" thread can be the "target" thread, when the
     391 * <is_exit> argument is true. The "killer" thread can run in any cluster,
     392 * as it uses remote accesses.
     393 * If the "target" thread is running in "attached" mode, and the <is_forced> argument
     394 * is false, this function implements the required sychronisation with the joining
     395 * thread, blocking the "killer" thread until the pthread_join() syscall is executed.
     396 * To delete the target thread, this function sets the THREAD_FLAG_REQ_DELETE bit
     397 * and the THREAD BLOCKED_GLOBAL bit in the target thread, and the actual destruction
     398 * is asynchronously done by the scheduler at the next scheduling point.
     399 ***************************************************************************************
     400 * @ thread_xp   : extended pointer on the target thread.
     401 * @ is_exit     : the killer thread is the target thread itself.
     402 * @ is_forced   : the killing does not depends on the attached mode.
     403 **************************************************************************************/
     404void thread_kill( xptr_t  thread_xp,
     405                  bool_t  is_exit,
     406                  bool_t  is_forced );
     407
     408/***************************************************************************************
     409 * This function registers a blocking cause defined by the <cause> argument
     410 * in a remote thread descriptor identified by the <thread_xp> argument.
     411 * We need an extended pointer, because this function can be called by another thread
     412 * than the target thread, executing the sys_kill() function.
     413 * WARNING : this function does not deschedule the target thread, and the descheduling
    409414 * must be explicitely forced by a sched_yield().
    410415 ***************************************************************************************
    411  * @ thread   : local pointer on target thread descriptor.
    412  * @ cause    : mask defining the cause (one hot).
    413  **************************************************************************************/
    414 void thread_block( thread_t * thread,
    415                    uint32_t   cause );
    416 
    417 /***************************************************************************************
    418  * This function resets the bit identified by the cause argument in the "blocked"
    419  * bit vector of a remote thread descriptor, using an atomic access.
     416 * @ thread_xp   : extended pointer on remote thread descriptor.
     417 * @ cause       : mask defining the cause (one hot).
     418 **************************************************************************************/
     419void thread_block( xptr_t   thread_xp,
     420                   uint32_t cause );
     421
     422/***************************************************************************************
     423 * This function resets the bit identified by the <cause> argument in a remote
     424 * thread descriptor identified by the <thread_xp> argument.
    420425 * We need an extended pointer, because the client thread of an I/O operation on a
    421426 * given device is not in the same cluster as the associated device descriptor.
    422  * Warning : this function does not reschedule the remote thread.
     427 * WARNING : this function does not reschedule the remote thread.
    423428 * The scheduling can be forced by sending an IPI to the core running the remote thread.
    424429 ***************************************************************************************
    425  * @ thread   : extended pointer on the remote thread.
    426  * @ cause    : mask defining the cause (one hot).
     430 * @ thread_xp   : extended pointer the remote thread.
     431 * @ cause       : mask defining the cause (one hot).
    427432 * @ return non zero if the bit-vector was actually modified / return 0 otherwise
    428433 **************************************************************************************/
    429 uint32_t thread_unblock( xptr_t   thread,
     434uint32_t thread_unblock( xptr_t   thread_xp,
    430435                         uint32_t cause );
    431436
     
    449454
    450455/***************************************************************************************
    451  * This function handles all pending signals for the thread identified by the <thread>
    452  * argument. It is called each time the core exits the kernel, after handling an
    453  * interrupt, exception or syscall.
    454  * TODO This function is not implemented.
    455  ***************************************************************************************
    456  * @ thread   : local pointer on target thread.
    457  **************************************************************************************/
    458 void thread_signals_handle( thread_t * thread );
    459 
    460 /***************************************************************************************
    461456 * This function returns the extended pointer on a thread descriptor identified
    462457 * by its thread identifier, and process identifier.
  • trunk/kernel/libk/remote_barrier.c

    r408 r436  
    273273
    274274        // block & deschedule the calling thread
    275         thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC );
     275        thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_USERSYNC );
    276276        sched_yield("blocked on barrier");
    277277
  • trunk/kernel/libk/remote_condvar.c

    r408 r436  
    188188
    189189    // block the calling thread
    190     thread_block( CURRENT_THREAD , THREAD_BLOCKED_USERSYNC );
     190    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_USERSYNC );
    191191    sched_yield("blocked on condvar");
    192192
  • trunk/kernel/libk/remote_mutex.c

    r408 r436  
    207207
    208208        // block & deschedule the calling thread   
    209         thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC );
     209        thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_USERSYNC );
    210210        sched_yield("blocked on mutex");
    211211
  • trunk/kernel/libk/remote_rwlock.c

    r433 r436  
    22 * remote_rwlock.c - kernel remote rwlock implementation.
    33 *
    4  * Authors    Alain   Greiner (2016,2017)
     4 * Authors    Alain   Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    4141    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count )   , 0 );
    4242
    43 #if CONFIG_DEBUG_LOCKS
    44     hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
    45     xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
     43#if CONFIG_DEBUG_REMOTE_RWLOCKS
     44hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
     45xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
    4646#endif
    4747
     
    8686    thread_ptr->remote_locks++;
    8787
    88 #if CONFIG_DEBUG_LOCKS
    89     xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    90                      XPTR( lock_cxy ,  &lock_ptr->list ) );
     88#if CONFIG_DEBUG_REMOTE_RWLOCKS
     89xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     90                 XPTR( lock_cxy ,  &lock_ptr->list ) );
    9191#endif
    9292
     
    126126        thread_ptr->remote_locks--;
    127127
    128 #if CONFIG_DEBUG_LOCKS
    129     xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     128#if CONFIG_DEBUG_REMOTE_RWLOCKS
     129xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    130130#endif
    131131
     
    176176    }
    177177
    178 #if CONFIG_DEBUG_LOCKS
    179     hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    180                     XPTR( local_cxy , thread_ptr ) );
    181     xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    182                      XPTR( lock_cxy  , &lock_ptr->list ) );
     178#if CONFIG_DEBUG_REMOTE_RWLOCKS
     179hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     180                XPTR( local_cxy , thread_ptr ) );
     181xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     182                 XPTR( lock_cxy  , &lock_ptr->list ) );
    183183#endif   
    184184
     
    210210 
    211211#if CONFIG_LOCKS_OWNER
    212     hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    213     xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     212hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     213xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    214214#endif
    215215
  • trunk/kernel/libk/remote_rwlock.h

    r409 r436  
    22 * remote_rwlock.h - kernel remote_rwlock definition.
    33 *
    4  * Authors   Alain Greiner   (2016)
     4 * Authors   Alain Greiner   (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    4848    uint32_t       count;           /*! current number of reader threads              */
    4949
    50 #if CONFIG_LOCKS_DEBUG
     50#if CONFIG_DEBUG_REMOTE_RWLOCKS
    5151    xptr_t         owner;           /*! extended pointer on writer thread             */
    5252    xlist_entry_t  list;            /*! member of list of remote locks taken by owner */
  • trunk/kernel/libk/remote_sem.c

    r408 r436  
    218218
    219219        // block and deschedule
    220         thread_block( this , THREAD_BLOCKED_SEM ); 
     220        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_SEM ); 
    221221        sched_yield("blocked on semaphore");
    222222        }
  • trunk/kernel/libk/remote_spinlock.c

    r433 r436  
    22 * remote_spinlock.c - kernel remote spinlock implementation.
    33 *
    4  * Authors  Mohamed Karaoui (2015)
    5  *          Alain   Greiner (2016)
     4 * Authors   Alain   Greiner (2016,2017,2018)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    3938        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
    4039
    41 #if CONFIG_DEBUG_LOCKS
    42         hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    43         xlist_entry_init( XPTR( cxy , &ptr->list ) );
     40#if CONFIG_DEBUG_REMOTE_SPINLOCKS
     41hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
     42xlist_entry_init( XPTR( cxy , &ptr->list ) );
    4443#endif
    4544
     
    7675                thread_ptr->remote_locks++;
    7776
    78 #if CONFIG_DEBUG_LOCKS
    79                 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    80                                 XPTR( local_cxy , thread_ptr) );
    81                 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    82                                  XPTR( lock_cxy , &lock_ptr->list ) );
     77#if CONFIG_DEBUG_REMOTE_SPINLOCKS
     78hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
     79                XPTR( local_cxy , thread_ptr) );
     80xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     81                         XPTR( lock_cxy , &lock_ptr->list ) );
    8382#endif
    8483
     
    121120        thread_ptr->remote_locks++;
    122121
    123 #if CONFIG_DEBUG_LOCKS
    124         hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    125                         XPTR( local_cxy , thread_ptr) );
    126         xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    127                          XPTR( lock_cxy  , &lock_ptr->list ) );
     122#if CONFIG_DEBUG_REMOTE_SPINLOCKS
     123hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     124                XPTR( local_cxy , thread_ptr) );
     125xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     126                 XPTR( lock_cxy  , &lock_ptr->list ) );
    128127#endif
    129128
     
    144143        thread_t          * thread_ptr = CURRENT_THREAD;
    145144
    146 #if CONFIG_DEBUG_LOCKS
    147         hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    148         xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     145#if CONFIG_DEBUG_REMOTE_SPINLOCKS
     146hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     147xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    149148#endif
    150149
     
    197196        thread_ptr->remote_locks++;
    198197
    199 #if CONFIG_DEBUG_LOCKS
    200         hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    201                         XPTR( local_cxy , thread_ptr) );
    202         xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    203                          XPTR( lock_cxy  , &lock_ptr->list ) );
     198#if CONFIG_DEBUG_REMOTE_SPINLOCKS
     199hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ),
     200                XPTR( local_cxy , thread_ptr) );
     201xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ),
     202                 XPTR( lock_cxy  , &lock_ptr->list ) );
     203
     204// if( (uint32_t)lock_ptr == 0x66788 )
     205// printk("\n@@@ %s : thread %x takes remote_spinlock %x\n",
     206//__FUNCTION__, thread_ptr, lock_ptr );
     207
    204208#endif
    205209
     
    218222        thread_t          * thread_ptr = CURRENT_THREAD;
    219223
    220 #if CONFIG_DEBUG_LOCKS
    221         hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    222         xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     224#if CONFIG_DEBUG_REMOTE_SPINLOCKS
     225hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     226xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     227
     228// if( (uint32_t)lock_ptr == 0x66788 )
     229// printk("\n@@@ %s : thread %x releases remote_spinlock %x\n",
     230// __FUNCTION__, thread_ptr, lock_ptr );
     231
    223232#endif
    224233
  • trunk/kernel/libk/remote_spinlock.h

    r433 r436  
    22 * remote_spinlock.h - kernel remote spinlock definition.
    33 *
    4  * Authors  Mohamed Karaoui (2016)
    5  *          Alain Greiner   (2016)
     4 * Author  Alain Greiner (2016,2017,2018)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    4241    volatile uint32_t     taken;       /*! free if 0 / taken if non zero             */
    4342
    44 #if CONFIG_LOCKS_DEBUG
     43#if CONFIG_DEBUG_REMOTE_SPINLOCKS
    4544    xptr_t                owner;       /*! extended pointer on the owner thread      */
    4645    xlist_entry_t         list;        /*! list of all remote_lock taken by owner    */
  • trunk/kernel/libk/rwlock.c

    r433 r436  
    22 * rwlock.c - kernel read/write lock synchronization.
    33 *
    4  * Author  Alain Greiner     (2016}
     4 * Author  Alain Greiner     (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3838    lock->count   = 0;
    3939
    40 #if CONFIG_DEBUG_LOCKS
    41         lock->owner   = NULL;
    42     list_entry_init( &lock->list );
     40#if CONFIG_DEBUG_RWLOCKS
     41lock->owner   = NULL;
     42list_entry_init( &lock->list );
    4343#endif
    4444
     
    7070    this->local_locks++;
    7171
    72 #if CONFIG_DEBUG_LOCKS
    73     list_add_first( &this->locks_root , &lock->list );
     72#if CONFIG_DEBUG_RWLOCKS
     73list_add_first( &this->locks_root , &lock->list );
    7474#endif
    7575
     
    9898    this->local_locks--;
    9999
    100 #if CONFIG_DEBUG_LOCKS
    101     list_unlink( &lock->list );
     100#if CONFIG_DEBUG_RWLOCKS
     101list_unlink( &lock->list );
    102102#endif
    103103
     
    138138    this->local_locks++;
    139139
    140 #if CONFIG_DEBUG_LOCKS
    141     lock->owner = this;
    142     list_add_first( &this->locks_root , &lock->list );
     140#if CONFIG_DEBUG_RWLOCKS
     141lock->owner = this;
     142list_add_first( &this->locks_root , &lock->list );
    143143#endif
    144144
     
    157157        hal_disable_irq( &mode );
    158158 
    159 #if CONFIG_DEBUG_LOCKS
    160     lock->owner = NULL;
    161     list_unlink( &lock->list );
     159#if CONFIG_DEBUG_RWLOCKS
     160lock->owner = NULL;
     161list_unlink( &lock->list );
    162162#endif
    163163
  • trunk/kernel/libk/rwlock.h

    r423 r436  
    22 * rwlock.h - kernel read/write lock definition.
    33 *
    4  * Author   Alain Greiner    (2016)
     4 * Author   Alain Greiner    (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    5959    uint32_t            count;            /*! number of simultaneous readers threads      */
    6060
    61 #if CONFIG_LOCKS_DEBUG
     61#if CONFIG_DEBUG_RWLOCKS
    6262        struct thread_s   * owner;            /*! pointer on curent writer thread             */
    6363    list_entry_t        list;             /*! member of list of locks taken by owner      */
  • trunk/kernel/libk/spinlock.c

    r433 r436  
    3838    lock->taken = 0;
    3939
    40 #if CONFIG_DEBUG_LOCKS
    41     lock->owner = NULL;
    42     list_entry_init( &lock->list );
     40#if CONFIG_DEBUG_SPINLOCKS
     41lock->owner = NULL;
     42list_entry_init( &lock->list );
    4343#endif
    4444
     
    7171    this->local_locks++;
    7272
    73 #if CONFIG_DEBUG_LOCKS
    74     lock->owner = this;
    75     list_add_first( &this->locks_root , &lock->list );
     73#if CONFIG_DEBUG_SPINLOCKS
     74lock->owner = this;
     75list_add_first( &this->locks_root , &lock->list );
    7676#endif
    7777
     
    8686    thread_t * this = CURRENT_THREAD;;
    8787
    88 #if CONFIG_DEBUG_LOCKS
    89     lock->owner = NULL;
    90     list_unlink( &lock->list );
     88#if CONFIG_DEBUG_SPINLOCKS
     89lock->owner = NULL;
     90list_unlink( &lock->list );
    9191#endif
    9292
     
    132132    this->local_locks++;
    133133
    134 #if CONFIG_DEBUG_LOCKS
    135     lock->owner = this;
    136     list_add_first( &this->locks_root , &lock->list );
     134#if CONFIG_DEBUG_SPINLOCKS
     135lock->owner = this;
     136list_add_first( &this->locks_root , &lock->list );
    137137#endif
    138138
     
    162162        this->local_locks++;
    163163
    164 #if CONFIG_DEBUG_LOCKS
    165         lock->owner = this;
    166         list_add_first( &this->locks_root , &lock->list );
     164#if CONFIG_DEBUG_SPINLOCKS
     165lock->owner = this;
     166list_add_first( &this->locks_root , &lock->list );
    167167#endif
    168168
     
    177177    thread_t * this = CURRENT_THREAD;
    178178
    179 #if CONFIG_DEBUG_LOCKS
    180     lock->owner = NULL;
    181     list_unlink( &lock->list );
     179#if CONFIG_DEBUG_SPINLOCKS
     180lock->owner = NULL;
     181list_unlink( &lock->list );
    182182#endif
    183183
  • trunk/kernel/libk/spinlock.h

    r409 r436  
    22 * spinlock.h: kernel spinlock definition     
    33 *
    4  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016)
     4 * Authors  Alain Greiner (2016,2017,2018)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    6362        uint32_t            taken;             /*! state : free if zero / taken if non zero  */
    6463
    65 #if CONFIG_LOCKS_DEBUG
     64#if CONFIG_DEBUG_SPINLOCKS
    6665        struct thread_s   * owner;             /*! pointer on curent owner thread            */
    6766    list_entry_t        list;              /*! member of list of locks taken by owner    */
  • trunk/kernel/mm/page.c

    r433 r436  
    180180
    181181                // deschedule the calling thread
    182                 thread_block( thread , THREAD_BLOCKED_PAGE );
     182                thread_block( XPTR( local_cxy , thread ) , THREAD_BLOCKED_PAGE );
    183183                sched_yield("cannot lock a page");
    184184        }
  • trunk/kernel/syscalls/sys_display.c

    r435 r436  
    3737                 reg_t  arg1 )
    3838{
    39     // get thread, process and core
    40     thread_t  * this    = CURRENT_THREAD;
    41     process_t * process = this->process;
    42     core_t    * core    = this->core;
    4339
    4440#if CONFIG_DEBUG_SYS_DISPLAY
    4541uint64_t    tm_start;
    4642uint64_t    tm_end;
     43thread_t  * this;
     44this     = CURRENT_THREAD;
    4745tm_start = hal_get_cycles();
    4846if( CONFIG_DEBUG_SYS_DISPLAY < tm_start )
    49 printk("\n[DBG] %s : thread %d enter / process %x / cycle = %d\n",
    50 __FUNCTION__, this, process->pid, (uint32_t)tm_start );
     47printk("\n[DBG] %s : thread %d enter / process %x / type  %d / cycle = %d\n",
     48__FUNCTION__, this, this->process->pid, type, (uint32_t)tm_start );
    5149#endif
    5250
     
    8078
    8179        // print message on TXT0 kernel terminal
    82         printk("\n[USER] thread %x / process %x / core[%x,%d] / cycle %d\n       %s",
    83         this->trdid , process->pid , local_cxy, core->lid ,
    84         (uint32_t)hal_get_cycles() , kbuf );
     80        printk("\n[USER] %s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
    8581    }
    8682    else if( type == DISPLAY_VMM )
     
    191187if( CONFIG_DEBUG_SYS_DISPLAY < tm_end )
    192188printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n",
    193 __FUNCTION__, this, process->pid, (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end );
     189__FUNCTION__, this, this->process->pid, (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end );
    194190#endif
    195191
  • trunk/kernel/syscalls/sys_exit.c

    r435 r436  
    4141    process_t * process = this->process;
    4242    pid_t       pid     = process->pid;
     43    trdid_t     trdid   = this->trdid;
    4344
    4445#if CONFIG_DEBUG_SYS_EXIT
     
    5556
    5657    // exit must be called by the main thread
    57     if( (owner_cxy != local_cxy) || (LTID_FROM_TRDID( this->trdid ) != 0) )
     58    if( (owner_cxy != local_cxy) || (LTID_FROM_TRDID( trdid ) != 0) )
    5859    {
    5960
    6061#if CONFIG_DEBUG_SYSCALLS_ERROR
    61 printk("\n[ERROR] %s must be called by thread 0 in process owner cluster\n"
    62        "         trdid = %x / pid = %x / local_cxy = %x\n",
    63 __FUNCTION__, this->trdid, pid, local_cxy );
     62printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n",
     63__FUNCTION__, trdid, owner_cxy );
    6464#endif
    6565         this->errno = EINVAL;
     
    7373    process->term_state = status;
    7474
    75     // remove TXT ownership from owner process descriptor
    76     process_txt_reset_ownership( XPTR( local_cxy , process ) );
     75#if( CONFIG_DEBUG_SYS_EXIT & 1)
     76printk("\n[DBG] %s : set exit status in process term_state\n", __FUNCTION__);
     77#endif
    7778
    78     // block all process threads in all clusters
    79     process_sigaction( pid , BLOCK_ALL_THREADS );
     79    // remove process from TXT list
     80    process_txt_detach( XPTR( local_cxy , process ) );
    8081
    81     // mark all process threads in all clusters for delete
     82#if( CONFIG_DEBUG_SYS_EXIT & 1)
     83printk("\n[DBG] %s : removed from TXT list\n", __FUNCTION__);
     84#endif
     85
     86    // mark for delete all process threads in all clusters (but the main)
    8287    process_sigaction( pid , DELETE_ALL_THREADS );
     88
     89#if( CONFIG_DEBUG_SYS_EXIT & 1)
     90printk("\n[DBG] %s : deleted all other threads than main\n", __FUNCTION__);
     91#endif
    8392
    8493    // restore IRQs
    8594    hal_restore_irq( save_sr );
    8695
    87     // atomically update owner process descriptor term_state
     96    // block the main thread itself
     97    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL );
     98
     99#if( CONFIG_DEBUG_SYS_EXIT & 1)
     100printk("\n[DBG] %s : blocked the main thread\n", __FUNCTION__);
     101#endif
     102
     103    // atomically update owner process descriptor term_state to ask
     104    // the parent process sys_wait() function to delete this main thread
    88105    hal_remote_atomic_or( XPTR( local_cxy , &process->term_state ) ,
    89106                          PROCESS_TERM_EXIT );
     107
     108#if( CONFIG_DEBUG_SYS_EXIT & 1)
     109printk("\n[DBG] %s : set EXIT flag in process term_state\n", __FUNCTION__);
     110#endif
     111
    90112    hal_fence();
    91113
     
    97119#endif
    98120
     121    // main thread deschedule
     122    sched_yield( "process exit" );
     123
     124    // this code should never be executed
     125    assert( false , __FUNCTION__ , "this code should not be executed...\n" );
    99126        return 0;
    100127
  • trunk/kernel/syscalls/sys_fg.c

    r421 r436  
    4545    thread_t  * this    = CURRENT_THREAD;
    4646
    47 #if CONFIG_SYSCALL_DEBUG
     47#if CONFIG_DEBUG_SYS_FG
    4848uint64_t    tm_start;
    4949uint64_t    tm_end;
    5050tm_start = hal_get_cycles();
    51 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle %d\n",
    52 __FUNCTION__ , local_cxy , this->core->lid , pid, (uint32_t)tm_start );
     51if( CONFIG_DEBUG_SYS_FG < tm_start )
     52printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
     53__FUNCTION__ , CURRENT_THREAD , pid, (uint32_t)tm_start );
    5354#endif
    5455
     
    5859    if( process_xp == XPTR_NULL )
    5960    {
    60         syscall_dmsg("\n[ERROR] in %s : process %x not found\n",
    61         __FUNCTION__ , pid );
     61
     62#if CONFIG_DEBUG_SYSCALLS_ERROR
     63printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__ , pid );
     64#endif
    6265        this->errno = EINVAL;
    6366        return -1;
     
    7275
    7376    // get chdev cluster and local pointer
    74     chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
     77    chdev_ptr = GET_PTR( chdev_xp );
    7578    chdev_cxy = GET_CXY( chdev_xp );
    7679
     
    8083    hal_fence();
    8184
    82 #if CONFIG_SYSCALL_DEBUG
     85#if CONFIG_DEBUG_SYS_FG
    8386tm_end = hal_get_cycles();
    84 printk("\n[DBG] %s : core[%x,%d] exit / process %x / cost = %d\n",
    85 __FUNCTION__ , local_cxy , this->core->lid , pid, (uint32_t)(tm_end - tm_start) );
     87if( CONFIG_DEBUG_SYS_FG < tm_end )
     88printk("\n[DBG] %s : thread %x exit / process %x get TXT_%d ownership / cycle %d\n",
     89__FUNCTION__ , CURRENT_THREAD , pid,
     90hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) ) , (uint32_t)tm_end );
    8691#endif
    87  
     92
    8893        return 0;
    8994
    90 }  // end sys_kill()
     95}  // end sys_fg()
    9196
  • trunk/kernel/syscalls/sys_get_config.c

    r435 r436  
    8787tm_end = hal_get_cycles();
    8888if( CONFIG_DEBUG_SYS_GET_CONFIG < tm_end )
    89 printk("\n[DBG] %s : thread %x exit / process %x / cost %d / tycle %d\n",
     89printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n",
    9090__FUNCTION__, this, process->pid, (uint32_t)(tm_end-tm_start), (uint32_t)tm_end );
    9191#endif
  • trunk/kernel/syscalls/sys_kill.c

    r435 r436  
    3737              uint32_t sig_id )
    3838{
    39     uint32_t    save_sr;       // required to enable IRQs
    40     xptr_t      owner_xp;      // extended pointer on target reference process
    41     cxy_t       owner_cxy;     // target process cluster
    42     process_t * owner_ptr;     // local pointer on target process
     39    xptr_t      owner_xp;      // extended pointer on target process in owner cluster
     40    cxy_t       owner_cxy;     // target process owner cluster
     41    process_t * owner_ptr;     // local pointer on target process in owner cluster
    4342    xptr_t      parent_xp;     // extended pointer on parent process
    4443    cxy_t       parent_cxy;    // parent process cluster
     
    4948    thread_t  * this    = CURRENT_THREAD;
    5049    process_t * process = this->process;
     50    trdid_t     trdid   = this->trdid;
    5151
    5252#if CONFIG_DEBUG_SYS_KILL
     
    5959#endif
    6060
    61     // process cannot kill itself
    62     if( pid == process->pid )
     61    // get pointers on process descriptor in owner cluster
     62    owner_xp  = cluster_get_owner_process_from_pid( pid );
     63    owner_cxy = GET_CXY( owner_xp );
     64    owner_ptr = GET_PTR( owner_xp );
     65   
     66    // check process found
     67    if( owner_xp == XPTR_NULL)
    6368    {
    6469
    6570#if CONFIG_DEBUG_SYSCALLS_ERROR
    66 printk("\n[ERROR] in %s : process %d cannot kill itself\n", __FUNCTION__ , pid );
     71printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__, pid );
    6772#endif
    6873        this->errno = EINVAL;
     
    7075    }
    7176
    72     // get cluster and pointers on owner target process descriptor
    73     owner_xp  = cluster_get_owner_process_from_pid( pid );
    74     owner_cxy = GET_CXY( owner_xp );
    75     owner_ptr = GET_PTR( owner_xp );
    76 
    77     // check process existence
    78     if( owner_xp == XPTR_NULL )
     77    // process can kill itself only when calling thread is the main thread
     78    if( (pid == process->pid) && ((owner_cxy != local_cxy) || (LTID_FROM_TRDID( trdid ))) )
    7979    {
    8080
    8181#if CONFIG_DEBUG_SYSCALLS_ERROR
    82 printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__ , pid );
     82printk("\n[ERROR] in %s : only main thread can kill itself\n", __FUNCTION__ );
    8383#endif
    8484        this->errno = EINVAL;
    8585        return -1;
    8686    }
    87    
     87
    8888    // get parent process PID
    8989    parent_xp  = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
     
    9292    ppid       = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
    9393
    94     // processes INIT
     94    // check process INIT
    9595    if( pid == 1 )
    9696    {
     
    103103    }
    104104
    105     // enable IRQs
    106     hal_enable_irq( &save_sr );
    107 
    108105    // analyse signal type / supported values are : 0, SIGSTOP, SIGCONT, SIGKILL
    109106    switch( sig_id )
    110107    {
    111         case 0 :
     108        case 0 :          // does nothing
    112109        {
    113             // does nothing
    114110            retval = 0;
    115111            break;
    116112        }
    117         case SIGSTOP:     
     113        case SIGSTOP:     // block all target process threads
    118114        {
    119             // remove TXT ownership from target process
    120             process_txt_reset_ownership( owner_xp );
     115            // transfer TXT ownership
     116            process_txt_transfer_ownership( owner_xp );
    121117
    122             // block all threads in all clusters
     118            // block all threads in all clusters, but the main thread
    123119            process_sigaction( pid , BLOCK_ALL_THREADS );
     120
     121            // get pointer on target process main thread
     122            xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
     123
     124            // block main thread
     125            thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
    124126
    125127            // atomically update owner process termination state
    126128            hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
    127129                                  PROCESS_TERM_STOP );
    128  
    129130            retval = 0;
    130131            break;
    131132        }
    132         case SIGCONT:
     133        case SIGCONT:     // unblock all target process threads
    133134        {
    134135            // unblock all threads in all clusters
    135136            process_sigaction( pid , UNBLOCK_ALL_THREADS );
    136137
    137             // atomically update reference process termination state
     138            // atomically update owner process termination state
    138139            hal_remote_atomic_and( XPTR( owner_cxy , &owner_ptr->term_state ) ,
    139140                                   ~PROCESS_TERM_STOP );
     
    144145        case SIGKILL:
    145146        {
    146             // remove TXT ownership from owner process descriptor
    147             process_txt_reset_ownership( owner_xp );
     147            // remove process from TXT list
     148            process_txt_detach( owner_xp );
    148149
    149             // block all process threads in all clusters
    150             process_sigaction( pid , BLOCK_ALL_THREADS );
    151 
    152             // mark all process threads in all clusters for delete
     150            // mark for delete all process threads in all clusters, but the main
    153151            process_sigaction( pid , DELETE_ALL_THREADS );
    154152
    155             // atomically update owner process descriptor flags
     153            // get pointer on target process main thread
     154            xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
     155
     156            // block main thread
     157            thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
     158
     159            // atomically update owner process descriptor term_state to ask
     160            // the parent process sys_wait() function to delete this main thread
    156161            hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
    157162                                  PROCESS_TERM_KILL );
    158 
    159163            retval = 0;
    160164            break;
     
    172176    }
    173177   
    174     // restore IRQs
    175     hal_restore_irq( save_sr );
    176 
    177178    hal_fence();
    178179
     
    180181tm_end = hal_get_cycles();
    181182if( CONFIG_DEBUG_SYS_KILL < tm_end )
    182 printk("\n[DBG] %s : thread %x enter / process %x / sig %d / cost = %d / cycle %d\n",
     183printk("\n[DBG] %s : thread %x exit / process %x / sig %d / cost = %d / cycle %d\n",
    183184__FUNCTION__ , this, pid, sig_id, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
    184185#endif
  • trunk/kernel/syscalls/sys_read.c

    r435 r436  
    3737
    3838extern uint32_t enter_sys_read;
    39 extern uint32_t enter_devfs_move;
     39extern uint32_t enter_devfs_read;
    4040extern uint32_t enter_txt_read;
    41 extern uint32_t enter_chdev_cmd;
    42 extern uint32_t enter_chdev_server;
    43 extern uint32_t enter_tty_cmd;
    44 extern uint32_t enter_tty_isr;
    45 extern uint32_t exit_tty_isr;
    46 extern uint32_t exit_tty_cmd;
    47 extern uint32_t exit_chdev_server;
    48 extern uint32_t exit_chdev_cmd;
     41extern uint32_t enter_chdev_cmd_read;
     42extern uint32_t enter_chdev_server_read;
     43extern uint32_t enter_tty_cmd_read;
     44extern uint32_t enter_tty_isr_read;
     45extern uint32_t exit_tty_isr_read;
     46extern uint32_t exit_tty_cmd_read;
     47extern uint32_t exit_chdev_server_read;
     48extern uint32_t exit_chdev_cmd_read;
    4949extern uint32_t exit_txt_read;
    50 extern uint32_t exit_devfs_move;
     50extern uint32_t exit_devfs_read;
    5151extern uint32_t exit_sys_read;
    5252
     
    6363    reg_t        save_sr;     // required to enable IRQs during syscall
    6464
    65 #if (CONFIG_DEBUG_SYS_READ & 1)
    66 enter_sys_read = (uint32_t)tm_start;
    67 #endif
    68 
    6965        thread_t  *  this    = CURRENT_THREAD;
    7066        process_t *  process = this->process;
     
    7975#endif
    8076
     77#if (CONFIG_DEBUG_SYS_READ & 1)
     78enter_sys_read = (uint32_t)tm_start;
     79#endif
     80
    8181    // check file_id argument
    8282        if( file_id >= CONFIG_PROCESS_FILE_MAX_NR )
     
    122122
    123123    // get file descriptor cluster and local pointer
    124     vfs_file_t * file_ptr = (vfs_file_t *)GET_PTR( file_xp );
     124    vfs_file_t * file_ptr = GET_PTR( file_xp );
    125125    cxy_t        file_cxy = GET_CXY( file_xp );
    126126
     
    165165    else if( type == INODE_TYPE_DEV )  // check ownership & read from from device
    166166    {
     167        // get cluster and pointers on TXT_RX chdev
     168        xptr_t    chdev_xp  = chdev_from_file( file_xp );
     169        cxy_t     chdev_cxy = GET_CXY( chdev_xp );
     170        chdev_t * chdev_ptr = GET_PTR( chdev_xp );
     171
     172        volatile xptr_t    owner_xp;   
     173
     174        while( 1 )
     175        {
     176            // extended pointer on owner process
     177            owner_xp  = hal_remote_lwd( XPTR( chdev_cxy , &chdev_ptr->ext.txt.owner_xp ) );
     178
     179            // check TXT_RX ownership
     180            if ( XPTR( local_cxy , process ) != owner_xp )
     181            {
     182                // deschedule without blocking
     183                sched_yield( "wait TXT ownership" );
     184            }
     185            else
     186            {
     187                break;
     188            }
     189        }
     190
    167191        // move count bytes from device
    168192        nbytes = devfs_user_move( true,             // from device to buffer
     
    171195                                  count );
    172196
    173         // check ownership
    174         xptr_t    chdev_xp  = chdev_from_file( file_xp );
    175         cxy_t     chdev_cxy = GET_CXY( chdev_xp );
    176         chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
    177         xptr_t    owner_xp  = hal_remote_lwd( XPTR( chdev_cxy , &chdev_ptr->ext.txt.owner_xp ) );
    178 
    179         if( XPTR( local_cxy , process ) != owner_xp )
    180         {
    181 
    182 #if CONFIG_DEBUG_SYSCALLS_ERROR
    183 printk("\n[ERROR] in %s : process %x not in foreground for TXT%d\n",
    184 __FUNCTION__, process->pid, hal_remote_lw( XPTR(chdev_cxy,&chdev_ptr->channel) ) );
    185 #endif
    186                     this->errno = EBADFD;
    187                     return -1;
    188         }
    189197    }
    190198    else
     
    215223printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n"
    216224"nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
    217 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
     225__FUNCTION__ , this, process->pid,
    218226(uint32_t)tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id ,
    219227(uint32_t)(tm_end - tm_start) );
  • trunk/kernel/syscalls/sys_thread_cancel.c

    r410 r436  
    11/*
    2  * sys_thread_cancel.c - terminate execution of a target thread.
     2 * sys_thread_cancel.c - terminate execution of an user thread.
    33 *
    4  * Authors   Alain Greiner (2016,2017)
     4 * Authors   Alain Greiner (2016,2017, 2018)
    55 *
    6  * Copyright (c) 2011,2012 UPMC Sorbonne Universites
     6 * Copyright (c) UPMC Sorbonne Universites
    77 *
    88 * This file is part of ALMOS-MKH.
     
    3333{
    3434    xptr_t       target_xp;     // target thread extended pointer
    35         thread_t   * target_ptr;    // target thread local pointer
    36     cxy_t        target_cxy;    // target thread cluster
    37     ltid_t       target_ltid;   // target thread local index
    3835
    39 #if CONFIG_SYSCALL_DEBUG
    40 uint32_t     tm_start;
    41 uint32_t     tm_end;
    42 tm_start  = hal_get_cycles();
    43 #endif
    44 
     36    // get killer thread pointers
    4537        thread_t   * this    = CURRENT_THREAD;
    4638    process_t  * process = this->process;
    47 
    48     // check kernel stack overflow
    49     assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
    50 
    51     // get target thread ltid and cxy
    52     target_ltid = LTID_FROM_TRDID( trdid );
    53     target_cxy  = CXY_FROM_TRDID( trdid );
    54 
    55     // check trdid argument
    56         if( (target_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) || cluster_is_undefined( target_cxy ) ) 
    57         {
    58         printk("\n[ERROR] in %s : illegal trdid argument\n", __FUNCTION__ );
    59                 this->errno = EINVAL;
    60                 return -1;
    61         }
    6239
    6340    // get extended pointer on target thread
    6441        target_xp  = thread_get_xptr( process->pid , trdid );
    6542
     43    // check target_xp
    6644    if( target_xp == XPTR_NULL )
    6745    {
    68         printk("\n[ERROR] in %s : target thread not found\n", __FUNCTION__ );
     46
     47#if CONFIG_DEBUG_SYSCALLS_ERROR
     48printk("\n[ERROR] in %s : target thread %x not found\n", __FUNCTION__, trdid );
     49#endif
    6950        this->errno = EINVAL;
    7051        return -1;
    7152    }
    7253
    73     // get target thread local pointer
    74     target_ptr = (thread_t *)GET_PTR( target_xp );
     54#if CONFIG_DEBUG_SYS_THREAD_CANCEL
     55uint64_t     tm_start;
     56uint64_t     tm_end;
     57tm_start = hal_get_cycles();
     58if( CONFIG_DEBUG_SYS_THREAD_CANCEL < tm_start )
     59printk("\n[DBG] %s : thread %x enter to kill thread %x / cycle %d\n",
      60__FUNCTION__, this, GET_PTR( target_xp ), (uint32_t)tm_start );
     61#endif
    7562
     7663    // call the relevant kernel function
    77     if( target_cxy == local_cxy )    // target thread is local
    78     {
    79         thread_kill( target_ptr );
    80     }
    81     else
    82     {
    83         rpc_thread_kill_client( target_cxy , target_ptr );
    84     }
     64    thread_kill( target_xp,
     65                 0,           // is_exit
     66                 0 );         // is forced
    8567
    86 #if CONFIG_SYSCALL_DEBUG
     68#if CONFIG_DEBUG_SYS_THREAD_CANCEL
    8769tm_end = hal_get_cycles();
    88 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
    89 "thread %x killed / cost = %d\n",
    90 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid , tm_start ,
    91 trdid , (uint32_t)(tm_end - tm_start) );
     70if( CONFIG_DEBUG_SYS_THREAD_CANCEL < tm_end )
     71printk("\n[DBG] %s : thread %x exit after kill thread %x / cycle %d\n",
      72__FUNCTION__, this, GET_PTR( target_xp ), (uint32_t)tm_end );
    9273#endif
    9374
  • trunk/kernel/syscalls/sys_thread_exit.c

    r433 r436  
    3232int sys_thread_exit( void * exit_value )
    3333{
    34     paddr_t      paddr;
    35     error_t          error;
    36 
    37 #if CONFIG_SYSCALL_DEBUG
    38 uint32_t     tm_start;
    39 uint32_t     tm_end;
    40 tm_start = hal_get_cycles();
    41 #endif
    42 
    4334        thread_t  * this    = CURRENT_THREAD;
    4435    process_t * process = this->process;
    4536
    46     // check all locks released
    47         if( !thread_can_yield() )
    48         {
    49         printk("\n[ERROR] in %s : locks not released / thread %x in process %x\n",
    50         __FUNCTION__, this->trdid, process->pid );
     37    // check exit_value argument
     38    if( exit_value != NULL )
     39    {
     40
     41#if CONFIG_DEBUG_SYSCALLS_ERROR
     42printk("\n[ERROR] in %s : exit_value argument must be NULL for thread %x in process %x\n",
     43__FUNCTION__ , exit_value, this->trdid , process->pid );
     44#endif
    5145        this->errno = EINVAL;
    5246        return -1;
    5347    }
    5448
    55     // register the exit_value pointer in this thread descriptor
    56     this->join_value = exit_value;
    57 
    58     if( (this->flags & THREAD_FLAG_DETACHED) == 0 )    // this thread is joinable
    59     {
    60         // check exit_value in user space
    61         error = vmm_v2p_translate( false , exit_value , &paddr );
    62             if( error )
    63         {
    64             printk("\n[ERROR] in %s : illegal pointer = %x / thread %x in process %x\n",
    65             __FUNCTION__ , (intptr_t)exit_value, this->trdid , process->pid );
    66             this->errno = EINVAL;
    67             return -1;
    68         }
    69 
    70         // take the lock protecting the join
    71         remote_spinlock_lock( XPTR( local_cxy, &this->join_lock ) );
    72 
    73         if( this->flags & THREAD_FLAG_JOIN_DONE )       // parent thread arrived first
    74         {
    75             // unblock the parent thread
    76             thread_unblock( this->join_xp , THREAD_BLOCKED_EXIT );
    77 
    78             // reset the JOIN_DONE flag in this thread
    79             this->flags &= ~THREAD_FLAG_JOIN_DONE;
    80 
    81             // release the lock protecting the flags
    82                 remote_spinlock_unlock( XPTR( local_cxy, &this->join_lock ) );
    83         }
    84         else                                           // this thread arrived first
    85         {
    86             // block this thread
    87             thread_block( this , THREAD_BLOCKED_JOIN );
    88 
    89             // release the lock protecting the join
    90                 remote_spinlock_unlock( XPTR( local_cxy, &this->join_lock ) );
    91 
    92             // deschedule
    93             sched_yield( "WAITING JOIN" );
    94         }     
    95     }
    96 
    97 #if CONFIG_SYSCALL_DEBUG
    98 tm_end = hal_get_cycles();
    99 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
    100 "thread %x killed / cost = %d\n",
    101 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid , tm_start ,
    102 this->trdid , (uint32_t)(tm_end - tm_start) );
     49#if CONFIG_DEBUG_SYS_THREAD_EXIT
     50uint64_t     tm_start;
     51uint64_t     tm_end;
     52tm_start = hal_get_cycles();
     53if( CONFIG_DEBUG_SYS_THREAD_EXIT < tm_start )
     54printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
     55__FUNCTION__ , this, process->pid , (uint32_t)tm_start );
    10356#endif
    10457
    105     // suicide using a rpc because  a thread cannot kill itself
    106     rpc_thread_kill_client( local_cxy , this );
      58    // call the relevant kernel function
     59    thread_kill( XPTR( local_cxy , this ),
     60                 1,           // is_exit
     61                 0 );         // is forced
    10762
     63#if CONFIG_DEBUG_SYS_THREAD_EXIT
     64tm_end = hal_get_cycles();
     65if( CONFIG_DEBUG_SYS_THREAD_EXIT < tm_end )
     66printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n",
     67__FUNCTION__, this, this->process->pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     68#endif
     69
     70    // deschedule <=> suicide, because blocked by thread_kill()
     71    sched_yield( "suicide after thread_exit" );
     72   
    10873    return 0;   // never executed but required by compiler
    10974
  • trunk/kernel/syscalls/sys_thread_join.c

    r421 r436  
    2525#include <hal_remote.h>
    2626#include <hal_special.h>
     27#include <hal_irqmask.h>
    2728#include <thread.h>
    2829#include <vmm.h>
     
    3637                      void    ** exit_value )
    3738{
     39    reg_t         save_sr;
    3840    xptr_t        target_xp;
    3941    thread_t    * target_ptr;
    4042    cxy_t         target_cxy;
    4143    ltid_t        target_ltid;
    42         uint32_t      target_blocked;   // target thread blocked bit-vector
    43     uint32_t      target_flags;     // target thread flags bit-bector
    44     paddr_t       paddr;            // required for vmm_v2p_translate()
     44    xptr_t        target_join_lock_xp;
     45    xptr_t        target_flags_xp;
     46    xptr_t        target_blocked_xp;
     47    xptr_t        target_join_xp_xp;
     48    xptr_t        killer_xp;
     49    xptr_t        joining_xp;
     50    thread_t    * joining_ptr;
     51    process_t   * process;
    4552
    46         thread_t    * this    = CURRENT_THREAD;
    47     process_t   * process = this->process;
     53    // get joining thread pointers
     54        joining_ptr = CURRENT_THREAD;
     55    joining_xp  = XPTR( local_cxy , joining_ptr );
     56    process     = joining_ptr->process;
    4857
    4958    // get target thread ltid and cxy
     
    5160    target_cxy  = CXY_FROM_TRDID( trdid );
    5261
     62#if CONFIG_DEBUG_SYS_THREAD_JOIN
     63uint64_t     tm_start;
     64uint64_t     tm_end;
     65tm_start = hal_get_cycles();
     66if( CONFIG_DEBUG_SYS_THREAD_JOIN < tm_start )
     67printk("\n[DBG] %s : parent thread %x enter / process %x / target trdid %x / cycle %d\n",
     68__FUNCTION__ , joining_ptr , process->pid , trdid , (uint32_t)tm_start );
     69#endif
     70
    5371    // check trdid argument
    5472        if( (target_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) || cluster_is_undefined( target_cxy ) ) 
    5573        {
    56         printk("\n[ERROR] in %s : illegal trdid argument\n", __FUNCTION__ );
    57                 this->errno = EINVAL;
     74
     75#if CONFIG_DEBUG_SYSCALLS_ERROR
     76printk("\n[ERROR] in %s : illegal trdid argument %x\n", __FUNCTION__, trdid );
     77#endif
     78                joining_ptr->errno = EINVAL;
    5879                return -1;
    5980        }
    6081
    6182    // check exit_value argument
    62         if( (exit_value != NULL) && (vmm_v2p_translate( false , exit_value , &paddr ) != 0 ) )
     83        if( exit_value != NULL )
    6384        {
    64         printk("\n[ERROR] in %s : illegal exit_value argument\n", __FUNCTION__ );
    65                 this->errno = EINVAL;
     85
     86#if CONFIG_DEBUG_SYSCALLS_ERROR
     87printk("\n[ERROR] in %s : exit_value argument must be NULL\n", __FUNCTION__ );
     88#endif
     89                joining_ptr->errno = EINVAL;
    6690                return -1;
    6791        }
    6892
    6993    // check target thread != this thread
    70     if( this->trdid == trdid )
     94    if( joining_ptr->trdid == trdid )
    7195    {
    72         printk("\n[ERROR] in %s : this thread == target thread\n", __FUNCTION__ );
    73         this->errno = EDEADLK;
     96
     97#if CONFIG_DEBUG_SYSCALLS_ERROR
     98printk("\n[ERROR] in %s : this thread == target thread\n", __FUNCTION__ );
     99#endif
     100        joining_ptr->errno = EDEADLK;
    74101        return -1;
    75102    }
    76103
    77     // get extended pointer on target thread
     104    // get pointers on target thread
    78105        target_xp  = thread_get_xptr( process->pid , trdid );
     106    target_ptr = GET_PTR( target_xp );
    79107
    80108    if( target_xp == XPTR_NULL )
    81109    {
    82         printk("\n[ERROR] in %s : target thread not found\n", __FUNCTION__ );
    83         this->errno = ESRCH;
     110
     111#if CONFIG_DEBUG_SYSCALLS_ERROR
     112printk("\n[ERROR] in %s : target thread %x not found\n", __FUNCTION__, trdid );
     113#endif
     114        joining_ptr->errno = ESRCH;
    84115        return -1;
    85116    }
    86117
    87     // get cluster and local pointer on target thread
    88     target_ptr = (thread_t *)GET_PTR( target_xp );
     118    // get extended pointers on various fields in target thread
     119    target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock );
     120    target_flags_xp     = XPTR( target_cxy , &target_ptr->flags );
     121    target_blocked_xp   = XPTR( target_cxy , &target_ptr->blocked );
     122    target_join_xp_xp   = XPTR( target_cxy , &target_ptr->join_xp );
    89123
    90124    // check target thread joinable
    91     target_flags = hal_remote_lw( XPTR( target_cxy , &target_ptr->flags ) );
    92     if( target_flags & THREAD_FLAG_DETACHED )
     125    if( (hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 )
    93126    {
    94         printk("\n[ERROR] in %s : target thread not joinable\n", __FUNCTION__ );
    95         this->errno = EINVAL;
     127
     128#if CONFIG_DEBUG_SYSCALLS_ERROR
     129printk("\n[ERROR] in %s : target thread %x not joinable\n", __FUNCTION__, trdid );
     130#endif
     131        joining_ptr->errno = EINVAL;
    96132        return -1;
    97133    }
    98134
    99     // check kernel stack overflow
    100     if( target_ptr->signature != THREAD_SIGNATURE )
     135    // mask IRQs
     136    hal_disable_irq( &save_sr );
     137
     138    // get the lock protecting the join in target thread
     139    remote_spinlock_lock( target_join_lock_xp );
     140
     141    // test the kill_done flag from the target thread
     142    if( hal_remote_lw( target_flags_xp ) & THREAD_FLAG_KILL_DONE )  // killer thread is first
    101143    {
    102         assert( false , __FUNCTION__ , "kernel stack overflow\n" );
     144        // get pointers on killer thread
     145        killer_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
     146
     147        // reset the kill_done flag in target thread
     148        hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_KILL_DONE );
     149
     150        // unblock the killer thread
     151        thread_unblock( killer_xp , THREAD_BLOCKED_JOIN );
     152
     153        // release the lock protecting join     
     154        remote_spinlock_unlock( target_join_lock_xp );
     155
     156        // restore IRQs
     157        hal_restore_irq( save_sr );
     158    }
     159    else                                                          // joining thread is first
     160    {
     161        // set the join_done flag in target thread
     162        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_JOIN_DONE );
     163
     164        // block joining thread on BLOCKED_JOIN
     165        thread_block( joining_xp , THREAD_BLOCKED_JOIN );
     166
     167        // register the joining thread extended pointer in target thread
     168        hal_remote_swd( target_join_xp_xp , joining_xp );
     169
     170        // release the lock protecting the join     
     171        remote_spinlock_unlock( target_join_lock_xp );
     172
     173        // deschedule
     174        sched_yield( "joining thread waiting killer thread" );
     175   
     176        // restore IRQs
     177        hal_restore_irq( save_sr );
    103178    }
    104179
    105     // get the lock protecting the join in target thread
    106     remote_spinlock_lock( XPTR( target_cxy , &target_ptr->join_lock ) );
    107 
    108     // get the blocked bit_vector from the target thread
    109     target_blocked = hal_remote_lw( XPTR( target_cxy , &target_ptr->blocked ) );
    110 
    111     if( target_blocked & THREAD_BLOCKED_JOIN )    // target thread arrived first
    112     {
    113         // unblock the target thread
    114         thread_unblock( target_xp , THREAD_BLOCKED_JOIN );
    115 
    116         // release the lock protecting flags     
    117         remote_spinlock_unlock( XPTR( target_cxy , &target_ptr->join_lock ) );
    118 
    119         // get the exit value from target thread
    120         *exit_value = hal_remote_lpt( XPTR( target_cxy , &target_ptr->join_value ) );
    121     }
    122     else                                          // this thread arrived first
    123     {
    124         // register this thread extended pointer in target thread
    125         hal_remote_swd( XPTR( target_cxy , &target_ptr->join_xp ) ,
    126                               XPTR( local_cxy , this ) );
    127 
    128         // set the JOIN_DONE flag in target thread
    129         hal_remote_atomic_or( XPTR( target_cxy , &target_ptr->flags ) ,
    130                               THREAD_FLAG_JOIN_DONE );
    131 
    132         // block this thread on BLOCKED_EXIT
    133         thread_block( this , THREAD_BLOCKED_EXIT );
    134 
    135         // release the lock protecting flags     
    136         remote_spinlock_unlock( XPTR( target_cxy , &target_ptr->join_lock ) );
    137 
    138         // deschedule
    139         sched_yield( "WAITING_EXIT" );
    140    
    141         // get the exit value from target thread when resume
    142         *exit_value = hal_remote_lpt( XPTR( target_cxy , &target_ptr->join_value ) );
    143     }
     180#if CONFIG_DEBUG_SYS_THREAD_JOIN
     181tm_end = hal_get_cycles();
     182if( CONFIG_DEBUG_SYS_THREAD_JOIN < tm_end )
     183printk("\n[DBG] %s : parent thread %x exit / process %x / target trdid %x / cycle %d\n",
     184__FUNCTION__, joining_ptr, process->pid, trdid, (uint32_t)tm_end );
     185#endif
    144186
    145187    return 0;
  • trunk/kernel/syscalls/sys_thread_sleep.c

    r408 r436  
    3030int sys_thread_sleep()
    3131{
     32
    3233    thread_t * this = CURRENT_THREAD;
    3334
    34     thread_dmsg("\n[DBG] %s : thread %x in process %x goes to sleep at cycle %d\n",
    35                 __FUNCTION__, this->trdid, this->process->pid, hal_get_cycles() );
     35#if CONFIG_DEBUG_SYS_THREAD_SLEEP
     36uint64_t     tm_start;
     37uint64_t     tm_end;
     38tm_start = hal_get_cycles();
     39if( CONFIG_DEBUG_SYS_THREAD_SLEEP < tm_start )
     40printk("\n[DBG] %s : thread %x blocked / process %x / cycle %d\n",
     41__FUNCTION__ , this, this->process->pid , (uint32_t)tm_start );
     42#endif
    3643
    37     thread_block( this , THREAD_BLOCKED_GLOBAL );
     44    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL );
    3845    sched_yield("blocked on sleep");
    3946
    40     thread_dmsg("\n[DBG] %s : thread %x in process %x resume at cycle\n",
    41                 __FUNCTION__, this->trdid, this->process->pid, hal_get_cycles() );
     47#if CONFIG_DEBUG_SYS_THREAD_SLEEP
     48tm_end = hal_get_cycles();
     49if( CONFIG_DEBUG_SYS_THREAD_SLEEP < tm_end )
     50printk("\n[DBG] %s : thread %x resume / process %x / cycle %d\n",
     51__FUNCTION__ , this, this->process->pid , (uint32_t)tm_end );
     52#endif
    4253
    4354        return 0;
     55
    4456}  // end sys_thread_sleep()
  • trunk/kernel/syscalls/sys_thread_wakeup.c

    r23 r436  
    3434    process_t * process = this->process;
    3535
     36#if CONFIG_DEBUG_SYS_THREAD_WAKEUP
     37uint64_t     tm_start;
     38uint64_t     tm_end;
     39tm_start = hal_get_cycles();
     40if( CONFIG_DEBUG_SYS_THREAD_WAKEUP < tm_start )
     41printk("\n[DBG] %s : thread %x enter / activate thread %x in process %x / cycle %d\n",
     42__FUNCTION__ , this, trdid, this->process->pid, (uint32_t)tm_start );
     43#endif
     44
    3645    // get target thread ltid and cxy
    3746    ltid_t   target_ltid = LTID_FROM_TRDID( trdid );
     
    4150        if( (target_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) || cluster_is_undefined( target_cxy ) ) 
    4251        {
    43         printk("\n[ERROR] in %s : illegal trdid argument\n", __FUNCTION__ );
     52
      53#if CONFIG_DEBUG_SYSCALLS_ERROR
     54printk("\n[ERROR] in %s : illegal trdid argument %x\n", __FUNCTION__, trdid );
     55#endif
    4456                this->errno = EINVAL;
    4557                return -1;
     
    5163    if( thread_xp == XPTR_NULL )
    5264    {
    53         printk("\n[ERROR] in %s : cannot find thread %x in process %x/n",
    54                __FUNCTION__ , trdid , CURRENT_THREAD->process->pid );
     65
      66#if CONFIG_DEBUG_SYSCALLS_ERROR
      67printk("\n[ERROR] in %s : cannot find thread %x in process %x\n",
     68__FUNCTION__ , trdid , this->process->pid );
     69#endif
    5570        CURRENT_THREAD->errno = EINVAL;
    5671        return -1;
     
    6075    thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL );
    6176
     77#if CONFIG_DEBUG_SYS_THREAD_WAKEUP
     78tm_end = hal_get_cycles();
     79if( CONFIG_DEBUG_SYS_THREAD_WAKEUP < tm_end )
     80printk("\n[DBG] %s : thread %x exit / thread %x in process %x activated / cycle %d\n",
     81__FUNCTION__ , this, trdid, this->process->pid, (uint32_t)tm_end );
     82#endif
     83
    6284    return 0;
     85
    6386}  // end sys_thread_wakeup()
  • trunk/kernel/syscalls/sys_wait.c

    r435 r436  
    6868__FUNCTION__ , this->trdid , process->pid );
    6969#endif
    70         this->errno = EFAULT;
     70        this->errno = EINVAL;
    7171                return -1;
    7272        }
    7373
    74     // get process owner cluster
    75     cxy_t owner_cxy = CXY_FROM_PID( pid );
     74    // get process owner cluster and calling thread trdid
     75    cxy_t   owner_cxy = CXY_FROM_PID( pid );
     76    trdid_t trdid     = this->trdid;
    7677
    77     // This function must be executed in owner cluster
    78     assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
    79     "calling thread must execute in owner cluster" );
     78    // wait must be executed by the main thread
     79    if( (owner_cxy != local_cxy) || (LTID_FROM_TRDID(trdid) != 0) )
     80    {
    8081
    81     // This function must be executed by the main thread
    82     assert( (process->th_tbl[0] == this) , __FUNCTION__ ,
    83     "this function must be executed by the main thread" );
    84    
      82#if CONFIG_DEBUG_SYSCALLS_ERROR
     83printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n",
     84__FUNCTION__ , trdid , owner_cxy );
     85#endif
     86        this->errno = EINVAL;
     87                return -1;
     88        }
     89
    8590    // get extended pointer on children list root and lock
    8691    xptr_t children_root_xp = XPTR( owner_cxy , &process->children_root );
     
    96101        remote_spinlock_lock( children_lock_xp );
    97102
    98         // scan the list of owner child process
     103        // scan the list of child process
    99104        XLIST_FOREACH( children_root_xp , iter_xp )
    100105        {
     
    115120            {
    116121                // get pointer on main thread and PID from child owner process
    117                 child_pid    = (pid_t)     hal_remote_lw ( XPTR(child_cxy,&child_ptr->pid));
    118                 child_thread = (thread_t *)hal_remote_lpt( XPTR(child_cxy,&child_ptr->th_tbl[0]));
     122                child_pid    = (pid_t)     hal_remote_lw (XPTR( child_cxy , &child_ptr->pid ));
     123                child_thread = (thread_t *)hal_remote_lpt(XPTR( child_cxy ,
     124                                                                &child_ptr->th_tbl[0] ));
    119125
    120126                // set the PROCESS_FLAG_WAIT in owner child descriptor
     
    135141__FUNCTION__, this, process->pid, child_pid, (uint32_t)tm_end );
    136142#endif
    137                  // return relevant info to calling parent process
     143                 // return child termination state  to parent process
    138144                 hal_copy_to_uspace( status , &child_state , sizeof(int) );
    139145                 return child_pid;
    140146            }
    141         }
     147        }  // end loop on children
    142148       
    143149        // release lock protecting children list
  • trunk/kernel/syscalls/syscalls.h

    r435 r436  
    100100/******************************************************************************************
    101101 * [5] This function requests a target thread identified by its <trdid> argument
    102  * to be cancelled. Depending on killer thread and target thread location, it calls
    103  * the thread_kil() function or the rpc_thread_kill_client() function to do the work.
    104  * It actually set the THREAD_SIG_KILL signal, set the THREAD_BLOCKED_GLOBAL bit in the
    105  * target thread descriptor and return.
     102 * to be cancelled. It calls the thread_kill() function to block the target thread
     103 * on the THREAD_BLOCKED_GLOBAL condition, and to set the THREAD_FLAG_REQ_DELETE.
    106104 * The thread will be detached from its process, and the memory allocated to the thread
    107  * descriptor will be released later by the scheduler.
     105 * descriptor will be released by the scheduler at the next scheduling point.
    108106 ******************************************************************************************
    109107 * @ trdid   : thread identifier.
Note: See TracChangeset for help on using the changeset viewer.