Changeset 438 for trunk/kernel/kern


Timestamp: Apr 4, 2018, 2:49:02 PM
Author: alain
Message: Fix a bug in scheduler related to RPC blocking.
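The fix shows up in the process.c diff below as the client side of the parallel RPC protocol: the shared descriptor field response is renamed responses, and the client seeds it with one expected answer per target cluster before descheduling. A minimal sketch of that client sequence, built only from names visible in this changeset (the descriptor type name rpc_desc_t and the server-side decrement/unblock are assumptions, not part of this diff):

    rpc_desc_t rpc;                         // shared descriptor (type name assumed)

    // initialize shared RPC descriptor (field names from the process.c hunks)
    rpc.responses = 0;                      // expected-responses counter
    rpc.blocking  = false;                  // parallel, non-blocking server threads
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;

    // one expected response per cluster holding a process copy
    hal_atomic_add( (void *)&rpc.responses , 1 );

    // deschedule until the last server response unblocks the client
    // (the server-side decrement is assumed; it is not shown in this diff)
    sched_yield( "blocked on rpc_process_sigaction" );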

Location: trunk/kernel/kern
Files: 14 edited

Legend: unmodified lines are shown as plain context; removed lines are prefixed with "-"; added lines are prefixed with "+"; "…" marks an elided run of unmodified lines.
  • trunk/kernel/kern/chdev.c

r437 → r438

      extern chdev_directory_t    chdev_dir;   // allocated in kernel_init.c

    - #if (CONFIG_DEBUG_SYS_READ & 1)
    + #if (DEBUG_SYS_READ & 1)
      extern uint32_t enter_chdev_cmd_read;
      extern uint32_t exit_chdev_cmd_read;
    …
      #endif

    - #if (CONFIG_DEBUG_SYS_WRITE & 1)
    + #if (DEBUG_SYS_WRITE & 1)
      extern uint32_t enter_chdev_cmd_write;
      extern uint32_t exit_chdev_cmd_write;
    …
      uint32_t   save_sr;       // for critical section

    - #if (CONFIG_DEBUG_SYS_READ & 1)
    + #if (DEBUG_SYS_READ & 1)
      enter_chdev_cmd_read = (uint32_t)hal_get_cycles();
      #endif

    - #if (CONFIG_DEBUG_SYS_WRITE & 1)
    + #if (DEBUG_SYS_WRITE & 1)
      enter_chdev_cmd_write = (uint32_t)hal_get_cycles();
      #endif
    …
      chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );

    - #if (CONFIG_DEBUG_CHDEV_CMD_RX || CONFIG_DEBUG_CHDEV_CMD_TX)
    + #if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
      bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
      #endif

    - #if CONFIG_DEBUG_CHDEV_CMD_RX
    + #if DEBUG_CHDEV_CMD_RX
      uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    - if( (is_rx) && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
    + if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
      printk("\n[DBG] %s : client_thread %x (%s) enter for RX / cycle %d\n",
      __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
      #endif

    - #if CONFIG_DEBUG_CHDEV_CMD_TX
    + #if DEBUG_CHDEV_CMD_TX
      uint32_t tx_cycle = (uint32_t)hal_get_cycles();
    - if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
    + if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
      printk("\n[DBG] %s : client_thread %x (%s) enter for TX / cycle %d\n",
      __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
    …
      hal_restore_irq( save_sr );

    - #if CONFIG_DEBUG_CHDEV_CMD_RX
    + #if DEBUG_CHDEV_CMD_RX
      rx_cycle = (uint32_t)hal_get_cycles();
    - if( (is_rx) && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
    + if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
      printk("\n[DBG] %s : client_thread %x (%s) exit for RX / cycle %d\n",
      __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
      #endif

    - #if CONFIG_DEBUG_CHDEV_CMD_TX
    + #if DEBUG_CHDEV_CMD_TX
      tx_cycle = (uint32_t)hal_get_cycles();
    - if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
    + if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
      printk("\n[DBG] %s : client_thread %x (%s) exit for TX / cycle %d\n",
      __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
      #endif

    - #if (CONFIG_DEBUG_SYS_READ & 1)
    + #if (DEBUG_SYS_READ & 1)
      exit_chdev_cmd_read = (uint32_t)hal_get_cycles();
      #endif

    - #if (CONFIG_DEBUG_SYS_WRITE & 1)
    + #if (DEBUG_SYS_WRITE & 1)
      exit_chdev_cmd_write = (uint32_t)hal_get_cycles();
      #endif
    …
      client_ptr = (thread_t *)GET_PTR( client_xp );

    - #if CONFIG_DEBUG_CHDEV_SERVER_RX
    + #if DEBUG_CHDEV_SERVER_RX
      uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    - if( (chdev->is_rx) && (CONFIG_DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    + if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
      printk("\n[DBG] %s : server_thread %x start RX / client %x / cycle %d\n",
      __FUNCTION__ , server , client_ptr , rx_cycle );
      #endif

    - #if CONFIG_DEBUG_CHDEV_SERVER_TX
    + #if DEBUG_CHDEV_SERVER_TX
      uint32_t tx_cycle = (uint32_t)hal_get_cycles();
    - if( (chdev->is_rx == 0) && (CONFIG_DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    + if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
      printk("\n[DBG] %s : server_thread %x start TX / client %x / cycle %d\n",
      __FUNCTION__ , server , client_ptr , tx_cycle );
      #endif

    - #if (CONFIG_DEBUG_SYS_READ & 1)
    + #if (DEBUG_SYS_READ & 1)
      enter_chdev_server_read = (uint32_t)hal_get_cycles();
      #endif

    - #if (CONFIG_DEBUG_SYS_WRITE & 1)
    + #if (DEBUG_SYS_WRITE & 1)
      enter_chdev_server_write = (uint32_t)hal_get_cycles();
      #endif
    …
      thread_unblock( client_xp , THREAD_BLOCKED_IO );

    - #if CONFIG_DEBUG_CHDEV_SERVER_RX
    + #if DEBUG_CHDEV_SERVER_RX
      rx_cycle = (uint32_t)hal_get_cycles();
    - if( (chdev->is_rx) && (CONFIG_DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    + if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
      printk("\n[DBG] %s : server_thread %x completes RX / client %x / cycle %d\n",
      __FUNCTION__ , server , client_ptr , rx_cycle );
      #endif

    - #if CONFIG_DEBUG_CHDEV_SERVER_TX
    + #if DEBUG_CHDEV_SERVER_TX
      tx_cycle = (uint32_t)hal_get_cycles();
    - if( (chdev->is_rx == 0) && (CONFIG_DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    + if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
      printk("\n[DBG] %s : server_thread %x completes TX / client %x / cycle %d\n",
      __FUNCTION__ , server , client_ptr , tx_cycle );
      #endif

    - #if (CONFIG_DEBUG_SYS_READ & 1)
    + #if (DEBUG_SYS_READ & 1)
      exit_chdev_server_read = (uint32_t)hal_get_cycles();
      #endif

    - #if (CONFIG_DEBUG_SYS_WRITE & 1)
    + #if (DEBUG_SYS_WRITE & 1)
      exit_chdev_server_write = (uint32_t)hal_get_cycles();
      #endif
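This file shows the renaming applied across the whole changeset: every CONFIG_DEBUG_* flag becomes DEBUG_*, while the guard idiom itself is unchanged. The flag value doubles as a start threshold: the trace is compiled in when the flag is non-zero, and fires only once the cycle counter exceeds it. A minimal sketch of the idiom, with DEBUG_EXAMPLE as a hypothetical flag:

    #define DEBUG_EXAMPLE  1000              // hypothetical flag: 0 disables the
                                             // trace; a non-zero value is the cycle
                                             // from which messages start printing

    #if DEBUG_EXAMPLE                        // compiled in only when non-zero
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( DEBUG_EXAMPLE < cycle )              // printed only past the threshold
    printk("\n[DBG] %s : enter / cycle %d\n", __FUNCTION__ , cycle );
    #endif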
  • trunk/kernel/kern/cluster.c

r437 → r438

      spinlock_init( &cluster->kcm_lock );

    - #if CONFIG_DEBUG_CLUSTER_INIT
    + #if DEBUG_CLUSTER_INIT
      uint32_t cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
      __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
    …
      cluster->dqdt_root_level = dqdt_init( info->x_size,
                                            info->y_size,
    -                                       info->y_width );
    - cluster->threads_var = 0;
    - cluster->pages_var   = 0;
    +                                       info->y_width ) - 1;

      // initialises embedded PPM
    …
      }

    - #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
    + #if( DEBUG_CLUSTER_INIT & 1 )
      cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
      __FUNCTION__ , local_cxy , cycle );
    …
      khm_init( &cluster->khm );

    - #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
    + #if( DEBUG_CLUSTER_INIT & 1 )
      uint32_t cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
      __FUNCTION__ , local_cxy , hal_get_cycles() );
    …
      kcm_init( &cluster->kcm , KMEM_KCM );

    - #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
    + #if( DEBUG_CLUSTER_INIT & 1 )
      uint32_t cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
      __FUNCTION__ , local_cxy , hal_get_cycles() );
    …
      }

    - #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
    + #if( DEBUG_CLUSTER_INIT & 1 )
      cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
      __FUNCTION__ , local_cxy , cycle );
    …
      cluster->rpc_threads = 0;

    - #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
    + #if( DEBUG_CLUSTER_INIT & 1 )
      cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
      __FUNCTION__ , local_cxy , hal_get_cycles() );
    …
      }

    - #if CONFIG_DEBUG_CLUSTER_INIT
    + #if DEBUG_CLUSTER_INIT
      cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
    + if( DEBUG_CLUSTER_INIT < cycle )
      printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
      __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
    …
      pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    - #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
    + #if DEBUG_CLUSTER_PROCESS_COPIES
      uint32_t cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    + if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
      printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
      __FUNCTION__ , local_cxy , process , cycle );
    …
      remote_spinlock_unlock_busy( copies_lock , irq_state );

    - #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
    + #if DEBUG_CLUSTER_PROCESS_COPIES
      cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    + if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
      printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
      __FUNCTION__ , local_cxy , process , cycle );
    …
      pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    - #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
    + #if DEBUG_CLUSTER_PROCESS_COPIES
      uint32_t cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    + if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
      printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
      __FUNCTION__ , local_cxy , process , cycle );
    …
      remote_spinlock_unlock_busy( copies_lock , irq_state );

    - #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
    + #if DEBUG_CLUSTER_PROCESS_COPIES
      cycle = (uint32_t)hal_get_cycles();
    - if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    + if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
      printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
      __FUNCTION__ , local_cxy , process , cycle );
  • trunk/kernel/kern/cluster.h

r437 → r438

      // DQDT
    - int32_t           pages_var;         /*! pages number increment from last DQQT updt   */
    - int32_t           threads_var;       /*! threads number increment from last DQDT updt */
    -
      dqdt_node_t       dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes in cluster */
  • trunk/kernel/kern/core.c

r433 → r438

      // handle scheduler
      if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
    -
    - // update DQDT
    - if( ((ticks % CONFIG_DQDT_TICKS_PER_QUANTUM) == 0) && (core->lid == 0) )
    - dqdt_global_update();
      }
  • trunk/kernel/kern/do_syscall.c

r437 → r438

      int  error = 0;

    + assert( (this == CURRENT_THREAD), __FUNCTION__,
    + "wrong <this> argument\n" );
    +
      // update user time
      thread_user_time_update( this );
    …
      // check kernel stack overflow
    - assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
    + assert( (CURRENT_THREAD->signature == THREAD_SIGNATURE), __FUNCTION__,
    + "kernel stack overflow after for thread %x in cluster %x\n", CURRENT_THREAD, local_cxy );

      // update kernel time
  • trunk/kernel/kern/dqdt.c

r437 → r438

      #include <hal_remote.h>
      #include <printk.h>
    + #include <chdev.h>
      #include <cluster.h>
      #include <bits.h>
    …

    - ///////////////////////////////////////////
    - void dqdt_local_print( dqdt_node_t * node )
    - {
    -     printk("DQDT node : level = %d / cluster = %x / threads = %x / pages = %x\n",
    -            node->level,
    -            local_cxy,
    -            node->threads,
    -            node->pages );
    - }
    -
    - /////////////////////////////////////////
    - void dqdt_global_print( xptr_t  node_xp )
    + ///////////////////////////////////////////////////////////////////////////////////////////
    + //      Extern variables
    + ///////////////////////////////////////////////////////////////////////////////////////////
    +
    + extern chdev_directory_t  chdev_dir;  // defined in chdev.h / allocated in kernel_init.c
    +
    +
    + ///////////////////////////////////////////////////////////////////////////////////////////
    + // This static recursive function traverse the DQDT quad-tree from root to bottom.
    + ///////////////////////////////////////////////////////////////////////////////////////////
    + static void dqdt_recursive_print( xptr_t  node_xp )
      {
          uint32_t i;
    -     dqdt_node_t local_node;
    -
    -     // get root node local copy
    -     hal_remote_memcpy( XPTR( local_cxy , &local_node ), node_xp , sizeof(dqdt_node_t) );
    -
    -     // display DQDT node content
    -     dqdt_local_print( &local_node );
    +     dqdt_node_t node;
    +
    +     // get node local copy
    +     hal_remote_memcpy( XPTR( local_cxy , &node ), node_xp , sizeof(dqdt_node_t) );
    +
    +     // display node content
    +     nolock_printk("- level %d in cluster %x (node %x) : threads = %x / pages = %x\n",
    +     node.level, GET_CXY( node_xp ), GET_PTR( node_xp ), node.threads, node.pages );

          // recursive call on children if node is not terminal
    -     if ( local_node.level > 0 )
    +     if ( node.level > 0 )
          {
              for ( i = 0 ; i < 4 ; i++ )
              {
    -             if ( local_node.children[i] != XPTR_NULL )
    -                 dqdt_global_print( local_node.children[i] );
    +             if ( node.children[i] != XPTR_NULL ) dqdt_recursive_print( node.children[i] );
              }
          }
    + }
    +
    + ///////////////////
    + void dqdt_display()
    + {
    +     reg_t   save_sr;
    +
    +     // build extended pointer on DQDT root node
    +     cluster_t * cluster = LOCAL_CLUSTER;
    +     uint32_t    level   = cluster->dqdt_root_level;
    +     xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );
    +
    +     // get pointers on TXT0 chdev
    +     xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    +     cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    +     chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    +
    +     // get extended pointer on remote TXT0 chdev lock
    +     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    +
    +     // get TXT0 lock in busy waiting mode
    +     remote_spinlock_lock_busy( lock_xp , &save_sr );
    +
    +     // print header
    +     nolock_printk("\n***** DQDT state\n\n");
    +
    +     // call recursive function
    +     dqdt_recursive_print( root_xp );
    +
    +     // release lock
    +     remote_spinlock_unlock_busy( lock_xp , save_sr );
      }
    …
      } // end dqdt_init()

    -
    - ///////////////////////////////////////////////////////////////////////////
    - // This recursive function is called by the dqdt_global_update() function.
    + ///////////////////////////////////////////////////////////////////////////
    + // This recursive function is called by the dqdt_update_threads() function.
      // It traverses the quad tree from clusters to root.
      ///////////////////////////////////////////////////////////////////////////
    - static void dqdt_propagate( xptr_t  node,         // extended pointer on current node
    -                             int32_t threads_var,  // number of threads variation
    -                             int32_t pages_var )   // number of pages variation
    + // @ node       : extended pointer on current node
    + // @ increment  : number of threads variation
    + ///////////////////////////////////////////////////////////////////////////
    + static void dqdt_propagate_threads( xptr_t  node,
    +                                     int32_t increment )
      {
          // get current node cluster identifier and local pointer
    -     cxy_t         cxy = (cxy_t)GET_CXY( node );
    -     dqdt_node_t * ptr = (dqdt_node_t *)GET_PTR( node );
    +     cxy_t         cxy = GET_CXY( node );
    +     dqdt_node_t * ptr = GET_PTR( node );

          // update current node threads number
    -     hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , threads_var );
    -
    -     // update current node pages number
    -     hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , pages_var );
    +     hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , increment );

          // get extended pointer on parent node
    …

          // propagate if required
    -     if ( parent != XPTR_NULL )
    -     {
    -         dqdt_propagate( parent, threads_var, pages_var );
    -     }
    - }
    -
    - /////////////////////////
    - void dqdt_global_update()
    +     if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
    + }
    +
    + ///////////////////////////////////////////////////////////////////////////
    + // This recursive function is called by the dqdt_update_pages() function.
    + // It traverses the quad tree from clusters to root.
    + ///////////////////////////////////////////////////////////////////////////
    + // @ node       : extended pointer on current node
    + // @ increment  : number of pages variation
    + ///////////////////////////////////////////////////////////////////////////
    + static void dqdt_propagate_pages( xptr_t  node,
    +                                   int32_t increment )
    + {
    +     // get current node cluster identifier and local pointer
    +     cxy_t         cxy = GET_CXY( node );
    +     dqdt_node_t * ptr = GET_PTR( node );
    +
    +     // update current node threads number
    +     hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , increment );
    +
    +     // get extended pointer on parent node
    +     xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
    +
    +     // propagate if required
    +     if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent, increment );
    + }
    +
    + /////////////////////////////////////////////
    + void dqdt_update_threads( int32_t increment )
      {
          cluster_t   * cluster = LOCAL_CLUSTER;
          dqdt_node_t * node    = &cluster->dqdt_tbl[0];

    -     // get variations
    -     int32_t      threads_var = cluster->threads_var;
    -     int32_t      pages_var   = cluster->pages_var;
    -
    -     // propagate this variation to DQDT upper levels
    -     if( (threads_var || pages_var) && (node->parent != XPTR_NULL) )
    -     {
    -         dqdt_propagate( node->parent, threads_var, pages_var );
    -     }
    -
    -     // update variations
    -     hal_atomic_add( &cluster->threads_var , -threads_var );
    -     hal_atomic_add( &cluster->pages_var   , -pages_var   );
    - }
    -
    - ///////////////////////////////////////////////////
    - void dqdt_local_update_threads( int32_t increment )
    - {
    -     cluster_t * cluster = LOCAL_CLUSTER;
    -
    -     // register change for future propagation in DQDT
    -     hal_atomic_add( &cluster->threads_var , increment );
    -
          // update DQDT node level 0
    -     hal_atomic_add( &cluster->dqdt_tbl[0].threads , increment );
    - }
    -
    - /////////////////////////////////////////////////
    - void dqdt_local_update_pages( int32_t increment )
    - {
    -     cluster_t * cluster = LOCAL_CLUSTER;
    -
    -     // register change for future propagation in DQDT
    -     hal_atomic_add( &cluster->pages_var , increment );
    +     hal_atomic_add( &node->threads , increment );
    +
    +     // propagate to DQDT upper levels
    +     if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment );
    + }
    +
    + ///////////////////////////////////////////
    + void dqdt_update_pages( int32_t increment )
    + {
    +     cluster_t   * cluster = LOCAL_CLUSTER;
    +     dqdt_node_t * node    = &cluster->dqdt_tbl[0];

          // update DQDT node level 0
    -     hal_atomic_add( &cluster->dqdt_tbl[0].pages , increment );
    - }
    +     hal_atomic_add( &node->pages , increment );
    +
    +     // propagate to DQDT upper levels
    +     if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment );
    + }

      ////////////////////////////////////////////////////////////////////////////////
    …
      cluster_t * cluster = LOCAL_CLUSTER;
      uint32_t    level   = cluster->dqdt_root_level;
    - xptr_t      root    = XPTR( 0 , &cluster->dqdt_tbl[level] );
    + xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );

      // call recursive function
    - return dqdt_select_cluster( root , false );
    + return dqdt_select_cluster( root_xp , false );
      }
    …
      cluster_t * cluster = LOCAL_CLUSTER;
      uint32_t    level   = cluster->dqdt_root_level;
    - xptr_t      root    = XPTR( 0 , &cluster->dqdt_tbl[level] );
    + xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );

      // call recursive function
    - return dqdt_select_cluster( root , true );
    - }
    + return dqdt_select_cluster( root_xp , true );
    + }
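Taken together with the core.c hunk above that drops the periodic dqdt_global_update() call from the tick handler, this file switches the DQDT to eager propagation: each local variation climbs the parent links at update time instead of accumulating in the removed threads_var/pages_var fields. A sketch of the new call path, assuming a thread is created in the local cluster (per the dqdt.h comment below, the call is made on each thread creation or destruction):

    // at each local thread creation (or destruction, with -1):
    dqdt_update_threads( 1 );

    // which atomically updates the level-0 node of the local cluster:
    //     hal_atomic_add( &cluster->dqdt_tbl[0].threads , 1 );
    // then walks up the quad-tree, one remote atomic add per level:
    //     dqdt_propagate_threads( node->parent , 1 );
    // stopping at the node whose parent is XPTR_NULL (the root).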
  • trunk/kernel/kern/dqdt.h

r437 → r438

      /****************************************************************************************
    -  * This recursive function traverses the DQDT quad-tree from bottom to root, to propagate
    -  * the change in the threads number and allocated pages number in a leaf cluster,
    -  * toward the upper levels of the DQDT quad-tree.
    -  * It should be called periodically by each instance of the kernel.
    -  ***************************************************************************************/
    - void dqdt_global_update();
    -
    - /****************************************************************************************
    -  * This local function updates both the total number of threads,
    -  * in the level 0 DQDT node, and the variation of the number of threads
    -  * for future propagation to the DQDT upper levels.
    +  * This local function updates the total number of threads in level 0 DQDT node,
    +  * and propagates the variation to the DQDT upper levels.
       * It should be called on each thread creation or destruction.
       ****************************************************************************************
       * @ increment : increment (can be positive or negative)
       ***************************************************************************************/
    - void dqdt_local_update_threads( int32_t  increment );
    + void dqdt_update_threads( int32_t  increment );

      /****************************************************************************************
    -  * This local function updates both the total number of allocated pages,
    -  * in the level 0 DQDT node, and the variation of the number of pages
    -  * for future propagation to the DQDT upper levels.
    -  * It should be called on each memory allocation or release.
    +  * This local function updates the total number of pages in level 0 DQDT node,
    +  * and propagates the variation to the DQDT upper levels.
    +  * It should be called on each physical memory page allocation or release.
       ****************************************************************************************
       * @ increment : increment (can be positive or negative)
       ***************************************************************************************/
    - void dqdt_local_update_pages( int32_t increment );
    + void dqdt_update_pages( int32_t increment );

      /****************************************************************************************
    …
      /****************************************************************************************
    -  * This recursive function displays usage information for all DQDT nodes in the subtree
    -  * defined by the node argument. It traverses the quadtree from root to bottom.
    -  ****************************************************************************************
    -  * @ node_xp   : extended pointer on a DQDT node.
    +  * This function displays on kernel TXT0 the DQDT state for all nodes in the quad-tree.
    +  * It traverses the quadtree from root to bottom, and can be called by a thread
    +  * running in any cluster
       ***************************************************************************************/
    - void dqdt_global_print( xptr_t  node_xp );
    -
    - /****************************************************************************************
    -  * This function displays summary usage information in a given DQDT local node.
    -  ****************************************************************************************
    -  * @ node   : local pointer on a DQDT node.
    -  ***************************************************************************************/
    - void dqdt_local_print( dqdt_node_t * node );
    + void dqdt_display();
  • trunk/kernel/kern/kernel_init.c

r437 → r438

      // these debug variables are used to analyse the sys_read() syscall timing

    - #if CONFIG_DEBUG_SYS_READ
    + #if DEBUG_SYS_READ
      uint32_t   enter_sys_read;
      uint32_t   exit_sys_read;
    …
      // these debug variables are used to analyse the sys_write() syscall timing

    - #if CONFIG_DEBUG_SYS_WRITE
    + #if DEBUG_SYS_WRITE
      uint32_t   enter_sys_write;
      uint32_t   exit_sys_write;
    …
      }

    - #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    - if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
    + #if( DEBUG_KERNEL_INIT & 0x1 )
    + if( hal_time_stamp() > DEBUG_KERNEL_INIT )
      printk("\n[DBG] %s : created MMC in cluster %x / chdev = %x\n",
      __FUNCTION__ , local_cxy , chdev_ptr );
    …
      chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );

    - #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    - if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
    + #if( DEBUG_KERNEL_INIT & 0x1 )
    + if( hal_time_stamp() > DEBUG_KERNEL_INIT )
      printk("\n[DBG] %s : created DMA[%d] in cluster %x / chdev = %x\n",
      __FUNCTION__ , channel , local_cxy , chdev_ptr );
    …
      }

    - #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    - if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
    + #if( DEBUG_KERNEL_INIT & 0x1 )
    + if( hal_time_stamp() > DEBUG_KERNEL_INIT )
      printk("\n[DBG] %s : create chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n",
      __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev );
    …
      }

    - #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    - if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
    + #if( DEBUG_KERNEL_INIT & 0x1 )
    + if( hal_time_stamp() > DEBUG_KERNEL_INIT )
      {
          printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n",
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
      __FUNCTION__, (uint32_t)hal_get_cycles() );
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
      __FUNCTION__, (uint32_t)hal_get_cycles() );
    …
      ////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
      __FUNCTION__, (uint32_t)hal_get_cycles() );
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
      __FUNCTION__, (uint32_t)hal_get_cycles() );
      #endif

    - #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
    + #if( DEBUG_KERNEL_INIT & 1 )
      chdev_dir_display();
      #endif
    …

      // all cores initialize the idle thread descriptor
    - error = thread_kernel_init( thread,
    -                             THREAD_IDLE,
    -                             &thread_idle_func,
    -                             NULL,
    -                             core_lid );
    + error = thread_idle_init( thread,
    +                           THREAD_IDLE,
    +                           &thread_idle_func,
    +                           NULL,
    +                           core_lid );
      if( error )
      {
    …
      core->scheduler.idle = thread;

    - #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
    + #if( DEBUG_KERNEL_INIT & 1 )
      sched_display( core_lid );
      #endif
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
      __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == io_cxy) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster %x / cycle %d\n",
      __FUNCTION__, vfs_root_inode_xp , io_cxy , (uint32_t)hal_get_cycles());
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == io_cxy) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster %x / cycle %d\n",
      __FUNCTION__, devfs_dev_inode_xp , io_cxy , (uint32_t)hal_get_cycles() );
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
      __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
    …
      {

    - #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
    + #if( DEBUG_KERNEL_INIT & 1 )
      vfs_display( vfs_root_inode_xp );
      #endif
    …
      /////////////////////////////////////////////////////////////////////////////////

    - #if CONFIG_DEBUG_KERNEL_INIT
    - if( (core_lid ==  0) && (local_cxy == 0) )
    + #if DEBUG_KERNEL_INIT
    + if( (core_lid ==  0) & (local_cxy == 0) )
      printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
      __FUNCTION__ , (uint32_t)hal_get_cycles() );
    …
      print_banner( (info->x_size * info->y_size) , info->cores_nr );

    - #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
    + #if( DEBUG_KERNEL_INIT & 1 )
      printk("\n\n***** memory fooprint for main kernel objects\n\n"
                         " - thread descriptor  : %d bytes\n"
  • trunk/kernel/kern/process.c

    r437 r438  
    124124    model_pid  = hal_remote_lw( XPTR( model_cxy  , &model_ptr->pid ) );
    125125
    126 #if CONFIG_DEBUG_PROCESS_REFERENCE_INIT
     126#if DEBUG_PROCESS_REFERENCE_INIT
    127127uint32_t cycle = (uint32_t)hal_get_cycles();
    128 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     128if( DEBUG_PROCESS_REFERENCE_INIT )
    129129printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n",
    130130__FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle );
     
    141141    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );
    142142 
    143 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
    144 cycle = (uint32_t)hal_get_cycles();
    145 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     143#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     144cycle = (uint32_t)hal_get_cycles();
     145if( DEBUG_PROCESS_REFERENCE_INIT )
    146146printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n",
    147147__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     
    232232    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
    233233
    234 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
    235 cycle = (uint32_t)hal_get_cycles();
    236 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     234#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     235cycle = (uint32_t)hal_get_cycles();
     236if( DEBUG_PROCESS_REFERENCE_INIT )
    237237printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n",
    238238__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     
    272272        hal_fence();
    273273
    274 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
    275 cycle = (uint32_t)hal_get_cycles();
    276 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     274#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     275cycle = (uint32_t)hal_get_cycles();
     276if( DEBUG_PROCESS_REFERENCE_INIT )
    277277printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
    278278__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     
    297297    local_process->term_state = 0;
    298298
    299 #if CONFIG_DEBUG_PROCESS_COPY_INIT
     299#if DEBUG_PROCESS_COPY_INIT
    300300uint32_t cycle = (uint32_t)hal_get_cycles();
    301 if( CONFIG_DEBUG_PROCESS_COPY_INIT )
     301if( DEBUG_PROCESS_COPY_INIT )
    302302printk("\n[DBG] %s : thread %x enter for process %x\n",
    303303__FUNCTION__ , CURRENT_THREAD , local_process->pid );
     
    347347        hal_fence();
    348348
    349 #if CONFIG_DEBUG_PROCESS_COPY_INIT
    350 cycle = (uint32_t)hal_get_cycles();
    351 if( CONFIG_DEBUG_PROCESS_COPY_INIT )
     349#if DEBUG_PROCESS_COPY_INIT
     350cycle = (uint32_t)hal_get_cycles();
     351if( DEBUG_PROCESS_COPY_INIT )
    352352printk("\n[DBG] %s : thread %x exit for process %x\n",
    353353__FUNCTION__ , CURRENT_THREAD , local_process->pid );
     
    371371    "process %x in cluster %x has still active threads", pid , local_cxy );
    372372
    373 #if CONFIG_DEBUG_PROCESS_DESTROY
     373#if DEBUG_PROCESS_DESTROY
    374374uint32_t cycle = (uint32_t)hal_get_cycles();
    375 if( CONFIG_DEBUG_PROCESS_DESTROY )
     375if( DEBUG_PROCESS_DESTROY )
    376376printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
    377377__FUNCTION__ , CURRENT_THREAD , process, pid , cycle );
    378 #endif
    379 
    380 #if CONFIG_DEBUG_PROCESS_DESTROY
    381 if( CONFIG_DEBUG_PROCESS_DESTROY  & 1 )
    382 cluster_processes_display( CXY_FROM_PID( pid ) );
    383378#endif
    384379
     
    422417    process_free( process );
    423418
    424 #if CONFIG_DEBUG_PROCESS_DESTROY
    425 cycle = (uint32_t)hal_get_cycles();
    426 if( CONFIG_DEBUG_PROCESS_DESTROY )
     419#if DEBUG_PROCESS_DESTROY
     420cycle = (uint32_t)hal_get_cycles();
     421if( DEBUG_PROCESS_DESTROY )
    427422printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n",
    428423__FUNCTION__ , CURRENT_THREAD , process, pid, cycle );
     
    457452    thread_t * client = CURRENT_THREAD;
    458453
    459 #if CONFIG_DEBUG_PROCESS_SIGACTION
     454#if DEBUG_PROCESS_SIGACTION
    460455uint32_t cycle = (uint32_t)hal_get_cycles();
    461 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     456if( DEBUG_PROCESS_SIGACTION < cycle )
    462457printk("\n[DBG] %s : thread %x enter to %s process %x / cycle %d\n",
    463458__FUNCTION__ , client, process_action_str( action_type ) , pid , cycle );
     
    483478    // it can be shared because all parallel, non-blocking, server threads
    484479    // use the same input arguments, and use the shared RPC response field
    485     // but use
    486480
    487481    // the client thread makes the following sequence:
     
    502496
    503497    // initialize shared RPC descriptor
    504     rpc.response = 0;
    505     rpc.blocking = false;
    506     rpc.index    = RPC_PROCESS_SIGACTION;
    507     rpc.thread   = client;
    508     rpc.lid      = client->core->lid;
    509     rpc.args[0]  = action_type;
    510     rpc.args[1]  = pid;
     498    rpc.responses = 0;
     499    rpc.blocking  = false;
     500    rpc.index     = RPC_PROCESS_SIGACTION;
     501    rpc.thread    = client;
     502    rpc.lid       = client->core->lid;
     503    rpc.args[0]   = action_type;
     504    rpc.args[1]   = pid;
    511505
    512506    // send RPCs to all clusters containing process copiess
     
    514508    {
    515509
    516 #if CONFIG_DEBUG_PROCESS_SIGACTION
    517 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     510#if DEBUG_PROCESS_SIGACTION
     511if( DEBUG_PROCESS_SIGACTION < cycle )
    518512printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n",
    519513__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
    520514#endif
    521515        // atomically increment responses counter
    522         hal_atomic_add( (void *)&rpc.response , 1 );
     516        hal_atomic_add( (void *)&rpc.responses , 1 );
    523517
    524518        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     
    538532    sched_yield("blocked on rpc_process_sigaction");
    539533
    540 #if CONFIG_DEBUG_PROCESS_SIGACTION
    541 cycle = (uint32_t)hal_get_cycles();
    542 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     534#if DEBUG_PROCESS_SIGACTION
     535cycle = (uint32_t)hal_get_cycles();
     536if( DEBUG_PROCESS_SIGACTION < cycle )
    543537printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
    544538__FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle );
     
    563557    owner_cxy = CXY_FROM_PID( process->pid );
    564558
    565 #if CONFIG_DEBUG_PROCESS_SIGACTION
     559#if DEBUG_PROCESS_SIGACTION
    566560uint32_t cycle = (uint32_t)hal_get_cycles();
    567 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     561if( DEBUG_PROCESS_SIGACTION < cycle )
    568562printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    569563__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    623617    }
    624618
    625 #if CONFIG_DEBUG_PROCESS_SIGACTION
    626 cycle = (uint32_t)hal_get_cycles();
    627 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     619#if DEBUG_PROCESS_SIGACTION
     620cycle = (uint32_t)hal_get_cycles();
     621if( DEBUG_PROCESS_SIGACTION < cycle )
    628622printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    629623__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    643637    this = CURRENT_THREAD;
    644638
    645 #if CONFIG_DEBUG_PROCESS_SIGACTION
     639#if DEBUG_PROCESS_SIGACTION
    646640uint32_t cycle = (uint32_t)hal_get_cycles();
    647 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     641if( DEBUG_PROCESS_SIGACTION < cycle )
    648642printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    649643__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    671665    spinlock_unlock( &process->th_lock );
    672666
    673 #if CONFIG_DEBUG_PROCESS_SIGACTION
    674 cycle = (uint32_t)hal_get_cycles();
    675 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     667#if DEBUG_PROCESS_SIGACTION
     668cycle = (uint32_t)hal_get_cycles();
     669if( DEBUG_PROCESS_SIGACTION < cycle )
    676670printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    677671__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    687681    uint32_t            count;         // threads counter
    688682
    689 #if CONFIG_DEBUG_PROCESS_SIGACTION
     683#if DEBUG_PROCESS_SIGACTION
    690684uint32_t cycle = (uint32_t)hal_get_cycles();
    691 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     685if( DEBUG_PROCESS_SIGACTION < cycle )
    692686printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    693687__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
     
    716710    spinlock_unlock( &process->th_lock );
    717711
    718 #if CONFIG_DEBUG_PROCESS_SIGACTION
    719 cycle = (uint32_t)hal_get_cycles();
    720 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     712#if DEBUG_PROCESS_SIGACTION
     713cycle = (uint32_t)hal_get_cycles();
     714if( DEBUG_PROCESS_SIGACTION < cycle )
    721715printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    722716__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
     
    10361030    vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
    10371031
    1038     // check parent process is the reference
     1032    // check parent process is the reference process
    10391033    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
     1034
     1035printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",
     1036__FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );
     1037
    10401038    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
    10411039    "parent process must be the reference process\n" );
    10421040
    1043 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1041#if DEBUG_PROCESS_MAKE_FORK
    10441042uint32_t cycle = (uint32_t)hal_get_cycles();
    1045 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
    1046 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    1047 __FUNCTION__, CURRENT_THREAD, parent_pid, cycle );
     1043if( DEBUG_PROCESS_MAKE_FORK < cycle )
     1044printk("\n[DBG] %s : thread %x enter for process %x / cluster %x / cycle %d\n",
     1045__FUNCTION__, CURRENT_THREAD, parent_pid, local_cxy, cycle );
    10481046#endif
    10491047
     
    10731071                            parent_process_xp );
    10741072
    1075 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1076 cycle = (uint32_t)hal_get_cycles();
    1077 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1073#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1074cycle = (uint32_t)hal_get_cycles();
     1075if( DEBUG_PROCESS_MAKE_FORK < cycle )
    10781076printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n",
    10791077__FUNCTION__, CURRENT_THREAD, process, new_pid, cycle );
     
    10921090    }
    10931091
    1094 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1095 cycle = (uint32_t)hal_get_cycles();
    1096 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1092#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1093cycle = (uint32_t)hal_get_cycles();
     1094if( DEBUG_PROCESS_MAKE_FORK < cycle )
    10971095printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n",
    10981096__FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle );
     
    11151113    }
    11161114
    1117     // check main thread index
    1118     assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    1119 
    1120 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1121 cycle = (uint32_t)hal_get_cycles();
    1122 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1115    // check main thread LTID
     1116    assert( (LTID_FROM_TRDID(thread->trdid) == 0) , __FUNCTION__ ,
     1117    "main thread must have LTID == 0\n" );
     1118
     1119#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1120cycle = (uint32_t)hal_get_cycles();
     1121if( DEBUG_PROCESS_MAKE_FORK < cycle )
    11231122printk("\n[DBG] %s : thread %x created child thread %x / cycle %d\n",
    11241123__FUNCTION__ , CURRENT_THREAD, thread, cycle );
     
    11401139    vmm_set_cow( process );
    11411140 
    1142 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1143 cycle = (uint32_t)hal_get_cycles();
    1144 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1141#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1142cycle = (uint32_t)hal_get_cycles();
     1143if( DEBUG_PROCESS_MAKE_FORK < cycle )
    11451144printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n",
    11461145__FUNCTION__ , CURRENT_THREAD, cycle );
     
    11621161    *child_pid    = new_pid;
    11631162
    1164 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
    1165 cycle = (uint32_t)hal_get_cycles();
    1166 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1163#if DEBUG_PROCESS_MAKE_FORK
     1164cycle = (uint32_t)hal_get_cycles();
     1165if( DEBUG_PROCESS_MAKE_FORK < cycle )
    11671166printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    11681167__FUNCTION__, CURRENT_THREAD, cycle );
     
    12051204    "must be called by the main thread\n" );
    12061205 
    1207 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1206#if DEBUG_PROCESS_MAKE_EXEC
    12081207uint32_t cycle = (uint32_t)hal_get_cycles();
    1209 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1208if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12101209printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n",
    12111210__FUNCTION__, old_thread, pid, path, cycle );
     
    12441243    process_txt_set_ownership( XPTR( local_cxy , new_process) );
    12451244
    1246 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    1247 cycle = (uint32_t)hal_get_cycles();
    1248 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1245#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     1246cycle = (uint32_t)hal_get_cycles();
     1247if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12491248printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n",
    12501249__FUNCTION__ , old_thread , new_process , cycle );
     
    12611260        }
    12621261
    1263 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    1264 cycle = (uint32_t)hal_get_cycles();
    1265 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1262#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     1263cycle = (uint32_t)hal_get_cycles();
     1264if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12661265printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n",
    12671266__FUNCTION__, old_thread , new_process->pid , cycle );
     
    12901289        }
    12911290
    1292     // check main thread index
    1293     assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    1294 
    1295 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    1296 cycle = (uint32_t)hal_get_cycles();
    1297 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1291    // check main thread LTID
     1292    assert( (LTID_FROM_TRDID(new_thread->trdid) == 0) , __FUNCTION__ ,
     1293    "main thread must have LTID == 0\n" );
     1294
     1295#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     1296cycle = (uint32_t)hal_get_cycles();
     1297if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12981298printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n",
    12991299__FUNCTION__ , old_thread , new_thread , cycle );
     
    13271327    hal_fence();
    13281328
    1329 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
    1330 cycle = (uint32_t)hal_get_cycles();
    1331 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1329#if DEBUG_PROCESS_MAKE_EXEC
     1330cycle = (uint32_t)hal_get_cycles();
     1331if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    13321332printk("\n[DBG] %s : old_thread %x blocked / new_thread %x activated / cycle %d\n",
    13331333__FUNCTION__ , old_thread , new_thread , cycle );
     
    13421342{
    13431343
    1344 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE
     1344#if DEBUG_PROCESS_ZERO_CREATE
    13451345uint32_t cycle = (uint32_t)hal_get_cycles();
    1346 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )
     1346if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    13471347printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    13481348#endif
     
    13701370        hal_fence();
    13711371
    1372 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE
    1373 cycle = (uint32_t)hal_get_cycles();
    1374 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )
     1372#if DEBUG_PROCESS_ZERO_CREATE
     1373cycle = (uint32_t)hal_get_cycles();
     1374if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    13751375printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    13761376#endif
     
    13881388    error_t          error;
    13891389
    1390 #if CONFIG_DEBUG_PROCESS_INIT_CREATE
     1390#if DEBUG_PROCESS_INIT_CREATE
    13911391uint32_t cycle = (uint32_t)hal_get_cycles();
    1392 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )
     1392if( DEBUG_PROCESS_INIT_CREATE < cycle )
    13931393printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    13941394#endif
     
    14681468    hal_fence();
    14691469
    1470 #if CONFIG_DEBUG_PROCESS_INIT_CREATE
    1471 cycle = (uint32_t)hal_get_cycles();
    1472 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )
     1470#if DEBUG_PROCESS_INIT_CREATE
     1471cycle = (uint32_t)hal_get_cycles();
     1472if( DEBUG_PROCESS_INIT_CREATE < cycle )
    14731473printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    14741474#endif
     
    16051605    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    16061606
    1607 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1607#if DEBUG_PROCESS_TXT_ATTACH
    16081608uint32_t cycle = (uint32_t)hal_get_cycles();
    1609 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1609if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16101610printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d  / cycle %d\n",
    16111611__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
     
    16341634    remote_spinlock_unlock( lock_xp );
    16351635
    1636 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1637 cycle = (uint32_t)hal_get_cycles();
    1638 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1636#if DEBUG_PROCESS_TXT_ATTACH
     1637cycle = (uint32_t)hal_get_cycles();
     1638if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16391639printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
    16401640__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
     
    16641664    "process descriptor not in owner cluster" );
    16651665
    1666 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1666#if DEBUG_PROCESS_TXT_ATTACH
    16671667uint32_t cycle = (uint32_t)hal_get_cycles();
    1668 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1668if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16691669printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    16701670__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    16901690    remote_spinlock_unlock( lock_xp );
    16911691
    1692 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1693 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1692#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1693if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16941694{
    16951695    xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
     
    17061706#endif
    17071707
    1708 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1709 cycle = (uint32_t)hal_get_cycles();
    1710 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1708#if DEBUG_PROCESS_TXT_ATTACH
     1709cycle = (uint32_t)hal_get_cycles();
     1710if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17111711printk("\n[DBG] %s : thread %x exit / process %x detached from TXT / cycle %d\n",
    17121712__FUNCTION__, CURRENT_THREAD, process->pid, cycle );
     
    17371737    "process descriptor not in owner cluster\n" );
    17381738
    1739 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1739#if DEBUG_PROCESS_TXT_ATTACH
    17401740uint32_t cycle = (uint32_t)hal_get_cycles();
    1741 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1741if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17421742printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    17431743__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    17551755    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
    17561756
    1757 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1758 cycle = (uint32_t)hal_get_cycles();
    1759 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1757#if DEBUG_PROCESS_TXT_ATTACH
     1758cycle = (uint32_t)hal_get_cycles();
     1759if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17601760printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
    17611761__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    17941794    "process descriptor not in owner cluster\n" );
    17951795
    1796 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1796#if DEBUG_PROCESS_TXT_ATTACH
    17971797uint32_t cycle = (uint32_t)hal_get_cycles();
    1798 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1798if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17991799printk("\n[DBG] %s : thread %x enter / process %x / pid %x / cycle %d\n",
    18001800__FUNCTION__, CURRENT_THREAD, process_ptr, process_pid, cycle );
     
    18131813    txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
    18141814
    1815 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1816 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1815#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1816if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18171817printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n",
    18181818__FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) );
     
    18321832        {
    18331833
    1834 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1835 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1834#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1835if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18361836printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ );
    18371837#endif
     
    18511851                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    18521852
    1853 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1854 cycle = (uint32_t)hal_get_cycles();
    1855 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1853#if DEBUG_PROCESS_TXT_ATTACH
     1854cycle = (uint32_t)hal_get_cycles();
     1855if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18561856printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n",
    18571857__FUNCTION__, CURRENT_THREAD, process_pid,
     
    18731873        {
    18741874
    1875 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1876 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1875#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1876if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18771877printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ );
    18781878#endif
     
    18931893                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    18941894
    1895 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1896 cycle = (uint32_t)hal_get_cycles();
    1897 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1895#if DEBUG_PROCESS_TXT_ATTACH
     1896cycle = (uint32_t)hal_get_cycles();
     1897if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18981898printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n",
    18991899__FUNCTION__, CURRENT_THREAD, process_pid,
     
    19101910            hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
    19111911
    1912 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1913 cycle = (uint32_t)hal_get_cycles();
    1914 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1912#if DEBUG_PROCESS_TXT_ATTACH
     1913cycle = (uint32_t)hal_get_cycles();
     1914if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    19151915printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n",
    19161916__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    19221922    {
    19231923
    1924 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1925 cycle = (uint32_t)hal_get_cycles();
    1926 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1924#if DEBUG_PROCESS_TXT_ATTACH
     1925cycle = (uint32_t)hal_get_cycles();
     1926if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    19271927printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n",
    19281928__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
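
Note: every hunk in this file follows the same debug-guard idiom; only the symbol prefix changes (CONFIG_DEBUG_* becomes DEBUG_*). A minimal sketch of the idiom, assuming each DEBUG_* symbol is a cycle threshold defined in a configuration header (the value below is illustrative, not part of this changeset):

    #define DEBUG_PROCESS_TXT_ATTACH  10000   // assumed: 0 disables, else cycle threshold

    #if DEBUG_PROCESS_TXT_ATTACH
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( DEBUG_PROCESS_TXT_ATTACH < cycle )        // trace only after the threshold
    printk("\n[DBG] %s : ... / cycle %d\n", __FUNCTION__ , cycle );
    #endif
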
  • trunk/kernel/kern/rpc.c

    r437 r438  
    4343
    4444/////////////////////////////////////////////////////////////////////////////////////////
    45 //        Debug macros for marshalling functions
    46 /////////////////////////////////////////////////////////////////////////////////////////
    47 
    48 #if CONFIG_DEBUG_RPC_MARSHALING
    49 
    50 #define RPC_DEBUG_ENTER                                                                \
    51 uint32_t cycle = (uint32_t)hal_get_cycles();                                           \
    52 if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                                              \
    53 printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n",                     \
    54 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
    55 
    56 #define RPC_DEBUG_EXIT                                                                 \
    57 cycle = (uint32_t)hal_get_cycles();                                                    \
    58 if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                                              \
    59 printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n",                      \
    60 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
    61 
    62 #else
    63 
    64 #define RPC_DEBUG_ENTER
    65 
    66 #define RPC_DEBUG_EXIT
    67 
    68 #endif
    69 
    70 /////////////////////////////////////////////////////////////////////////////////////////
    7145//      array of function pointers  (must be consistent with enum in rpc.h)
    7246/////////////////////////////////////////////////////////////////////////////////////////
     
    12296               rpc_desc_t * rpc )
    12397{
    124     volatile error_t   full = 0;
    125     thread_t         * this = CURRENT_THREAD;
    126     core_t           * core = this->core;
    127 
    128 #if CONFIG_DEBUG_RPC_SEND
     98    lid_t              server_core_lid;
     99    lid_t              client_core_lid;
     100    volatile error_t   full;
     101    thread_t         * this;
     102    cluster_t        * cluster;
     103
     104#if DEBUG_RPC_CLIENT_GENERIC
    129105uint32_t cycle = (uint32_t)hal_get_cycles();
    130 if( CONFIG_DEBUG_RPC_SEND < cycle )
     106if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    131107printk("\n[DBG] %s : thread %x in cluster %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n",
    132108__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, rpc, cycle );
    133109#endif
    134110
    135     // register client thread pointer and core lid in RPC descriptor
     111    full            = 0;
     112    this            = CURRENT_THREAD;
     113    cluster         = LOCAL_CLUSTER;
     114    client_core_lid = this->core->lid;
     115
     116    // select a server_core index:
     117    // use client core index if possible / core 0 otherwise
     118    if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) )
     119    {
     120        server_core_lid = client_core_lid;
     121    }
     122    else
     123    {   
     124        server_core_lid = 0;
     125    }
     126
     127    // register client_thread pointer and client_core lid in RPC descriptor
    136128    rpc->thread = this;
    137     rpc->lid    = core->lid;
    138 
    139     // build an extended pointer on the RPC descriptor
     129    rpc->lid    = client_core_lid;
     130
     131    // build extended pointer on the RPC descriptor
    140132        xptr_t   desc_xp = XPTR( local_cxy , rpc );
    141133
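
Note: the selection above keeps the server on the same core index as the client whenever the server cluster has enough cores, so the completion IPI targets the core on which the client thread will resume. A hedged walkthrough (core counts are illustrative):

    // client runs on core[local_cxy,3]; assume the server cluster has 4 cores
    // => 3 < cores_nr, so server_core_lid = 3 (same index as the client)
    // if the server cluster had only 2 cores, 3 < 2 fails => server_core_lid = 0
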
     
    160152    hal_fence();
    161153       
    162     // send IPI to the remote core corresponding to the client core
    163         dev_pic_send_ipi( server_cxy , core->lid );
     154    // send IPI to the selected server core
     155        dev_pic_send_ipi( server_cxy , server_core_lid );
    164156
    165157    // wait RPC completion before returning if blocking RPC
     
    171163        {
    172164
    173 #if CONFIG_DEBUG_RPC_SEND
    174 cycle = (uint32_t)hal_get_cycles();
    175 if( CONFIG_DEBUG_RPC_SEND < cycle )
     165#if DEBUG_RPC_CLIENT_GENERIC
     166cycle = (uint32_t)hal_get_cycles();
     167if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    176168printk("\n[DBG] %s : thread %x in cluster %x busy waiting / rpc[%d] / cycle %d\n",
    177169__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle );
    178170#endif
    179171
    180             while( rpc->response ) hal_fixed_delay( 100 );
     172            while( rpc->responses ) hal_fixed_delay( 100 );
    181173   
    182 #if CONFIG_DEBUG_RPC_SEND
    183 cycle = (uint32_t)hal_get_cycles();
    184 if( CONFIG_DEBUG_RPC_SEND < cycle )
    185 printk("\n[DBG] %s : thread % in cluster %x resume / rpc[%d] / cycle %d\n",
     174#if DEBUG_RPC_CLIENT_GENERIC
     175cycle = (uint32_t)hal_get_cycles();
     176if( DEBUG_RPC_CLIENT_GENERIC < cycle )
     177printk("\n[DBG] %s : thread %x in cluster %x resumes / rpc[%d] / cycle %d\n",
    186178__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle );
    187179#endif
     
    190182        {
    191183
    192 #if CONFIG_DEBUG_RPC_SEND
    193 cycle = (uint32_t)hal_get_cycles();
    194 if( CONFIG_DEBUG_RPC_SEND < cycle )
    195 printk("\n[DBG] %s : thread %x in cluster %x deschedule / rpc[%d] / cycle %d\n",
     184#if DEBUG_RPC_CLIENT_GENERIC
     185cycle = (uint32_t)hal_get_cycles();
     186if( DEBUG_RPC_CLIENT_GENERIC < cycle )
     187printk("\n[DBG] %s : thread %x in cluster %x blocks & deschedules / rpc[%d] / cycle %d\n",
    196188__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle );
    197189#endif
     
    199191            sched_yield("blocked on RPC");
    200192
    201 #if CONFIG_DEBUG_RPC_SEND
    202 cycle = (uint32_t)hal_get_cycles();
    203 if( CONFIG_DEBUG_RPC_SEND < cycle )
    204 printk("\n[DBG] %s : thread % in cluster %x resume / rpcr[%d] / cycle %d\n",
     193#if DEBUG_RPC_CLIENT_GENERIC
     194cycle = (uint32_t)hal_get_cycles();
     195if( DEBUG_RPC_CLIENT_GENERIC < cycle )
     196printk("\n[DBG] %s : thread %x in cluster %x resumes / rpc[%d] / cycle %d\n",
    205197__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle );
    206198#endif
     
    208200
    209201        // check response available
    210         assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" );
    211 
    212         // acknowledge the IPI sent by the server
    213         dev_pic_ack_ipi();
     202        assert( (rpc->responses == 0) , __FUNCTION__, "illegal RPC response\n" );
    214203    }
    215     else
     204    else  // non blocking RPC
    216205    {
    217206
    218 #if CONFIG_DEBUG_RPC_SEND
    219 cycle = (uint32_t)hal_get_cycles();
    220 if( CONFIG_DEBUG_RPC_SEND < cycle )
     207#if DEBUG_RPC_CLIENT_GENERIC
     208cycle = (uint32_t)hal_get_cycles();
     209if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    221210printk("\n[DBG] %s : non blocking rpc[%d] => thread %x return  / cycle %d\n",
    222211__FUNCTION__, rpc->index, CURRENT_THREAD, cycle );
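
Note: a blocking client call built on this function follows the rpc_pmem_get_pages_client() marshalling shown further down; a condensed sketch (the argument values and names are illustrative):

    rpc_desc_t  rpc;
    rpc.index     = RPC_PMEM_GET_PAGES;
    rpc.blocking  = true;                     // caller blocks on THREAD_BLOCKED_RPC
    rpc.responses = 1;                        // one server answer expected
    rpc.args[0]   = (uint64_t)order;          // input argument
    rpc_send( cxy , &rpc );                   // returns only after responses reaches 0
    page = (page_t *)(intptr_t)rpc.args[1];   // output argument
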
     
    244233        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    245234
    246 #if CONFIG_DEBUG_RPC_SERVER
     235#if DEBUG_RPC_SERVER_GENERIC
    247236uint32_t cycle = (uint32_t)hal_get_cycles();
    248 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     237if( DEBUG_RPC_SERVER_GENERIC < cycle )
    249238printk("\n[DBG] %s : thread %x interrupted in cluster %x / cycle %d\n",
    250239__FUNCTION__, this, local_cxy, cycle );
     
    254243        hal_disable_irq( &sr_save );
    255244
    256     // check RPC FIFO not empty and no RPC thread handling it 
     245    // activate (or create) RPC thread if RPC FIFO not empty 
    257246        if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    258247    {
    259         // search one non blocked RPC thread   
     248
     249#if DEBUG_RPC_SERVER_GENERIC
     250cycle = (uint32_t)hal_get_cycles();
     251if( DEBUG_RPC_SERVER_GENERIC < cycle )
     252printk("\n[DBG] %s : RPC FIFO non empty in cluster %x / cycle %d\n",
     253__FUNCTION__, local_cxy, cycle );
     254#endif
     255
     256        // search one IDLE RPC thread   
    260257        list_entry_t * iter;
    261258        LIST_FOREACH( &sched->k_root , iter )
    262259        {
    263260            thread = LIST_ELEMENT( iter , thread_t , sched_list );
    264             if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )
     261            if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    265262            {
     263                // unblock found RPC thread
     264                thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
     265
     266                // exit loop
    266267                found = true;
    267268                break;
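
Note: this unblock is paired with the new thread_block( ... , THREAD_BLOCKED_IDLE ) added in rpc_thread_func() below. An RPC thread now parks itself under that dedicated cause, so blocked == THREAD_BLOCKED_IDLE is exactly the signature of a parked RPC thread. The pairing in two lines:

    thread_block  ( XPTR( local_cxy , this )   , THREAD_BLOCKED_IDLE );  // server parks itself
    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );  // rpc_check() wakes it
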
     
    279280                if( error )
    280281            {
    281                 printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n",
    282                 __FUNCTION__ , local_cxy );
     282                assert( false , __FUNCTION__ ,
     283                "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    283284            }
    284             else
    285             {
    286                 // unblock created RPC thread
    287                 thread->blocked = 0;
    288 
    289                 // update core descriptor counter 
    290                     hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    291 
    292 #if CONFIG_DEBUG_RPC_SERVER
    293 cycle = (uint32_t)hal_get_cycles();
    294 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     285
     286            // unblock created RPC thread
     287            thread->blocked = 0;
     288
     289            // update core descriptor counter 
     290            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
     291
     292#if DEBUG_RPC_SERVER_GENERIC
     293cycle = (uint32_t)hal_get_cycles();
     294if( DEBUG_RPC_SERVER_GENERIC < cycle )
    295295printk("\n[DBG] %s : create a new RPC thread %x in cluster %x / cycle %d\n",
    296296__FUNCTION__, thread, local_cxy, cycle );
    297297#endif
    298             }
    299298        }
    300299    }
    301300
    302 #if CONFIG_DEBUG_RPC_SERVER
    303 cycle = (uint32_t)hal_get_cycles();
    304 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     301#if DEBUG_RPC_SERVER_GENERIC
     302cycle = (uint32_t)hal_get_cycles();
     303if( DEBUG_RPC_SERVER_GENERIC < cycle )
    305304printk("\n[DBG] %s : interrupted thread %x deschedules in cluster %x / cycle %d\n",
    306305__FUNCTION__, this, local_cxy, cycle );
    307306#endif
    308307
    309     // interrupted thread deschedule always          
     308    // interrupted thread always deschedules
    310309        sched_yield("IPI received");
    311310
    312 #if CONFIG_DEBUG_RPC_SERVER
    313 cycle = (uint32_t)hal_get_cycles();
    314 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     311#if DEBUG_RPC_SERVER_GENERIC
     312cycle = (uint32_t)hal_get_cycles();
     313if( DEBUG_RPC_SERVER_GENERIC < cycle )
    315314printk("\n[DBG] %s : interrupted thread %x resumes in cluster %x / cycle %d\n",
    316315__FUNCTION__, this, local_cxy, cycle );
     
    346345    // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
    347346 
    348         while(1)  // external loop
     347        while(1)  // infinite loop
    349348        {
    350349        // try to take RPC_FIFO ownership
     
    352351        {
    353352
    354 #if CONFIG_DEBUG_RPC_SERVER
     353#if DEBUG_RPC_SERVER_GENERIC
    355354uint32_t cycle = (uint32_t)hal_get_cycles();
    356 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     355if( DEBUG_RPC_SERVER_GENERIC < cycle )
    357356printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
    358357__FUNCTION__, this, local_cxy, cycle );
     
    360359            // initializes RPC requests counter
    361360            count = 0;
    362 
    363             // acknowledge local IPI
    364             dev_pic_ack_ipi();
    365361
    366362                    // exit internal loop in three cases:
     
    381377                    blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );
    382378
    383 #if CONFIG_DEBUG_RPC_SERVER
    384 cycle = (uint32_t)hal_get_cycles();
    385 if( CONFIG_DEBUG_RPC_SERVER < cycle )
    386 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
    387 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
     379#if DEBUG_RPC_SERVER_GENERIC
     380cycle = (uint32_t)hal_get_cycles();
     381if( DEBUG_RPC_SERVER_GENERIC < cycle )
     382printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
     383__FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );
    388384#endif
    389385                    // call the relevant server function
    390386                    rpc_server[index]( desc_xp );
    391387
    392 #if CONFIG_DEBUG_RPC_SERVER
    393 cycle = (uint32_t)hal_get_cycles();
    394 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     388#if DEBUG_RPC_SERVER_GENERIC
     389cycle = (uint32_t)hal_get_cycles();
     390if( DEBUG_RPC_SERVER_GENERIC < cycle )
    395391printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
    396 __FUNCTION__, this, local_cxy, index, cycle );
     392__FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
    397393#endif
    398394                    // increment handled RPCs counter
     
    403399                    {
    404400                        // decrement responses counter in RPC descriptor
    405                         hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1);
     401                        hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
     402
     403                        // get client thread pointer and client core lid from RPC descriptor
     404                        thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     405                        core_lid   = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    406406
    407407                        // unblock client thread
    408                         thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread));
    409                         thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC );
     408                        thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );
    410409
    411410                        hal_fence();
    412411
    413                         // get client core lid and send IPI
    414                         core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid));
     412#if DEBUG_RPC_SERVER_GENERIC
     413cycle = (uint32_t)hal_get_cycles();
     414if( DEBUG_RPC_SERVER_GENERIC < cycle )
     415printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x) / cycle %d\n",
     416__FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );
     417#endif
     418                        // send IPI to client core
    415419                            dev_pic_send_ipi( desc_cxy , core_lid );
    416420                    }
     
    432436            {
    433437
    434 #if CONFIG_DEBUG_RPC_SERVER
     438#if DEBUG_RPC_SERVER_GENERIC
    435439uint32_t cycle = (uint32_t)hal_get_cycles();
    436 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     440if( DEBUG_RPC_SERVER_GENERIC < cycle )
    437441printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
    438442__FUNCTION__, this, local_cxy, cycle );
     
    447451            }
    448452
    449 #if CONFIG_DEBUG_RPC_SERVER
     453#if DEBUG_RPC_SERVER_GENERIC
    450454uint32_t cycle = (uint32_t)hal_get_cycles();
    451 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     455if( DEBUG_RPC_SERVER_GENERIC < cycle )
    452456printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
    453457__FUNCTION__, this, local_cxy, cycle );
    454458#endif
    455459
    456         // deschedule without blocking
     460        // Block and deschedule
     461        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
    457462        sched_yield("RPC fifo empty or too much work");
    458463
    459 #if CONFIG_DEBUG_RPC_SERVER
    460 cycle = (uint32_t)hal_get_cycles();
    461 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     464#if DEBUG_RPC_SERVER_GENERIC
     465cycle = (uint32_t)hal_get_cycles();
     466if( DEBUG_RPC_SERVER_GENERIC < cycle )
    462467printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
    463468__FUNCTION__, this, local_cxy, cycle );
    464469#endif
    465470
    466         } // end external loop
     471        } // end infinite loop
    467472
    468473} // end rpc_thread_func()
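
Note: after this changeset the RPC server thread cycles through take-ownership / serve / park. A compact sketch of the loop (the FIFO ownership primitive is elided in the hunks above, so its name here is an assumption):

    while( 1 )
    {
        if( rpc_fifo_take_ownership( rpc_fifo ) )        // assumed primitive name
        {
            // consume up to CONFIG_RPC_PENDING_MAX requests, then release ownership
        }
        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
        sched_yield( "RPC fifo empty or too much work" );
        // sleeps here until rpc_check() unblocks it
    }
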
     
    478483                                page_t  ** page )      // out
    479484{
    480 
    481     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    482 
    483     // initialise RPC descriptor header
    484     rpc_desc_t  rpc;
    485     rpc.index    = RPC_PMEM_GET_PAGES;
    486     rpc.response = 1;
    487     rpc.blocking = true;
     485#if DEBUG_RPC_PMEM_GET_PAGES
     486uint32_t cycle = (uint32_t)hal_get_cycles();
     487if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     488printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     489__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     490#endif
     491
     492    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     493
     494    // initialise RPC descriptor header
     495    rpc_desc_t  rpc;
     496    rpc.index     = RPC_PMEM_GET_PAGES;
     497    rpc.blocking  = true;
     498    rpc.responses = 1;
    488499
    489500    // set input arguments in RPC descriptor
     
    496507    *page = (page_t *)(intptr_t)rpc.args[1];
    497508
     509#if DEBUG_RPC_PMEM_GET_PAGES
     510cycle = (uint32_t)hal_get_cycles();
     511if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     512printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     513__FUNCTION__ , CURRENT_THREAD , cycle );
     514#endif
    498515}
    499516
     
    501518void rpc_pmem_get_pages_server( xptr_t xp )
    502519{
     520#if DEBUG_RPC_PMEM_GET_PAGES
     521uint32_t cycle = (uint32_t)hal_get_cycles();
     522if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     523printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     524__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     525#endif
    503526
    504527    // get client cluster identifier and pointer on RPC descriptor
     
    515538    hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    516539
     540#if DEBUG_RPC_PMEM_GET_PAGES
     541cycle = (uint32_t)hal_get_cycles();
     542if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     543printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     544__FUNCTION__ , CURRENT_THREAD , cycle );
     545#endif
    517546}
    518547
     
    525554                                    page_t  * page )      // out
    526555{
     556#if DEBUG_RPC_PMEM_RELEASE_PAGES
     557uint32_t cycle = (uint32_t)hal_get_cycles();
     558if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     559printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     560__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     561#endif
    527562
    528563    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    531566    rpc_desc_t  rpc;
    532567    rpc.index    = RPC_PMEM_RELEASE_PAGES;
    533     rpc.response = 1;
    534     rpc.blocking = true;
     568    rpc.blocking = true;
     569    rpc.responses = 1;
    535570
    536571    // set input arguments in RPC descriptor
     
    540575    rpc_send( cxy , &rpc );
    541576
     577#if DEBUG_RPC_PMEM_RELEASE_PAGES
     578cycle = (uint32_t)hal_get_cycles();
     579if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     580printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     581__FUNCTION__ , CURRENT_THREAD , cycle );
     582#endif
    542583}
    543584
     
    545586void rpc_pmem_release_pages_server( xptr_t xp )
    546587{
     588#if DEBUG_RPC_PMEM_RELEASE_PAGES
     589uint32_t cycle = (uint32_t)hal_get_cycles();
     590if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     591printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     592__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     593#endif
    547594
    548595    // get client cluster identifier and pointer on RPC descriptor
     
    559606    kmem_free( &req );
    560607
     608#if DEBUG_RPC_PMEM_RELEASE_PAGES
     609cycle = (uint32_t)hal_get_cycles();
     610if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     611printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     612__FUNCTION__ , CURRENT_THREAD , cycle );
     613#endif
    561614}
    562615
     
    577630                                   error_t   * error )              // out
    578631{
     632#if DEBUG_RPC_PROCESS_MAKE_FORK
     633uint32_t cycle = (uint32_t)hal_get_cycles();
     634if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     635printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     636__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     637#endif
     638
    579639    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    580640
     
    582642    rpc_desc_t  rpc;
    583643    rpc.index    = RPC_PROCESS_MAKE_FORK;
    584     rpc.response = 1;
    585     rpc.blocking = true;
     644    rpc.blocking = true;
     645    rpc.responses = 1;
    586646
    587647    // set input arguments in RPC descriptor 
     
    597657    *error             = (error_t)rpc.args[4];     
    598658
     659#if DEBUG_RPC_PROCESS_MAKE_FORK
     660cycle = (uint32_t)hal_get_cycles();
     661if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     662printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     663__FUNCTION__ , CURRENT_THREAD , cycle );
     664#endif
    599665}
    600666
     
    602668void rpc_process_make_fork_server( xptr_t xp )
    603669{
     670#if DEBUG_RPC_PROCESS_MAKE_FORK
     671uint32_t cycle = (uint32_t)hal_get_cycles();
     672if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     673printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     674__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     675#endif
    604676
    605677    xptr_t     ref_process_xp;     // extended pointer on reference parent process
     
    628700    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    629701
     702#if DEBUG_RPC_PROCESS_MAKE_FORK
     703cycle = (uint32_t)hal_get_cycles();
     704if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     705printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     706__FUNCTION__ , CURRENT_THREAD , cycle );
     707#endif
    630708}
    631709
     
    656734    rpc_desc_t  rpc;
    657735    rpc.index    = RPC_THREAD_USER_CREATE;
    658     rpc.response = 1;
    659     rpc.blocking = true;
     736    rpc.blocking = true;
     737    rpc.responses = 1;
    660738
    661739    // set input arguments in RPC descriptor
     
    690768    // get client cluster identifier and pointer on RPC descriptor
    691769    cxy_t        client_cxy  = GET_CXY( xp );
    692     rpc_desc_t * desc = GET_PTR( xp );
     770    rpc_desc_t * desc        = GET_PTR( xp );
    693771
    694772    // get pointer on attributes structure in client cluster from RPC descriptor
     
    736814    rpc_desc_t  rpc;
    737815    rpc.index    = RPC_THREAD_KERNEL_CREATE;
    738     rpc.response = 1;
    739     rpc.blocking = true;
     816    rpc.blocking = true;
     817    rpc.responses = 1;
    740818
    741819    // set input arguments in RPC descriptor
     
    763841    // get client cluster identifier and pointer on RPC descriptor
    764842    cxy_t        client_cxy  = GET_CXY( xp );
    765     rpc_desc_t * desc = GET_PTR( xp );
     843    rpc_desc_t * desc        = GET_PTR( xp );
    766844
    767845    // get attributes from RPC descriptor
     
    797875{
    798876
    799 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     877#if DEBUG_RPC_PROCESS_SIGACTION
    800878uint32_t  cycle  = (uint32_t)hal_get_cycles();
    801879uint32_t  action = rpc->args[0];
    802880pid_t     pid    = rpc->args[1];
    803 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     881if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    804882printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
    805883__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
     
    813891    rpc_send( cxy , rpc );
    814892
    815 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
    816 cycle = (uint32_t)hal_get_cycles();
    817 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     893#if DEBUG_RPC_PROCESS_SIGACTION
     894cycle = (uint32_t)hal_get_cycles();
     895if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    818896printk("\n[DBG] %s : exit after requesting to %s process %x in cluster %x / cycle %d\n",
    819897__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
     
    842920    pid      = (pid_t)   hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) );
    843921
    844 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     922#if DEBUG_RPC_PROCESS_SIGACTION
    845923uint32_t cycle = (uint32_t)hal_get_cycles();
    846 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     924if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    847925printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
    848926__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
     
    858936
    859937    // build extended pointer on response counter in RPC
    860     count_xp = XPTR( client_cxy , &rpc->response );
     938    count_xp = XPTR( client_cxy , &rpc->responses );
    861939
    862940    // decrement the responses counter in RPC descriptor,
     
    872950    }
    873951
    874 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
    875 cycle = (uint32_t)hal_get_cycles();
    876 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     952#if DEBUG_RPC_PROCESS_SIGACTION
     953cycle = (uint32_t)hal_get_cycles();
     954if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    877955printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n",
    878956__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
     
    903981    rpc_desc_t  rpc;
    904982    rpc.index    = RPC_VFS_INODE_CREATE;
    905     rpc.response = 1;
    906     rpc.blocking = true;
     983    rpc.blocking = true;
     984    rpc.responses = 1;
    907985
    908986    // set input arguments in RPC descriptor
     
    9831061    rpc_desc_t  rpc;
    9841062    rpc.index    = RPC_VFS_INODE_DESTROY;
    985     rpc.response = 1;
    986     rpc.blocking = true;
     1063    rpc.blocking = true;
     1064    rpc.responses = 1;
    9871065
    9881066    // set input arguments in RPC descriptor
     
    10231101                                   error_t              * error )       // out
    10241102{
    1025     RPC_DEBUG_ENTER
     1103#if DEBUG_RPC_VFS_DENTRY_CREATE
     1104uint32_t cycle = (uint32_t)hal_get_cycles();
     1105if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1106printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1107__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1108#endif
    10261109
    10271110    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    10301113    rpc_desc_t  rpc;
    10311114    rpc.index    = RPC_VFS_DENTRY_CREATE;
    1032     rpc.response = 1;
    1033     rpc.blocking = true;
     1115    rpc.blocking = true;
     1116    rpc.responses = 1;
    10341117
    10351118    // set input arguments in RPC descriptor
     
    10451128    *error     = (error_t)rpc.args[4];
    10461129
    1047     RPC_DEBUG_EXIT
     1130#if DEBUG_RPC_VFS_DENTRY_CREATE
     1131cycle = (uint32_t)hal_get_cycles();
     1132if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1133printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1134__FUNCTION__ , CURRENT_THREAD , cycle );
     1135#endif
    10481136}
    10491137
     
    10511139void rpc_vfs_dentry_create_server( xptr_t xp )
    10521140{
     1141#if DEBUG_RPC_VFS_DENTRY_CREATE
     1142uint32_t cycle = (uint32_t)hal_get_cycles();
     1143if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1144printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1145__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1146#endif
     1147
    10531148    uint32_t      type;
    10541149    char        * name;
     
    10561151    xptr_t        dentry_xp;
    10571152    error_t       error;
    1058 
    1059     RPC_DEBUG_ENTER
    1060 
    10611153    char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
    10621154
     
    10831175    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    10841176
    1085     RPC_DEBUG_EXIT
     1177#if DEBUG_RPC_VFS_DENTRY_CREATE
     1178cycle = (uint32_t)hal_get_cycles();
     1179if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1180printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1181__FUNCTION__ , CURRENT_THREAD , cycle );
     1182#endif
    10861183}
    10871184
     
    11001197    rpc_desc_t  rpc;
    11011198    rpc.index    = RPC_VFS_DENTRY_DESTROY;
    1102     rpc.response = 1;
    1103     rpc.blocking = true;
     1199    rpc.blocking = true;
     1200    rpc.responses = 1;
    11041201
    11051202    // set input arguments in RPC descriptor
     
    11401237                                 error_t              * error )      // out
    11411238{
     1239#if DEBUG_RPC_VFS_FILE_CREATE
     1240uint32_t cycle = (uint32_t)hal_get_cycles();
     1241if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1242printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1243__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1244#endif
     1245
    11421246    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11431247
     
    11451249    rpc_desc_t  rpc;
    11461250    rpc.index    = RPC_VFS_FILE_CREATE;
    1147     rpc.response = 1;
    1148     rpc.blocking = true;
     1251    rpc.blocking = true;
     1252    rpc.responses = 1;
    11491253
    11501254    // set input arguments in RPC descriptor
     
    11591263    *error   = (error_t)rpc.args[3];
    11601264
     1265#if DEBUG_RPC_VFS_FILE_CREATE
     1266cycle = (uint32_t)hal_get_cycles();
     1267if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1268printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1269__FUNCTION__ , CURRENT_THREAD , cycle );
     1270#endif
    11611271}
    11621272
     
    11641274void rpc_vfs_file_create_server( xptr_t xp )
    11651275{
     1276#if DEBUG_RPC_VFS_FILE_CREATE
     1277uint32_t cycle = (uint32_t)hal_get_cycles();
     1278if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1279printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1280__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1281#endif
     1282
    11661283    uint32_t      file_attr;
    11671284    vfs_inode_t * inode;
     
    11861303    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    11871304
     1305#if DEBUG_RPC_VFS_FILE_CREATE
     1306cycle = (uint32_t)hal_get_cycles();
     1307if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1308printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1309__FUNCTION__ , CURRENT_THREAD , cycle );
     1310#endif
    11881311}
    11891312
     
    12011324    rpc_desc_t  rpc;
    12021325    rpc.index    = RPC_VFS_FILE_DESTROY;
    1203     rpc.response = 1;
    1204     rpc.blocking = true;
     1326    rpc.blocking = true;
     1327    rpc.responses = 1;
    12051328
    12061329    // set input arguments in RPC descriptor
     
    12451368    rpc_desc_t  rpc;
    12461369    rpc.index    = RPC_VFS_INODE_LOAD;
    1247     rpc.response = 1;
    1248     rpc.blocking = true;
     1370    rpc.blocking = true;
     1371    rpc.responses = 1;
    12491372
    12501373    // set input arguments in RPC descriptor
     
    13061429    rpc_desc_t  rpc;
    13071430    rpc.index    = RPC_VFS_MAPPER_LOAD_ALL;
    1308     rpc.response = 1;
    1309     rpc.blocking = true;
     1431    rpc.blocking = true;
     1432    rpc.responses = 1;
    13101433
    13111434    // set input arguments in RPC descriptor
     
    13581481    rpc_desc_t  rpc;
    13591482    rpc.index    = RPC_FATFS_GET_CLUSTER;
    1360     rpc.response = 1;
    1361     rpc.blocking = true;
     1483    rpc.blocking = true;
     1484    rpc.responses = 1;
    13621485
    13631486    // set input arguments in RPC descriptor
     
    13861509    // get client cluster identifier and pointer on RPC descriptor
    13871510    cxy_t        client_cxy  = GET_CXY( xp );
    1388     rpc_desc_t * desc = GET_PTR( xp );
     1511    rpc_desc_t * desc        = GET_PTR( xp );
    13891512
    13901513    // get input arguments
     
    14181541    rpc_desc_t  rpc;
    14191542    rpc.index    = RPC_VMM_GET_VSEG;
    1420     rpc.response = 1;
    1421     rpc.blocking = true;
     1543    rpc.blocking = true;
     1544    rpc.responses = 1;
    14221545
    14231546    // set input arguments in RPC descriptor
     
    14801603    rpc_desc_t  rpc;
    14811604    rpc.index    = RPC_VMM_GET_PTE;
    1482     rpc.response = 1;
    1483     rpc.blocking = true;
     1605    rpc.blocking = true;
     1606    rpc.responses = 1;
    14841607
    14851608    // set input arguments in RPC descriptor
     
    15411664    rpc_desc_t  rpc;
    15421665    rpc.index    = RPC_THREAD_USER_CREATE;
    1543     rpc.response = 1;
    1544     rpc.blocking = true;
     1666    rpc.blocking = true;
     1667    rpc.responses = 1;
    15451668
    15461669    // set input arguments in RPC descriptor
     
    15601683    // get client cluster identifier and pointer on RPC descriptor
    15611684    cxy_t        client_cxy  = GET_CXY( xp );
    1562     rpc_desc_t * desc = GET_PTR( xp );
     1685    rpc_desc_t * desc        = GET_PTR( xp );
    15631686
    15641687    // get input argument "kmem_type" from client RPC descriptor
     
    15911714    rpc_desc_t  rpc;
    15921715    rpc.index    = RPC_THREAD_USER_CREATE;
    1593     rpc.response = 1;
    1594     rpc.blocking = true;
     1716    rpc.blocking = true;
     1717    rpc.responses = 1;
    15951718
    15961719    // set input arguments in RPC descriptor
     
    16081731    // get client cluster identifier and pointer on RPC descriptor
    16091732    cxy_t        client_cxy  = GET_CXY( xp );
    1610     rpc_desc_t * desc = GET_PTR( xp );
     1733    rpc_desc_t * desc        = GET_PTR( xp );
    16111734
    16121735    // get input arguments "buf" and "kmem_type" from client RPC descriptor
     
    16411764    rpc_desc_t  rpc;
    16421765    rpc.index    = RPC_MAPPER_MOVE_BUFFER;
    1643     rpc.response = 1;
    1644     rpc.blocking = true;
     1766    rpc.blocking = true;
     1767    rpc.responses = 1;
    16451768
    16461769    // set input arguments in RPC descriptor
     
    17251848    rpc_desc_t  rpc;
    17261849    rpc.index    = RPC_MAPPER_GET_PAGE;
    1727     rpc.response = 1;
    1728     rpc.blocking = true;
     1850    rpc.blocking = true;
     1851    rpc.responses = 1;
    17291852
    17301853    // set input arguments in RPC descriptor
     
    17801903    rpc_desc_t  rpc;
    17811904    rpc.index    = RPC_VMM_CREATE_VSEG;
    1782     rpc.response = 1;
    1783     rpc.blocking = true;
     1905    rpc.blocking = true;
     1906    rpc.responses = 1;
    17841907
    17851908    // set input arguments in RPC descriptor
     
    18461969    rpc_desc_t  rpc;
    18471970    rpc.index    = RPC_SCHED_DISPLAY;
    1848     rpc.response = 1;
    1849     rpc.blocking = true;
     1971    rpc.blocking = true;
     1972    rpc.responses = 1;
    18501973
    18511974    // set input arguments in RPC descriptor
     
    18852008    rpc_desc_t  rpc;
    18862009    rpc.index    = RPC_VMM_SET_COW;
    1887     rpc.response = 1;
    1888     rpc.blocking = true;
     2010    rpc.blocking = true;
     2011    rpc.responses = 1;
    18892012
    18902013    // set input arguments in RPC descriptor
     
    19272050    rpc_desc_t  rpc;
    19282051    rpc.index    = RPC_VMM_DISPLAY;
    1929     rpc.response = 1;
    1930     rpc.blocking = true;
     2052    rpc.blocking = true;
     2053    rpc.responses = 1;
    19312054
    19322055    // set input arguments in RPC descriptor
  • trunk/kernel/kern/rpc.h

    r437 r438  
    111111{
    112112        rpc_index_t         index;       /*! index of requested RPC service           */
    113         volatile uint32_t   response;    /*! all responses received when 0            */
     113        volatile uint32_t   responses;   /*! number of expected responses             */
    114114    struct thread_s   * thread;      /*! local pointer on client thread           */
    115115    uint32_t            lid;         /*! index of core running the calling thread */
     
    150150
    151151/***********************************************************************************
    152  * This function is the entry point for RPC handling on the server side.
    153  * It is executed by a core receiving an IPI, and each time the core enters,
    154  * or exit the kernel to handle .
    155  * It does nothing and return if the RPC_FIFO is empty.
    156  * The calling thread checks if it exist at least one non-blocked RPC thread,
    157  * creates a new RPC if required, and deschedule to allow the RPC thead to execute.
     152 * This function is the entry point for RPC handling on the server cluster.
     153 * It is executed by the core receiving the IPI sent by the client thread.
     154 * - If the RPC FIFO is empty, it deschedules.
     155 * - If the RPC FIFO is not empty, it checks if it exist a non-blocked RPC thread
     156 *   in the cluster, creates a new one if required, and deschedule to allow
     157 *   the RPC thead to execute.
    158158 **********************************************************************************/
    159159void rpc_check();
  • trunk/kernel/kern/scheduler.c

    r437 r438  
    125125            thread = LIST_ELEMENT( current , thread_t , sched_list );
    126126
    127             // analyse kernel thread type
    128             switch( thread->type )
     127            // execute RPC thread if non blocked
     128            if( (thread->blocked == 0)  &&
     129                (thread->type == THREAD_RPC) )
    129130            {
    130                 case THREAD_RPC:  // if non blocked and RPC FIFO non-empty
    131                 if( (thread->blocked == 0) &&
    132                     (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
    133                 {
    134                     spinlock_unlock( &sched->lock );
    135                     return thread;
    136                 }
    137                 break;
    138 
    139                 case THREAD_DEV:  // if non blocked and waiting queue non empty
    140                 if( (thread->blocked == 0) &&
    141                     (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
    142                 {
    143                     spinlock_unlock( &sched->lock );
    144                     return thread;
    145                 }
    146                 break;
    147 
    148                 default:
    149                 break;
     131                spinlock_unlock( &sched->lock );
     132                return thread;
     133            }
     134
     135            // execute DEV thread if non blocked and waiting queue non empty
     136            if( (thread->blocked == 0)  &&
     137                (thread->type == THREAD_DEV) &&
     138                (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
     139            {
     140                spinlock_unlock( &sched->lock );
     141                return thread;
    150142            }
    151143        } // end loop on kernel threads
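
Note: dropping the FIFO-emptiness test from the RPC case is what the blocking fix buys. A parked RPC thread now carries THREAD_BLOCKED_IDLE in thread->blocked, so the single test

    if( (thread->blocked == 0) && (thread->type == THREAD_RPC) ) return thread;

rejects it without consulting the FIFO; only rpc_check() can make it runnable again.
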
     
    174166            thread = LIST_ELEMENT( current , thread_t , sched_list );
    175167
    176             // return thread if runnable
     168            // return thread if non blocked
    177169            if( thread->blocked == 0 )
    178170            {
     
    227219            process = thread->process;
    228220
    229 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     221#if DEBUG_SCHED_HANDLE_SIGNALS
    230222uint32_t cycle = (uint32_t)hal_get_cycles();
    231 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     223if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    232224printk("\n[DBG] %s : thread %x in process %x must be deleted / cycle %d\n",
    233225__FUNCTION__ , thread , process->pid , cycle );
     
    250242            thread_destroy( thread );
    251243
    252 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     244#if DEBUG_SCHED_HANDLE_SIGNALS
    253245cycle = (uint32_t)hal_get_cycles();
    254 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     246if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    255247printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
    256248__FUNCTION__ , thread , process->pid , cycle );
     
    262254                process_destroy( process );
    263255
    264 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     256#if DEBUG_SCHED_HANDLE_SIGNALS
    265257cycle = (uint32_t)hal_get_cycles();
    266 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     258if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    267259printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
    268260__FUNCTION__ , process->pid , cycle );
     
    287279    scheduler_t * sched   = &core->scheduler;
    288280 
    289 #if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
    290 if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
     281#if (DEBUG_SCHED_YIELD & 0x1)
     282if( DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
    291283sched_display( core->lid );
    292284#endif
     
    322314    {
    323315
    324 #if CONFIG_DEBUG_SCHED_YIELD
     316#if DEBUG_SCHED_YIELD
    325317uint32_t cycle = (uint32_t)hal_get_cycles();
    326 if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     318if( DEBUG_SCHED_YIELD < cycle )
    327319printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    328320"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
     
    350342    {
    351343
    352 #if (CONFIG_DEBUG_SCHED_YIELD & 1)
     344#if (DEBUG_SCHED_YIELD & 1)
    353345uint32_t cycle = (uint32_t)hal_get_cycles();
    354 if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     346if( DEBUG_SCHED_YIELD < cycle )
    355347printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    356348"      thread %x (%s) (%x,%x) continue / cycle %d\n",
  • trunk/kernel/kern/thread.c

    r436 r438  
    112112/////////////////////////////////////////////////////////////////////////////////////
    113113// This static function initializes a thread descriptor (kernel or user).
    114 // It can be called by the three functions:
     114// It can be called by the four functions:
    115115// - thread_user_create()
    116116// - thread_user_fork()
    117117// - thread_kernel_create()
     118// - thread_idle_init()
     119// It updates the local DQDT.
    118120/////////////////////////////////////////////////////////////////////////////////////
    119121// @ thread       : pointer on thread descriptor
     
    202204    thread->save_sr = 0xFF13;
    203205
    204     // update local DQDT
    205     dqdt_local_update_threads( 1 );
    206 
    207206    // register new thread in core scheduler
    208207    sched_register_thread( thread->core , thread );
     208
     209        // update DQDT
     210    dqdt_update_threads( 1 );
    209211
    210212        return 0;
     
    227229    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );
    228230
    229 #if CONFIG_DEBUG_THREAD_USER_CREATE
     231#if DEBUG_THREAD_USER_CREATE
    230232uint32_t cycle = (uint32_t)hal_get_cycles();
    231 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
     233if( DEBUG_THREAD_USER_CREATE < cycle )
    232234printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
    233235__FUNCTION__, CURRENT_THREAD, pid , cycle );
     
    326328    }
    327329
    328         // update DQDT for new thread
    329     dqdt_local_update_threads( 1 );
    330 
    331 #if CONFIG_DEBUG_THREAD_USER_CREATE
     330#if DEBUG_THREAD_USER_CREATE
    332331cycle = (uint32_t)hal_get_cycles();
    333 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
     332if( DEBUG_THREAD_USER_CREATE < cycle )
    334333printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
    335334__FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
     
    366365    vseg_t       * vseg;             // child thread STACK vseg
    367366
    368 #if CONFIG_DEBUG_THREAD_USER_FORK
     367#if DEBUG_THREAD_USER_FORK
    369368uint32_t cycle = (uint32_t)hal_get_cycles();
    370 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     369if( DEBUG_THREAD_USER_FORK < cycle )
    371370printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
    372371__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
     
    493492            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
    494493
    495 #if (CONFIG_DEBUG_THREAD_USER_FORK & 1)
     494#if (DEBUG_THREAD_USER_FORK & 1)
    496495cycle = (uint32_t)hal_get_cycles();
    497 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     496if( DEBUG_THREAD_USER_FORK < cycle )
    498497printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
    499498__FUNCTION__, CURRENT_THREAD, vpn );
     
    508507                     vpn_size );
    509508 
    510         // update DQDT for child thread
    511     dqdt_local_update_threads( 1 );
    512 
    513 #if CONFIG_DEBUG_THREAD_USER_FORK
     509#if DEBUG_THREAD_USER_FORK
    514510cycle = (uint32_t)hal_get_cycles();
    515 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     511if( DEBUG_THREAD_USER_FORK < cycle )
    516512printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
    517513__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
     
    538534            __FUNCTION__ , "illegal core_lid" );
    539535
    540 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE
     536#if DEBUG_THREAD_KERNEL_CREATE
    541537uint32_t cycle = (uint32_t)hal_get_cycles();
    542 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
     538if( DEBUG_THREAD_KERNEL_CREATE < cycle )
    543539printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
    544540__FUNCTION__, CURRENT_THREAD, thread_type_str(type), cycle );
     
    568564        hal_cpu_context_create( thread );
    569565
    570         // update DQDT for kernel thread
    571     dqdt_local_update_threads( 1 );
    572 
    573 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE
     566#if DEBUG_THREAD_KERNEL_CREATE
    574567cycle = (uint32_t)hal_get_cycles();
    575 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
     568if( DEBUG_THREAD_KERNEL_CREATE < cycle )
    576569printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
    577570__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
     
    583576} // end thread_kernel_create()
    584577
    585 ///////////////////////////////////////////////////
    586 error_t thread_kernel_init( thread_t      * thread,
    587                             thread_type_t   type,
    588                             void          * func,
    589                             void          * args,
    590                                             lid_t           core_lid )
     578/////////////////////////////////////////////////
     579error_t thread_idle_init( thread_t      * thread,
     580                          thread_type_t   type,
     581                          void          * func,
     582                          void          * args,
     583                                          lid_t           core_lid )
    591584{
    592585    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
     
    607600    return error;
    608601
    609 }  // end thread_kernel_init()
     602}  // end thread_idle_init()
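
Note: the rename makes the single call site self-describing. A hedged sketch of the expected use in kernel_init() (the storage of the idle descriptor and the idle entry point name are assumptions, not shown here):

    thread_t * idle = ...;                        // per-core idle descriptor (assumed)
    error = thread_idle_init( idle,
                              THREAD_IDLE,        // only type accepted by the assert
                              &thread_idle_func,  // assumed idle loop entry point
                              NULL,               // no argument
                              core_lid );
    // THREAD_BLOCKED_GLOBAL is set by init; the thread must be activated to run
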
    610603
    611604///////////////////////////////////////////////////////////////////////////////////////
     
    620613    core_t     * core       = thread->core;
    621614
    622 #if CONFIG_DEBUG_THREAD_DESTROY
     615#if DEBUG_THREAD_DESTROY
    623616uint32_t cycle = (uint32_t)hal_get_cycles();
    624 if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
     617if( DEBUG_THREAD_DESTROY < cycle )
    625618printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
    626619__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
     
    652645    process_remove_thread( thread );
    653646       
    654     // update local DQDT
    655     dqdt_local_update_threads( -1 );
     647    // update DQDT
     648    dqdt_update_threads( -1 );
    656649
    657650    // invalidate thread descriptor
     
    661654    thread_release( thread );
    662655
    663 #if CONFIG_DEBUG_THREAD_DESTROY
     656#if DEBUG_THREAD_DESTROY
    664657cycle = (uint32_t)hal_get_cycles();
    665 if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
     658if( DEBUG_THREAD_DESTROY < cycle )
    666659printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
    667660__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
     
    811804    hal_fence();
    812805
    813 #if CONFIG_DEBUG_THREAD_BLOCK
     806#if DEBUG_THREAD_BLOCK
    814807uint32_t cycle = (uint32_t)hal_get_cycles();
    815 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     808if( DEBUG_THREAD_BLOCK < cycle )
    816809printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
    817810__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
    818811#endif
    819812
    820 #if (CONFIG_DEBUG_THREAD_BLOCK & 1)
    821 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     813#if (DEBUG_THREAD_BLOCK & 1)
     814if( DEBUG_THREAD_BLOCK < cycle )
    822815sched_display( ptr->core->lid );
    823816#endif
     
    837830    hal_fence();
    838831
    839 #if CONFIG_DEBUG_THREAD_BLOCK
     832#if DEBUG_THREAD_BLOCK
    840833uint32_t cycle = (uint32_t)hal_get_cycles();
    841 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     834if( DEBUG_THREAD_BLOCK < cycle )
    842835printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
    843836__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
    844837#endif
    845838
    846 #if (CONFIG_DEBUG_THREAD_BLOCK & 1)
    847 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     839#if (DEBUG_THREAD_BLOCK & 1)
     840if( DEBUG_THREAD_BLOCK < cycle )
    848841sched_display( ptr->core->lid );
    849842#endif
     
    890883    killer_xp  = XPTR( local_cxy , killer_ptr );
    891884
    892 #if CONFIG_DEBUG_THREAD_KILL
     885#if DEBUG_THREAD_KILL
    893886uint32_t cycle  = (uint32_t)hal_get_cycles();
    894 if( CONFIG_DEBUG_THREAD_KILL < cycle )
     887if( DEBUG_THREAD_KILL < cycle )
    895888printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
    896889__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    989982        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
    990983
    991 #if CONFIG_DEBUG_THREAD_KILL
     984#if DEBUG_THREAD_KILL
    992985cycle  = (uint32_t)hal_get_cycles();
    993 if( CONFIG_DEBUG_THREAD_KILL < cycle )
     986if( DEBUG_THREAD_KILL < cycle )
    994987printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
    995988__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    1002995        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    1003996
    1004 #if CONFIG_DEBUG_THREAD_KILL
     997#if DEBUG_THREAD_KILL
    1005998cycle  = (uint32_t)hal_get_cycles();
    1006 if( CONFIG_DEBUG_THREAD_KILL < cycle )
     999if( DEBUG_THREAD_KILL < cycle )
    10071000printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
    10081001__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    10241017        {
    10251018
    1026 #if CONFIG_DEBUG_THREAD_IDLE
     1019#if DEBUG_THREAD_IDLE
    10271020uint32_t cycle  = (uint32_t)hal_get_cycles();
    10281021thread_t * this = CURRENT_THREAD;
    1029 if( CONFIG_DEBUG_THREAD_IDLE < cycle )
     1022if( DEBUG_THREAD_IDLE < cycle )
    10301023printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
    10311024__FUNCTION__, this, local_cxy, this->core->lid, cycle );
     
    10341027            hal_core_sleep();
    10351028
    1036 #if CONFIG_DEBUG_THREAD_IDLE
     1029#if DEBUG_THREAD_IDLE
    10371030cycle  = (uint32_t)hal_get_cycles();
    1038 if( CONFIG_DEBUG_THREAD_IDLE < cycle )
     1031if( DEBUG_THREAD_IDLE < cycle )
    10391032printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
    10401033__FUNCTION__, this, local_cxy, this->core->lid, cycle );
  • trunk/kernel/kern/thread.h

    r437 r438  
    8787#define THREAD_BLOCKED_SEM       0x0020  /*! thread wait semaphore                    */
    8888#define THREAD_BLOCKED_PAGE      0x0040  /*! thread wait page access                  */
     89#define THREAD_BLOCKED_IDLE      0x0080  /*! RPC thread wait RPC_FIFO non empty       */
    8990#define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait (cond/mutex/barrier)         */
    9091#define THREAD_BLOCKED_RPC       0x0200  /*! thread wait RPC completion               */
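
Note: the new flag fills a free bit in the blocked bit vector. A thread is runnable only when every blocking cause has been cleared; a minimal sketch of what thread_block()/thread_unblock() are assumed to do with the cause mask:

    this->blocked |=  THREAD_BLOCKED_IDLE;    // thread_block( xp , cause )
    this->blocked &= ~THREAD_BLOCKED_IDLE;    // thread_unblock( xp , cause )
    runnable = ( this->blocked == 0 );        // test used by the scheduler
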
     
    286287
    287288/***************************************************************************************
    288  * This function initializes an existing thread descriptor from arguments values.
     289 * This function is called by the kernel_init() function to initialize the IDLE thread.
     290 * It initializes an existing thread descriptor from arguments values.
    289291 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start.
    290  * It is called by the kernel_init() function to initialize the IDLE thread.
    291292 ***************************************************************************************
    292293 * @ thread   : pointer on existing thread descriptor.
     
    297298 * @ returns 0 if success / returns EINVAL if error
    298299 **************************************************************************************/
    299 error_t thread_kernel_init( thread_t      * thread,
    300                             thread_type_t   type,
    301                             void          * func,
    302                             void          * args,
    303                             lid_t           core_lid );
     300error_t thread_idle_init( thread_t      * thread,
     301                          thread_type_t   type,
     302                          void          * func,
     303                          void          * args,
     304                          lid_t           core_lid );
    304305
    305306/***************************************************************************************