Changeset 564 for trunk


Ignore:
Timestamp:
Oct 4, 2018, 11:47:36 PM (6 years ago)
Author:
alain
Message:

Complete restructuring of kernel locks.

Location:
trunk/kernel/kern
Files:
9 deleted
19 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/chdev.c

    r545 r564  
    3737#include <devfs.h>
    3838
    39 
    40 extern chdev_directory_t    chdev_dir;   // allocated in kernel_init.c
     39//////////////////////////////////////////////////////////////////////////////////////
     40// Extern global variables
     41//////////////////////////////////////////////////////////////////////////////////////
     42
     43extern chdev_directory_t    chdev_dir;         // allocated in kernel_init.c
     44
    4145
    4246#if (DEBUG_SYS_READ & 1)
     
    5761char * chdev_func_str( uint32_t func_type )
    5862{
    59   switch ( func_type ) {
     63    switch ( func_type )
     64    {
    6065    case DEV_FUNC_RAM: return "RAM";
    6166    case DEV_FUNC_ROM: return "ROM";
     
    9196    if( chdev == NULL ) return NULL;
    9297
    93     // initialize waiting threads queue and associated lock
    94     remote_spinlock_init( XPTR( local_cxy , &chdev->wait_lock ) );
     98    // initialize lock
     99    remote_busylock_init( XPTR( local_cxy , &chdev->wait_lock ), LOCK_CHDEV_QUEUE );
     100
     101    // initialise waiting queue
    95102    xlist_root_init( XPTR( local_cxy , &chdev->wait_root ) );
    96103
     
    130137    core_t   * core_ptr;      // local pointer on core running the server thread
    131138    uint32_t   server_lid;    // core running the server thread local index
    132     xptr_t     lock_xp;       // extended pointer on lock protecting the chdev queue
     139    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev state
    133140    uint32_t   save_sr;       // for critical section
    134141
     
    147154    chdev_t * chdev_ptr = GET_PTR( chdev_xp );
    148155
     156// check calling thread can yield
     157assert( (this->busylocks == 0),
     158"cannot yield : busylocks = %d\n", this->busylocks );
     159
    149160    // get local and extended pointers on server thread
    150161    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
     
    155166
    156167    // get server core local index
    157     server_lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
     168    server_lid = hal_remote_l32( XPTR( chdev_cxy , &core_ptr->lid ) );
    158169
    159170#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
    160 bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
     171bool_t is_rx = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
    161172#endif
    162173   
     
    185196
    186197    // build extended pointer on lock protecting chdev waiting queue
    187     lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
     198    lock_xp            = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
    188199
    189200    // critical section for the following sequence:
    190     // (1) take the lock protecting waiting queue
     201    // (1) take the lock protecting the chdev state
    191202    // (2) block the client thread
    192203    // (3) unblock the server thread if required
     
    200211    hal_disable_irq( &save_sr );
    201212
    202     // take the lock protecting chdev waiting queue
    203     remote_spinlock_lock( lock_xp );
     213    // take the lock protecting chdev queue
     214    remote_busylock_acquire( lock_xp );
    204215
    205216    // block current thread
     
    217228
    218229    // unblock server thread if required
    219     if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
     230    if( hal_remote_l32( blocked_xp ) & THREAD_BLOCKED_IDLE )
    220231    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
    221232
     
    243254#endif
    244255 
    245     // send IPI to core running the server thread when server != client
     256    // send IPI to core running the server thread when server core != client core
    246257    if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) )
    247258    {
     
    262273    }
    263274 
    264     // release lock
    265     remote_spinlock_unlock( lock_xp );
     275    // release lock protecting chdev queue
     276    remote_busylock_release( lock_xp );
    266277
    267278    // deschedule
    268     assert( thread_can_yield() , "illegal sched_yield\n" );
    269279    sched_yield("blocked on I/O");
    270280
     
    308318    server = CURRENT_THREAD;
    309319
    310     // get root and lock on command queue
     320    // build extended pointer on root of client threads queue
    311321    root_xp = XPTR( local_cxy , &chdev->wait_root );
     322
     323    // build extended pointer on lock protecting client threads queue
    312324    lock_xp = XPTR( local_cxy , &chdev->wait_lock );
    313325
     
    316328    while( 1 )
    317329    {
     330
     331#if DEBUG_CHDEV_SERVER_RX
     332uint32_t rx_cycle = (uint32_t)hal_get_cycles();
     333if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
     334printk("\n[DBG] %s : dev_thread %x start RX / cycle %d\n",
     335__FUNCTION__ , server->trdid , rx_cycle );
     336#endif
     337
     338#if DEBUG_CHDEV_SERVER_TX
     339uint32_t tx_cycle = (uint32_t)hal_get_cycles();
     340if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
     341printk("\n[DBG] %s : dev_thread %x start TX / cycle %d\n",
     342__FUNCTION__ , server->trdid , tx_cycle );
     343#endif
     344
    318345        // get the lock protecting the waiting queue
    319         remote_spinlock_lock( lock_xp );
     346        remote_busylock_acquire( lock_xp );
    320347
    321348        // check waiting queue state
    322349        if( xlist_is_empty( root_xp ) ) // waiting queue empty
    323350        {
     351
     352#if DEBUG_CHDEV_SERVER_RX
     353rx_cycle = (uint32_t)hal_get_cycles();
     354if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
     355printk("\n[DBG] %s : dev_thread %x found RX queue empty => blocks / cycle %d\n",
     356__FUNCTION__ , server->trdid , rx_cycle );
     357#endif
     358
     359#if DEBUG_CHDEV_SERVER_TX
     360tx_cycle = (uint32_t)hal_get_cycles();
     361if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
     362printk("\n[DBG] %s : dev_thread %x found TX queue empty => blocks / cycle %d\n",
     363__FUNCTION__ , server->trdid , tx_cycle );
     364#endif
     365
    324366            // release lock
    325             remote_spinlock_unlock( lock_xp );
     367            remote_busylock_release( lock_xp );
    326368
    327369            // block
    328370            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );
    329371
     372// check server thread can yield
     373assert( (server->busylocks == 0),
     374"cannot yield : busylocks = %d\n", server->busylocks );
     375
    330376            // deschedule
    331             assert( thread_can_yield() , "illegal sched_yield\n" );
    332377            sched_yield("I/O queue empty");
    333378        }
     
    335380        {
    336381            // get extended pointer on first client thread
    337             client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
     382            client_xp = XLIST_FIRST( root_xp , thread_t , wait_list );
    338383
    339384            // get client thread cluster and local pointer
     
    345390
    346391            // release lock
    347             remote_spinlock_unlock( lock_xp );
     392            remote_busylock_release( lock_xp );
    348393
    349394#if DEBUG_CHDEV_SERVER_RX
    350 uint32_t rx_cycle = (uint32_t)hal_get_cycles();
     395rx_cycle = (uint32_t)hal_get_cycles();
    351396if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    352 printk("\n[DBG] %s : server_thread %x start RX / client %x / cycle %d\n",
    353 __FUNCTION__ , server , client_ptr , rx_cycle );
     397printk("\n[DBG] %s : dev_thread %x for RX found client thread %x in process %x / cycle %d\n",
     398__FUNCTION__, server->trdid ,client_ptr->trdid ,client_ptr->process->pid, rx_cycle );
    354399#endif
    355400
    356401#if DEBUG_CHDEV_SERVER_TX
    357 uint32_t tx_cycle = (uint32_t)hal_get_cycles();
     402tx_cycle = (uint32_t)hal_get_cycles();
    358403if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    359 printk("\n[DBG] %s : server_thread %x start TX / client %x / cycle %d\n",
    360 __FUNCTION__ , server , client_ptr , tx_cycle );
     404printk("\n[DBG] %s : dev_thread %x for TX found client thread %x in process %x / cycle %d\n",
     405__FUNCTION__, server->trdid ,client_ptr->trdid ,client_ptr->process->pid, tx_cycle );
    361406#endif
    362407
     
    378423rx_cycle = (uint32_t)hal_get_cycles();
    379424if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    380 printk("\n[DBG] %s : server_thread %x completes RX / client %x / cycle %d\n",
    381 __FUNCTION__ , server , client_ptr , rx_cycle );
     425printk("\n[DBG] %s : dev_thread %x completes RX for client %x in process %x / cycle %d\n",
     426__FUNCTION__, server->trdid, client_ptr->trdid, client_ptr->process->pid, rx_cycle );
    382427#endif
    383428
     
    385430tx_cycle = (uint32_t)hal_get_cycles();
    386431if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    387 printk("\n[DBG] %s : server_thread %x completes TX / client %x / cycle %d\n",
    388 __FUNCTION__ , server , client_ptr , tx_cycle );
     432printk("\n[DBG] %s : dev_thread %x completes TX for client %x in process %x / cycle %d\n",
     433__FUNCTION__, server->trdid, client_ptr->trdid, client_ptr->process->pid, tx_cycle );
    389434#endif
    390435
     
    419464
    420465    // get inode type from file descriptor
    421     inode_type = hal_remote_lw( XPTR( file_cxy , &file_ptr->type ) );
     466    inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
    422467    inode_ptr  = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
    423468
     
    432477}  // end chdev_from_file()
    433478
    434 ////////////////////////
     479//////////////////////////////
    435480void chdev_dir_display( void )
    436481{
     
    439484    chdev_t * ptr;
    440485    uint32_t  base;
    441     reg_t     save_sr;
    442486
    443487    // get pointers on TXT0 chdev
     
    446490    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    447491
    448     // get extended pointer on remote TXT0 chdev lock
     492    // get extended pointer on TXT0 lock
    449493    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    450494
    451     // get TXT0 lock in busy waiting mode
    452     remote_spinlock_lock_busy( lock_xp , &save_sr );
     495    // get TXT0 lock
     496    remote_busylock_acquire( lock_xp );
    453497
    454498    // header
     
    456500
    457501    // IOB
    458     if (chdev_dir.iob != NULL )
     502    if (chdev_dir.iob != XPTR_NULL )
    459503    {
    460504        cxy  = GET_CXY( chdev_dir.iob );
    461505        ptr  = GET_PTR( chdev_dir.iob );
    462         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     506        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    463507        nolock_printk("  - iob       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);
    464508    }
     
    467511    cxy  = GET_CXY( chdev_dir.pic );
    468512    ptr  = GET_PTR( chdev_dir.pic );
    469     base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     513    base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    470514    nolock_printk("  - pic       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);
    471515
     
    475519        cxy = GET_CXY( chdev_dir.txt_rx[i] );
    476520        ptr = GET_PTR( chdev_dir.txt_rx[i] );
    477         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     521        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    478522        nolock_printk("  - txt_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    479523
    480524        cxy = GET_CXY( chdev_dir.txt_tx[i] );
    481525        ptr = GET_PTR( chdev_dir.txt_tx[i] );
    482         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     526        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    483527        nolock_printk("  - txt_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    484528    }
     
    489533        cxy = GET_CXY( chdev_dir.ioc[i] );
    490534        ptr = GET_PTR( chdev_dir.ioc[i] );
    491         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     535        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    492536        nolock_printk("  - ioc[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    493537    }
     
    498542        cxy  = GET_CXY( chdev_dir.fbf[i] );
    499543        ptr  = GET_PTR( chdev_dir.fbf[i] );
    500         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     544        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    501545        nolock_printk("  - fbf[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    502546    }
     
    507551        cxy = GET_CXY( chdev_dir.nic_rx[i] );
    508552        ptr = GET_PTR( chdev_dir.nic_rx[i] );
    509         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     553        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    510554        nolock_printk("  - nic_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    511555
    512556        cxy = GET_CXY( chdev_dir.nic_tx[i] );
    513557        ptr = GET_PTR( chdev_dir.nic_tx[i] );
    514         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     558        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    515559        nolock_printk("  - nic_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    516560    }
    517561
    518562    // release lock
    519     remote_spinlock_unlock_busy( lock_xp , save_sr );
     563    remote_busylock_release( lock_xp );
    520564
    521565}  // end chdev_dir_display()
     
    546590    hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( chdev_cxy , chdev_ptr->name ) );
    547591
     592    // get pointers on TXT0 chdev
     593    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     594    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     595    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     596
     597    // get extended pointer on TXT0 lock
     598    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     599
     600    // get TXT0 lock
     601    remote_busylock_acquire( lock_xp );
     602
    548603    // check queue empty
    549604    if( xlist_is_empty( root_xp ) )
    550605    {
    551         printk("\n***** Waiting queue empty for chdev %s\n", name );
     606        nolock_printk("\n***** Waiting queue empty for chdev %s\n", name );
    552607    }
    553608    else
    554609    {
    555         printk("\n***** Waiting queue for chdev %s\n", name );
     610        nolock_printk("\n***** Waiting queue for chdev %s\n", name );
    556611
    557612        // scan the waiting queue
     
    561616            thread_cxy = GET_CXY( thread_xp );
    562617            thread_ptr = GET_PTR( thread_xp );
    563             trdid      = hal_remote_lw ( XPTR( thread_cxy , &thread_ptr->trdid   ) );
     618            trdid      = hal_remote_l32 ( XPTR( thread_cxy , &thread_ptr->trdid   ) );
    564619            process    = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
    565                         pid        = hal_remote_lw ( XPTR( thread_cxy , &process->pid        ) );
    566 
    567             printk("- thread %X / cluster %X / trdid %X / pid %X\n",
     620                        pid        = hal_remote_l32 ( XPTR( thread_cxy , &process->pid        ) );
     621
     622            nolock_printk("- thread %X / cluster %X / trdid %X / pid %X\n",
    568623            thread_ptr, thread_cxy, trdid, pid );
    569624        }
    570625    }
     626
     627    // release TXT0 lock
     628    remote_busylock_release( lock_xp );
     629
    571630}  // end chdev_queue_display()
    572631
  • trunk/kernel/kern/chdev.h

    r485 r564  
    2828#include <hal_kernel_types.h>
    2929#include <xlist.h>
    30 #include <remote_spinlock.h>
     30#include <remote_busylock.h>
    3131#include <dev_iob.h>
    3232#include <dev_ioc.h>
     
    4343 * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev
    4444 * descriptor for each channel (and for each RX/TX direction for the NIC and TXT devices).
    45  * Each chdev contains a waiting queue, registering the "client threads" requests,
     45 * Each chdev contains a trans-clusters waiting queue, registering the "client threads",
    4646 * and an associated "server thread", handling these requests.
    4747 * These descriptors are physically distributed on all clusters to minimize contention.
     
    116116 * of client threads is associated to each chdev descriptor (not for ICU, PIC, IOB).
    117117 * For each device type ***, the specific extension is defined in the "dev_***.h" file.
     118 *
     119 * NOTE : For most chdevs, the busylock is used to protect the waiting queue changes,
     120 *        when a thread register in this queue, or is removed after service.
     121 *        This busylock is also used to protect direct access to the kernel TXT0 terminal
     122 *        (without using the server thread).
    118123 *****************************************************************************************/
    119124
     
    136141    uint32_t             irq_id;      /*! associated IRQ index in local ICU              */
    137142
    138         remote_spinlock_t    wait_lock;   /*! lock protecting exclusive access to queue      */
    139     xlist_entry_t        wait_root;   /*! root of waiting threads queue                  */
     143        xlist_entry_t        wait_root;   /*! root of client threads waiting queue           */
     144    remote_busylock_t    wait_lock;   /*! lock protecting waiting queue                  */
    140145
    141146    union
  • trunk/kernel/kern/cluster.c

    r562 r564  
    2929#include <hal_special.h>
    3030#include <hal_ppm.h>
     31#include <hal_macros.h>
    3132#include <remote_fifo.h>
    3233#include <printk.h>
    3334#include <errno.h>
    34 #include <spinlock.h>
     35#include <queuelock.h>
    3536#include <core.h>
    3637#include <chdev.h>
     
    4546#include <process.h>
    4647#include <dqdt.h>
    47 #include <cluster_info.h>
    4848
    4949/////////////////////////////////////////////////////////////////////////////////////
     
    5151/////////////////////////////////////////////////////////////////////////////////////
    5252
    53 extern process_t           process_zero;     // allocated in kernel_init.c file
    54 extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
    55 
    56 ///////////////////////////////////////////////n
    57 error_t cluster_init( struct boot_info_s * info )
    58 {
    59     error_t         error;
    60     lpid_t          lpid;     // local process_index
    61     lid_t           lid;      // local core index
    62     uint32_t        i;        // index in loop on external peripherals
     53extern process_t           process_zero;     // allocated in kernel_init.c
     54extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
     55
     56
     57
     58///////////////////////////////////////////////////
     59void cluster_info_init( struct boot_info_s * info )
     60{
    6361    boot_device_t * dev;      // pointer on external peripheral
    6462    uint32_t        func;     // external peripheral functionnal type
     63    uint32_t        x;
     64    uint32_t        y;
     65    uint32_t        i;   
    6566
    6667        cluster_t * cluster = LOCAL_CLUSTER;
     
    7576
    7677    // initialize the cluster_info[][] array
    77     int x;
    78     int y;
    79     for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++) {
    80         for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++) {
     78    for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++)
     79    {
     80        for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++)
     81        {
    8182            cluster->cluster_info[x][y] = info->cluster_info[x][y];
    8283        }
    8384    }
     85
    8486    // initialize external peripherals channels
    8587    for( i = 0 ; i < info->ext_dev_nr ; i++ )
     
    9395    }
    9496
    95     // initialize cluster local parameters
    96         cluster->cores_nr        = info->cores_nr;
     97    // initialize number of cores
     98        cluster->cores_nr  = info->cores_nr;
     99
     100}  // end cluster_info_init()
     101
     102/////////////////////////////////////////////////////////
     103error_t cluster_manager_init( struct boot_info_s * info )
     104{
     105    error_t         error;
     106    lpid_t          lpid;     // local process_index
     107    lid_t           lid;      // local core index
     108
     109        cluster_t * cluster = LOCAL_CLUSTER;
    97110
    98111    // initialize the lock protecting the embedded kcm allocator
    99         spinlock_init( &cluster->kcm_lock );
     112        busylock_init( &cluster->kcm_lock , LOCK_CLUSTER_KCM );
    100113
    101114#if DEBUG_CLUSTER_INIT
    102115uint32_t cycle = (uint32_t)hal_get_cycles();
    103116if( DEBUG_CLUSTER_INIT < cycle )
    104 printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
    105 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     117printk("\n[DBG] %s : thread %x in process %x enters for cluster %x / cycle %d\n",
     118__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, local_cxy , cycle );
    106119#endif
    107120
    108121    // initialises DQDT
    109122    cluster->dqdt_root_level = dqdt_init( info->x_size,
    110                                           info->y_size,
    111                                           info->y_width ) - 1;
     123                                          info->y_size ) - 1;
     124
     125#if( DEBUG_CLUSTER_INIT & 1 )
     126cycle = (uint32_t)hal_get_cycles();
     127if( DEBUG_CLUSTER_INIT < cycle )
     128printk("\n[DBG] %s : DQDT initialized in cluster %x / cycle %d\n",
     129__FUNCTION__ , local_cxy , cycle );
     130#endif
    112131
    113132    // initialises embedded PPM
     
    166185        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    167186    {
    168             local_fifo_init( &cluster->rpc_fifo[lid] );
     187            remote_fifo_init( &cluster->rpc_fifo[lid] );
    169188        cluster->rpc_threads[lid] = 0;
    170189    }
     
    178197
    179198    // initialise pref_tbl[] in process manager
    180         spinlock_init( &cluster->pmgr.pref_lock );
     199        queuelock_init( &cluster->pmgr.pref_lock , LOCK_CLUSTER_PREFTBL );
    181200    cluster->pmgr.pref_nr = 0;
    182201    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
     
    187206
    188207    // initialise local_list in process manager
    189         remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    190208    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    191209    cluster->pmgr.local_nr = 0;
     210        remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) ,
     211                           LOCK_CLUSTER_LOCALS );
    192212
    193213    // initialise copies_lists in process manager
    194214    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    195215    {
    196             remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
    197216        cluster->pmgr.copies_nr[lpid] = 0;
    198217        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
     218            remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ),
     219                               LOCK_CLUSTER_COPIES );
    199220    }
    200221
     
    202223cycle = (uint32_t)hal_get_cycles();
    203224if( DEBUG_CLUSTER_INIT < cycle )
    204 printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
    205 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     225printk("\n[DBG] %s : thread %x in process %x exit for cluster %x / cycle %d\n",
     226__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid , local_cxy , cycle );
    206227#endif
    207228
     
    209230
    210231        return 0;
    211 } // end cluster_init()
    212 
    213 /////////////////////////////////
     232} // end cluster_manager_init()
     233
     234///////////////////////////////////
    214235cxy_t cluster_random_select( void )
    215236{
    216     uint32_t  x_size;
    217     uint32_t  y_size;
    218     uint32_t  y_width;
    219237    uint32_t  index;
    220     uint32_t  x;
     238    uint32_t  x;   
    221239    uint32_t  y;
    222 
    223     do {
    224         x_size     = LOCAL_CLUSTER->x_size;
    225         y_size     = LOCAL_CLUSTER->y_size;
    226         y_width   = LOCAL_CLUSTER->y_width;
     240    cxy_t     cxy;
     241
     242    uint32_t  x_size    = LOCAL_CLUSTER->x_size;
     243    uint32_t  y_size    = LOCAL_CLUSTER->y_size;
     244
     245    do
     246    {
    227247        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
    228248        x         = index / y_size;
    229249        y         = index % y_size;
    230     } while ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[x][y] ) == 0 );
    231 
    232     return (x<<y_width) + y;
     250        cxy       = HAL_CXY_FROM_XY( x , y );
     251    }
     252    while ( cluster_is_active( cxy ) == false );
     253
     254    return ( cxy );
    233255}
    234256
     
    236258bool_t cluster_is_undefined( cxy_t cxy )
    237259{
    238     cluster_t * cluster = LOCAL_CLUSTER;
    239 
    240     uint32_t y_width = cluster->y_width;
    241 
    242     uint32_t x = cxy >> y_width;
    243     uint32_t y = cxy & ((1<<y_width)-1);
    244 
    245     if( x >= cluster->x_size ) return true;
    246     if( y >= cluster->y_size ) return true;
     260    uint32_t  x_size = LOCAL_CLUSTER->x_size;
     261    uint32_t  y_size = LOCAL_CLUSTER->y_size;
     262
     263    uint32_t  x      = HAL_X_FROM_CXY( cxy );
     264    uint32_t  y      = HAL_Y_FROM_CXY( cxy );
     265
     266    if( x >= x_size ) return true;
     267    if( y >= y_size ) return true;
    247268
    248269    return false;
     270}
     271
     272//////////////////////////////////////
     273bool_t cluster_is_active ( cxy_t cxy )
     274{
     275    uint32_t x = HAL_X_FROM_CXY( cxy );
     276    uint32_t y = HAL_Y_FROM_CXY( cxy );
     277
     278    return ( LOCAL_CLUSTER->cluster_info[x][y] != 0 );
    249279}
    250280
     
    304334
    305335    // take the lock protecting the list of processes
    306     remote_spinlock_lock( lock_xp );
     336    remote_queuelock_acquire( lock_xp );
    307337
    308338    // scan list of processes
     
    320350
    321351    // release the lock protecting the list of processes
    322     remote_spinlock_unlock( lock_xp );
     352    remote_queuelock_release( lock_xp );
    323353
    324354    // return extended pointer on process descriptor in owner cluster
     
    350380
    351381    // take the lock protecting the list of processes
    352     remote_spinlock_lock( lock_xp );
     382    remote_queuelock_acquire( lock_xp );
    353383
    354384    // scan list of processes in owner cluster
     
    358388        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
    359389        current_ptr = GET_PTR( current_xp );
    360         current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
     390        current_pid = hal_remote_l32( XPTR( owner_cxy , &current_ptr->pid ) );
    361391
    362392        if( current_pid == pid )
     
    368398
    369399    // release the lock protecting the list of processes
    370     remote_spinlock_unlock( lock_xp );
     400    remote_queuelock_release( lock_xp );
    371401
    372402    // return extended pointer on process descriptor in owner cluster
     
    397427    else                              // use a remote_lwd to access owner cluster
    398428    {
    399         ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
     429        ref_xp = (xptr_t)hal_remote_l64( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    400430    }
    401431
     
    419449    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
    420450
    421     // get the process manager lock
    422     spinlock_lock( &pm->pref_lock );
     451    // get the lock protecting pref_tbl
     452    queuelock_acquire( &pm->pref_lock );
    423453
    424454    // search an empty slot
     
    443473
    444474        // release the processs_manager lock
    445         spinlock_unlock( &pm->pref_lock );
     475        queuelock_release( &pm->pref_lock );
    446476
    447477        return 0;
     
    449479    else
    450480    {
    451         // release the processs_manager lock
    452         spinlock_unlock( &pm->pref_lock );
    453 
    454         return -1;
     481        // release the lock
     482        queuelock_release( &pm->pref_lock );
     483
     484        return 0xFFFFFFFF;
    455485    }
    456486
     
    488518    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
    489519
    490     // get the process manager lock
    491     spinlock_lock( &pm->pref_lock );
     520    // get the lock protecting pref_tbl
     521    queuelock_acquire( &pm->pref_lock );
    492522
    493523    // remove process from pref_tbl[]
     
    496526
    497527    // release the processs_manager lock
    498     spinlock_unlock( &pm->pref_lock );
     528    queuelock_release( &pm->pref_lock );
    499529
    500530#if DEBUG_CLUSTER_PID_RELEASE
     
    538568void cluster_process_local_link( process_t * process )
    539569{
    540     reg_t    save_sr;
    541 
    542570    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    543571
     
    546574    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
    547575
    548     // get lock protecting the process manager local list
    549     remote_spinlock_lock_busy( lock_xp , &save_sr );
     576    // get lock protecting the local list
     577    remote_queuelock_acquire( lock_xp );
    550578
    551579    // register process in local list
     
    553581    pm->local_nr++;
    554582
    555     // release lock protecting the process manager local list
    556     remote_spinlock_unlock_busy( lock_xp , save_sr );
     583    // release lock protecting the local list
     584    remote_queuelock_release( lock_xp );
    557585}
    558586
     
    560588void cluster_process_local_unlink( process_t * process )
    561589{
    562     reg_t save_sr;
    563 
    564590    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    565591
     
    567593    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
    568594
    569     // get lock protecting the process manager local list
    570     remote_spinlock_lock_busy( lock_xp , &save_sr );
     595    // get lock protecting the local list
     596    remote_queuelock_acquire( lock_xp );
    571597
    572598    // remove process from local list
     
    574600    pm->local_nr--;
    575601
    576     // release lock protecting the process manager local list
    577     remote_spinlock_unlock_busy( lock_xp , save_sr );
     602    // release lock protecting the local list
     603    remote_queuelock_release( lock_xp );
    578604}
    579605
     
    581607void cluster_process_copies_link( process_t * process )
    582608{
    583     reg_t    irq_state;
    584609    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    585610
     
    606631
    607632    // get lock protecting copies_list[lpid]
    608     remote_spinlock_lock_busy( copies_lock , &irq_state );
     633    remote_queuelock_acquire( copies_lock );
    609634
    610635    // add copy to copies_list
     
    613638
    614639    // release lock protecting copies_list[lpid]
    615     remote_spinlock_unlock_busy( copies_lock , irq_state );
     640    remote_queuelock_release( copies_lock );
    616641
    617642#if DEBUG_CLUSTER_PROCESS_COPIES
     
    627652void cluster_process_copies_unlink( process_t * process )
    628653{
    629     uint32_t irq_state;
    630654    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    631655
     
    649673
    650674    // get lock protecting copies_list[lpid]
    651     remote_spinlock_lock_busy( copies_lock , &irq_state );
     675    remote_queuelock_acquire( copies_lock );
    652676
    653677    // remove copy from copies_list
     
    656680
    657681    // release lock protecting copies_list[lpid]
    658     remote_spinlock_unlock_busy( copies_lock , irq_state );
     682    remote_queuelock_release( copies_lock );
    659683
    660684#if DEBUG_CLUSTER_PROCESS_COPIES
     
    678702    xptr_t        txt0_xp;
    679703    xptr_t        txt0_lock_xp;
    680     reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode     
    681704
    682705    assert( (cluster_is_undefined( cxy ) == false),
     
    696719
    697720    // get lock on local process list
    698     remote_spinlock_lock( lock_xp );
    699 
    700     // get TXT0 lock in busy waiting mode
    701     remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
     721    remote_queuelock_acquire( lock_xp );
     722
     723    // get TXT0 lock
     724    remote_busylock_acquire( txt0_lock_xp );
    702725     
    703726    // display header
     
    712735    }
    713736
    714     // release TXT0 lock in busy waiting mode
    715     remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
     737    // release TXT0 lock
     738    remote_busylock_release( txt0_lock_xp );
    716739
    717740    // release lock on local process list
    718     remote_spinlock_unlock( lock_xp );
     741    remote_queuelock_release( lock_xp );
    719742
    720743}  // end cluster_processes_display()
  • trunk/kernel/kern/cluster.h

    r562 r564  
    3030#include <hal_kernel_types.h>
    3131#include <bits.h>
    32 #include <spinlock.h>
    33 #include <readlock.h>
    34 #include <remote_barrier.h>
     32#include <queuelock.h>
     33#include <remote_queuelock.h>
    3534#include <list.h>
    3635#include <xlist.h>
     
    6867 * 2) The local_root is the root of the local list of all process descriptors in cluster K.
    6968 *    A process descriptor P is present in K, as soon as P has a thread in cluster K.
     69 *    We use an xlist, because this list can be traversed by remote threads.
    7070 *
    7171 * 3) The copies_root[] array is indexed by lpid. There is one entry per owned process,
    7272 *    and each each entry contains the root of the xlist of copies for this process.
     73 $    We use an xlist, because process copies are distributed in all clusters.
    7374 ******************************************************************************************/
    7475
    7576typedef struct process_manager_s
    7677{
    77         xptr_t            pref_tbl[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! reference  process   */
    78         spinlock_t        pref_lock;              /*! lock protecting lpid allocation/release */
    79     uint32_t          pref_nr;                /*! number of processes owned by cluster    */
    80 
    81     xlist_entry_t     local_root;             /*! root of list of process in cluster      */
    82     remote_spinlock_t local_lock;             /*! lock protecting access to local list    */
    83     uint32_t          local_nr;               /*! number of process in cluster            */
    84 
    85     xlist_entry_t     copies_root[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! roots of lists    */
    86     remote_spinlock_t copies_lock[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! one lock per list */
    87     uint32_t          copies_nr[CONFIG_MAX_PROCESS_PER_CLUSTER];    /*! number of copies  */
     78        xptr_t             pref_tbl[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! owned  processes    */
     79        queuelock_t        pref_lock;              /*! lock protecting pref_tbl              */
     80    uint32_t           pref_nr;                /*! number of processes owned by cluster   */
     81
     82    xlist_entry_t      local_root;            /*! root of list of process in cluster      */
     83    remote_queuelock_t local_lock;            /*! lock protecting local list              */
     84    uint32_t           local_nr;              /*! number of process in cluster            */
     85
     86    xlist_entry_t      copies_root[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! roots of lists   */
     87    remote_queuelock_t copies_lock[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! one  per list    */
     88    uint32_t           copies_nr[CONFIG_MAX_PROCESS_PER_CLUSTER];    /*! number of copie  */
    8889}
    8990pmgr_t;
     
    9798typedef struct cluster_s
    9899{
    99         spinlock_t      kcm_lock;          /*! local, protect creation of KCM allocators      */
    100100
    101101    // global parameters
    102         uint32_t        paddr_width;       /*! numer of bits in physical address              */
     102    uint32_t        paddr_width;       /*! numer of bits in physical address              */
    103103    uint32_t        x_width;           /*! number of bits to code x_size  (can be 0)      */
    104104    uint32_t        y_width;           /*! number of bits to code y_size  (can be 0)      */
    105         uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
    106         uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
    107     uint32_t        cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y];
    108         cxy_t           io_cxy;            /*! io cluster identifier                          */
     105    uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
     106    uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
     107    cxy_t           io_cxy;            /*! io cluster identifier                          */
    109108    uint32_t        dqdt_root_level;   /*! index of root node in dqdt_tbl[]               */
    110109    uint32_t        nb_txt_channels;   /*! number of TXT channels                         */
     
    113112    uint32_t        nb_fbf_channels;   /*! number of FBF channels                         */
    114113
     114    char            cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y];
     115
    115116    // local parameters
    116         uint32_t        cores_nr;          /*! actual number of cores in cluster              */
     117    uint32_t        cores_nr;          /*! actual number of cores in cluster              */
    117118    uint32_t        ram_size;          /*! physical memory size                           */
    118119    uint32_t        ram_base;          /*! physical memory base (local address)           */
     
    120121        core_t          core_tbl[CONFIG_MAX_LOCAL_CORES];    /*! embedded cores               */
    121122
    122         list_entry_t    dev_root;          /*! root of list of devices in cluster             */
     123    list_entry_t    dev_root;          /*! root of list of devices in cluster             */
    123124
    124125    // memory allocators
    125         ppm_t           ppm;               /*! embedded kernel page manager                   */
    126         khm_t           khm;               /*! embedded kernel heap manager                   */
    127         kcm_t           kcm;               /*! embedded kernel KCMs manager                   */
     126    ppm_t           ppm;               /*! embedded kernel page manager                   */
     127    khm_t           khm;               /*! embedded kernel heap manager                   */
     128    kcm_t           kcm;               /*! embedded kernel KCMs manager                   */
    128129
    129130    kcm_t         * kcm_tbl[KMEM_TYPES_NR];              /*! pointers on allocated KCMs   */
     131    busylock_t      kcm_lock;                            /*! protect kcm_tbl[] updates    */
    130132
    131133    // RPC
    132         remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core        */
     134    remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core        */
    133135    uint32_t        rpc_threads[CONFIG_MAX_LOCAL_CORES]; /*! RPC threads per core         */
    134136
    135137    // DQDT
    136         dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];     /*! embedded DQDT nodes          */
     138    dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];     /*! embedded DQDT nodes          */
    137139
    138140    // Local process manager
     
    158160
    159161/******************************************************************************************
    160  * This generic function initialises the local cluster manager from information found
    161  * in the local boot-info structure. It initializes the following local resources:
    162  * - the global platform parameters,
    163  * - the specific cluster parameters,
    164  * - the lock protecting KCM creation,
    165  * - the local DQDT nodes,
    166  * - the PPM, KHM, and KCM allocators,
    167  * - the local core descriptors,
    168  * - the local RPC FIFO,
    169  * - the process manager.
    170  * It does NOT initialise the local device descriptors.
     162 * These two functions initialise the local cluster manager from information found
     163 * in the local boot-info structure <info> build by the boot-loader.
     164 * 1) the cluster_info_init() function is called first, to initialize the structural
     165 *    constants, and cannot use the TXT0 kernel terminal.
     166 * 2) the cluster_manager_init() function initialize various complex structures:
     167 *    - the local DQDT nodes,
     168 *    - the PPM, KHM, and KCM allocators,
     169 *    - the local core descriptors,
     170 *    - the local RPC FIFO,
     171 *    - the process manager.
     172 *    It does NOT initialise the local device descriptors.
     173 *    It can use the TXT0 kernel terminal.
    171174 ******************************************************************************************
    172175 * @ info : pointer on the local boot_info_t structure build by the bootloader.
    173176 *****************************************************************************************/
    174 error_t cluster_init( boot_info_t * info );
    175 
    176 /******************************************************************************************
    177  * This function randomly selects a cluster.
    178  ******************************************************************************************
    179  * @ returns the selected cluster identifier.
    180  *****************************************************************************************/
    181 cxy_t cluster_random_select( void );
     177void    cluster_info_init( boot_info_t * info );
     178error_t cluster_manager_init( boot_info_t * info );
    182179
    183180/******************************************************************************************
     
    189186bool_t cluster_is_undefined( cxy_t cxy );
    190187
    191 
    192 /*****************************************************************************************/
    193 /***************   Process Management Operations   ***************************************/
    194 /*****************************************************************************************/
     188/******************************************************************************************
     189 * This function uses the local cluster_info[][] array in cluster descriptor,
     190 * and returns true when the cluster identified by the <cxy> argument is active.
     191 ******************************************************************************************
     192 * @ cxy   : cluster identifier.
     193 * @ return true if cluster contains a kernel instance.
     194 *****************************************************************************************/
     195bool_t cluster_is_active( cxy_t  cxy );
     196
     197/******************************************************************************************
     198 * This function (pseudo) randomly selects a valid cluster.
     199 * It is called by the vfs_cluster_lookup() function to place a new (missing) inode.
     200 * It is called by the vmm_page_allocate() function to place a distributed vseg page.
     201 ******************************************************************************************
     202 * @ returns the selected cluster identifier.
     203 *****************************************************************************************/
     204cxy_t cluster_random_select( void );
    195205
    196206/******************************************************************************************
     
    290300void cluster_process_copies_unlink( struct process_s * process );
    291301
    292 /*********************************************************************************************
     302/******************************************************************************************
    293303 * This function displays on the kernel terminal TXT0 all user processes registered
    294304 * in the cluster defined by the <cxy> argument.
    295305 * It can be called by a thread running in any cluster, because is use remote accesses
    296306 * to scan the xlist of registered processes.
    297  *********************************************************************************************
     307 ******************************************************************************************
    298308 * @ cxy   : cluster identifier.
    299  ********************************************************************************************/
     309 *****************************************************************************************/
    300310void cluster_processes_display( cxy_t cxy );
    301311
    302 
    303 
    304 /*****************************************************************************************/
    305 /***************   Cores Management Operations   *****************************************/
    306 /*****************************************************************************************/
    307 
    308 /******************************************************************************************
    309  * This function returns the core local index that has the lowest usage in local cluster.
     312/******************************************************************************************
     313 * This function uses the local boot_inforeturns the core local index that has the lowest usage in local cluster.
    310314 *****************************************************************************************/
    311315lid_t cluster_select_local_core( void );
    312316
     317             
    313318#endif  /* _CLUSTER_H_ */
     319
  • trunk/kernel/kern/core.c

    r457 r564  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017)
     5 *         Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/kern/core.h

    r457 r564  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner (2016,2017)
     5 *          Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/kern/dqdt.c

    r562 r564  
    4040extern chdev_directory_t  chdev_dir;  // defined in chdev.h / allocated in kernel_init.c
    4141
    42 
     42/*
    4343///////////////////////////////////////////////////////////////////////////////////////////
    4444// This static recursive function traverse the DQDT quad-tree from root to bottom.
     
    6565    }
    6666}
    67 
    68 ///////////////////
     67*/
     68
     69/////////////////////////
    6970void dqdt_display( void )
    7071{
    71     /*
    72     reg_t   save_sr;
    73 
     72    return;
     73
     74/*
    7475    // build extended pointer on DQDT root node
    7576        cluster_t * cluster = LOCAL_CLUSTER;
     
    8283    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    8384
    84     // get extended pointer on remote TXT0 chdev lock
     85    // get extended pointer on remote TXT0 lock
    8586    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    8687
    87     // get TXT0 lock in busy waiting mode
    88     remote_spinlock_lock_busy( lock_xp , &save_sr );
     88    // get TXT0 lock
     89    remote_busylock_acquire( lock_xp );
    8990
    9091    // print header
     
    9596
    9697    // release lock
    97     remote_spinlock_unlock_busy( lock_xp , save_sr );
    98     */
     98    remote_busylock_release( lock_xp );
     99*/
     100
    99101}
    100102
    101103////////////////////////////////////
    102104uint32_t dqdt_init( uint32_t x_size,
    103                     uint32_t y_size,
    104                     uint32_t y_width )
     105                    uint32_t y_size )
    105106{
    106107    assert( ((x_size <= 32) && (y_size <= 32)) , "illegal mesh size\n");
    107 
     108 
     109    // compute level_max
     110    uint32_t  x_size_ext = POW2_ROUNDUP( x_size );
     111    uint32_t  y_size_ext = POW2_ROUNDUP( y_size );
     112    uint32_t  size_ext   = MAX(x_size_ext , y_size_ext);
     113    uint32_t  level_max  = (bits_log2(size_ext * size_ext) >> 1) + 1;
     114
     115return level_max;
     116
     117/*
    108118        dqdt_node_t * node;
    109119    cxy_t         p_cxy;         // cluster coordinates for parent node
     
    114124    cluster_t   * cluster;       // pointer on local cluster
    115125
    116     cluster = LOCAL_CLUSTER;
    117 
    118     // compute level_max
    119     uint32_t  x_size_ext = POW2_ROUNDUP( x_size );
    120     uint32_t  y_size_ext = POW2_ROUNDUP( y_size );
    121     uint32_t  size_ext   = MAX(x_size_ext , y_size_ext);
    122     uint32_t  level_max  = (bits_log2(size_ext * size_ext) >> 1) + 1;
    123 
    124     return level_max;
    125 
    126     /*
     126    cluster_t   * cluster = LOCAL_CLUSTER;
     127
    127128    // get cluster coordinates
    128     uint32_t    x       = local_cxy >> y_width;
    129     uint32_t    y       = local_cxy & ((1<<y_width)-1);
     129    uint32_t    x       = HAL_X_FROM_CXY( local_cxy );
     130    uint32_t    y       = HAL_Y_FROM_CXY( local_cxy );
    130131
    131132    // loop on local dqdt nodes (at most one node per level)
     
    154155        {
    155156            // set parent extended pointer
    156             p_cxy = ((x & ~pmask)<<y_width) + (y & ~pmask);
     157            p_cxy = HAL_CXY_FROM_XY( (x & ~pmask) , (y & ~pmask) );
    157158            node->parent = XPTR( p_cxy , &cluster->dqdt_tbl[level+1] );
    158159
     
    168169            if ( (level > 0) && ((y + (1<<(level-1))) < y_size) )
    169170            {
    170                 c_cxy = local_cxy + (1<<(level-1));
     171                c_cxy = local_cxy + HAL_CXY_FROM_XY( 0 , (1<<(level-1) );
    171172                node->children[1] = XPTR( c_cxy , &cluster->dqdt_tbl[level-1] );
    172173                node->arity++;
     
    176177            if ( (level > 0) && ((x + (1<<(level-1))) < x_size) )
    177178            {
    178                 c_cxy = local_cxy + ((1<<(level-1))<<y_width);
     179                c_cxy = local_cxy + HAL_CXY_FROM_XY( (1<<(level-1)) , 0 );
    179180                node->children[2] = XPTR( c_cxy , &cluster->dqdt_tbl[level-1]);
    180181                node->arity++;
     
    186187                 ((y + (1<<(level-1))) < y_size) )
    187188            {
    188                 c_cxy = local_cxy + ((1<<(level-1))<<y_width) + (1<<(level-1));
     189                c_cxy = local_cxy + HAL_CXY_FROM_XY( (1<<(level-1)) , (1<<(level-1) );
    189190                node->children[3] = XPTR( c_cxy , &cluster->dqdt_tbl[level-1]);
    190191                node->arity++;
     
    194195
    195196    return level_max;
    196     */
     197*/
    197198
    198199} // end dqdt_init()
    199200
     201/*
    200202///////////////////////////////////////////////////////////////////////////
    201203// This recursive function is called by the dqdt_update_threads() function.
     
    216218
    217219    // get extended pointer on parent node
    218     xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
     220    xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
    219221
    220222    // propagate if required
    221223    if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
    222224}
    223 
     225*/
     226
     227/*
    224228///////////////////////////////////////////////////////////////////////////
    225229// This recursive function is called by the dqdt_update_pages() function.
     
    240244
    241245    // get extended pointer on parent node
    242     xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
     246    xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
    243247
    244248    // propagate if required
    245249    if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent, increment );
    246250}
     251*/
    247252
    248253/////////////////////////////////////////////
    249 void dqdt_update_threads( int32_t increment )
    250 {
    251     return;
    252     /*
     254void dqdt_update_threads( int32_t increment __attribute__ ((__unused__)) )
     255{
     256
     257return;
     258
     259/*
    253260        cluster_t   * cluster = LOCAL_CLUSTER;
    254261    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
     
    259266    // propagate to DQDT upper levels
    260267    if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment );
    261     */
     268*/
     269
    262270}
    263271
    264272///////////////////////////////////////////
    265 void dqdt_update_pages( int32_t increment )
    266 {
    267     return;
    268     /*
     273void dqdt_update_pages( int32_t increment  __attribute__ ((__unused__)) )
     274{
     275
     276return;
     277
     278/*
    269279        cluster_t   * cluster = LOCAL_CLUSTER;
    270280    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
     
    275285    // propagate to DQDT upper levels
    276286    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment );
    277     */
    278 }
    279 
    280 
     287*/
     288
     289}
     290
     291/*
    281292////////////////////////////////////////////////////////////////////////////////
    282293// This recursive function is called by both the dqdt_get_cluster_for_process()
     
    313324            cxy  = (cxy_t)GET_CXY( child );
    314325            ptr  = (dqdt_node_t *)GET_PTR( child );
    315             if( for_memory ) load = hal_remote_lw( XPTR( cxy , &ptr->pages ) );
    316             else             load = hal_remote_lw( XPTR( cxy , &ptr->threads ) );
     326            if( for_memory ) load = hal_remote_l32( XPTR( cxy , &ptr->pages ) );
     327            else             load = hal_remote_l32( XPTR( cxy , &ptr->threads ) );
    317328            if( load < load_min )
    318329            {
     
    326337    return dqdt_select_cluster( node_copy.children[select], for_memory );
    327338}
    328 
    329 ////////////////////////////////////
     339*/
     340
     341//////////////////////////////////////////
    330342cxy_t dqdt_get_cluster_for_process( void )
    331343{
    332     return cluster_random_select();
    333     /*
     344
     345return cluster_random_select();
     346
     347/*
    334348    // build extended pointer on DQDT root node
    335349        cluster_t * cluster = LOCAL_CLUSTER;
     
    339353    // call recursive function
    340354    return dqdt_select_cluster( root_xp , false );
    341     */
    342 }
    343 
    344 ////////////////////////////////////
     355*/
     356
     357}
     358
     359/////////////////////////////////////////
    345360cxy_t dqdt_get_cluster_for_memory( void )
    346361{
    347     return cluster_random_select();
    348     /*
     362
     363return cluster_random_select();
     364 
     365/*
    349366    // build extended pointer on DQDT root node
    350367        cluster_t * cluster = LOCAL_CLUSTER;
     
    354371    // call recursive function
    355372    return dqdt_select_cluster( root_xp , true );
    356     */
    357 }
    358 
     373*/
     374
     375}
     376
  • trunk/kernel/kern/dqdt.h

    r485 r564  
    3737 *   quad-tree covering this one-dimensionnal vector. If the number of clusters
    3838 *   is not a power of 4, the tree is truncated as required.
     39 *
    3940 *   TODO : the mapping for the one dimensionnal topology is not implemented yet [AG].
    4041 *
     
    5556 *   . Level 4 nodes exist when both X and Y coordinates are multiple of 16
    5657 *   . Level 5 nodes exist when both X and Y coordinates are multiple of 32
     58 *
     59 *   TODO : the cluster_info[x][y] array is not taken into account [AG].
    5760 ***************************************************************************************/
    5861
     
    8588 * @ x_size   : number of clusters (containing memory and CPUs) in a row
    8689 * @ y_size   : number of clusters (containing memory and CPUs) in a column
    87  * @ y_width  : number of LSB used to code the Y value in CXY
    8890 * @ return the number of levels in quad-tree.
    8991 ***************************************************************************************/
    9092uint32_t dqdt_init( uint32_t x_size,
    91                     uint32_t y_size,
    92                     uint32_t y_width );
     93                    uint32_t y_size );
    9394
    9495/****************************************************************************************
  • trunk/kernel/kern/kernel_init.c

    r561 r564  
    2424
    2525#include <kernel_config.h>
    26 #include <hard_config.h> // for the USE_TXT_XXX macros
    2726#include <errno.h>
    2827#include <hal_kernel_types.h>
     
    3029#include <hal_context.h>
    3130#include <hal_irqmask.h>
     31#include <hal_macros.h>
    3232#include <hal_ppm.h>
    3333#include <barrier.h>
    34 #include <remote_barrier.h>
     34#include <xbarrier.h>
    3535#include <remote_fifo.h>
    3636#include <core.h>
     
    5959#include <devfs.h>
    6060#include <mapper.h>
    61 #include <cluster_info.h>
    6261
    6362///////////////////////////////////////////////////////////////////////////////////////////
     
    8685cluster_t            cluster_manager                         CONFIG_CACHE_LINE_ALIGNED;
    8786
    88 // This variable defines the TXT0 kernel terminal (TX only)
     87// This variable defines the TXT_TX[0] chdev
    8988__attribute__((section(".kdata")))
    90 chdev_t              txt0_chdev                              CONFIG_CACHE_LINE_ALIGNED;
    91 
    92 // This variable defines the TXT0 lock for writing characters to MTY0
     89chdev_t              txt0_tx_chdev                           CONFIG_CACHE_LINE_ALIGNED;
     90
     91// This variable defines the TXT_RX[0] chdev
    9392__attribute__((section(".kdata")))
    94 spinlock_t           txt0_lock                               CONFIG_CACHE_LINE_ALIGNED;
     93chdev_t              txt0_rx_chdev                           CONFIG_CACHE_LINE_ALIGNED;
    9594
    9695// This variables define the kernel process0 descriptor
     
    116115// This variable is used for CP0 cores synchronisation in kernel_init()
    117116__attribute__((section(".kdata")))
    118 remote_barrier_t     global_barrier                          CONFIG_CACHE_LINE_ALIGNED;
     117xbarrier_t           global_barrier                          CONFIG_CACHE_LINE_ALIGNED;
    119118
    120119// This variable is used for local cores synchronisation in kernel_init()
     
    127126
    128127// kernel_init is the entry point defined in hal/tsar_mips32/kernel.ld
    129 // It will be used by the bootloader.
     128// It is used by the bootloader.
    130129extern void kernel_init( boot_info_t * info );
    131130
    132 // these debug variables are used to analyse the sys_read() syscall timing
     131// This array is used for debug, and describes the kernel locks usage,
     132// It must be kept consistent with the defines in kernel_config.h file.
     133char * lock_type_str[] =
     134{
     135    "unused_0",              //  0
     136
     137    "CLUSTER_KCM",           //  1
     138    "PPM_FREE",              //  2
     139    "SCHED_STATE",           //  3
     140    "VMM_STACK",             //  4
     141    "VMM_MMAP",              //  5
     142    "VFS_CTX",               //  6
     143    "KCM_STATE",             //  7
     144    "KHM_STATE",             //  8
     145    "HTAB_STATE",            //  9
     146
     147    "THREAD_JOIN",           // 10
     148    "VFS_MAIN",              // 11
     149    "CHDEV_QUEUE",           // 12
     150    "CHDEV_TXT0",            // 13
     151    "CHDEV_TXTLIST",         // 14
     152    "PAGE_STATE",            // 15
     153    "MUTEX_STATE",           // 16
     154    "CONDVAR_STATE",         // 17
     155    "SEM_STATE",             // 18
     156    "XHTAB_STATE",           // 19
     157
     158    "unused_20",             // 20
     159
     160    "CLUSTER_PREFTBL",       // 21
     161    "PPM_DIRTY",             // 22
     162
     163    "CLUSTER_LOCALS",        // 23
     164    "CLUSTER_COPIES",        // 24
     165    "PROCESS_CHILDREN",      // 25
     166    "PROCESS_USERSYNC",      // 26
     167    "PROCESS_FDARRAY",       // 27
     168
     169    "MAPPER_STATE",          // 28
     170    "PROCESS_THTBL",         // 29
     171
     172    "PROCESS_CWD",           // 30
     173    "VFS_INODE",             // 31
     174    "VFS_FILE",              // 32
     175    "VMM_VSL",               // 33
     176};       
     177
     178// these debug variables are used to analyse the sys_read() and sys_write() syscalls timing
    133179
    134180#if DEBUG_SYS_READ
     
    179225uint32_t   exit_tty_isr_write;
    180226#endif
     227
     228// intrumentation variables : cumulated costs per syscall type in cluster
     229uint32_t   syscalls_cumul_cost[SYSCALLS_NR];
     230
     231// intrumentation variables : number of syscalls per syscal type in cluster
     232uint32_t   syscalls_occurences[SYSCALLS_NR];
    181233
    182234///////////////////////////////////////////////////////////////////////////////////////////
     
    201253
    202254///////////////////////////////////////////////////////////////////////////////////////////
    203 // This function initializes the TXT0 chdev descriptor, that is the "kernel terminal",
    204 // shared by all kernel instances for debug messages.
    205 // It is a global variable (replicated in all clusters), because this terminal is used
    206 // before the kmem allocator initialisation, but only the instance in cluster containing
    207 // the calling core is registered in the "chdev_dir" directory.
     255// This function initializes the TXT_TX[0] and TXT_RX[0] chdev descriptors, implementing
     256// the "kernel terminal", shared by all kernel instances for debug messages.
     257// These chdev are implemented as global variables (replicated in all clusters),
     258// because this terminal is used before the kmem allocator initialisation, but only
     259// the chdevs in cluster 0 are registered in the "chdev_dir" directory.
    208260// As this TXT0 chdev supports only the TXT_SYNC_WRITE command, we don't create
    209261// a server thread, we don't allocate a WTI, and we don't initialize the waiting queue.
     262// Note: The TXT_RX[0] chdev is created, but is not used by ALMOS-MKH (september 2018).
    210263///////////////////////////////////////////////////////////////////////////////////////////
    211264// @ info    : pointer on the local boot-info structure.
    212265///////////////////////////////////////////////////////////////////////////////////////////
    213 static void txt0_device_init( boot_info_t * info )
     266static void __attribute__ ((noinline)) txt0_device_init( boot_info_t * info )
    214267{
    215268    boot_device_t * dev_tbl;         // pointer on array of devices in boot_info
     
    237290        if (func == DEV_FUNC_TXT )
    238291        {
    239             assert( (channels > 0) , "number of TXT channels cannot be 0\n");
    240 
    241             // initializes TXT_TX[0] chdev
    242             txt0_chdev.func    = func;
    243             txt0_chdev.impl    = impl;
    244             txt0_chdev.channel = 0;
    245             txt0_chdev.base    = base;
    246             txt0_chdev.is_rx   = false;
    247 
    248             // initializes lock
    249             remote_spinlock_init( XPTR( local_cxy , &txt0_chdev.wait_lock ) );
     292            // initialize TXT_TX[0] chdev
     293            txt0_tx_chdev.func    = func;
     294            txt0_tx_chdev.impl    = impl;
     295            txt0_tx_chdev.channel = 0;
     296            txt0_tx_chdev.base    = base;
     297            txt0_tx_chdev.is_rx   = false;
     298            remote_busylock_init( XPTR( local_cxy , &txt0_tx_chdev.wait_lock ),
     299                                  LOCK_CHDEV_TXT0 );
    250300           
    251             // TXT specific initialisation:
    252             // no server thread & no IRQ routing for channel 0
    253             dev_txt_init( &txt0_chdev );                 
    254 
    255             // register the TXT0 in all chdev_dir[x][y] structures
     301            // initialize TXT_RX[0] chdev
     302            txt0_rx_chdev.func    = func;
     303            txt0_rx_chdev.impl    = impl;
     304            txt0_rx_chdev.channel = 0;
     305            txt0_rx_chdev.base    = base;
     306            txt0_rx_chdev.is_rx   = true;
     307            remote_busylock_init( XPTR( local_cxy , &txt0_rx_chdev.wait_lock ),
     308                                  LOCK_CHDEV_TXT0 );
     309           
     310            // make TXT specific initialisations
     311            dev_txt_init( &txt0_tx_chdev );                 
     312            dev_txt_init( &txt0_rx_chdev );
     313
     314            // register TXT_TX[0] & TXT_RX[0] in chdev_dir[x][y]
     315            // for all valid clusters             
    256316            for( x = 0 ; x < info->x_size ; x++ )
    257317            {
    258                 for( y = 0 ; y < info->y_size; y++ ) // [FIXME]
     318                for( y = 0 ; y < info->y_size ; y++ )
    259319                {
    260                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    261                         cxy_t  cxy = (x<<info->y_width) + y;
    262                         hal_remote_swd( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
    263                                         XPTR( local_cxy , &txt0_chdev ) );
     320                    cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     321
     322                    if( cluster_is_active( cxy ) )
     323                    {
     324                        hal_remote_s64( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
     325                                        XPTR( local_cxy , &txt0_tx_chdev ) );
     326                        hal_remote_s64( XPTR( cxy , &chdev_dir.txt_rx[0] ) ,
     327                                        XPTR( local_cxy , &txt0_rx_chdev ) );
    264328                    }
    265329                }
    266330            }
     331
     332            hal_fence();
    267333        }
    268334        } // end loop on devices
    269335}  // end txt0_device_init()
    270 
    271 ///////////////////////////////////////////////////////////////////////////////////////////
    272 // This function is the same as txt0_device_init() but uses the internal multi_tty device
    273 // attached to cluster (0,0) instead of the external tty_tsar.
    274 // This function is used instead of txt0_device_init() only for TSAR LETI.
    275 ///////////////////////////////////////////////////////////////////////////////////////////
    276 // @ info    : pointer on the local boot-info structure.
    277 ///////////////////////////////////////////////////////////////////////////////////////////
    278 static void mtty0_device_init( boot_info_t * info)
    279 {
    280     boot_device_t * dev_tbl;         // pointer on array of devices in boot_info
    281     uint32_t        dev_nr;          // actual number of devices in this cluster
    282     xptr_t          base;            // remote pointer on segment base
    283     uint32_t        func;            // device functional index
    284     uint32_t        impl;            // device implementation index
    285     uint32_t        i;               // device index in dev_tbl
    286     uint32_t        x;               // X cluster coordinate
    287     uint32_t        y;               // Y cluster coordinate
    288 
    289     dev_nr = info->int_dev_nr;
    290     dev_tbl = info->int_dev;
    291 
    292     // Initialize spinlock for writing to MTY0
    293     spinlock_init(&txt0_lock);
    294    
    295     // Loop on internal peripherals of cluster (0,0) to find MTY0
    296     for ( i = 0; i < dev_nr; i++ )
    297     {
    298         base = dev_tbl[i].base;
    299         func = FUNC_FROM_TYPE( dev_tbl[i].type );
    300         impl = IMPL_FROM_TYPE( dev_tbl[i].type );
    301 
    302         if ( func == DEV_FUNC_TXT )
    303         {
    304             txt0_chdev.func     = func;
    305             txt0_chdev.impl     = impl;
    306             txt0_chdev.channel  = 0;
    307             txt0_chdev.base     = base;
    308             txt0_chdev.is_rx    = false;
    309 
    310             // Initialize MTY0 chdev lock
    311             remote_spinlock_init( XPTR( local_cxy, &txt0_chdev.wait_lock ) );
    312 
    313             // MTY specific initialization
    314             dev_txt_init( &txt0_chdev );
    315 
    316             // register the MTY in all chdev_dir[x][y] structures
    317             for( x = 0 ; x < info->x_size ; x++ )
    318             {
    319                 for( y = 0 ; y < info->y_size; y++ ) // [FIXME]
    320                 {
    321                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    322                         cxy_t  cxy = (x<<info->y_width) + y;
    323                         hal_remote_swd( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
    324                                         XPTR( local_cxy , &txt0_chdev ) );
    325                     }
    326                 }
    327             }
    328         }
    329     } // end loop on internal devices
    330 } // end mty0_device_init()
    331336
    332337///////////////////////////////////////////////////////////////////////////////////////////
     
    338343// @ info    : pointer on the local boot-info structure.
    339344///////////////////////////////////////////////////////////////////////////////////////////
    340 static void internal_devices_init( boot_info_t * info )
     345static void __attribute__ ((noinline)) internal_devices_init( boot_info_t * info )
    341346{
    342347    boot_device_t * dev_tbl;         // pointer on array of internaldevices in boot_info
     
    367372        if( func == DEV_FUNC_MMC ) 
    368373        {
    369             assert( (channels == 1) , "MMC device must be single channel\n" );
     374
     375            // check channels
     376            if( channels != 1 )
     377            printk("\n[PANIC] in %s : MMC device must be single channel\n", __FUNCTION__ );
    370378
    371379            // create chdev in local cluster
     
    376384                                      base );
    377385
    378             assert( (chdev_ptr != NULL) ,
    379                     "cannot allocate memory for MMC chdev\n" );
     386            // check memory
     387            if( chdev_ptr == NULL )
     388            printk("\n[PANIC] in %s : cannot create MMC chdev\n", __FUNCTION__ );
    380389           
    381390            // make MMC specific initialisation
     
    385394            for( x = 0 ; x < info->x_size ; x++ )
    386395            {
    387                 for( y = 0 ; y < info->y_size; y++ ) // [FIXME]
     396                for( y = 0 ; y < info->y_size ; y++ )
    388397                {
    389                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    390                         cxy_t  cxy = (x<<info->y_width) + y;
    391                         hal_remote_swd( XPTR( cxy , &chdev_dir.mmc[local_cxy] ),
     398                    cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     399
     400                    if( cluster_is_active( cxy ) )
     401                    {
     402                        hal_remote_s64( XPTR( cxy , &chdev_dir.mmc[local_cxy] ),
    392403                                        XPTR( local_cxy , chdev_ptr ) );
    393404                    }
     
    414425                                          base );
    415426
    416                 assert( (chdev_ptr != NULL) , "cannot allocate memory for DMA chdev" );
    417 
     427                // check memory
     428                if( chdev_ptr == NULL )
     429                printk("\n[PANIC] in %s : cannot create DMA chdev\n", __FUNCTION__ );
     430           
    418431                // make DMA specific initialisation
    419432                dev_dma_init( chdev_ptr );     
     
    430443            }
    431444        }
    432 
    433         ///////////////////////////////
    434         else if ( func == DEV_FUNC_TXT && USE_TXT_MTY == 1 )
    435         {
    436             assert(impl == IMPL_TXT_MTY,
    437                 "Internal TTYs should have MTY implementation\n");
    438 
    439             for ( channel = 0; channel < channels; channel++ )
    440             {
    441                 int rx;
    442                 for ( rx = 0; rx <= 1; rx++ )
    443                 {
    444                     // skip MTY0_TX since it has already been initialized
    445                     if ( channel == 0 && rx == 0 ) continue;
    446 
    447                     // create chdev in local cluster
    448                     chdev_ptr = chdev_create( func,
    449                                               impl,
    450                                               channel,
    451                                               rx,
    452                                               base );
    453 
    454                     assert( (chdev_ptr != NULL) ,
    455                         "cannot allocate memory for MTY chdev" );
    456 
    457                     // make MTY specific initialization
    458                     dev_txt_init( chdev_ptr );
    459 
    460                     // set the MTY fields in all clusters
    461                     xptr_t *chdev_entry;
    462                     if ( rx == 1 ) {
    463                         chdev_entry = &chdev_dir.txt_rx[channel];
    464                     } else {
    465                         chdev_entry = &chdev_dir.txt_tx[channel];
    466                     }
    467                     for ( x = 0; x < info->x_size; x++ )
    468                     {
    469                         for ( y = 0; y < info->y_size; y++ )
    470                         {
    471                             if (cluster_info_is_active(info->cluster_info[x][y])) {
    472                                 cxy_t cxy = (x<<info->y_width) + y;
    473                                 hal_remote_swd( XPTR( cxy, chdev_entry ),
    474                                                 XPTR( local_cxy, chdev_ptr ) );
    475                             }
    476                         }
    477                     }
    478 #if( DEBUG_KERNEL_INIT & 0x1 )
    479 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    480 printk("\n[DBG] %s : created MTY[%d] in cluster %x / chdev = %x\n",
    481 __FUNCTION__ , channel , local_cxy , chdev_ptr );
    482 #endif
    483                 }
    484             }
    485         }
    486 
    487         ///////////////////////////////
    488         else if ( func == DEV_FUNC_IOC )
    489         {
    490             assert(impl == IMPL_IOC_SPI, __FUNCTION__,
    491                 "Internal IOC should have SPI implementation\n");
    492 
    493             for ( channel = 0; channel < channels; channel++ )
    494             {
    495                 // create chdev in local cluster
    496                 chdev_ptr = chdev_create( func,
    497                                           impl,
    498                                           channel,
    499                                           0,
    500                                           base );
    501 
    502                 assert( (chdev_ptr != NULL) , __FUNCTION__ ,
    503                     "cannot allocate memory for IOC chdev" );
    504                
    505                 // make IOC specific initialization
    506                 dev_ioc_init( chdev_ptr );
    507 
    508                 // set the IOC fields in all clusters
    509                 xptr_t *chdev_entry = &chdev_dir.ioc[channel];
    510                 for ( x = 0; x < info->x_size; x++ )
    511                 {
    512                     for ( y = 0; y < info->y_size; y++ )
    513                     {
    514                         if (cluster_info_is_active(info->cluster_info[x][y])) {
    515                             cxy_t cxy = (x<<info->y_width) + y;
    516                             hal_remote_swd( XPTR( cxy, chdev_entry ),
    517                                             XPTR( local_cxy, chdev_ptr ) );
    518                         }
    519                     }
    520     }
    521 #if( DEBUG_KERNEL_INIT & 0x1 )
    522 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    523 printk("\n[DBG] %s : created IOC[%d] in cluster %x / chdev = %x\n",
    524 __FUNCTION__ , channel , local_cxy , chdev_ptr );
    525 #endif
    526             }
    527         }
    528 
    529445    }
    530446}  // end internal_devices_init()
     
    586502
    587503        // check PIC device initialized
    588         assert( (chdev_dir.pic != XPTR_NULL ) ,
    589               "PIC device must be initialized before other devices\n" );
     504        if( chdev_dir.pic == XPTR_NULL )
     505        printk("\n[PANIC] in %s : PIC device must be initialized first\n", __FUNCTION__ );
    590506
    591507        // check external device functionnal type
    592         assert( ( (func == DEV_FUNC_IOB) ||
    593                   (func == DEV_FUNC_IOC) ||
    594                   (func == DEV_FUNC_TXT) ||
    595                   (func == DEV_FUNC_NIC) ||
    596                   (func == DEV_FUNC_FBF) ) ,
    597                   "undefined external peripheral type\n" );
     508        if( (func != DEV_FUNC_IOB) && (func != DEV_FUNC_IOC) && (func != DEV_FUNC_TXT) &&
     509            (func != DEV_FUNC_NIC) && (func != DEV_FUNC_FBF) )
     510        printk("\n[PANIC] in %s : undefined peripheral type\n", __FUNCTION__ );
    598511
    599512        // loops on channels
     
    603516            for( rx = 0 ; rx < directions ; rx++ )
    604517            {
    605                 // skip TXT_TX[0] chdev that has already been created & registered
    606                 if( USE_TXT_MTY == 0 && (func == DEV_FUNC_TXT) && (channel == 0) && (rx == 0) )
     518                // skip TXT0 that has already been initialized
     519                if( (func == DEV_FUNC_TXT) && (channel == 0) ) continue;
     520
     521                // all kernel instances compute the target cluster for all chdevs,
     522                // computing the global index ext_chdev_gid[func,channel,direction]
     523                cxy_t target_cxy;
     524                while( 1 )
    607525                {
    608                     continue;
     526                    uint32_t offset     = ext_chdev_gid % ( info->x_size * info->y_size );
     527                    uint32_t x          = offset / info->y_size;
     528                    uint32_t y          = offset % info->y_size;
     529
     530                    target_cxy = HAL_CXY_FROM_XY( x , y );
     531
     532                    // exit loop if target cluster is active
     533                    if( cluster_is_active( target_cxy ) ) break;
     534               
     535                    // increment global index otherwise
     536                    ext_chdev_gid++;
    609537                }
    610538
    611                 // skip TXT chdevs because they are initialized in internal_devices_init()
    612                 if ( USE_TXT_MTY == 1 && func == DEV_FUNC_TXT )
    613                 {
    614                     continue;
    615                 }
    616 
    617                 if ( func == DEV_FUNC_IOC && impl == IMPL_IOC_SPI )
    618                 {
    619                     continue;
    620                 }
    621 
    622                 // compute target cluster for chdev[func,channel,direction]
    623                 uint32_t offset;
    624                 uint32_t cx;
    625                 uint32_t cy;
    626                 uint32_t target_cxy;
    627                 while (1) {
    628                     offset     = ext_chdev_gid % ( info->x_size * (info->y_size) );
    629                     cx         = offset / (info->y_size);
    630                     cy         = offset % (info->y_size);
    631                     target_cxy = (cx<<info->y_width) + cy;
    632                     // ext_chdev_gid that results in empty target clusters are skipped
    633                     if ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[cx][cy] ) == 0 ) {
    634                         ext_chdev_gid++;
    635                     } else { // The ext_chdev_gid resulted in a full target cluster
    636                         break;
    637                     }
    638                 }
    639539                // allocate and initialize a local chdev
    640540                // when local cluster matches target cluster
     
    647547                                          base );
    648548
    649                     assert( (chdev != NULL),
    650                             "cannot allocate external device" );
     549                    if( chdev == NULL )
     550                    printk("\n[PANIC] in %s : cannot allocate chdev for external device\n",
     551                    __FUNCTION__ );
    651552
    652553                    // make device type specific initialisation
     
    672573                    for( x = 0 ; x < info->x_size ; x++ )
    673574                    {
    674                         for ( y = 0; y < info->y_size; y++ )
     575                        for( y = 0 ; y < info->y_size ; y++ )
    675576                        {
    676                             if (cluster_info_is_active(info->cluster_info[x][y])) {
    677                                 cxy_t  cxy = (x<<info->y_width) + y;
    678                                 hal_remote_swd( XPTR( cxy , entry ),
     577                            cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     578
     579                            if( cluster_is_active( cxy ) )
     580                            {
     581                                hal_remote_s64( XPTR( cxy , entry ),
    679582                                                XPTR( local_cxy , chdev ) );
    680583                            }
     
    706609// @ info    : pointer on the local boot-info structure.
    707610///////////////////////////////////////////////////////////////////////////////////////////
    708 static void iopic_init( boot_info_t * info )
     611static void __attribute__ ((noinline)) iopic_init( boot_info_t * info )
    709612{
    710613    boot_device_t * dev_tbl;         // pointer on boot_info external devices array
     
    723626    dev_tbl     = info->ext_dev;
    724627
     628    // avoid GCC warning
     629    base        = XPTR_NULL;
     630    impl        = 0;
     631
    725632    // loop on external peripherals to get the IOPIC 
    726633        for( i = 0 , found = false ; i < dev_nr ; i++ )
     
    737644    }
    738645
    739     assert( found , "PIC device not found\n" );
     646    // check PIC existence
     647    if( found == false )
     648    printk("\n[PANIC] in %s : PIC device not found\n", __FUNCTION__ );
    740649
    741650    // allocate and initialize the PIC chdev in cluster 0
     
    746655                          base );
    747656
    748     assert( (chdev != NULL), "no memory for PIC chdev\n" );
     657    // check memory
     658    if( chdev == NULL )
     659    printk("\n[PANIC] in %s : no memory for PIC chdev\n", __FUNCTION__ );
    749660
    750661    // make PIC device type specific initialisation
     
    757668    for( x = 0 ; x < info->x_size ; x++ )
    758669    {
    759         for ( y = 0; y < info->y_size; y++ )
     670        for( y = 0 ; y < info->y_size ; y++ )
    760671        {
    761             if (cluster_info_is_active(info->cluster_info[x][y])) {
    762                 cxy_t  cxy = (x<<info->y_width) + y;
    763                 hal_remote_swd( XPTR( cxy , entry ) ,
     672            cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     673
     674            if( cluster_is_active( cxy ) )
     675            {
     676                hal_remote_s64( XPTR( cxy , entry ) ,
    764677                                XPTR( local_cxy , chdev ) );
    765678            }
     
    773686    for( x = 0 ; x < info->x_size ; x++ )
    774687    {
    775         for ( y = 0; y < info->y_size; y++ )
     688        for( y = 0 ; y < info->y_size ; y++ )
    776689        {
    777             if (cluster_info_is_active(info->cluster_info[x][y])) {
    778                 cxy_t  cxy = (x<<info->y_width) + y;
    779                 hal_remote_memset( XPTR( cxy , &iopic_input ) , 0xFF , sizeof(iopic_input_t) );
     690            cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     691
     692            if( cluster_is_active( cxy ) )
     693            {
     694                hal_remote_memset( XPTR( cxy , &iopic_input ),
     695                                   0xFF , sizeof(iopic_input_t) );
    780696            }
    781697        }
     
    807723            else if((func == DEV_FUNC_NIC) && (is_rx != 0)) ptr = &iopic_input.nic_rx[channel];
    808724            else if( func == DEV_FUNC_IOB )                 ptr = &iopic_input.iob;
    809             else     assert( false , "illegal source device for IOPIC input" );
     725            else     printk("\n[PANIC] in %s : illegal source device for IOPIC input" );
    810726
    811727            // set one entry in all "iopic_input" structures
    812728            for( x = 0 ; x < info->x_size ; x++ )
    813729            {
    814                 for ( y = 0; y < info->y_size; y++ )
     730                for( y = 0 ; y < info->y_size ; y++ )
    815731                {
    816                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    817                         cxy_t  cxy = (x<<info->y_width) + y;
    818                         hal_remote_swd( XPTR( cxy , ptr ) , id );
     732                    cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     733
     734                    if( cluster_is_active( cxy ) )
     735                    {
     736                        hal_remote_s64( XPTR( cxy , ptr ) , id );
    819737                    }
    820738                }
     
    824742
    825743#if( DEBUG_KERNEL_INIT & 0x1 )
    826 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
     744if( hal_tim_stamp() > DEBUG_KERNEL_INIT )
    827745{
    828746    printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n",
     
    843761// @ info    : pointer on the local boot-info structure.
    844762///////////////////////////////////////////////////////////////////////////////////////////
    845 static void lapic_init( boot_info_t * info )
     763static void __attribute__ ((noinline)) lapic_init( boot_info_t * info )
    846764{
    847765    boot_device_t * dev_tbl;      // pointer on boot_info internal devices array
     
    896814                if     ( func == DEV_FUNC_MMC ) lapic_input.mmc = id;
    897815                else if( func == DEV_FUNC_DMA ) lapic_input.dma[channel] = id;
    898                 else if( func == DEV_FUNC_TXT ) lapic_input.mtty = id;
    899                 else if( func == DEV_FUNC_IOC ) lapic_input.sdcard = id;
    900                 else assert( false , "illegal source device for LAPIC input" );
     816                else     printk("\n[PANIC] in %s : illegal source device for LAPIC input" );
    901817            }
    902818        }
     
    913829// @ return 0 if success / return EINVAL if not found.
    914830///////////////////////////////////////////////////////////////////////////////////////////
    915 static error_t get_core_identifiers( boot_info_t * info,
    916                                      lid_t       * lid,
    917                                      cxy_t       * cxy,
    918                                      gid_t       * gid )
     831static error_t __attribute__ ((noinline)) get_core_identifiers( boot_info_t * info,
     832                                                                lid_t       * lid,
     833                                                                cxy_t       * cxy,
     834                                                                gid_t       * gid )
    919835{
    920836    uint32_t   i;
     
    989905    thread->core = &LOCAL_CLUSTER->core_tbl[core_lid];
    990906
    991     // each core initializes the idle thread lists of locks
    992     list_root_init( &thread->locks_root );
    993     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
    994     thread->local_locks = 0;
    995     thread->remote_locks = 0;
    996 
    997     // CP0 in cluster 0 initializes TXT0 chdev descriptor
    998     if( core_cxy == 0 && core_lid == 0 ) // [MODIF]
    999     {
    1000         if( USE_TXT_MTY == 1 ) {
    1001             mtty0_device_init( info );
    1002         } else {
    1003             txt0_device_init( info );
    1004         }
    1005     }
    1006 
    1007     /////////////////////////////////////////////////////////////////////////////////
    1008     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1009                                         cluster_info_nb_actives(info->cluster_info) );
     907    // each core initializes the idle thread locks counters
     908    thread->busylocks = 0;
     909
     910#if DEBUG_BUSYLOCK
     911    // each core initialise the idle thread list of busylocks
     912    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
     913#endif
     914
     915    // CP0 initializes cluster info
     916    if( core_lid == 0 ) cluster_info_init( info );
     917
     918    // CP0 in cluster 0 initialises TXT0 chdev descriptor
     919    if( (core_lid == 0) && (core_cxy == 0) ) txt0_device_init( info );
     920
     921    /////////////////////////////////////////////////////////////////////////////////
     922    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     923                                        (info->x_size * info->y_size) );
    1010924    barrier_wait( &local_barrier , info->cores_nr );
    1011925    /////////////////////////////////////////////////////////////////////////////////
    1012926
    1013927#if DEBUG_KERNEL_INIT
    1014 if( (core_lid ==  0) & (local_cxy == 0) )
    1015 printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
    1016 __FUNCTION__, (uint32_t)hal_get_cycles() );
     928// if( (core_lid ==  0) & (local_cxy == 0) )
     929printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / sr %x / cycle %d\n",
     930__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    1017931#endif
    1018932
     
    1025939    // all cores check identifiers
    1026940    if( error )
    1027     {
    1028         assert( false ,
    1029         "illegal core identifiers gid = %x / cxy = %x / lid = %d",
    1030         core_lid , core_cxy , core_lid );
    1031     }
    1032 
    1033     // CP0 initializes cluster manager
     941    printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d",
     942    __FUNCTION__, core_lid, core_cxy, core_lid );
     943
     944    // CP0 initializes cluster manager complex structures
    1034945    if( core_lid == 0 )
    1035946    {
    1036         error = cluster_init( info );
     947        error = cluster_manager_init( info );
    1037948
    1038949        if( error )
    1039         {
    1040             assert( false ,
    1041             "cannot initialise cluster %x", local_cxy );
    1042         }
     950        printk("\n[PANIC] in %s : cannot initialize cluster manager in cluster %x\n",
     951        __FUNCTION__, local_cxy );
    1043952    }
    1044953
    1045954    /////////////////////////////////////////////////////////////////////////////////
    1046     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1047                                         cluster_info_nb_actives(info->cluster_info) );
     955    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     956                                        (info->x_size * info->y_size) );
    1048957    barrier_wait( &local_barrier , info->cores_nr );
    1049958    /////////////////////////////////////////////////////////////////////////////////
     
    1051960#if DEBUG_KERNEL_INIT
    1052961if( (core_lid ==  0) & (local_cxy == 0) )
    1053 printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
    1054 __FUNCTION__, (uint32_t)hal_get_cycles() );
     962printk("\n[DBG] %s : exit barrier 1 : clusters initialised / sr %x / cycle %d\n",
     963__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    1055964#endif
    1056965
     
    1071980   
    1072981    ////////////////////////////////////////////////////////////////////////////////
    1073     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1074                                         cluster_info_nb_actives(info->cluster_info) );
     982    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     983                                        (info->x_size * info->y_size) );
    1075984    barrier_wait( &local_barrier , info->cores_nr );
    1076985    ////////////////////////////////////////////////////////////////////////////////
     
    1078987#if DEBUG_KERNEL_INIT
    1079988if( (core_lid ==  0) & (local_cxy == 0) )
    1080 printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
    1081 __FUNCTION__, (uint32_t)hal_get_cycles() );
     989printk("\n[DBG] %s : exit barrier 2 : PIC initialised / sr %x / cycle %d\n",
     990__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    1082991#endif
    1083992
     
    11041013
    11051014    /////////////////////////////////////////////////////////////////////////////////
    1106     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1107                                         cluster_info_nb_actives(info->cluster_info) );
     1015    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1016                                        (info->x_size * info->y_size) );
    11081017    barrier_wait( &local_barrier , info->cores_nr );
    11091018    /////////////////////////////////////////////////////////////////////////////////
     
    11111020#if DEBUG_KERNEL_INIT
    11121021if( (core_lid ==  0) & (local_cxy == 0) )
    1113 printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
    1114 __FUNCTION__, (uint32_t)hal_get_cycles() );
     1022printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / sr %x / cycle %d\n",
     1023__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    11151024#endif
    11161025
     
    11271036    /////////////////////////////////////////////////////////////////////////////////
    11281037
    1129     // All cores enable the shared IPI channel
     1038    // All cores enable IPI
    11301039    dev_pic_enable_ipi();
    11311040    hal_enable_irq( &status );
    1132 
    1133 #if DEBUG_KERNEL_INIT
    1134 printk("\n[DBG] %s: IPI enabled for core %d cluster %d\n", __FUNCTION__,
    1135   core_lid, local_cxy);
    1136 #endif
    11371041
    11381042    // all cores initialize the idle thread descriptor
     
    11631067            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    11641068
    1165             assert( (fatfs_ctx != NULL) ,
    1166                     "cannot create FATFS context in cluster 0\n" );
     1069            if( fatfs_ctx == NULL )
     1070            printk("\n[PANIC] in %s : cannot create FATFS context in cluster 0\n",
     1071            __FUNCTION__ );
    11671072
    11681073            // 2. access boot device to initialize FATFS context
     
    11751080            uint32_t total_clusters   = fatfs_ctx->fat_sectors_count << 7;
    11761081 
    1177             // 4. create VFS root inode in cluster 0
     1082            // 4. initialize the FATFS entry in the vfs_context[] array
     1083            vfs_ctx_init( FS_TYPE_FATFS,                               // fs type
     1084                          0,                                           // attributes: unused
     1085                              total_clusters,               
     1086                              cluster_size,
     1087                              vfs_root_inode_xp,                           // VFS root
     1088                          fatfs_ctx );                                 // extend
     1089
     1090            // 5. create VFS root inode in cluster 0
    11781091            error = vfs_inode_create( XPTR_NULL,                           // dentry_xp
    11791092                                      FS_TYPE_FATFS,                       // fs_type
     
    11851098                                      0,                                   // gid
    11861099                                      &vfs_root_inode_xp );                // return
    1187 
    1188             assert( (error == 0) ,
    1189                     "cannot create VFS root inode\n" );
    1190 
    1191             // 5. initialize VFS context for FAT in cluster 0
    1192             vfs_ctx_init( FS_TYPE_FATFS,                 // file system type
    1193                           0,                             // attributes
    1194                               total_clusters,               
    1195                               cluster_size,
    1196                               vfs_root_inode_xp,             // VFS root
    1197                           fatfs_ctx );                   // extend
    1198 
    1199             // 6. check initialisation
     1100            if( error )
     1101            printk("\n[PANIC] in %s : cannot create VFS root inode in cluster 0\n",
     1102            __FUNCTION__ );
     1103
     1104            // 6. update the FATFS entry in vfs_context[] array
     1105            fs_context[FS_TYPE_FATFS].vfs_root_xp = vfs_root_inode_xp;
     1106
     1107            // 7. check FATFS initialization
    12001108            vfs_ctx_t   * vfs_ctx = &fs_context[FS_TYPE_FATFS];
    1201             assert( (((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster == 8),
    1202              "illegal value for FATFS context in cluster %x\n", local_cxy );
     1109
     1110            if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 )
     1111            printk("\n[PANIC] in %s : illegal FATFS context in cluster 0\n",
     1112            __FUNCTION__ );
    12031113        }
    12041114        else
    12051115        {
    1206             assert( false ,
    1207             "root FS must be FATFS" );
     1116            printk("\n[PANIC] in %s : unsupported VFS type in cluster 0\n",
     1117            __FUNCTION__ );
    12081118        }
    12091119
     
    12141124
    12151125    /////////////////////////////////////////////////////////////////////////////////
    1216     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1217                                         cluster_info_nb_actives(info->cluster_info) );
     1126    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1127                                        (info->x_size * info->y_size) );
    12181128    barrier_wait( &local_barrier , info->cores_nr );
    12191129    /////////////////////////////////////////////////////////////////////////////////
     
    12211131#if DEBUG_KERNEL_INIT
    12221132if( (core_lid ==  0) & (local_cxy == 0) )
    1223 printk("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
    1224 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
     1133printk("\n[DBG] %s : exit barrier 4 : VFS root initialized in cluster 0 / sr %x / cycle %d\n",
     1134__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    12251135#endif
    12261136
     
    12411151            fatfs_ctx_t * local_fatfs_ctx = fatfs_ctx_alloc();
    12421152
    1243             assert( (local_fatfs_ctx != NULL) ,
    1244             "cannot create FATFS context in cluster %x\n", local_cxy );
     1153            // check memory
     1154            if( local_fatfs_ctx == NULL )
     1155            printk("\n[PANIC] in %s : cannot create FATFS context in cluster %x\n",
     1156            __FUNCTION__ , local_cxy );
    12451157
    12461158            // 2. get local pointer on VFS context for FATFS
     
    12611173            vfs_ctx->extend = local_fatfs_ctx;
    12621174
    1263             // 7. check initialisation
    1264             assert( (((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster == 8),
    1265             "illegal value for FATFS context in cluster %x\n", local_cxy );
     1175            if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 )
     1176            printk("\n[PANIC] in %s : illegal FATFS context in cluster %x\n",
     1177            __FUNCTION__ , local_cxy );
    12661178        }
    12671179
    12681180        // get extended pointer on VFS root inode from cluster 0
    1269         vfs_root_inode_xp = hal_remote_lwd( XPTR( 0 , &process_zero.vfs_root_xp ) );
     1181        vfs_root_inode_xp = hal_remote_l64( XPTR( 0 , &process_zero.vfs_root_xp ) );
    12701182
    12711183        // update local process_zero descriptor
     
    12751187
    12761188    /////////////////////////////////////////////////////////////////////////////////
    1277     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1278                                         cluster_info_nb_actives(info->cluster_info) );
     1189    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1190                                        (info->x_size * info->y_size) );
    12791191    barrier_wait( &local_barrier , info->cores_nr );
    12801192    /////////////////////////////////////////////////////////////////////////////////
    12811193
    12821194#if DEBUG_KERNEL_INIT
    1283 if( (core_lid ==  0) & (local_cxy == 0) )
    1284 printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster 0 / cycle %d\n",
    1285 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
    1286 #endif
    1287 
    1288     /////////////////////////////////////////////////////////////////////////////////
    1289     // STEP 6 : CP0 in cluster IO makes the global DEVFS tree initialisation:
    1290     //          It creates the DEVFS directory "dev", and the DEVFS "external"
    1291     //          directory in cluster IO and mount these inodes into VFS.
    1292     /////////////////////////////////////////////////////////////////////////////////
    1293 
    1294     if( (core_lid ==  0) && (local_cxy == 0) )  // [FIXME]
     1195if( (core_lid ==  0) & (local_cxy == 1) )
     1196printk("\n[DBG] %s : exit barrier 5 : VFS root initialized in cluster 1 / sr %x / cycle %d\n",
     1197__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
     1198#endif
     1199
     1200    /////////////////////////////////////////////////////////////////////////////////
     1201    // STEP 6 : CP0 in cluster 0 makes the global DEVFS tree initialisation:
     1202    //          It initializes the DEVFS context, and creates the DEVFS
     1203    //          "dev" and "external" inodes in cluster 0.
     1204    /////////////////////////////////////////////////////////////////////////////////
     1205
     1206    if( (core_lid ==  0) && (local_cxy == 0) )
    12951207    {
    1296         // create "dev" and "external" directories.
     1208        // 1. allocate memory for DEVFS context extension in cluster 0
     1209        devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();
     1210
     1211        if( devfs_ctx == NULL )
     1212        printk("\n[PANIC] in %s : cannot create DEVFS context in cluster 0\n",
     1213        __FUNCTION__ , local_cxy );
     1214
     1215        // 2. initialize the DEVFS entry in the vfs_context[] array
     1216        vfs_ctx_init( FS_TYPE_DEVFS,                                // fs type
     1217                      0,                                            // attributes: unused
     1218                          0,                                            // total_clusters: unused
     1219                          0,                                            // cluster_size: unused
     1220                          vfs_root_inode_xp,                            // VFS root
     1221                      devfs_ctx );                                  // extend
     1222
     1223        // 3. create "dev" and "external" inodes (directories)
    12971224        devfs_global_init( process_zero.vfs_root_xp,
    12981225                           &devfs_dev_inode_xp,
    12991226                           &devfs_external_inode_xp );
    13001227
    1301         // creates the DEVFS context in cluster IO
    1302         devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();
    1303 
    1304         assert( (devfs_ctx != NULL) ,
    1305                 "cannot create DEVFS context in cluster IO\n");
    1306 
    1307         // register DEVFS root and external directories
    1308         devfs_ctx_init( devfs_ctx, devfs_dev_inode_xp, devfs_external_inode_xp );
     1228        // 4. initializes DEVFS context extension
     1229        devfs_ctx_init( devfs_ctx,
     1230                        devfs_dev_inode_xp,
     1231                        devfs_external_inode_xp );
    13091232    }   
    13101233
    13111234    /////////////////////////////////////////////////////////////////////////////////
    1312     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1313                                         cluster_info_nb_actives(info->cluster_info) );
     1235    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1236                                        (info->x_size * info->y_size) );
    13141237    barrier_wait( &local_barrier , info->cores_nr );
    13151238    /////////////////////////////////////////////////////////////////////////////////
     
    13171240#if DEBUG_KERNEL_INIT
    13181241if( (core_lid ==  0) & (local_cxy == 0) )
    1319 printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster 0 / cycle %d\n",
    1320 __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
     1242printk("\n[DBG] %s : exit barrier 6 : DEVFS root initialized in cluster 0 / sr %x / cycle %d\n",
     1243__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    13211244#endif
    13221245
     
    13241247    // STEP 7 : All CP0s complete in parallel the DEVFS tree initialization.
    13251248    //          Each CP0 get the "dev" and "external" extended pointers from
    1326     //          values stored in cluster IO.
    1327     //          Then each CP0 in cluster(i) creates the DEVFS "internal directory,
     1249    //          values stored in cluster 0.
     1250    //          Then each CP0 in cluster(i) creates the DEVFS "internal" directory,
    13281251    //          and creates the pseudo-files for all chdevs in cluster (i).
    13291252    /////////////////////////////////////////////////////////////////////////////////
     
    13311254    if( core_lid == 0 )
    13321255    {
    1333         // get extended pointer on "extend" field of VFS context for DEVFS in cluster IO
    1334         xptr_t  extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend ); // [FIXME]
     1256        // get extended pointer on "extend" field of VFS context for DEVFS in cluster 0
     1257        xptr_t  extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend );
    13351258
    13361259        // get pointer on DEVFS context in cluster 0
    13371260        devfs_ctx_t * devfs_ctx = hal_remote_lpt( extend_xp );
    13381261       
    1339         devfs_dev_inode_xp      = hal_remote_lwd( XPTR( 0 , &devfs_ctx->dev_inode_xp ) );
    1340         devfs_external_inode_xp = hal_remote_lwd( XPTR( 0 , &devfs_ctx->external_inode_xp ) );
     1262        devfs_dev_inode_xp      = hal_remote_l64( XPTR( 0 , &devfs_ctx->dev_inode_xp ) );
     1263        devfs_external_inode_xp = hal_remote_l64( XPTR( 0 , &devfs_ctx->external_inode_xp ) );
    13411264
    13421265        // populate DEVFS in all clusters
     
    13471270
    13481271    /////////////////////////////////////////////////////////////////////////////////
    1349     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1350                                         cluster_info_nb_actives(info->cluster_info) );
     1272    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1273                                        (info->x_size * info->y_size) );
    13511274    barrier_wait( &local_barrier , info->cores_nr );
    13521275    /////////////////////////////////////////////////////////////////////////////////
     
    13541277#if DEBUG_KERNEL_INIT
    13551278if( (core_lid ==  0) & (local_cxy == 0) )
    1356 printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
    1357 __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
     1279printk("\n[DBG] %s : exit barrier 7 : DEV initialized in cluster 0 / sr %x / cycle %d\n",
     1280__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    13581281#endif
    13591282
     
    13731296
    13741297    /////////////////////////////////////////////////////////////////////////////////
    1375     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1376                                         cluster_info_nb_actives(info->cluster_info) );
     1298    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1299                                        (info->x_size * info->y_size) );
    13771300    barrier_wait( &local_barrier , info->cores_nr );
    13781301    /////////////////////////////////////////////////////////////////////////////////
     
    13801303#if DEBUG_KERNEL_INIT
    13811304if( (core_lid ==  0) & (local_cxy == 0) )
    1382 printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
    1383 __FUNCTION__ , (uint32_t)hal_get_cycles() );
     1305printk("\n[DBG] %s : exit barrier 8 : process init created / sr %x / cycle %d\n",
     1306__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    13841307#endif
    13851308
    13861309#if (DEBUG_KERNEL_INIT & 1)
    1387 if( (core_lid ==  0) /*& (local_cxy == 0)*/ )
     1310if( (core_lid ==  0) & (local_cxy == 0) )
    13881311sched_display( 0 );
    13891312#endif
     
    13931316    /////////////////////////////////////////////////////////////////////////////////
    13941317   
    1395     if( (core_lid ==  0) && (local_cxy == 0) ) // [FIXME]
     1318    if( (core_lid == 0) && (local_cxy == 0) )
    13961319    {
    13971320        print_banner( (info->x_size * info->y_size) , info->cores_nr );
     
    14151338                   " - list item          : %d bytes\n"
    14161339                   " - xlist item         : %d bytes\n"
    1417                    " - spinlock           : %d bytes\n"
    1418                    " - remote spinlock    : %d bytes\n"
     1340                   " - busylock           : %d bytes\n"
     1341                   " - remote busylock    : %d bytes\n"
     1342                   " - queuelock          : %d bytes\n"
     1343                   " - remote queuelock   : %d bytes\n"
    14191344                   " - rwlock             : %d bytes\n"
    14201345                   " - remote rwlock      : %d bytes\n",
    1421                    sizeof( thread_t          ),
    1422                    sizeof( process_t         ),
    1423                    sizeof( cluster_t         ),
    1424                    sizeof( chdev_t           ),
    1425                    sizeof( core_t            ),
    1426                    sizeof( scheduler_t       ),
    1427                    sizeof( remote_fifo_t     ),
    1428                    sizeof( page_t            ),
    1429                    sizeof( mapper_t          ),
    1430                    sizeof( ppm_t             ),
    1431                    sizeof( kcm_t             ),
    1432                    sizeof( khm_t             ),
    1433                    sizeof( vmm_t             ),
    1434                    sizeof( gpt_t             ),
    1435                    sizeof( list_entry_t      ),
    1436                    sizeof( xlist_entry_t     ),
    1437                    sizeof( spinlock_t        ),
    1438                    sizeof( remote_spinlock_t ),
    1439                    sizeof( rwlock_t          ),
    1440                    sizeof( remote_rwlock_t   ));
     1346                   sizeof( thread_t           ),
     1347                   sizeof( process_t          ),
     1348                   sizeof( cluster_t          ),
     1349                   sizeof( chdev_t            ),
     1350                   sizeof( core_t             ),
     1351                   sizeof( scheduler_t        ),
     1352                   sizeof( remote_fifo_t      ),
     1353                   sizeof( page_t             ),
     1354                   sizeof( mapper_t           ),
     1355                   sizeof( ppm_t              ),
     1356                   sizeof( kcm_t              ),
     1357                   sizeof( khm_t              ),
     1358                   sizeof( vmm_t              ),
     1359                   sizeof( gpt_t              ),
     1360                   sizeof( list_entry_t       ),
     1361                   sizeof( xlist_entry_t      ),
     1362                   sizeof( busylock_t         ),
     1363                   sizeof( remote_busylock_t  ),
     1364                   sizeof( queuelock_t        ),
     1365                   sizeof( remote_queuelock_t ),
     1366                   sizeof( rwlock_t           ),
     1367                   sizeof( remote_rwlock_t    ));
    14411368#endif
    14421369
  • trunk/kernel/kern/printk.c

    r502 r564  
    2626#include <hal_special.h>
    2727#include <dev_txt.h>
    28 #include <remote_spinlock.h>
     28#include <remote_busylock.h>
    2929#include <cluster.h>
    3030#include <thread.h>
     
    201201// @ args      : va_list of arguments.
    202202//////////////////////////////////////////////////////////////////////////////////////
    203 static void kernel_printf( char    * format,
    204                            va_list  * args )
     203static void kernel_printf( const char * format,
     204                           va_list    * args )
    205205{
    206206
     
    352352{
    353353    va_list       args;
    354     reg_t         save_sr;
    355354
    356355    // get pointers on TXT0 chdev
     
    359358    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    360359
    361     // get extended pointer on remote TXT0 chdev lock
     360    // get extended pointer on remote TXT0 lock
    362361    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    363362
    364     // get TXT0 lock in busy waiting mode
    365     remote_spinlock_lock_busy( lock_xp , &save_sr );
    366 
    367     // call kernel_printf on TXT0, in busy waiting mode
     363    // get TXT0 lock
     364    remote_busylock_acquire( lock_xp );
     365
     366    // display format on TXT0 in busy waiting mode
    368367    va_start( args , format );
    369368    kernel_printf( format , &args );
    370369    va_end( args );
    371370
    372     // release lock
    373     remote_spinlock_unlock_busy( lock_xp , save_sr );
     371    // release TXT0 lock
     372    remote_busylock_release( lock_xp );
    374373}
    375374
     
    386385
    387386////////////////////////////////////
    388 void __panic( const char * file_name,
    389               const char * function_name,
    390               uint32_t     line,
    391               cycle_t      cycle,
    392               const char * format,
    393               ... )
     387void panic( const char * file_name,
     388            const char * function_name,
     389            uint32_t     line,
     390            cycle_t      cycle,
     391            const char * format,
     392            ... )
    394393{
    395394    // get pointers on TXT0 chdev
     
    399398
    400399    // get extended pointer on remote TXT0 lock
    401     xptr_t lock_txt0_xp = XPTR(txt0_cxy, &txt0_ptr->wait_lock);
    402 
    403     // get TXT0 lock in busy waiting mode
    404     {
    405         uint32_t save_sr;
    406         remote_spinlock_lock_busy(lock_txt0_xp, &save_sr);
    407 
    408         thread_t *current = CURRENT_THREAD;
    409         nolock_printk(
    410             "\n[PANIC] in %s: line %d | funct %s | cycle %d\n"
    411             "core[%x,%d] | thread %x in process %x\n"
    412             "            | thread_ptr %x | procress_ptr %x\n",
     400    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     401
     402    // get TXT0 lock
     403    remote_busylock_acquire( lock_xp );
     404
     405    // get calling thread
     406    thread_t * current = CURRENT_THREAD;
     407
     408    // print generic infos
     409    nolock_printk(
     410            "\n[PANIC] in %s: line %d | function %s | cycle %d\n"
     411            "core[%x,%d] | thread %x (%x) in process %x (%x)\n",
    413412            file_name, line, function_name, (uint32_t) cycle,
    414             local_cxy, current->core->lid, current->trdid, current->process->pid,
    415             current, current->process);
    416 
    417         // call kernel_printf on TXT0, in busy waiting to print format
    418         va_list args;
    419         va_start(args, format);
    420         kernel_printf(format, &args);
    421         va_end(args);
    422 
    423         // release TXT0 lock
    424         remote_spinlock_unlock_busy(lock_txt0_xp, save_sr);
    425     }
     413            local_cxy, current->core->lid,
     414            current->trdid, current,
     415            current->process->pid, current->process );
     416
     417    // call kernel_printf to print format
     418    va_list args;
     419    va_start(args, format);
     420    kernel_printf(format, &args);
     421    va_end(args);
     422
     423    // release TXT0 lock
     424    remote_busylock_release( lock_xp );
    426425
    427426    // suicide
     
    432431void puts( char * string )
    433432{
    434     uint32_t   save_sr;
    435433    uint32_t   n = 0;
    436434
     
    443441    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    444442
    445     // get extended pointer on remote TXT0 chdev lock
     443    // get extended pointer on remote TXT0 lock
    446444    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    447445
    448     // get TXT0 lock in busy waiting mode
    449     remote_spinlock_lock_busy( lock_xp , &save_sr );
     446    // get TXT0 lock
     447    remote_busylock_acquire( lock_xp );
    450448
    451449    // display string on TTY0
    452450    dev_txt_sync_write( string , n );
    453451
    454     // release TXT0 lock in busy waiting mode
    455     remote_spinlock_unlock_busy( lock_xp , save_sr );
     452    // release TXT0 lock
     453    remote_busylock_release( lock_xp );
    456454}
    457455
     
    464462    char      buf[10];
    465463    uint32_t  c;
    466     uint32_t  save_sr;
    467464
    468465    buf[0] = '0';
     
    484481    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    485482
    486     // get TXT0 lock in busy waiting mode
    487     remote_spinlock_lock_busy( lock_xp , &save_sr );
     483    // get TXT0 lock
     484    remote_busylock_acquire( lock_xp );
    488485
    489486    // display string on TTY0
    490487    dev_txt_sync_write( buf , 10 );
    491488
    492     // release TXT0 lock in busy waiting mode
    493     remote_spinlock_unlock_busy( lock_xp , save_sr );
     489    // release TXT0 lock
     490    remote_busylock_release( lock_xp );
    494491}
    495492
     
    501498    char      buf[18];
    502499    uint32_t  c;
    503     uint32_t  save_sr;
    504500
    505501    buf[0] = '0';
     
    521517    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    522518
    523     // get TXT0 lock in busy waiting mode
    524     remote_spinlock_lock_busy( lock_xp , &save_sr );
     519    // get TXT0 lock
     520    remote_busylock_acquire( lock_xp );
    525521
    526522    // display string on TTY0
    527523    dev_txt_sync_write( buf , 18 );
    528524
    529     // release TXT0 lock in busy waiting mode
    530     remote_spinlock_unlock_busy( lock_xp , save_sr );
     525    // release TXT0 lock
     526    remote_busylock_release( lock_xp );
    531527}
    532528
  • trunk/kernel/kern/printk.h

    r502 r564  
    2828// - The printk() function displays kernel messages on the kernel terminal TXT0,
    2929//   using a busy waiting policy: It calls directly the relevant TXT driver,
    30 //   after taking the TXT0 chdev lock for exclusive access to the TXT0 terminal.
     30//   after taking the TXT0 busylock for exclusive access to the TXT0 terminal.
    3131// - The user_printk() function displays messages on the calling thread private
    3232//   terminal, using a descheduling policy: it registers the request in the selected
     
    6767/**********************************************************************************
    6868 * This function displays a formatted string on the kernel terminal TXT0,
    69  * using a busy waiting policy: It calls directly the relevant TXT driver,
    7069 * after taking the TXT0 lock.
     70 * It uses a busy waiting policy, calling directly the relevant TXT driver,
    7171 **********************************************************************************
    7272 * @ format     : formatted string.
     
    7676/**********************************************************************************
    7777 * This function displays a formatted string on the kernel terminal TXT0,
    78  * using a busy waiting policy: It calls directly the relevant TXT driver,
    7978 * without taking the TXT0 lock.
     79 * It uses a busy waiting policy, calling directly the relevant TXT driver,
    8080 **********************************************************************************
    8181 * @ format     : formatted string.
     
    8585
    8686/**********************************************************************************
    87  * Private function designed to be called by the assert macro (below)
     87 * This function is called in case of kernel panic. It prints a detailed message
     88 * on the TXT0 terminal after taking the TXT0 lock, and call the hal_core_sleep()
     89 * function to block the calling core.  It is used by the assert macro (below).
    8890 **********************************************************************************
    8991 * @ file_name     : File where the assert macro was invoked
     
    9698 * See assert macro documentation for information about printed information.
    9799 *********************************************************************************/
    98 void __panic( const char * file_name,
    99               const char * function_name,
    100               uint32_t     line,
    101               cycle_t      cycle,
    102               const char * format,
    103               ... )
    104 __attribute__((__noreturn__));
     100void panic( const char * file_name,
     101            const char * function_name,
     102            uint32_t     line,
     103            cycle_t      cycle,
     104            const char * format,
     105            ... ) __attribute__((__noreturn__));
    105106
    106107/**********************************************************************************
     
    134135 * @ format        : formatted string
    135136 *********************************************************************************/
    136 #define assert( expr, format, ... ) { uint32_t __line_at_expansion = __LINE__;    \
    137   const volatile cycle_t __assert_cycle = hal_get_cycles();                       \
    138   if ( ( expr ) == false ) {                                                      \
    139     __panic( __FILE__, __FUNCTION__,                                              \
    140              __line_at_expansion, __assert_cycle,                                 \
    141              ( format ), ##__VA_ARGS__ );                                         \
    142   }                                                                               \
     137#define assert( expr, format, ... )                                               \
     138{                                                                                 \
     139    uint32_t __line_at_expansion = __LINE__;                                      \
     140    const volatile cycle_t __assert_cycle = hal_get_cycles();                     \
     141    if ( ( expr ) == false )                                                      \
     142    {                                                                             \
     143        panic( __FILE__, __FUNCTION__,                                            \
     144               __line_at_expansion, __assert_cycle,                               \
     145               ( format ), ##__VA_ARGS__ );                                       \
     146    }                                                                             \
    143147}
    144148
     
    168172
    169173
    170 
    171 /*  deprecated march 2018 [AG]
    172 
    173 #if CONFIG_CHDEV_DEBUG
    174 #define chdev_dmsg(...)   if(hal_time_stamp() > CONFIG_CHDEV_DEBUG) printk(__VA_ARGS__)
    175 #else
    176 #define chdev_dmsg(...)
    177 #endif
    178 
    179 #if CONFIG_CLUSTER_DEBUG
    180 #define cluster_dmsg(...)   if(hal_time_stamp() > CONFIG_CLUSTER_DEBUG) printk(__VA_ARGS__)
    181 #else
    182 #define cluster_dmsg(...)
    183 #endif
    184 
    185 #if CONFIG_CONTEXT_DEBUG
    186 #define context_dmsg(...)   if(hal_time_stamp() > CONFIG_CONTEXT_DEBUG) printk(__VA_ARGS__)
    187 #else
    188 #define context_dmsg(...)
    189 #endif
    190 
    191 #if CONFIG_CORE_DEBUG
    192 #define core_dmsg(...)   if(hal_time_stamp() > CONFIG_CORE_DEBUG) printk(__VA_ARGS__)
    193 #else
    194 #define core_dmsg(...)
    195 #endif
    196 
    197 #if CONFIG_DEVFS_DEBUG
    198 #define devfs_dmsg(...)   if(hal_time_stamp() > CONFIG_DEVFS_DEBUG) printk(__VA_ARGS__)
    199 #else
    200 #define devfs_dmsg(...)
    201 #endif
    202 
    203 #if CONFIG_DMA_DEBUG
    204 #define dma_dmsg(...)   if(hal_time_stamp() > CONFIG_DMA_DEBUG) printk(__VA_ARGS__)
    205 #else
    206 #define dma_dmsg(...)
    207 #endif
    208 
    209 #if CONFIG_DQDT_DEBUG
    210 #define dqdt_dmsg(...)   if(hal_time_stamp() > CONFIG_DQDT_DEBUG) printk(__VA_ARGS__)
    211 #else
    212 #define dqdt_dmsg(...)
    213 #endif
    214 
    215 #if CONFIG_ELF_DEBUG
    216 #define elf_dmsg(...)   if(hal_time_stamp() > CONFIG_ELF_DEBUG) printk(__VA_ARGS__)
    217 #else
    218 #define elf_dmsg(...)
    219 #endif
    220 
    221 #if CONFIG_EXEC_DEBUG
    222 #define exec_dmsg(...)   if(hal_time_stamp() > CONFIG_EXEC_DEBUG) printk(__VA_ARGS__)
    223 #else
    224 #define exec_dmsg(...)
    225 #endif
    226 
    227 #if CONFIG_EXCP_DEBUG
    228 #define excp_dmsg(...)   if(hal_time_stamp() > CONFIG_EXCP_DEBUG) printk(__VA_ARGS__)
    229 #else
    230 #define excp_dmsg(...)
    231 #endif
    232 
    233 #if CONFIG_FATFS_DEBUG
    234 #define fatfs_dmsg(...)   if(hal_time_stamp() > CONFIG_FATFS_DEBUG) printk(__VA_ARGS__)
    235 #else
    236 #define fatfs_dmsg(...)
    237 #endif
    238 
    239 #if CONFIG_FBF_DEBUG
    240 #define fbf_dmsg(...)   if(hal_time_stamp() > CONFIG_FBF_DEBUG) printk(__VA_ARGS__)
    241 #else
    242 #define fbf_dmsg(...)
    243 #endif
    244 
    245 #if CONFIG_FORK_DEBUG
    246 #define fork_dmsg(...)   if(hal_time_stamp() > CONFIG_FORK_DEBUG) printk(__VA_ARGS__)
    247 #else
    248 #define fork_dmsg(...)
    249 #endif
    250 
    251 #if CONFIG_GPT_DEBUG
    252 #define gpt_dmsg(...)   if(hal_time_stamp() > CONFIG_GPT_DEBUG) printk(__VA_ARGS__)
    253 #else
    254 #define gpt_dmsg(...)
    255 #endif
    256 
    257 #if CONFIG_GRPC_DEBUG
    258 #define grpc_dmsg(...)   if(hal_time_stamp() > CONFIG_GRPC_DEBUG) printk(__VA_ARGS__)
    259 #else
    260 #define grpc_dmsg(...)
    261 #endif
    262 
    263 #if CONFIG_IDLE_DEBUG
    264 #define idle_dmsg(...)   if(hal_time_stamp() > CONFIG_IDLE_DEBUG) printk(__VA_ARGS__)
    265 #else
    266 #define idle_dmsg(...)
    267 #endif
    268 
    269 #if CONFIG_IOC_DEBUG
    270 #define ioc_dmsg(...)   if(hal_time_stamp() > CONFIG_IOC_DEBUG) printk(__VA_ARGS__)
    271 #else
    272 #define ioc_dmsg(...)
    273 #endif
    274 
    275 #if CONFIG_IRQ_DEBUG
    276 #define irq_dmsg(...)   if(hal_time_stamp() > CONFIG_IRQ_DEBUG) printk(__VA_ARGS__)
    277 #else
    278 #define irq_dmsg(...)
    279 #endif
    280 
    281 #if CONFIG_KCM_DEBUG
    282 #define kcm_dmsg(...)   if(hal_time_stamp() > CONFIG_KCM_DEBUG) printk(__VA_ARGS__)
    283 #else
    284 #define kcm_dmsg(...)
    285 #endif
    286 
    287 #if CONFIG_KHM_DEBUG
    288 #define khm_dmsg(...)   if(hal_time_stamp() > CONFIG_KHM_DEBUG) printk(__VA_ARGS__)
    289 #else
    290 #define khm_dmsg(...)
    291 #endif
    292 
    293 #if CONFIG_KILL_DEBUG
    294 #define kill_dmsg(...)   if(hal_time_stamp() > CONFIG_KILL_DEBUG) printk(__VA_ARGS__)
    295 #else
    296 #define kill_dmsg(...)
    297 #endif
    298 
    299 #if CONFIG_KINIT_DEBUG
    300 #define kinit_dmsg(...)   if(hal_time_stamp() > CONFIG_KINIT_DEBUG) printk(__VA_ARGS__)
    301 #else
    302 #define kinit_dmsg(...)
    303 #endif
    304 
    305 #if CONFIG_KMEM_DEBUG
    306 #define kmem_dmsg(...)   if(hal_time_stamp() > CONFIG_KMEM_DEBUG) printk(__VA_ARGS__)
    307 #else
    308 #define kmem_dmsg(...)
    309 #endif
    310 
    311 #if CONFIG_MAPPER_DEBUG
    312 #define mapper_dmsg(...)   if(hal_time_stamp() > CONFIG_MAPPER_DEBUG) printk(__VA_ARGS__)
    313 #else
    314 #define mapper_dmsg(...)
    315 #endif
    316 
    317 #if CONFIG_MMAP_DEBUG
    318 #define mmap_dmsg(...)   if(hal_time_stamp() > CONFIG_MMAP_DEBUG) printk(__VA_ARGS__)
    319 #else
    320 #define mmap_dmsg(...)
    321 #endif
    322 
    323 #if CONFIG_MMC_DEBUG
    324 #define mmc_dmsg(...)   if(hal_time_stamp() > CONFIG_MMC_DEBUG) printk(__VA_ARGS__)
    325 #else
    326 #define mmc_dmsg(...)
    327 #endif
    328 
    329 #if CONFIG_NIC_DEBUG
    330 #define nic_dmsg(...)   if(hal_time_stamp() > CONFIG_NIC_DEBUG) printk(__VA_ARGS__)
    331 #else
    332 #define nic_dmsg(...)
    333 #endif
    334 
    335 #if CONFIG_PIC_DEBUG
    336 #define pic_dmsg(...)   if(hal_time_stamp() > CONFIG_PIC_DEBUG) printk(__VA_ARGS__)
    337 #else
    338 #define pic_dmsg(...)
    339 #endif
    340 
    341 #if CONFIG_PPM_DEBUG
    342 #define ppm_dmsg(...)   if(hal_time_stamp() > CONFIG_PPM_DEBUG) printk(__VA_ARGS__)
    343 #else
    344 #define ppm_dmsg(...)
    345 #endif
    346 
    347 #if CONFIG_PROCESS_DEBUG
    348 #define process_dmsg(...)   if(hal_time_stamp() > CONFIG_PROCESS_DEBUG) printk(__VA_ARGS__)
    349 #else
    350 #define process_dmsg(...)
    351 #endif
    352 
    353 #if CONFIG_READ_DEBUG
    354 #define read_dmsg(...)   if(hal_time_stamp() > CONFIG_READ_DEBUG) printk(__VA_ARGS__)
    355 #else
    356 #define read_dmsg(...)
    357 #endif
    358 
    359 #if CONFIG_RPC_DEBUG
    360 #define rpc_dmsg(...)   if(hal_time_stamp() > CONFIG_RPC_DEBUG) printk(__VA_ARGS__)
    361 #else
    362 #define rpc_dmsg(...)
    363 #endif
    364 
    365 #if CONFIG_SCHED_DEBUG
    366 #define sched_dmsg(...)   if(hal_time_stamp() > CONFIG_SCHED_DEBUG) printk(__VA_ARGS__)
    367 #else
    368 #define sched_dmsg(...)
    369 #endif
    370 
    371 #if CONFIG_SIGACTION_DEBUG
    372 #define sigaction_dmsg(...)   if(hal_time_stamp() > CONFIG_SIGACTION_DEBUG) printk(__VA_ARGS__)
    373 #else
    374 #define sigaction_dmsg(...)
    375 #endif
    376 
    377 #if CONFIG_SYSCALL_DEBUG
    378 #define syscall_dmsg(...)   if(hal_time_stamp() > CONFIG_SYSCALL_DEBUG) printk(__VA_ARGS__)
    379 #else
    380 #define syscall_dmsg(...)
    381 #endif
    382 
    383 #if CONFIG_THREAD_DEBUG
    384 #define thread_dmsg(...)   if(hal_time_stamp() > CONFIG_THREAD_DEBUG) printk(__VA_ARGS__)
    385 #else
    386 #define thread_dmsg(...)
    387 #endif
    388 
    389 #if CONFIG_TXT_DEBUG
    390 #define txt_dmsg(...)   if(hal_time_stamp() > CONFIG_TXT_DEBUG) printk(__VA_ARGS__)
    391 #else
    392 #define txt_dmsg(...)
    393 #endif
    394 
    395 #if CONFIG_VFS_DEBUG
    396 #define vfs_dmsg(...)   if(hal_time_stamp() > CONFIG_VFS_DEBUG) printk(__VA_ARGS__)
    397 #else
    398 #define vfs_dmsg(...)
    399 #endif
    400 
    401 #if CONFIG_VMM_DEBUG
    402 #define vmm_dmsg(...)   if(hal_time_stamp() > CONFIG_VMM_DEBUG) printk(__VA_ARGS__)
    403 #else
    404 #define vmm_dmsg(...)
    405 #endif
    406 
    407 #if CONFIG_WRITE_DEBUG
    408 #define write_dmsg(...)   if(hal_time_stamp() > CONFIG_WRITE_DEBUG) printk(__VA_ARGS__)
    409 #else
    410 #define write_dmsg(...)
    411 #endif
    412 
    413 */
    414 
    415174#endif  // _PRINTK_H
    416175
  • trunk/kernel/kern/process.c

    r527 r564  
    11/*
    2  * process.c - process related management
     2 * process.c - process related functions definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     
    4343#include <string.h>
    4444#include <scheduler.h>
    45 #include <remote_spinlock.h>
     45#include <busylock.h>
     46#include <queuelock.h>
     47#include <remote_queuelock.h>
     48#include <rwlock.h>
     49#include <remote_rwlock.h>
    4650#include <dqdt.h>
    4751#include <cluster.h>
     
    114118
    115119    // get parent_pid
    116     parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     120    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    117121
    118122#if DEBUG_PROCESS_REFERENCE_INIT
     
    132136    // initialize vmm as empty
    133137    error = vmm_init( process );
    134     assert( (error == 0) , "cannot initialize VMM\n" );
     138
     139assert( (error == 0) , "cannot initialize VMM\n" );
    135140 
    136141#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    138143if( DEBUG_PROCESS_REFERENCE_INIT )
    139144printk("\n[DBG] %s : thread %x in process %x / vmm empty for process %x / cycle %d\n",
    140 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid , cycle );
     145__FUNCTION__, CURRENT_THREAD->trdid, parent_pid , pid, cycle );
    141146#endif
    142147
     
    160165__FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, txt_id, cycle );
    161166#endif
    162 
    163 
    164 
    165167        // build path to TXT_RX[i] and TXT_TX[i] chdevs
    166168        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
     
    175177                           &stdin_id );
    176178
    177         assert( (error == 0) , "cannot open stdin pseudo file" );
    178         assert( (stdin_id == 0) , "stdin index must be 0" );
     179assert( (error == 0) , "cannot open stdin pseudo file" );
     180assert( (stdin_id == 0) , "stdin index must be 0" );
    179181
    180182#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    225227    {
    226228        // get extended pointer on stdin pseudo file in parent process
    227         file_xp = (xptr_t)hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
     229        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
    228230
    229231        // get extended pointer on parent process TXT chdev
     
    234236        chdev_ptr = GET_PTR( chdev_xp );
    235237 
    236         // get TXT terminal index
    237         txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
    238 
    239         // attach process to TXT[txt_id]
     238        // get parent process TXT terminal index
     239        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
     240
     241        // attach child process to parent process TXT terminal
    240242        process_txt_attach( process , txt_id );
    241243
     
    246248
    247249    // initialize specific inodes root and cwd
    248     process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( parent_cxy,
     250    process->vfs_root_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
    249251                                                         &parent_ptr->vfs_root_xp ) );
    250     process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( parent_cxy,
     252    process->vfs_cwd_xp  = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
    251253                                                         &parent_ptr->vfs_cwd_xp ) );
    252254    vfs_inode_remote_up( process->vfs_root_xp );
    253255    vfs_inode_remote_up( process->vfs_cwd_xp );
    254256
    255     remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
     257    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
    256258
    257259#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
    258260cycle = (uint32_t)hal_get_cycles();
    259261if( DEBUG_PROCESS_REFERENCE_INIT )
    260 printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n",
    261 __FUNCTION__ , CURRENT_THREAD , pid , cycle );
     262printk("\n[DBG] %s : thread %x in process %x / set fd_array for process %x / cycle %d\n",
     263__FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid , cycle );
    262264#endif
    263265
     
    265267    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    266268    process->children_nr     = 0;
    267     remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
     269    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
    268270
    269271    // reset semaphore / mutex / barrier / condvar list roots
     
    272274    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    273275    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    274     remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );
     276    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
    275277
    276278    // register new process in the local cluster manager pref_tbl[]
     
    284286    cluster_process_copies_link( process );
    285287
    286     // reset th_tbl[] array as empty in process descriptor
     288    // initialize th_tbl[] array and associated threads
    287289    uint32_t i;
    288     for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
     290
     291    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
    289292        {
    290293        process->th_tbl[i] = NULL;
    291294    }
    292295    process->th_nr  = 0;
    293     spinlock_init( &process->th_lock );
     296    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
    294297
    295298        hal_fence();
     
    298301cycle = (uint32_t)hal_get_cycles();
    299302if( DEBUG_PROCESS_REFERENCE_INIT )
    300 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
    301 __FUNCTION__ , CURRENT_THREAD , pid , cycle );
     303printk("\n[DBG] %s : thread %x in process %x exit for process %x / cycle %d\n",
     304__FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );
    302305#endif
    303306
     
    315318
    316319    // initialize PID, REF_XP, PARENT_XP, and STATE
    317     local_process->pid        = hal_remote_lw(  XPTR( ref_cxy , &ref_ptr->pid ) );
    318     local_process->parent_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
     320    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
     321    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
    319322    local_process->ref_xp     = reference_process_xp;
    320323    local_process->owner_xp   = reference_process_xp;
    321324    local_process->term_state = 0;
    322325
    323 #if DEBUG_PROCESS_COPY_INIT
     326#if DEBUG_PROCESS_COPY_INIT
     327thread_t * this = CURRET_THREAD; 
    324328uint32_t cycle = (uint32_t)hal_get_cycles();
    325329if( DEBUG_PROCESS_COPY_INIT )
    326 printk("\n[DBG] %s : thread %x enter for process %x\n",
    327 __FUNCTION__ , CURRENT_THREAD , local_process->pid );
    328 #endif
     330printk("\n[DBG] %s : thread %x in process %x enter for process %x / cycle %d\n",
     331__FUNCTION__, this->trdid, this->process->pid, local_process->pid, cycle );
     332#endif
     333
     334// check user process
     335assert( (local_process->pid != 0), "PID cannot be 0" );
    329336
    330337    // reset local process vmm
     
    336343
    337344    // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields
    338     local_process->vfs_root_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    339     local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
     345    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
     346    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    340347    local_process->vfs_cwd_xp  = XPTR_NULL;
    341348
     
    343350    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
    344351    local_process->children_nr   = 0;
    345     remote_spinlock_init( XPTR( local_cxy , &local_process->children_lock ) );
     352    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
     353                           LOCK_PROCESS_CHILDREN );
    346354
    347355    // reset children_list (not used in a process descriptor copy)
     
    354362    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
    355363
    356     // reset th_tbl[] array as empty
     364    // initialize th_tbl[] array and associated fields
    357365    uint32_t i;
    358     for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
     366    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
    359367        {
    360368        local_process->th_tbl[i] = NULL;
    361369    }
    362370    local_process->th_nr  = 0;
    363     spinlock_init( &local_process->th_lock );
     371    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
     372
    364373
    365374    // register new process descriptor in local cluster manager local_list
     
    374383cycle = (uint32_t)hal_get_cycles();
    375384if( DEBUG_PROCESS_COPY_INIT )
    376 printk("\n[DBG] %s : thread %x exit for process %x\n",
    377 __FUNCTION__ , CURRENT_THREAD , local_process->pid );
     385printk("\n[DBG] %s : thread %x in process %x exit for process %x / cycle %d\n",
     386__FUNCTION__, this->trdid, this->process->pid, local_process->pid, cycle );
    378387#endif
    379388
     
    399408uint32_t cycle = (uint32_t)hal_get_cycles();
    400409if( DEBUG_PROCESS_DESTROY )
    401 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    402 __FUNCTION__ , CURRENT_THREAD , pid , local_cxy , cycle );
     410printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     411__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle );
    403412#endif
    404413
     
    423432
    424433        // remove process from children_list
    425         remote_spinlock_lock( children_lock_xp );
     434        remote_queuelock_acquire( children_lock_xp );
    426435        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
    427436            hal_remote_atomic_add( children_nr_xp , -1 );
    428         remote_spinlock_unlock( children_lock_xp );
    429 
    430     // release the process PID to cluster manager
    431     cluster_pid_release( pid );
    432 
    433     }
    434 
    435     // FIXME close all open files and update dirty [AG]
     437        remote_queuelock_release( children_lock_xp );
     438
     439        // release the process PID to cluster manager
     440        cluster_pid_release( pid );
     441    }
     442
     443    // FIXME close all open files and synchronize dirty [AG]
    436444
    437445    // decrease refcount for bin file, root file and cwd file
     
    449457cycle = (uint32_t)hal_get_cycles();
    450458if( DEBUG_PROCESS_DESTROY )
    451 printk("\n[DBG] %s : thread %x exit / destroyed process %x in cluster %x / cycle %d\n",
    452 __FUNCTION__ , CURRENT_THREAD , pid, local_cxy, cycle );
     459printk("\n[DBG] %s : thread %x in process %x exit / process %x in cluster %x / cycle %d\n",
     460__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle );
    453461#endif
    454462
     
    491499    remote_nr = 0;
    492500
     501// check calling thread can yield
     502assert( (client->busylocks == 0),
     503"cannot yield : busylocks = %d\n", client->busylocks );
     504
    493505#if DEBUG_PROCESS_SIGACTION
    494506uint32_t cycle = (uint32_t)hal_get_cycles();
     
    517529
    518530    // The client thread send parallel RPCs to all remote clusters containing
    519     // target process copies, wait all responses, and then handles directly the
    520     // threads in local cluster, when required.
     531    // target process copies, wait all responses, and then handles directly
     532    // the threads in local cluster, when required.
    521533    // The client thread allocates a - shared - RPC descriptor in the stack,
    522534    // because all parallel, non-blocking, server threads use the same input
     
    529541    thread_block( client_xp , THREAD_BLOCKED_RPC );
    530542
    531     // take the lock protecting the copies
    532     remote_spinlock_lock( lock_xp );
     543    // take the lock protecting process copies
     544    remote_queuelock_acquire( lock_xp );
    533545
    534546    // initialize shared RPC descriptor
     
    573585
    574586    // release the lock protecting process copies
    575     remote_spinlock_unlock( lock_xp );
     587    remote_queuelock_release( lock_xp );
    576588
    577589    // restore IRQs
     
    620632    thread_t          * target;         // pointer on target thread
    621633    thread_t          * this;           // pointer on calling thread
    622     uint32_t            ltid;           // index in process th_tbl
     634    uint32_t            ltid;           // index in process th_tbl[]
    623635    cxy_t               owner_cxy;      // target process owner cluster
    624636    uint32_t            count;          // requests counter
     
    628640    this = CURRENT_THREAD;
    629641
     642#if DEBUG_PROCESS_SIGACTION
     643pid_t pid = process->pid;
     644uint32_t cycle = (uint32_t)hal_get_cycles();
     645if( DEBUG_PROCESS_SIGACTION < cycle )
     646printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     647__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     648#endif
     649
     650// check target process is an user process
     651assert( ( process->pid != 0 ),
     652"target process must be an user process" );
     653
    630654    // get target process owner cluster
    631655    owner_cxy = CXY_FROM_PID( process->pid );
    632656
    633 #if DEBUG_PROCESS_SIGACTION
    634 uint32_t cycle = (uint32_t)hal_get_cycles();
    635 if( DEBUG_PROCESS_SIGACTION < cycle )
    636 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    637 __FUNCTION__ , this , process->pid , local_cxy , cycle );
    638 #endif
    639 
    640657    // get lock protecting process th_tbl[]
    641     spinlock_lock( &process->th_lock );
     658    rwlock_rd_acquire( &process->th_lock );
    642659
    643660    // loop on target process local threads
     
    680697
    681698    // release lock protecting process th_tbl[]
    682     spinlock_unlock( &process->th_lock );
    683 
    684     // wait acknowledges
     699    rwlock_rd_release( &process->th_lock );
     700
     701    // busy waiting acknowledges
     702    // TODO this could be improved...
    685703    while( 1 )
    686704    {
     
    695713cycle = (uint32_t)hal_get_cycles();
    696714if( DEBUG_PROCESS_SIGACTION < cycle )
    697 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    698 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     715printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     716__FUNCTION__, this, this->process->pid, pid, local_cxy , cycle );
    699717#endif
    700718
     
    719737
    720738#if DEBUG_PROCESS_SIGACTION
     739pid_t pid = process->pid;
    721740uint32_t cycle = (uint32_t)hal_get_cycles();
    722741if( DEBUG_PROCESS_SIGACTION < cycle )
    723 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    724 __FUNCTION__ , this , process->pid , local_cxy , cycle );
    725 #endif
     742printk("\n[DBG] %s : thread %x n process %x enter for process %x in cluster %x / cycle %d\n",
     743__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     744#endif
     745
     746// check target process is an user process
     747assert( ( process->pid != 0 ),
     748"target process must be an user process" );
    726749
    727750    // get lock protecting process th_tbl[]
    728     spinlock_lock( &process->th_lock );
     751    rwlock_rd_acquire( &process->th_lock );
    729752
    730753    // loop on target process local threads                       
     
    739762            target_xp = XPTR( local_cxy , target );
    740763
    741             // main thread and client thread should not be blocked
     764            // main thread and client thread should not be deleted
    742765            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
    743766                (client_xp) != target_xp )                           // not client thread
     
    750773
    751774    // release lock protecting process th_tbl[]
    752     spinlock_unlock( &process->th_lock );
     775    rwlock_rd_release( &process->th_lock );
    753776
    754777#if DEBUG_PROCESS_SIGACTION
    755778cycle = (uint32_t)hal_get_cycles();
    756779if( DEBUG_PROCESS_SIGACTION < cycle )
    757 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    758 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     780printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     781__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
    759782#endif
    760783
     
    773796
    774797#if DEBUG_PROCESS_SIGACTION
     798pid_t pid = process->pid;
    775799uint32_t cycle = (uint32_t)hal_get_cycles();
    776800if( DEBUG_PROCESS_SIGACTION < cycle )
    777 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    778 __FUNCTION__ , this , process->pid , local_cxy , cycle );
    779 #endif
     801printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     802__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     803#endif
     804
     805// check target process is an user process
     806assert( ( process->pid != 0 ),
     807"target process must be an user process" );
    780808
    781809    // get lock protecting process th_tbl[]
    782     spinlock_lock( &process->th_lock );
     810    rwlock_rd_acquire( &process->th_lock );
    783811
    784812    // loop on process threads to unblock all threads
     
    798826
    799827    // release lock protecting process th_tbl[]
    800     spinlock_unlock( &process->th_lock );
     828    rwlock_rd_release( &process->th_lock );
    801829
    802830#if DEBUG_PROCESS_SIGACTION
    803831cycle = (uint32_t)hal_get_cycles();
    804832if( DEBUG_PROCESS_SIGACTION < cycle )
    805 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    806 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     833printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     834__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle );
    807835#endif
    808836
     
    818846    cluster_t * cluster = LOCAL_CLUSTER;
    819847
     848#if DEBUG_PROCESS_GET_LOCAL_COPY
     849thread_t * this = CURRENT_THREAD;
     850uint32_t cycle = (uint32_t)hal_get_cycles();
     851if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
     852printk("\n[DBG] %s : thread %x in cluster %x enter for process %x in cluster %x / cycle %d\n",
     853__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle );
     854#endif
     855
    820856    // get lock protecting local list of processes
    821     remote_spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
     857    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    822858
    823859    // scan the local list of process descriptors to find the process
     
    836872
    837873    // release lock protecting local list of processes
    838     remote_spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
     874    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    839875
    840876    // allocate memory for a new local process descriptor
     
    859895
    860896#if DEBUG_PROCESS_GET_LOCAL_COPY
    861 uint32_t cycle = (uint32_t)hal_get_cycles();
     897cycle = (uint32_t)hal_get_cycles();
    862898if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
    863 printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
    864 __FUNCTION__ , local_cxy , pid , process_ptr , cycle );
     899printk("\n[DBG] %s : thread %x in cluster %x exit in cluster %x / process %x / cycle %d\n",
     900__FUNCTION__, this->trdid, this->process->pid, local_cxy, process_ptr, cycle );
    865901#endif
    866902
     
    883919
    884920    // get pointers on parent process
    885     parent_xp  = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
     921    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    886922    parent_cxy = GET_CXY( parent_xp );
    887923    parent_ptr = GET_PTR( parent_xp );
    888924
    889     return hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     925    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    890926}
    891927
     
    899935    uint32_t fd;
    900936
    901     remote_spinlock_init( XPTR( local_cxy , &process->fd_array.lock ) );
     937    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
    902938
    903939    process->fd_array.current = 0;
     
    909945    }
    910946}
    911 
    912 //////////////////////////////
    913 bool_t process_fd_array_full( void )
    914 {
    915     // get extended pointer on reference process
    916     xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
    917 
    918     // get reference process cluster and local pointer
    919     process_t * ref_ptr = GET_PTR( ref_xp );
    920     cxy_t       ref_cxy = GET_CXY( ref_xp );
    921 
    922     // get number of open file descriptors from reference fd_array
    923     uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
    924 
    925         return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
    926 }
    927 
    928947/////////////////////////////////////////////////
    929948error_t process_fd_register( process_t * process,
     
    933952    bool_t    found;
    934953    uint32_t  id;
     954    uint32_t  count;
    935955    xptr_t    xp;
    936956
     
    941961
    942962    // take lock protecting reference fd_array
    943         remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
     963        remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
    944964
    945965    found   = false;
     
    947967    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
    948968    {
    949         xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
     969        xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
    950970        if ( xp == XPTR_NULL )
    951971        {
     972            // update reference fd_array
     973            hal_remote_s64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
     974                count = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) ) + 1;
     975            hal_remote_s32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , count );
     976
     977            // update local fd_array copy if required
     978            if( ref_cxy != local_cxy )
     979            {
     980                process->fd_array.array[id] = file_xp;
     981                process->fd_array.current   = count;
     982            }
     983
     984            // exit
     985                        *fdid = id;
    952986            found = true;
    953             hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
    954                 hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
    955                         *fdid = id;
    956987            break;
    957988        }
     
    959990
    960991    // release lock protecting reference fd_array
    961         remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
     992        remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
    962993
    963994    if ( !found ) return -1;
     
    9701001{
    9711002    xptr_t  file_xp;
     1003    xptr_t  lock_xp;
    9721004
    9731005    // access local copy of process descriptor
     
    9811013        process_t * ref_ptr = GET_PTR( ref_xp );
    9821014
     1015        // build extended pointer on lock protecting reference fd_array
     1016        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
     1017
     1018        // take lock protecting reference fd_array
     1019            remote_queuelock_acquire( lock_xp );
     1020
    9831021        // access reference process descriptor
    984         file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
     1022        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
    9851023
    9861024        // update local fd_array if found
    987         if( file_xp != XPTR_NULL )
    988         {
    989             process->fd_array.array[fdid] = file_xp;
    990         }
     1025        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
     1026       
     1027        // release lock protecting reference fd_array
     1028            remote_queuelock_release( lock_xp );
    9911029    }
    9921030
     
    10111049
    10121050    // get the remote lock protecting the src fd_array
    1013         remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );
     1051        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
    10141052
    10151053    // loop on all fd_array entries
    10161054    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
    10171055        {
    1018                 entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
     1056                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
    10191057
    10201058                if( entry != XPTR_NULL )
     
    10241062
    10251063                        // copy entry in destination process fd_array
    1026                         hal_remote_swd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
     1064                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
    10271065                }
    10281066        }
    10291067
    10301068    // release lock on source process fd_array
    1031         remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );
     1069        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
    10321070
    10331071}  // end process_fd_remote_copy()
     1072
     1073
     1074////////////////////////////////////
     1075bool_t process_fd_array_full( void )
     1076{
     1077    // get extended pointer on reference process
     1078    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
     1079
     1080    // get reference process cluster and local pointer
     1081    process_t * ref_ptr = GET_PTR( ref_xp );
     1082    cxy_t       ref_cxy = GET_CXY( ref_xp );
     1083
     1084    // get number of open file descriptors from reference fd_array
     1085    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
     1086
     1087        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
     1088}
     1089
    10341090
    10351091////////////////////////////////////////////////////////////////////////////////////
     
    10431099{
    10441100    ltid_t         ltid;
    1045     reg_t          save_sr;
    10461101    bool_t         found = false;
    10471102 
    1048 
    1049     assert( (process != NULL) , "process argument is NULL" );
    1050 
    1051     assert( (thread != NULL) , "thread argument is NULL" );
    1052 
    1053     // take lock protecting th_tbl, depending on thread type:
    1054     // we don't want to use a descheduling policy for idle thread initialisation
    1055     if ( thread->type == THREAD_IDLE ) {
    1056         spinlock_lock_busy( &process->th_lock , &save_sr );
    1057     } else {
    1058         spinlock_lock( &process->th_lock );
    1059     }
    1060 
    1061     // search a free slot in th_tbl[]
    1062     for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
     1103// check arguments
     1104assert( (process != NULL) , "process argument is NULL" );
     1105assert( (thread != NULL) , "thread argument is NULL" );
     1106
     1107    // get the lock protecting th_tbl for all threads
     1108    // but the idle thread executing kernel_init (cannot yield)
     1109    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
     1110
      1111    // scan th_tbl
     1112    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
    10631113    {
    10641114        if( process->th_tbl[ltid] == NULL )
     
    10791129    }
    10801130
    1081     // release lock protecting th_tbl
    1082     hal_fence();
    1083     if( thread->type == THREAD_IDLE ) {
    1084         spinlock_unlock_busy( &process->th_lock , save_sr );
    1085     } else {
    1086         spinlock_unlock( &process->th_lock );
    1087     }
    1088 
    1089     return (found) ? 0 : ENOMEM;
     1131    // get the lock protecting th_tbl for all threads
     1132    // but the idle thread executing kernel_init (cannot yield)
     1133    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
     1134
     1135    return (found) ? 0 : 0xFFFFFFFF;
    10901136
    10911137}  // end process_register_thread()
     
    10961142    uint32_t count;  // number of threads in local process descriptor
    10971143
    1098     assert( (thread != NULL) , "thread argument is NULL" );
     1144// check argument
     1145assert( (thread != NULL) , "thread argument is NULL" );
    10991146
    11001147    process_t * process = thread->process;
     
    11021149    // get thread local index
    11031150    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
    1104 
    1105     // take lock protecting th_tbl
    1106     spinlock_lock( &process->th_lock );
    1107 
     1151   
     1152    // the lock depends on thread user/kernel type, because we cannot
     1153    // use a descheduling policy for the lock protecting the kth_tbl
     1154
     1155    // get the lock protecting th_tbl[]
     1156    rwlock_wr_acquire( &process->th_lock );
     1157
     1158    // get number of kernel threads
    11081159    count = process->th_nr;
    11091160
    1110     assert( (count > 0) , "process th_nr cannot be 0\n" );
     1161// check th_nr value
     1162assert( (count > 0) , "process kth_nr cannot be 0\n" );
    11111163
    11121164    // remove thread from th_tbl[]
     
    11141166    process->th_nr = count-1;
    11151167
    1116     // release lock protecting th_tbl
    1117     hal_fence();
    1118     spinlock_unlock( &process->th_lock );
      1168    // release lock protecting th_tbl
     1169    rwlock_wr_release( &process->th_lock );
    11191170
    11201171    return (count == 1);
     
    11411192
    11421193    // get parent process PID and extended pointer on .elf file
    1143     parent_pid = hal_remote_lw (XPTR( parent_process_cxy , &parent_process_ptr->pid));
    1144     vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
    1145 
    1146     // check parent process is the reference process
    1147     ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
    1148 
    1149     assert( (parent_process_xp == ref_xp ) ,
    1150     "parent process must be the reference process\n" );
     1194    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
     1195    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
     1196
     1197    // get extended pointer on reference process
     1198    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
     1199
     1200// check parent process is the reference process
     1201assert( (parent_process_xp == ref_xp ) ,
     1202"parent process must be the reference process\n" );
    11511203
    11521204#if DEBUG_PROCESS_MAKE_FORK
     
    11951247#endif
    11961248
    1197     // give TXT ownership to child process
    1198     process_txt_set_ownership( XPTR( local_cxy , process ) );
    11991249
    12001250    // copy VMM from parent descriptor to child descriptor
     
    12181268#endif
    12191269
    1220     // parent process gives TXT ownership to child process if required
    1221     if( process_txt_is_owner(parent_process_xp) )
     1270    // if parent_process is INIT, or if parent_process is the TXT owner,
     1271    // the child_process becomes the owner of its TXT terminal
     1272    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
    12221273    {
    12231274        process_txt_set_ownership( XPTR( local_cxy , process ) );
     
    12261277cycle = (uint32_t)hal_get_cycles();
    12271278if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1228 printk("\n[DBG] %s : thread %x in process %x gives TXT from parent %x to child %x / cycle %d\n",
    1229 __FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    1230 parent_pid, new_pid, cycle );
     1279printk("\n[DBG] %s : thread %x in process %x / child takes TXT ownership / cycle %d\n",
     1280__FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle );
    12311281#endif
    12321282
     
    12491299    }
    12501300
    1251     // check main thread LTID
    1252     assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
    1253     "main thread must have LTID == 0\n" );
    1254 
    1255 //#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1256 #if DEBUG_PROCESS_MAKE_FORK
     1301// check main thread LTID
     1302assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
     1303"main thread must have LTID == 0\n" );
     1304
     1305#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    12571306cycle = (uint32_t)hal_get_cycles();
    12581307if( DEBUG_PROCESS_MAKE_FORK < cycle )
     
    12891338
    12901339    // register process in parent children list
    1291     remote_spinlock_lock( children_lock_xp );
     1340    remote_queuelock_acquire( children_lock_xp );
    12921341        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
    12931342        hal_remote_atomic_add( children_nr_xp , 1 );
    1294     remote_spinlock_unlock( children_lock_xp );
     1343    remote_queuelock_release( children_lock_xp );
    12951344
    12961345    // return success
     
    13411390    // open the file identified by <path>
    13421391    file_xp = XPTR_NULL;
    1343     file_id = -1;
     1392    file_id = 0xFFFFFFFF;
    13441393        error   = vfs_open( process,
    13451394                            path,
     
    14421491uint32_t cycle = (uint32_t)hal_get_cycles();
    14431492if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    1444 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1493printk("\n[DBG] %s : enter / cluster %x / cycle %d\n",
     1494__FUNCTION__, local_cxy, cycle );
    14451495#endif
    14461496
     
    14521502    process->term_state = 0;
    14531503
    1454     // reset th_tbl[] array as empty
     1504    // reset th_tbl[] array and associated fields
    14551505    uint32_t i;
    1456     for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
     1506    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
    14571507        {
    14581508        process->th_tbl[i] = NULL;
    14591509    }
    14601510    process->th_nr  = 0;
    1461     spinlock_init( &process->th_lock );
     1511    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
     1512
    14621513
    14631514    // reset children list as empty
    14641515    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    1465     remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
    14661516    process->children_nr = 0;
     1517    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
     1518                           LOCK_PROCESS_CHILDREN );
    14671519
    14681520        hal_fence();
     
    14711523cycle = (uint32_t)hal_get_cycles();
    14721524if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    1473 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1525printk("\n[DBG] %s : exit / cluster %x / cycle %d\n",
     1526__FUNCTION__, local_cxy, cycle );
    14741527#endif
    14751528
    14761529}  // end process_zero_init()
    14771530
    1478 //////////////////////////
     1531////////////////////////////////
    14791532void process_init_create( void )
    14801533{
     
    14981551        process = process_alloc();
    14991552       
    1500     assert( (process != NULL),
    1501     "no memory for process descriptor in cluster %x\n", local_cxy  );
     1553// check memory allocator
     1554assert( (process != NULL),
     1555"no memory for process descriptor in cluster %x\n", local_cxy  );
    15021556
    15031557    // get PID from local cluster
    15041558    error = cluster_pid_alloc( process , &pid );
    15051559
    1506     assert( (error == 0),
    1507     "cannot allocate PID in cluster %x\n", local_cxy );
    1508 
    1509     assert( (pid == 1) ,
    1510     "process INIT must be first process in cluster 0\n" );
     1560// check PID allocator
     1561assert( (error == 0),
     1562"cannot allocate PID in cluster %x\n", local_cxy );
     1563
     1564// check PID value
     1565assert( (pid == 1) ,
     1566"process INIT must be first process in cluster 0\n" );
    15111567
    15121568    // initialize process descriptor / parent is local process_zero
     
    15141570                            pid,
    15151571                            XPTR( local_cxy , &process_zero ) ); 
     1572
     1573#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1574if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1575printk("\n[DBG] %s : thread %x in process %x initialized process descriptor\n",
     1576__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1577#endif
    15161578
    15171579    // open the file identified by CONFIG_PROCESS_INIT_PATH
     
    15251587                            &file_id );
    15261588
    1527         assert( (error == 0),
    1528     "failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
    1529 
    1530     // register "code" and "data" vsegs as well as entry-point
     1589assert( (error == 0),
     1590"failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1591
     1592#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1593if( DEBUG_PROCESS_INIT_CREATE < cycle )
      1594printk("\n[DBG] %s : thread %x in process %x open .elf file descriptor\n",
     1595__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1596#endif
     1597
     1598   // register "code" and "data" vsegs as well as entry-point
    15311599    // in process VMM, using information contained in the elf file.
    15321600        error = elf_load_process( file_xp , process );
    15331601
    1534         assert( (error == 0),
    1535     "cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1602assert( (error == 0),
     1603"cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1604
     1605#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1606if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1607printk("\n[DBG] %s : thread %x in process %x registered code/data vsegs in VMM\n",
     1608__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1609#endif
    15361610
    15371611    // get extended pointers on process_zero children_root, children_lock
     
    15391613    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
    15401614
     1615    // take lock protecting kernel process children list
     1616    remote_queuelock_acquire( children_lock_xp );
     1617
    15411618    // register process INIT in parent local process_zero
    1542     remote_spinlock_lock( children_lock_xp );
    15431619        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
    15441620        hal_atomic_add( &process_zero.children_nr , 1 );
    1545     remote_spinlock_unlock( children_lock_xp );
     1621
     1622    // release lock protecting kernel process children list
     1623    remote_queuelock_release( children_lock_xp );
     1624
     1625#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1626if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1627printk("\n[DBG] %s : thread %x in process %x registered init process in parent\n",
     1628__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1629#endif
    15461630
    15471631    // select a core in local cluster to execute the main thread
     
    15601644                                &thread );
    15611645
    1562         assert( (error == 0),
    1563     "cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
    1564 
    1565     assert( (thread->trdid == 0),
    1566     "main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1646assert( (error == 0),
     1647"cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1648
     1649assert( (thread->trdid == 0),
     1650"main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1651
     1652#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1653if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1654printk("\n[DBG] %s : thread %x in process %x created main thread\n",
     1655__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1656#endif
    15671657
    15681658    // activate thread
     
    16181708
    16191709    // get PID and state
    1620     pid   = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    1621     state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) );
     1710    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     1711    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
    16221712
    16231713    // get PPID
    1624     parent_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
     1714    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    16251715    parent_cxy = GET_CXY( parent_xp );
    16261716    parent_ptr = GET_PTR( parent_xp );
    1627     ppid       = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     1717    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    16281718
    16291719    // get number of threads
    1630     th_nr      = hal_remote_lw( XPTR( process_cxy , &process_ptr->th_nr ) );
     1720    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
    16311721
    16321722    // get pointers on owner process descriptor
    1633     owner_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->owner_xp ) );
     1723    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
    16341724    owner_cxy = GET_CXY( owner_xp );
    16351725    owner_ptr = GET_PTR( owner_xp );
    16361726
    16371727    // get extended pointer on TXT_RX file descriptor attached to process
    1638     txt_file_xp = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
     1728    txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
    16391729
    16401730    assert( (txt_file_xp != XPTR_NULL) ,
     
    16501740                       XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
    16511741   
    1652     txt_owner_xp = (xptr_t)hal_remote_lwd( XPTR( txt_chdev_cxy,
     1742    txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy,
    16531743                                                 &txt_chdev_ptr->ext.txt.owner_xp ) );
    16541744   
    16551745    // get process .elf name
    1656     elf_file_xp   = hal_remote_lwd( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
     1746    elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
    16571747    elf_file_cxy  = GET_CXY( elf_file_xp );
    16581748    elf_file_ptr  = (vfs_file_t *)GET_PTR( elf_file_xp );
     
    17181808    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    17191809
    1720     // check process is in owner cluster
    1721     assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
    1722     "process descriptor not in owner cluster" );
    1723 
    1724     // check terminal index
    1725     assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
    1726     "illegal TXT terminal index" );
     1810// check process is in owner cluster
     1811assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
     1812"process descriptor not in owner cluster" );
     1813
     1814// check terminal index
     1815assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
     1816"illegal TXT terminal index" );
    17271817
    17281818    // get pointers on TXT_RX[txt_id] chdev
     
    17351825    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
    17361826
     1827    // get lock protecting list of processes attached to TXT
     1828    remote_busylock_acquire( lock_xp );
     1829
    17371830    // insert process in attached process list
    1738     remote_spinlock_lock( lock_xp );
    17391831    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
    1740     remote_spinlock_unlock( lock_xp );
     1832
     1833    // release lock protecting list of processes attached to TXT
     1834    remote_busylock_release( lock_xp );
    17411835
    17421836#if DEBUG_PROCESS_TXT
     
    17651859    process_cxy = GET_CXY( process_xp );
    17661860    process_ptr = GET_PTR( process_xp );
    1767 
    1768     // check process descriptor in owner cluster
    1769     process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    1770     assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
    1771     "process descriptor not in owner cluster" );
     1861    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     1862
     1863// check process descriptor in owner cluster
     1864assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
     1865"process descriptor not in owner cluster" );
    17721866
    17731867    // release TXT ownership (does nothing if not TXT owner)
     
    17751869
    17761870    // get extended pointer on process stdin file
    1777     file_xp = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1871    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    17781872
    17791873    // get pointers on TXT_RX chdev
     
    17851879    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
    17861880
     1881    // get lock protecting list of processes attached to TXT
     1882    remote_busylock_acquire( lock_xp );
     1883
    17871884    // unlink process from attached process list
    1788     remote_spinlock_lock( lock_xp );
    17891885    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
    1790     remote_spinlock_unlock( lock_xp );
     1886
     1887    // release lock protecting list of processes attached to TXT
     1888    remote_busylock_release( lock_xp );
    17911889
    17921890#if DEBUG_PROCESS_TXT
    17931891uint32_t cycle  = (uint32_t)hal_get_cycles();
    1794 uint32_t txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
     1892uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
    17951893if( DEBUG_PROCESS_TXT < cycle )
    17961894printk("\n[DBG] %s : thread %x in process %x detached process %x from TXT %d / cycle %d\n",
     
    18151913    process_cxy = GET_CXY( process_xp );
    18161914    process_ptr = GET_PTR( process_xp );
    1817     process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1915    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    18181916
    18191917    // check owner cluster
     
    18221920
    18231921    // get extended pointer on stdin pseudo file
    1824     file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1922    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    18251923
    18261924    // get pointers on TXT chdev
     
    18301928
    18311929    // set owner field in TXT chdev
    1832     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
     1930    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
    18331931
    18341932#if DEBUG_PROCESS_TXT
    18351933uint32_t cycle  = (uint32_t)hal_get_cycles();
    1836 uint32_t txt_id = hal_remote_lw( XPTR( txt_cxy , &txt_ptr->channel ) );
     1934uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
    18371935if( DEBUG_PROCESS_TXT < cycle )
    18381936printk("\n[DBG] %s : thread %x in process %x give TXT %d to process %x / cycle %d\n",
     
    18681966    process_cxy = GET_CXY( process_xp );
    18691967    process_ptr = GET_PTR( process_xp );
    1870     process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1968    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    18711969
    18721970    // check owner cluster
     
    18751973
    18761974    // get extended pointer on stdin pseudo file
    1877     file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1975    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    18781976
    18791977    // get pointers on TXT chdev
     
    18831981
    18841982    // get extended pointer on TXT_RX owner and TXT channel
    1885     owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    1886     txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
     1983    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
     1984    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
    18871985
    18881986    // transfer ownership only if process is the TXT owner
     
    18941992
    18951993        // get lock
    1896         remote_spinlock_lock( lock_xp );
     1994        remote_busylock_acquire( lock_xp );
    18971995
    18981996        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
     
    19082006                {
    19092007                    // release lock
    1910                     remote_spinlock_unlock( lock_xp );
     2008                    remote_busylock_release( lock_xp );
    19112009
    19122010                    // set owner field in TXT chdev
    1913                     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     2011                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    19142012
    19152013#if DEBUG_PROCESS_TXT
    19162014cycle   = (uint32_t)hal_get_cycles();
    1917 uint32_t ksh_pid = hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) );
     2015uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
    19182016if( DEBUG_PROCESS_TXT < cycle )
    19192017printk("\n[DBG] %s : thread %x in process %x release TXT %d to KSH %x / cycle %d\n",
     
    19262024 
    19272025            // release lock
    1928             remote_spinlock_unlock( lock_xp );
     2026            remote_busylock_release( lock_xp );
    19292027
    19302028            // PANIC if KSH not found
     
    19452043                {
    19462044                    // release lock
    1947                     remote_spinlock_unlock( lock_xp );
     2045                    remote_busylock_release( lock_xp );
    19482046
    19492047                    // set owner field in TXT chdev
    1950                     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     2048                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    19512049
    19522050#if DEBUG_PROCESS_TXT
    19532051cycle   = (uint32_t)hal_get_cycles();
    1954 uint32_t new_pid = hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) );
     2052uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
    19552053if( DEBUG_PROCESS_TXT < cycle )
    19562054printk("\n[DBG] %s : thread %x in process %x release TXT %d to process %x / cycle %d\n",
     
    19632061
    19642062            // release lock
    1965             remote_spinlock_unlock( lock_xp );
     2063            remote_busylock_release( lock_xp );
    19662064
    19672065            // no more owner for TXT if no other process found
    1968             hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
     2066            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
    19692067
    19702068#if DEBUG_PROCESS_TXT
     
    19932091
    19942092
    1995 //////////////////////////////////////////////////
    1996 uint32_t process_txt_is_owner( xptr_t process_xp )
     2093////////////////////////////////////////////////
     2094bool_t process_txt_is_owner( xptr_t process_xp )
    19972095{
    19982096    // get local pointer and cluster of process in owner cluster
     
    20002098    process_t * process_ptr = GET_PTR( process_xp );
    20012099
    2002     // check owner cluster
    2003     pid_t process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    2004     assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    2005     "process descriptor not in owner cluster\n" );
     2100// check calling thread execute in target process owner cluster
     2101pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     2102assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
     2103"process descriptor not in owner cluster\n" );
    20062104
    20072105    // get extended pointer on stdin pseudo file
    2008     xptr_t file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     2106    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    20092107
    20102108    // get pointers on TXT chdev
     
    20142112
    20152113    // get extended pointer on TXT_RX owner process
    2016     xptr_t owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
     2114    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    20172115
    20182116    return (process_xp == owner_xp);
     
    20272125    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
    20282126
    2029     return (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
     2127    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
    20302128
    20312129}  // end process_txt_get_owner()
     
    20452143    xptr_t      txt0_xp;
    20462144    xptr_t      txt0_lock_xp;
    2047     reg_t       txt0_save_sr;    // save SR to take TXT0 lock in busy mode
    20482145   
    20492146    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
     
    20682165
    20692166    // get lock on attached process list
    2070     remote_spinlock_lock( lock_xp );
     2167    remote_busylock_acquire( lock_xp );
    20712168
    20722169    // get TXT0 lock in busy waiting mode
    2073     remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
     2170    remote_busylock_acquire( txt0_lock_xp );
    20742171
    20752172    // display header
     
    20852182
    20862183    // release TXT0 lock in busy waiting mode
    2087     remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
     2184    remote_busylock_release( txt0_lock_xp );
    20882185
    20892186    // release lock on attached process list
    2090     remote_spinlock_unlock( lock_xp );
     2187    remote_busylock_release( lock_xp );
    20912188
    20922189}  // end process_txt_display
  • trunk/kernel/kern/process.h

    r527 r564  
    11/*
    2  * process.h - process related management functions
     2 * process.h - process related functions definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     
    3333#include <xlist.h>
    3434#include <bits.h>
    35 #include <spinlock.h>
     35#include <busylock.h>
     36#include <queuelock.h>
     37#include <remote_queuelock.h>
     38#include <remote_rwlock.h>
    3639#include <hal_atomic.h>
    3740#include <vmm.h>
     
    6972 * A free entry in this array contains the XPTR_NULL value.
    7073 * The array size is defined by a the CONFIG_PROCESS_FILE_MAX_NR parameter.
    71  * All modifications (open/close) in this structure must be done by the reference cluster,
    72  * and reported in process copies.
     74 *
     75 * NOTE: - Only the fd_array[] in the reference process contains a complete list of open
     76 *         files, and is protected by the lock against concurrent access.
     77 *       - the fd_array[] in a process copy is simply a cache containing a subset of the
      78 *         open files to speed the fdid to xptr translation, but the "lock" and "current"
     79 *         fields should not be used.
     80 *       - all modifications made by the process_fd_remove() are done in reference cluster
     81 *         and reported in all process_copies.
    7382 ********************************************************************************************/
    7483
    7584typedef struct fd_array_s
    7685{
    77         remote_spinlock_t lock;                               /*! lock protecting fd_array      */
    78     uint32_t          current;                            /*! current number of open files  */
    79         xptr_t            array[CONFIG_PROCESS_FILE_MAX_NR];  /*! xptr on open file descriptors */
     86        remote_queuelock_t lock;                              /*! lock protecting fd_array      */
     87    uint32_t           current;                           /*! current number of open files  */
     88        xptr_t             array[CONFIG_PROCESS_FILE_MAX_NR]; /*! open file descriptors        */
    8089}
    8190fd_array_t;
     
    100109 *    complete in the reference process cluster, other copies are read-only caches.
    101110 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated
    102  *    <sync_lock>, that are dynamically allocated, are only defined in the reference cluster.
     111 *    <sync_lock>, dynamically allocated, are only defined in the reference cluster.
    103112 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only
    104113 *    defined in the reference cluster, and are undefined in other clusters.
    105  * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
    106  *    are defined in all process descriptors copies.
     114 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <u_th_lock> or <k_th_lock> fields
      115 *    are specific in each cluster, and are defined in all process descriptors copies.
    107116 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.
    108  *    The term_state format is defined in the shared_syscalls.h file.
     117 *    (The term_state format is defined in the shared_syscalls.h file ).
    109118 ********************************************************************************************/
    110119
    111120typedef struct process_s
    112121{
    113         vmm_t             vmm;              /*! embedded virtual memory manager                 */
    114 
    115         fd_array_t        fd_array;         /*! embedded open file descriptors array            */
    116 
    117         xptr_t            vfs_root_xp;      /*! extended pointer on current VFS root inode      */
    118         xptr_t            vfs_bin_xp;       /*! extended pointer on .elf file descriptor        */
    119         pid_t             pid;              /*! process identifier                              */
    120     xptr_t            ref_xp;           /*! extended pointer on reference process           */
    121     xptr_t            owner_xp;         /*! extended pointer on owner process               */
    122     xptr_t            parent_xp;        /*! extended pointer on parent process              */
    123 
    124         xptr_t            vfs_cwd_xp;       /*! extended pointer on current working dir inode   */
    125         remote_rwlock_t   cwd_lock;         /*! lock protecting working directory changes       */
    126 
    127         xlist_entry_t     children_root;    /*! root of the children process xlist              */
    128     remote_spinlock_t children_lock;    /*! lock protecting children process xlist          */
    129     uint32_t          children_nr;      /*! number of children processes                    */
    130 
    131         xlist_entry_t     children_list;    /*! member of list of children of same parent       */
    132     xlist_entry_t     local_list;       /*! member of list of process in same cluster       */
    133     xlist_entry_t     copies_list;      /*! member of list of copies of same process        */
    134     xlist_entry_t     txt_list;         /*! member of list of processes sharing same TXT    */
    135 
    136         spinlock_t        th_lock;          /*! lock protecting th_tbl[] concurrent access      */
    137         uint32_t          th_nr;            /*! number of threads in this cluster               */
    138 
    139         struct thread_s * th_tbl[CONFIG_THREAD_MAX_PER_CLUSTER]; /*! pointers on local threads  */
    140 
    141     xlist_entry_t     sem_root;         /*! root of the process semaphore list              */
    142     xlist_entry_t     mutex_root;       /*! root of the process mutex list                  */
    143     xlist_entry_t     barrier_root;     /*! root of the process barrier list                */
    144     xlist_entry_t     condvar_root;     /*! root of the process condvar list                */
    145     remote_spinlock_t sync_lock;        /*! lock protecting sem,mutex,barrier,condvar lists */
    146 
    147     uint32_t          term_state;       /*! termination status (flags & exit status)        */
     122        vmm_t              vmm;              /*! embedded virtual memory manager                 */
     123
     124        fd_array_t         fd_array;         /*! embedded open file descriptors array            */
     125
     126        xptr_t             vfs_root_xp;      /*! extended pointer on current VFS root inode      */
     127        xptr_t             vfs_bin_xp;       /*! extended pointer on .elf file descriptor        */
     128        pid_t              pid;              /*! process identifier                              */
     129    xptr_t             ref_xp;           /*! extended pointer on reference process           */
     130    xptr_t             owner_xp;         /*! extended pointer on owner process               */
     131    xptr_t             parent_xp;        /*! extended pointer on parent process              */
     132
     133        xptr_t             vfs_cwd_xp;       /*! extended pointer on current working dir inode   */
     134        remote_rwlock_t    cwd_lock;         /*! lock protecting working directory changes       */
     135
     136        xlist_entry_t      children_root;    /*! root of the children process xlist              */
     137    remote_queuelock_t children_lock;    /*! lock protecting children process xlist          */
     138    uint32_t           children_nr;      /*! number of children processes                    */
     139
     140        xlist_entry_t      children_list;    /*! member of list of children of same parent       */
     141    xlist_entry_t      local_list;       /*! member of list of process in same cluster       */
     142    xlist_entry_t      copies_list;      /*! member of list of copies of same process        */
     143    xlist_entry_t      txt_list;         /*! member of list of processes sharing same TXT    */
     144
     145        struct thread_s  * th_tbl[CONFIG_THREADS_MAX_PER_CLUSTER];       /*! local threads       */
     146        uint32_t           th_nr;            /*! number of threads in this cluster               */
      147    rwlock_t           th_lock;          /*! lock protecting th_tbl[]                        */
     148
      149    xlist_entry_t      sem_root;         /*! root of the user defined semaphore list         */
     150    xlist_entry_t      mutex_root;       /*! root of the user defined mutex list             */
     151    xlist_entry_t      barrier_root;     /*! root of the user defined barrier list           */
     152    xlist_entry_t      condvar_root;     /*! root of the user defined condvar list           */
     153    remote_queuelock_t sync_lock;        /*! lock protecting user defined synchro lists      */
     154
     155    uint32_t           term_state;       /*! termination status (flags & exit status)        */
    148156}
    149157process_t;
     
    210218
    211219/*********************************************************************************************
    212  * This function initializes a local, reference, user process descriptor from another process
    213  * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments must
    214  * be previously allocated by the caller. This function can be called by two functions:
     220 * This function initializes a reference, user process descriptor from another process
     221 * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments
     222 * are previously allocated by the caller. This function can be called by two functions:
    215223 * 1) process_init_create() : process is the INIT process; parent is process-zero.
    216224 * 2) process_make_fork() : the parent process descriptor is generally remote.
     
    411419
    412420/*********************************************************************************************
    413  * This function uses as many remote accesses as required, to reset an entry in fd_array[],
    414  * in all clusters containing a copy. The entry is identified by the <fdid> argument.
    415  * This function must be executed by a thread running reference cluster, that contains
    416  * the complete list of process descriptors copies.
    417  *********************************************************************************************
    418  * @ process  : pointer on the local process descriptor.
    419  * @ fdid     : file descriptor index in the fd_array.
    420  ********************************************************************************************/
    421 void process_fd_remove( process_t * process,
    422                         uint32_t    fdid );
    423 
    424 /*********************************************************************************************
    425  * This function returns an extended pointer on a file descriptor identified by its index
    426  * in fd_array. It can be called by any thread running in any cluster.
    427  * It accesses first the local process descriptor. In case of local miss, it uses remote
    428  * access to access the reference process descriptor.
    429  * It updates the local fd_array when the file descriptor exists in reference cluster.
    430  * The file descriptor refcount is not incremented.
    431  *********************************************************************************************
    432  * @ process  : pointer on the local process descriptor.
    433  * @ fdid     : file descriptor index in the fd_array.
    434  * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found.
    435  ********************************************************************************************/
    436 xptr_t process_fd_get_xptr( process_t * process,
    437                             uint32_t    fdid );
    438 
    439 /*********************************************************************************************
    440  * This function checks the number of open files for a given process.
    441  * It can be called by any thread in any cluster, because it uses portable remote access
    442  * primitives to access the reference process descriptor.
    443  *********************************************************************************************
    444  * @ returns true if file descriptor array full.
    445  ********************************************************************************************/
    446 bool_t process_fd_array_full( void );
    447 
    448 /*********************************************************************************************
    449421 * This function allocates a free slot in the fd_array of the reference process,
    450422 * register the <file_xp> argument in the allocated slot, and return the slot index.
    451423 * It can be called by any thread in any cluster, because it uses portable remote access
    452424 * primitives to access the reference process descriptor.
     425 * It takes the lock protecting the reference fd_array against concurrent accesses.
    453426 *********************************************************************************************
    454427 * @ file_xp  : extended pointer on the file descriptor to be registered.
     
    459432                             xptr_t      file_xp,
    460433                             uint32_t  * fdid );
     434
     435/*********************************************************************************************
     436 * This function uses as many remote accesses as required, to reset an entry in fd_array[],
     437 * in all clusters containing a copy. The entry is identified by the <fdid> argument.
     438 * This function must be executed by a thread running in reference cluster, that contains
     439 * the complete list of process descriptors copies.
     440 * It takes the lock protecting the reference fd_array against concurrent accesses.
     441 * TODO this function is not implemented yet.
     442 *********************************************************************************************
     443 * @ process  : pointer on the local process descriptor.
     444 * @ fdid     : file descriptor index in the fd_array.
     445 ********************************************************************************************/
     446void process_fd_remove( process_t * process,
     447                        uint32_t    fdid );
     448
     449/*********************************************************************************************
     450 * This function returns an extended pointer on a file descriptor identified by its index
     451 * in fd_array. It can be called by any thread running in any cluster.
     452 * It accesses first the local process descriptor. In case of local miss, it takes
     453 * the lock protecting the reference fd_array, and access the reference process descriptor.
     454 * It updates the local fd_array when the file descriptor exists in reference cluster.
     455 * It takes the lock protecting the reference fd_array against concurrent accesses.
     456 * The file descriptor refcount is not incremented.
     457 *********************************************************************************************
     458 * @ process  : pointer on the local process descriptor.
     459 * @ fdid     : file descriptor index in the fd_array.
     460 * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found.
     461 ********************************************************************************************/
     462xptr_t process_fd_get_xptr( process_t * process,
     463                            uint32_t    fdid );
    461464
    462465/*********************************************************************************************
     
    465468 * <dst_xp> fd_array, embedded in another process descriptor.
    466469 * The calling thread can be running in any cluster.
    467  * It takes the remote lock protecting the <src_xp> fd_array during the copy.
     470 * It takes the lock protecting the reference fd_array against concurrent accesses.
    468471 * For each involved file descriptor, the refcount is incremented.
    469472 *********************************************************************************************
     
    474477                             xptr_t src_xp );
    475478
     479/*********************************************************************************************
     480 * This function checks the current number of open files for a given process.
     481 * It can be called by any thread in any cluster, because it uses portable remote access
     482 * primitives to access the reference process descriptor.
     483 * It does not take the lock protecting the reference fd_array.
     484 *********************************************************************************************
     485 * @ returns true if file descriptor array full.
     486 ********************************************************************************************/
     487bool_t process_fd_array_full( void );
     488
    476489
    477490
     
    479492
    480493/*********************************************************************************************
    481  * This function registers a new thread in the local process descriptor.
    482  * It checks that there is an available slot in the local th_tbl[] array,
    483  * allocates a new LTID, and registers the new thread in the th_tbl[].
    484  * It takes the lock protecting exclusive access to the th_tbl[].
     494 * This function atomically registers a new thread in the local process descriptor.
     495 * It checks that there is an available slot in the local th_tbl[] array, and allocates
     496 * a new LTID using the relevant lock depending on the kernel/user type.
    485497 *********************************************************************************************
    486498 * @ process  : pointer on the local process descriptor.
     
    494506
    495507/*********************************************************************************************
    496  * This function removes a thread registration from the local process descriptor.
    497  * It takes the lock protecting exclusive access to the th_tbl[].
     508 * This function atomically removes a thread registration from the local process descriptor
     509 * th_tbl[] array, using the relevant lock, depending on the kernel/user type.
    498510 *********************************************************************************************
    499511 * @ thread   : local pointer on thread to be removed.
     
    541553
    542554/*********************************************************************************************
    543  * This function gives the TXT ownership to a process identified by the <process_xp> argument.
     555 * This function gives a process identified by the <process_xp> argument the exclusive
     556 * ownership of its attached TXT_RX terminal (i.e. put the process in foreground).
    544557 * It can be called by a thread running in any cluster, but the <process_xp> must be the
    545558 * owner cluster process descriptor.
     
    568581 * process_xp must be the owner cluster process descriptor.
    569582 *********************************************************************************************
    570  * @ return a non-zero value if target process is TXT owner.
    571  ********************************************************************************************/
    572 uint32_t process_txt_is_owner( xptr_t process_xp );
     583 * @ returns true if target process is TXT owner.
     584 ********************************************************************************************/
     585bool_t process_txt_is_owner( xptr_t process_xp );
    573586
    574587/*********************************************************************************************
  • trunk/kernel/kern/rpc.c

    r503 r564  
    4343
    4444/////////////////////////////////////////////////////////////////////////////////////////
    45 //      array of function pointers  (must be consistent with enum in rpc.h)
     45// Array of function pointers and array of printable strings.
     46// These arrays must be kept consistent with enum in rpc.h file.
    4647/////////////////////////////////////////////////////////////////////////////////////////
    4748
     
    8283};
    8384
    84 //////////////////////////////////////////////
     85char * rpc_str[RPC_MAX_INDEX] =
     86{
     87    "PMEM_GET_PAGES",         // 0
     88    "PMEM_RELEASE_PAGES",     // 1
     89    "undefined",              // 2
     90    "PROCESS_MAKE_FORK",      // 3
     91    "undefined",              // 4
     92    "undefined",              // 5
     93    "THREAD_USER_CREATE",     // 6
     94    "THREAD_KERNEL_CREATE",   // 7
     95    "undefined",              // 8
     96    "PROCESS_SIGACTION",      // 9
     97
     98    "VFS_INODE_CREATE",       // 10
     99    "VFS_INODE_DESTROY",      // 11
     100    "VFS_DENTRY_CREATE",      // 12
     101    "VFS_DENTRY_DESTROY",     // 13
     102    "VFS_FILE_CREATE",        // 14
     103    "VFS_FILE_DESTROY",       // 15
     104    "VFS_INODE_LOAD",         // 16
     105    "VFS_MAPPER_LOAD_ALL",    // 17
     106    "FATFS_GET_CLUSTER",      // 18
     107    "undefined",              // 19
     108
     109    "GET_VSEG",               // 20
     110    "GET_PTE",                // 21
     111    "KCM_ALLOC",              // 22
     112    "KCM_FREE",               // 23
     113    "MAPPER_MOVE_BUFFER",     // 24
     114    "MAPPER_GET_PAGE",        // 25
     115    "VMM_CREATE_VSEG",        // 26
     116    "undefined",              // 27
     117    "VMM_SET_COW",            // 28
     118    "VMM_DISPLAY",            // 29
     119};
     120
     121//////////////////////////////////////////////////////////////////////////////////
    85122void __attribute__((noinline)) rpc_undefined( xptr_t xp __attribute__ ((unused)) )
    86123{
     
    105142    client_core_lid = this->core->lid;
    106143
     144// check calling thread can yield when it is not the idle thread
     145assert( (this->busylocks == 0) || (this->type == THREAD_IDLE),
     146"cannot yield : busylocks = %d\n", this->busylocks );
     147
    107148#if DEBUG_RPC_CLIENT_GENERIC
    108149uint32_t cycle = (uint32_t)hal_get_cycles();
    109150if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    110 printk("\n[DBG] %s : thread %x in process %x enter for rpc[%d] / cycle %d\n",
    111 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
     151printk("\n[DBG] %s : thread %x in process %x enter for rpc %s / server_cxy %x / cycle %d\n",
     152__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], server_cxy, cycle );
    112153#endif
    113154
    114155    // select a server_core : use client core index if possible / core 0 otherwise
    115     if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) )
     156    if( client_core_lid < hal_remote_l32( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) )
    116157    {
    117158        server_core_lid = client_core_lid;
     
    130171
    131172    // get local pointer on rpc_fifo in remote cluster,
    132     remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    133 
    134         // post RPC in remote fifo / deschedule and retry if fifo full
     173    remote_fifo_t * rpc_fifo    = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
     174    xptr_t          rpc_fifo_xp = XPTR( server_cxy , rpc_fifo );
     175
     176        // post RPC in remote fifo / deschedule without blocking if fifo full
    135177    do
    136178    {
    137         full = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), (uint64_t )desc_xp );
     179        full = remote_fifo_put_item( rpc_fifo_xp , (uint64_t )desc_xp );
     180
    138181            if ( full )
    139182        {
     
    151194#if DEBUG_RPC_CLIENT_GENERIC
    152195cycle = (uint32_t)hal_get_cycles();
     196uint32_t items = remote_fifo_items( rpc_fifo_xp );
    153197if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    154 printk("\n[DBG] %s : thread %x in process %x / rpc[%d] / rpc_ptr %x / cycle %d\n",
    155 __FUNCTION__, this->trdid, this->process->pid, rpc->index, rpc, cycle );
     198printk("\n[DBG] %s : thread %x in process %x / rpc %s / items %d / cycle %d\n",
     199__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], items, cycle );
    156200#endif
    157201       
     
    159203   dev_pic_send_ipi( server_cxy , server_core_lid );
    160204
    161     // wait RPC completion before returning if blocking RPC
    162     // - busy waiting policy during kernel_init, or if threads cannot yield
    163     // - block and deschedule in all other cases
     205    // wait RPC completion before returning if blocking RPC :
      206    // - descheduling without blocking if thread idle (in kernel init)
     207    // - block and deschedule policy for any other thread
    164208    if ( rpc->blocking )
    165209    {
    166         if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting
     210        if( this->type == THREAD_IDLE )  // deschedule without blocking policy
    167211        {
    168 
     212 
    169213#if DEBUG_RPC_CLIENT_GENERIC
    170214cycle = (uint32_t)hal_get_cycles();
    171215if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    172 printk("\n[DBG] %s : thread %x in process %x busy waiting for rpc[%d] / cycle %d\n",
    173 __FUNCTION__, this->trdid, this->process->pid, rpc->index , cycle );
    174 #endif
    175 
    176             while( rpc->responses ) hal_fixed_delay( 100 );
     216printk("\n[DBG] %s : thread %x in process %x enter waiting loop for rpc %s / cycle %d\n",
     217__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
     218#endif
     219
     220             while( rpc->responses ) sched_yield( "busy waiting on RPC");
    177221   
    178222#if DEBUG_RPC_CLIENT_GENERIC
    179223cycle = (uint32_t)hal_get_cycles();
    180224if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    181 printk("\n[DBG] %s : thread %x in process %x resumes for rpc[%d] / cycle %d\n",
    182 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
    183 #endif
    184         }
    185         else                                                         // block & deschedule
     225printk("\n[DBG] %s : thread %x in process %x received response for rpc %s / cycle %d\n",
     226__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
     227#endif
     228 
     229        }
     230        else                            // block and deschedule policy
    186231        {
    187232
     
    189234cycle = (uint32_t)hal_get_cycles();
    190235if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    191 printk("\n[DBG] %s : thread %x in process %x blocks & deschedules for rpc[%d] / cycle %d\n",
    192 __FUNCTION__, this->trdid, this->process->pid, rpc->index , cycle );
    193 #endif
    194             thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
    195             sched_yield("blocked on RPC");
     236printk("\n[DBG] %s : thread %x in process %x blocks & deschedules for rpc %s / cycle %d\n",
     237__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
     238#endif
     239
     240        // block client thread
     241        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
     242
     243        // deschedule
     244        sched_yield("blocked on RPC");
    196245
    197246#if DEBUG_RPC_CLIENT_GENERIC
    198247cycle = (uint32_t)hal_get_cycles();
    199248if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    200 printk("\n[DBG] %s : thread %x in process %x resumes for rpc[%d] / cycle %d\n",
    201 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
     249printk("\n[DBG] %s : thread %x in process %x resumes for rpc %s / cycle %d\n",
     250__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
    202251#endif
    203252        }
    204253
    205         // check response available
    206         assert( (rpc->responses == 0) , "illegal RPC response\n" );
     254// response must be available for a blocking RPC
     255assert( (rpc->responses == 0) , "illegal response for RPC %s\n", rpc_str[rpc->index] );
     256
    207257    }
    208     else  // non blocking RPC
     258    else       // non blocking RPC
    209259    {
    210260
     
    212262cycle = (uint32_t)hal_get_cycles();
    213263if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    214 printk("\n[DBG] %s : thread %x in process %x returns for non blocking rpc[%d] / cycle %d\n",
    215 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
     264printk("\n[DBG] %s : thread %x in process %x returns for non blocking rpc %s / cycle %d\n",
     265__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
    216266#endif
    217267
     
    224274/***************************************************************************************/
    225275
    226 ////////////////
    227 void rpc_check( void )
    228 {
    229     error_t         error;
    230     thread_t      * thread; 
    231     uint32_t        sr_save;
    232 
    233 #if DEBUG_RPC_SERVER_GENERIC
    234 uint32_t cycle;
    235 #endif
    236 
    237     bool_t          found    = false;
    238         thread_t      * this     = CURRENT_THREAD;
    239     core_t        * core     = this->core;
    240     scheduler_t   * sched    = &core->scheduler;
    241         remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid];
    242 
    243     // interrupted thread not preemptable during RPC chek
    244         hal_disable_irq( &sr_save );
    245 
    246     // activate (or create) RPC thread if RPC FIFO not empty and no acive RPC thread 
    247         if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    248     {
    249 
    250 #if DEBUG_RPC_SERVER_GENERIC
    251 cycle = (uint32_t)hal_get_cycles();
    252 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    253 printk("\n[DBG] %s : RPC FIFO non empty for core[%x,%d] / cycle %d\n",
    254 __FUNCTION__, local_cxy, core->lid, cycle );
    255 #endif
    256 
    257         // search one IDLE RPC thread associated to the selected core   
    258         list_entry_t * iter;
    259         LIST_FOREACH( &sched->k_root , iter )
    260         {
    261             thread = LIST_ELEMENT( iter , thread_t , sched_list );
    262             if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    263             {
    264                 // unblock found RPC thread
    265                 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
    266 
    267                 // exit loop
    268                 found = true;
    269                 break;
    270             }
    271         }
    272 
    273         // create new RPC thread for the selected core if not found   
    274         if( found == false )                   
    275         {
    276             error = thread_kernel_create( &thread,
    277                                           THREAD_RPC,
    278                                                       &rpc_thread_func,
    279                                           NULL,
    280                                                       core->lid );
    281                  
    282             assert( (error == 0),
    283             "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    284 
    285             // unblock created RPC thread
    286             thread->blocked = 0;
    287 
    288             // update RRPC threads counter 
    289             hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 );
    290 
    291 #if DEBUG_RPC_SERVER_GENERIC
    292 cycle = (uint32_t)hal_get_cycles();
    293 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    294 printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
    295 __FUNCTION__, thread, local_cxy, core->lid, cycle );
    296 #endif
    297         }
    298     }
    299 
    300 #if DEBUG_RPC_SERVER_GENERIC
    301 cycle = (uint32_t)hal_get_cycles();
    302 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    303 printk("\n[DBG] %s : interrupted thread %x deschedules on core[%x,%d] / cycle %d\n",
    304 __FUNCTION__, this, local_cxy, core->lid, cycle );
    305 #endif
    306 
    307     // interrupted thread always deschedule         
    308         sched_yield("IPI received");
    309 
    310 #if DEBUG_RPC_SERVER_GENERIC
    311 cycle = (uint32_t)hal_get_cycles();
    312 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    313 printk("\n[DBG] %s : interrupted thread %x resumes on core[%x,%d] / cycle %d\n",
    314 __FUNCTION__, this, local_cxy, core->lid, cycle );
    315 #endif
    316 
    317     // interrupted thread restore IRQs after resume
    318         hal_restore_irq( sr_save );
    319 
    320 } // end rpc_check()
    321 
    322 
    323 //////////////////////
     276////////////////////////////
    324277void rpc_thread_func( void )
    325278{
     
    345298        rpc_fifo        = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    346299
    347     // two embedded loops:
    348     // - external loop : "infinite" RPC thread
    349     // - internal loop : handle one RPC request per iteration
    350  
    351         while(1)  // infinite loop
     300    // "infinite" RPC thread loop
     301        while(1)
    352302        {
    353303        // try to take RPC_FIFO ownership
    354         if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )
     304        if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 
    355305        {
    356306
     
    358308uint32_t cycle = (uint32_t)hal_get_cycles();
    359309if( DEBUG_RPC_SERVER_GENERIC < cycle )
    360 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
    361 __FUNCTION__, server_ptr, local_cxy, cycle );
    362 #endif
    363                 while( 1 )  //  one RPC request per iteration
     310printk("\n[DBG] %s : RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n",
     311__FUNCTION__, server_ptr->trdid, server_core_lid, cycle );
     312#endif
     313                // try to consume one RPC request 
     314                empty = remote_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
     315
     316            // release RPC_FIFO ownership
     317            rpc_fifo->owner = 0;
     318
     319            // handle RPC request if success
     320                if ( empty == 0 )   
    364321            {
    365                     empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
    366 
    367                 // exit when FIFO empty or FIFO ownership lost (in case of descheduling)
    368                     if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) )
     322                // get client cluster and pointer on RPC descriptor
     323                desc_cxy = GET_CXY( desc_xp );
     324                desc_ptr = GET_PTR( desc_xp );
     325
     326                    index    = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) );
     327                blocking = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->blocking ) );
     328
     329#if DEBUG_RPC_SERVER_GENERIC
     330cycle = (uint32_t)hal_get_cycles();
     331uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) );
     332if( DEBUG_RPC_SERVER_GENERIC < cycle )
     333printk("\n[DBG] %s : RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n",
     334__FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle );
     335#endif
     336                // call the relevant server function
     337                rpc_server[index]( desc_xp );
     338
     339#if DEBUG_RPC_SERVER_GENERIC
     340cycle = (uint32_t)hal_get_cycles();
     341if( DEBUG_RPC_SERVER_GENERIC < cycle )
     342printk("\n[DBG] %s : RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n",
     343__FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle );
     344#endif
     345                // decrement response counter in RPC descriptor if blocking RPC
     346                if( blocking )
    369347                {
    370                     // get client cluster and pointer on RPC descriptor
    371                     desc_cxy = GET_CXY( desc_xp );
    372                     desc_ptr = GET_PTR( desc_xp );
    373 
    374                         index    = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
    375                     blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );
     348                    // decrement responses counter in RPC descriptor
     349                    hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
     350
     351                    // get client thread pointer and client core lid from RPC descriptor
     352                    client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     353                    client_core_lid = hal_remote_l32 ( XPTR( desc_cxy , &desc_ptr->lid ) );
     354
     355                    // unblock client thread
     356                    thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
     357
     358                    hal_fence();
    376359
    377360#if DEBUG_RPC_SERVER_GENERIC
    378361cycle = (uint32_t)hal_get_cycles();
    379362if( DEBUG_RPC_SERVER_GENERIC < cycle )
    380 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
    381 __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr );
    382 #endif
    383                     // call the relevant server function
    384                     rpc_server[index]( desc_xp );
    385 
    386 #if DEBUG_RPC_SERVER_GENERIC
    387 cycle = (uint32_t)hal_get_cycles();
    388 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    389 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
    390 __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle );
    391 #endif
    392                     // decrement response counter in RPC descriptor if blocking
    393                     if( blocking )
    394                     {
    395                         // decrement responses counter in RPC descriptor
    396                         hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
    397 
    398                         // get client thread pointer and client core lid from RPC descriptor
    399                         client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
    400                         client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    401 
    402                         // unblock client thread
    403                         thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
    404 
    405                         hal_fence();
    406 
    407 #if DEBUG_RPC_SERVER_GENERIC
    408 cycle = (uint32_t)hal_get_cycles();
    409 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    410 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
    411 __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle );
    412 #endif
    413                         // send IPI to client core
    414                             // dev_pic_send_ipi( desc_cxy , client_core_lid );
    415                     }
    416                         }
    417                 else
    418                 {
    419                     break;
    420                 }
    421                 } // end internal loop
    422 
    423             // release rpc_fifo ownership if not lost
    424             if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0;
    425 
    426         }  // end if RPC fifo
    427 
    428         // RPC thread blocks on IDLE
    429         thread_block( server_xp , THREAD_BLOCKED_IDLE );
    430 
    431         // sucide if too many RPC threads / simply deschedule otherwise
     363printk("\n[DBG] %s : RPC thread %x unblocked client thread %x / cycle %d\n",
     364__FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle );
     365#endif
     366                    // send IPI to client core
     367                    dev_pic_send_ipi( desc_cxy , client_core_lid );
     368
     369                }  // end if blocking RPC
     370            }  // end RPC handling if fifo non empty
     371        }  // end if RPC_fIFO ownership successfully taken and released
     372
     373        // sucide if too many RPC threads
    432374        if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
    433375            {
     
    436378uint32_t cycle = (uint32_t)hal_get_cycles();
    437379if( DEBUG_RPC_SERVER_GENERIC < cycle )
    438 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
    439 __FUNCTION__, server_ptr, local_cxy, cycle );
     380printk("\n[DBG] %s : RPC thread %x suicides / cycle %d\n",
     381__FUNCTION__, server_ptr->trdid, cycle );
    440382#endif
    441383            // update RPC threads counter
    442                 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
     384                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[server_core_lid] , -1 );
    443385
    444386            // RPC thread blocks on GLOBAL
     
    448390            hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );
    449391            }
     392        // block and deschedule otherwise
    450393        else
    451394        {
     
    454397uint32_t cycle = (uint32_t)hal_get_cycles();
    455398if( DEBUG_RPC_SERVER_GENERIC < cycle )
    456 printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n",
    457 __FUNCTION__, server_ptr, local_cxy, cycle );
    458 #endif
     399printk("\n[DBG] %s : RPC thread %x block IDLE & deschedules / cycle %d\n",
     400__FUNCTION__, server_ptr->trdid, cycle );
     401#endif
     402            // RPC thread blocks on IDLE
     403            thread_block( server_xp , THREAD_BLOCKED_IDLE );
    459404
    460405            // RPC thread deschedules
    461             assert( thread_can_yield() , "illegal sched_yield\n" );
    462             sched_yield("RPC fifo empty");
     406            sched_yield("RPC_FIFO empty");
    463407        }
    464 
    465408        } // end infinite loop
    466 
    467409} // end rpc_thread_func()
    468410
     
    478420{
    479421#if DEBUG_RPC_PMEM_GET_PAGES
     422thread_t * this = CURRENT_THREAD;
    480423uint32_t cycle = (uint32_t)hal_get_cycles();
    481424if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    482 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    483 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     425printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     426__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    484427#endif
    485428
     
    504447cycle = (uint32_t)hal_get_cycles();
    505448if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    506 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    507 __FUNCTION__ , CURRENT_THREAD , cycle );
     449printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     450__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    508451#endif
    509452}
     
    513456{
    514457#if DEBUG_RPC_PMEM_GET_PAGES
     458thread_t * this = CURRENT_THREAD;
    515459uint32_t cycle = (uint32_t)hal_get_cycles();
    516460if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    517 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    518 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     461printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     462__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    519463#endif
    520464
     
    524468
    525469    // get input arguments from client RPC descriptor
    526     uint32_t order = (uint32_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) );
     470    uint32_t order = (uint32_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) );
    527471   
    528472    // call local pmem allocator
     
    530474
    531475    // set output arguments into client RPC descriptor
    532     hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
     476    hal_remote_s64( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    533477
    534478#if DEBUG_RPC_PMEM_GET_PAGES
    535479cycle = (uint32_t)hal_get_cycles();
    536480if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    537 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    538 __FUNCTION__ , CURRENT_THREAD , cycle );
     481printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     482__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    539483#endif
    540484}
     
    549493{
    550494#if DEBUG_RPC_PMEM_RELEASE_PAGES
     495thread_t * this = CURRENT_THREAD;
    551496uint32_t cycle = (uint32_t)hal_get_cycles();
    552497if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
    553 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    554 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     498printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     499__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    555500#endif
    556501
     
    572517cycle = (uint32_t)hal_get_cycles();
    573518if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
    574 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    575 __FUNCTION__ , CURRENT_THREAD , cycle );
     519printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     520__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    576521#endif
    577522}
     
    581526{
    582527#if DEBUG_RPC_PMEM_RELEASE_PAGES
     528thread_t * this = CURRENT_THREAD;
    583529uint32_t cycle = (uint32_t)hal_get_cycles();
    584530if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
    585 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    586 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     531printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     532__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    587533#endif
    588534
     
    592538
    593539    // get input arguments from client RPC descriptor
    594     page_t * page = (page_t *)(intptr_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) );
     540    page_t * page = (page_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) );
    595541   
    596542    // release memory to local pmem
     
    603549cycle = (uint32_t)hal_get_cycles();
    604550if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
    605 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    606 __FUNCTION__ , CURRENT_THREAD , cycle );
     551printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     552__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    607553#endif
    608554}
     
    625571{
    626572#if DEBUG_RPC_PROCESS_MAKE_FORK
     573thread_t * this = CURRENT_THREAD;
    627574uint32_t cycle = (uint32_t)hal_get_cycles();
    628575if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
    629 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    630 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     576printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     577__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    631578#endif
    632579
     
    654601cycle = (uint32_t)hal_get_cycles();
    655602if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
    656 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    657 __FUNCTION__ , CURRENT_THREAD , cycle );
     603printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     604__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    658605#endif
    659606}
     
    663610{
    664611#if DEBUG_RPC_PROCESS_MAKE_FORK
     612thread_t * this = CURRENT_THREAD;
    665613uint32_t cycle = (uint32_t)hal_get_cycles();
    666614if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
    667 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    668 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     615printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     616__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    669617#endif
    670618
     
    680628
    681629    // get input arguments from cient RPC descriptor
    682     ref_process_xp   = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    683     parent_thread_xp = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     630    ref_process_xp   = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     631    parent_thread_xp = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    684632
    685633    // call local kernel function
     
    690638
    691639    // set output argument into client RPC descriptor
    692     hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid );
    693     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)(intptr_t)child_thread_ptr );
    694     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
     640    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid );
     641    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)(intptr_t)child_thread_ptr );
     642    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    695643
    696644#if DEBUG_RPC_PROCESS_MAKE_FORK
    697645cycle = (uint32_t)hal_get_cycles();
    698646if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
    699 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    700 __FUNCTION__ , CURRENT_THREAD , cycle );
     647printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     648__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    701649#endif
    702650}
     
    723671                                    error_t        * error )      // out
    724672{
     673#if DEBUG_RPC_THREAD_USER_CREATE
     674thread_t * this = CURRENT_THREAD;
     675uint32_t cycle = (uint32_t)hal_get_cycles();
     676if( cycle > DEBUG_RPC_THREAD_USER_CREATE)
     677printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     678__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     679#endif
     680
    725681    assert( (cxy != local_cxy) , "target cluster is not remote\n");
    726682
     
    744700    *error     = (error_t)rpc.args[5];
    745701
     702#if DEBUG_RPC_THREAD_USER_CREATE
     703cycle = (uint32_t)hal_get_cycles();
     704if( cycle > DEBUG_RPC_THREAD_USER_CREATE)
     705printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     706__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     707#endif
    746708}
    747709
     
    749711void rpc_thread_user_create_server( xptr_t xp )
    750712{
     713#if DEBUG_RPC_THREAD_USER_CREATE
     714thread_t * this = CURRENT_THREAD;
     715uint32_t cycle = (uint32_t)hal_get_cycles();
     716if( cycle > DEBUG_RPC_THREAD_USER_CREATE)
     717printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     718__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     719#endif
    751720
    752721    pthread_attr_t * attr_ptr;   // pointer on attributes structure in client cluster
     
    767736
    768737    // get input arguments from RPC descriptor
    769     pid        = (pid_t)                     hal_remote_lwd(XPTR(client_cxy , &desc->args[0]));
    770     start_func = (void *)(intptr_t)          hal_remote_lwd(XPTR(client_cxy , &desc->args[1]));
    771     start_arg  = (void *)(intptr_t)          hal_remote_lwd(XPTR(client_cxy , &desc->args[2]));
    772     attr_ptr   = (pthread_attr_t *)(intptr_t)hal_remote_lwd(XPTR(client_cxy , &desc->args[3]));
     738    pid        = (pid_t)                     hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     739    start_func = (void *)(intptr_t)          hal_remote_l64(XPTR(client_cxy , &desc->args[1]));
     740    start_arg  = (void *)(intptr_t)          hal_remote_l64(XPTR(client_cxy , &desc->args[2]));
     741    attr_ptr   = (pthread_attr_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[3]));
    773742
    774743    // makes a local copy of attributes structure
     
    786755    // set output arguments
    787756    thread_xp = XPTR( local_cxy , thread_ptr );
    788     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)thread_xp );
    789     hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
    790 
     757    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)thread_xp );
     758    hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
     759
     760#if DEBUG_RPC_THREAD_USER_CREATE
     761cycle = (uint32_t)hal_get_cycles();
     762if( cycle > DEBUG_RPC_THREAD_USER_CREATE)
     763printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     764__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     765#endif
    791766}
    792767
     
    803778                                      error_t * error )      // out
    804779{
     780#if DEBUG_RPC_THREAD_KERNEL_CREATE
     781thread_t * this = CURRENT_THREAD;
     782uint32_t cycle = (uint32_t)hal_get_cycles();
     783if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
     784printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     785__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     786#endif
     787
    805788    assert( (cxy != local_cxy) , "target cluster is not remote\n");
    806789
     
    823806    *error     = (error_t)rpc.args[4];
    824807
     808#if DEBUG_RPC_THREAD_KERNEL_CREATE
     809cycle = (uint32_t)hal_get_cycles();
     810if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
     811printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     812__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     813#endif
    825814}
    826815
     
    828817void rpc_thread_kernel_create_server( xptr_t xp )
    829818{
     819#if DEBUG_RPC_THREAD_KERNEL_CREATE
     820thread_t * this = CURRENT_THREAD;
     821uint32_t cycle = (uint32_t)hal_get_cycles();
     822if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
     823printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     824__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     825#endif
     826
    830827    thread_t       * thread_ptr;  // local pointer on thread descriptor
    831828    xptr_t           thread_xp;   // extended pointer on thread descriptor
     
    838835
    839836    // get attributes from RPC descriptor
    840     uint32_t  type = (uint32_t)       hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    841     void    * func = (void*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    842     void    * args = (void*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
     837    uint32_t  type = (uint32_t)       hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     838    void    * func = (void*)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     839    void    * args = (void*)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    843840
    844841    // select one core
     
    850847    // set output arguments
    851848    thread_xp = XPTR( local_cxy , thread_ptr );
    852     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    853     hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
    854 
     849    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     850    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
     851
     852#if DEBUG_RPC_THREAD_KERNEL_CREATE
     853cycle = (uint32_t)hal_get_cycles();
     854if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE)
     855printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     856__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
     857#endif
    855858}
    856859
     
    913916
    914917    // get arguments from RPC descriptor
    915     action   = (uint32_t)hal_remote_lwd( XPTR(client_cxy , &rpc->args[0]) );
    916     pid      = (pid_t)   hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) );
     918    action   = (uint32_t)hal_remote_l64( XPTR(client_cxy , &rpc->args[0]) );
     919    pid      = (pid_t)   hal_remote_l64( XPTR(client_cxy , &rpc->args[1]) );
    917920
    918921#if DEBUG_RPC_PROCESS_SIGACTION
     
    945948    {
    946949        // get client core lid
    947         client_lid    = (lid_t)     hal_remote_lw ( XPTR( client_cxy , &rpc->lid    ) );
     950        client_lid    = (lid_t)     hal_remote_l32 ( XPTR( client_cxy , &rpc->lid    ) );
    948951
    949952        // unblock client thread
     
    981984{
    982985#if DEBUG_RPC_VFS_INODE_CREATE
     986thread_t * this = CURRENT_THREAD;
    983987uint32_t cycle = (uint32_t)hal_get_cycles();
    984988if( cycle > DEBUG_RPC_VFS_INODE_CREATE )
    985 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    986 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     989printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     990__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    987991#endif
    988992
     
    10131017
    10141018#if DEBUG_RPC_VFS_INODE_CREATE
    1015 uint32_t cycle = (uint32_t)hal_get_cycles();
     1019cycle = (uint32_t)hal_get_cycles();
    10161020if( cycle > DEBUG_RPC_VFS_INODE_CREATE )
    1017 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1018 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1021printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1022__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    10191023#endif
    10201024}
     
    10241028{
    10251029#if DEBUG_RPC_VFS_INODE_CREATE
     1030thread_t * this = CURRENT_THREAD;
    10261031uint32_t cycle = (uint32_t)hal_get_cycles();
    10271032if( cycle > DEBUG_RPC_VFS_INODE_CREATE )
    1028 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1029 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1033printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1034__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    10301035#endif
    10311036
     
    10461051
    10471052    // get input arguments from client rpc descriptor
    1048     dentry_xp  = (xptr_t)          hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1049     fs_type    = (uint32_t)        hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    1050     inode_type = (uint32_t)        hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
    1051     extend     = (void *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[3] ) );
    1052     attr       = (uint32_t)        hal_remote_lwd( XPTR( client_cxy , &desc->args[4] ) );
    1053     rights     = (uint32_t)        hal_remote_lwd( XPTR( client_cxy , &desc->args[5] ) );
    1054     uid        = (uid_t)           hal_remote_lwd( XPTR( client_cxy , &desc->args[6] ) );
    1055     gid        = (gid_t)           hal_remote_lwd( XPTR( client_cxy , &desc->args[7] ) );
     1053    dentry_xp  = (xptr_t)          hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     1054    fs_type    = (uint32_t)        hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     1055    inode_type = (uint32_t)        hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     1056    extend     = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
     1057    attr       = (uint32_t)        hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
     1058    rights     = (uint32_t)        hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) );
     1059    uid        = (uid_t)           hal_remote_l64( XPTR( client_cxy , &desc->args[6] ) );
     1060    gid        = (gid_t)           hal_remote_l64( XPTR( client_cxy , &desc->args[7] ) );
    10561061
    10571062    // call local kernel function
     
    10671072
    10681073    // set output arguments
    1069     hal_remote_swd( XPTR( client_cxy , &desc->args[8] ) , (uint64_t)inode_xp );
    1070     hal_remote_swd( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error );
     1074    hal_remote_s64( XPTR( client_cxy , &desc->args[8] ) , (uint64_t)inode_xp );
     1075    hal_remote_s64( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error );
    10711076
    10721077#if DEBUG_RPC_VFS_INODE_CREATE
    1073 uint32_t cycle = (uint32_t)hal_get_cycles();
     1078cycle = (uint32_t)hal_get_cycles();
    10741079if( cycle > DEBUG_RPC_VFS_INODE_CREATE )
    1075 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1076 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1080printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1081__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    10771082#endif
    10781083}
     
    10881093{
    10891094#if DEBUG_RPC_VFS_INODE_DESTROY
     1095thread_t * this = CURRENT_THREAD;
    10901096uint32_t cycle = (uint32_t)hal_get_cycles();
    10911097if( cycle > DEBUG_RPC_VFS_INODE_DESTROY )
    1092 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1093 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1098printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1099__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    10941100#endif
    10951101
     
    11121118
    11131119#if DEBUG_RPC_VFS_INODE_DESTROY
    1114 uint32_t cycle = (uint32_t)hal_get_cycles();
     1120cycle = (uint32_t)hal_get_cycles();
    11151121if( cycle > DEBUG_RPC_VFS_INODE_DESTROY )
    1116 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1117 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1122printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1123__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    11181124#endif
    11191125}
     
    11231129{
    11241130#if DEBUG_RPC_VFS_INODE_DESTROY
     1131thread_t * this = CURRENT_THREAD;
    11251132uint32_t cycle = (uint32_t)hal_get_cycles();
    11261133if( cycle > DEBUG_RPC_VFS_INODE_DESTROY )
    1127 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1128 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1134printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1135__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    11291136#endif
    11301137
     
    11371144
    11381145    // get arguments "inode" from client RPC descriptor
    1139     inode = (vfs_inode_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     1146    inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    11401147                       
    11411148    // call local kernel function
     
    11431150
    11441151    // set output argument
    1145     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     1152    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    11461153
    11471154#if DEBUG_RPC_VFS_INODE_DESTROY
    1148 uint32_t cycle = (uint32_t)hal_get_cycles();
     1155cycle = (uint32_t)hal_get_cycles();
    11491156if( cycle > DEBUG_RPC_VFS_INODE_DESTROY )
    1150 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1151 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1157printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1158__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    11521159#endif
    11531160}
     
    11661173{
    11671174#if DEBUG_RPC_VFS_DENTRY_CREATE
     1175thread_t * this = CURRENT_THREAD;
    11681176uint32_t cycle = (uint32_t)hal_get_cycles();
    11691177if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
    1170 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1171 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1178printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1179__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    11721180#endif
    11731181
     
    11951203cycle = (uint32_t)hal_get_cycles();
    11961204if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
    1197 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1198 __FUNCTION__ , CURRENT_THREAD , cycle );
     1205printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1206__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    11991207#endif
    12001208}
     
    12041212{
    12051213#if DEBUG_RPC_VFS_DENTRY_CREATE
     1214thread_t * this = CURRENT_THREAD;
    12061215uint32_t cycle = (uint32_t)hal_get_cycles();
    12071216if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
    1208 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1209 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1217printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1218__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    12101219#endif
    12111220
     
    12221231
    12231232    // get arguments "name", "type", and "parent" from client RPC descriptor
    1224     type   = (uint32_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1225     name   = (char *)(intptr_t)       hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    1226     parent = (vfs_inode_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
     1233    type   = (uint32_t)               hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     1234    name   = (char *)(intptr_t)       hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     1235    parent = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    12271236
    12281237    // makes a local copy of  name
     
    12361245                               &dentry_xp );
    12371246    // set output arguments
    1238     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)dentry_xp );
    1239     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
     1247    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)dentry_xp );
     1248    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    12401249
    12411250#if DEBUG_RPC_VFS_DENTRY_CREATE
    12421251cycle = (uint32_t)hal_get_cycles();
    12431252if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
    1244 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1245 __FUNCTION__ , CURRENT_THREAD , cycle );
     1253printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1254__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    12461255#endif
    12471256}
     
    12571266{
    12581267#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1268thread_t * this = CURRENT_THREAD;
    12591269uint32_t cycle = (uint32_t)hal_get_cycles();
    12601270if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
    1261 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1262 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1271printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1272__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    12631273#endif
    12641274
     
    12831293cycle = (uint32_t)hal_get_cycles();
    12841294if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
    1285 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1286 __FUNCTION__ , CURRENT_THREAD , cycle );
     1295printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1296__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    12871297#endif
    12881298}
     
    12921302{
    12931303#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1304thread_t * this = CURRENT_THREAD;
    12941305uint32_t cycle = (uint32_t)hal_get_cycles();
    12951306if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
    1296 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1297 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1307printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1308__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    12981309#endif
    12991310
     
    13061317
    13071318    // get arguments "dentry" from client RPC descriptor
    1308     dentry = (vfs_dentry_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     1319    dentry = (vfs_dentry_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    13091320                       
    13101321    // call local kernel function
     
    13121323
    13131324    // set output argument
    1314     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     1325    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    13151326
    13161327#if DEBUG_RPC_VFS_DENTRY_DESTROY
    13171328cycle = (uint32_t)hal_get_cycles();
    13181329if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
    1319 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1320 __FUNCTION__ , CURRENT_THREAD , cycle );
     1330printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1331__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    13211332#endif
    13221333}
     
    13351346{
    13361347#if DEBUG_RPC_VFS_FILE_CREATE
     1348thread_t * this = CURRENT_THREAD;
    13371349uint32_t cycle = (uint32_t)hal_get_cycles();
    13381350if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
    1339 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1340 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1351printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1352__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    13411353#endif
    13421354
     
    13631375cycle = (uint32_t)hal_get_cycles();
    13641376if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
    1365 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1366 __FUNCTION__ , CURRENT_THREAD , cycle );
     1377printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1378__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    13671379#endif
    13681380}
     
    13721384{
    13731385#if DEBUG_RPC_VFS_FILE_CREATE
     1386thread_t * this = CURRENT_THREAD;
    13741387uint32_t cycle = (uint32_t)hal_get_cycles();
    13751388if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
    1376 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1377 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1389printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1390__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    13781391#endif
    13791392
     
    13881401
    13891402    // get arguments "file_attr" and "inode" from client RPC descriptor
    1390     inode     = (vfs_inode_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1391     file_attr = (uint32_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     1403    inode     = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     1404    file_attr = (uint32_t)               hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    13921405                       
    13931406    // call local kernel function
     
    13971410 
    13981411    // set output arguments
    1399     hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp );
    1400     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     1412    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp );
     1413    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    14011414
    14021415#if DEBUG_RPC_VFS_FILE_CREATE
    14031416cycle = (uint32_t)hal_get_cycles();
    14041417if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
    1405 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1406 __FUNCTION__ , CURRENT_THREAD , cycle );
     1418printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1419__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    14071420#endif
    14081421}
     
    14171430{
    14181431#if DEBUG_RPC_VFS_FILE_DESTROY
     1432thread_t * this = CURRENT_THREAD;
    14191433uint32_t cycle = (uint32_t)hal_get_cycles();
    14201434if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
    1421 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1422 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1435printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1436__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    14231437#endif
    14241438
     
    14401454cycle = (uint32_t)hal_get_cycles();
    14411455if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
    1442 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1443 __FUNCTION__ , CURRENT_THREAD , cycle );
     1456printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1457__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    14441458#endif
    14451459}
     
    14491463{
    14501464#if DEBUG_RPC_VFS_FILE_DESTROY
     1465thread_t * this = CURRENT_THREAD;
    14511466uint32_t cycle = (uint32_t)hal_get_cycles();
    14521467if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
    1453 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1454 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1468printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1469__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    14551470#endif
    14561471
     
    14621477
    14631478    // get arguments "dentry" from client RPC descriptor
    1464     file = (vfs_file_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     1479    file = (vfs_file_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    14651480                       
    14661481    // call local kernel function
     
    14701485cycle = (uint32_t)hal_get_cycles();
    14711486if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
    1472 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    1473 __FUNCTION__ , CURRENT_THREAD , cycle );
     1487printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1488__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    14741489#endif
    14751490}
     
    15221537
    15231538    // get arguments "parent", "name", and "child_xp"
    1524     parent     = (vfs_inode_t*)(intptr_t)hal_remote_lwd(XPTR(client_cxy , &desc->args[0]));
    1525     name       = (char*)(intptr_t)       hal_remote_lwd(XPTR(client_cxy , &desc->args[1]));
    1526     child_xp   = (xptr_t)                hal_remote_lwd(XPTR(client_cxy , &desc->args[2]));
     1539    parent     = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     1540    name       = (char*)(intptr_t)       hal_remote_l64(XPTR(client_cxy , &desc->args[1]));
     1541    child_xp   = (xptr_t)                hal_remote_l64(XPTR(client_cxy , &desc->args[2]));
    15271542
    15281543    // get name local copy
     
    15341549
    15351550    // set output argument
    1536     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     1551    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    15371552
    15381553}
     
    15771592
    15781593    // get arguments "parent", "name", and "child_xp"
    1579     inode = (vfs_inode_t*)(intptr_t)hal_remote_lwd(XPTR(client_cxy , &desc->args[0]));
     1594    inode = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
    15801595
    15811596    // call the kernel function
     
    15831598
    15841599    // set output argument
    1585     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     1600    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    15861601
    15871602}
     
    16361651    // get input arguments
    16371652    mapper = (mapper_t *)(intptr_t)hal_remote_lpt( XPTR( client_cxy , &desc->args[0] ) );
    1638     first  = (uint32_t)            hal_remote_lw ( XPTR( client_cxy , &desc->args[1] ) );
    1639     index  = (uint32_t)            hal_remote_lw ( XPTR( client_cxy , &desc->args[2] ) );
     1653    first  = (uint32_t)            hal_remote_l32 ( XPTR( client_cxy , &desc->args[1] ) );
     1654    index  = (uint32_t)            hal_remote_l32 ( XPTR( client_cxy , &desc->args[2] ) );
    16401655
    16411656    // call the kernel function
     
    16431658
    16441659    // set output argument
    1645     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)cluster );
    1646     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
     1660    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)cluster );
     1661    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    16471662
    16481663}
     
    16601675{
    16611676#if DEBUG_RPC_VMM_GET_VSEG
     1677thread_t * this = CURRENT_THREAD;
    16621678uint32_t cycle = (uint32_t)hal_get_cycles();
    16631679if( cycle > DEBUG_RPC_VMM_GET_VSEG )
    1664 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1665 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1680printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1681__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    16661682#endif
    16671683
     
    16881704cycle = (uint32_t)hal_get_cycles();
    16891705if( cycle > DEBUG_RPC_VMM_GET_VSEG )
    1690 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1691 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1706printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1707__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    16921708#endif
    16931709}
     
    16971713{
    16981714#if DEBUG_RPC_VMM_GET_VSEG
     1715thread_t * this = CURRENT_THREAD;
    16991716uint32_t cycle = (uint32_t)hal_get_cycles();
    17001717if( cycle > DEBUG_RPC_VMM_GET_VSEG )
    1701 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1702 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1718printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1719__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    17031720#endif
    17041721
     
    17141731
    17151732    // get input argument from client RPC descriptor
    1716     process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1717     vaddr   = (intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     1733    process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     1734    vaddr   = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    17181735   
    17191736    // call local kernel function
     
    17221739    // set output arguments to client RPC descriptor
    17231740    vseg_xp = XPTR( local_cxy , vseg_ptr );
    1724     hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp );
    1725     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     1741    hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp );
     1742    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    17261743
    17271744#if DEBUG_RPC_VMM_GET_VSEG
    17281745cycle = (uint32_t)hal_get_cycles();
    17291746if( cycle > DEBUG_RPC_VMM_GET_VSEG )
    1730 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1731 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1747printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1748__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    17321749#endif
    17331750}
     
    17481765{
    17491766#if DEBUG_RPC_VMM_GET_PTE
     1767thread_t * this = CURRENT_THREAD;
    17501768uint32_t cycle = (uint32_t)hal_get_cycles();
    17511769if( cycle > DEBUG_RPC_VMM_GET_PTE )
    1752 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1753 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1770printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1771__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    17541772#endif
    17551773
     
    17781796cycle = (uint32_t)hal_get_cycles();
    17791797if( cycle > DEBUG_RPC_VMM_GET_PTE )
    1780 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1781 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1798printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1799__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    17821800#endif
    17831801}
     
    17871805{
    17881806#if DEBUG_RPC_VMM_GET_PTE
     1807thread_t * this = CURRENT_THREAD;
    17891808uint32_t cycle = (uint32_t)hal_get_cycles();
    17901809if( cycle > DEBUG_RPC_VMM_GET_PTE )
    1791 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    1792 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1810printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1811__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    17931812#endif
    17941813
     
    18051824
    18061825    // get input argument "process" & "vpn" from client RPC descriptor
    1807     process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1808     vpn     = (vpn_t)                hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    1809     cow     = (bool_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
     1826    process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     1827    vpn     = (vpn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     1828    cow     = (bool_t)               hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    18101829   
    18111830    // call local kernel function
     
    18131832
    18141833    // set output argument "attr" & "ppn" to client RPC descriptor
    1815     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr );
    1816     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn );
    1817     hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
     1834    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr );
     1835    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn );
     1836    hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
    18181837
    18191838#if DEBUG_RPC_VMM_GET_PTE
    18201839cycle = (uint32_t)hal_get_cycles();
    18211840if( cycle > DEBUG_RPC_VMM_GET_PTE )
    1822 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
    1823 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1841printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1842__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    18241843#endif
    18251844}
     
    18341853                           xptr_t *   buf_xp )     // out
    18351854{
     1855#if DEBUG_RPC_KCM_ALLOC
     1856thread_t * this = CURRENT_THREAD;
     1857uint32_t cycle = (uint32_t)hal_get_cycles();
     1858if( cycle > DEBUG_RPC_KCM_ALLOC )
     1859printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1860__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1861#endif
     1862
    18361863    assert( (cxy != local_cxy) , "target cluster is not remote\n");
    18371864
     
    18511878    *buf_xp = (xptr_t)rpc.args[1];
    18521879
     1880#if DEBUG_RPC_KCM_ALLOC
     1881cycle = (uint32_t)hal_get_cycles();
     1882if( cycle > DEBUG_RPC_KCM_ALLOC )
     1883printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1884__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1885#endif
    18531886}
    18541887
     
    18561889void rpc_kcm_alloc_server( xptr_t xp )
    18571890{
     1891#if DEBUG_RPC_KCM_ALLOC
     1892thread_t * this = CURRENT_THREAD;
     1893uint32_t cycle = (uint32_t)hal_get_cycles();
     1894if( cycle > DEBUG_RPC_KCM_ALLOC )
     1895printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1896__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1897#endif
     1898
    18581899    // get client cluster identifier and pointer on RPC descriptor
    18591900    cxy_t        client_cxy  = GET_CXY( xp );
     
    18611902
    18621903    // get input argument "kmem_type" from client RPC descriptor
    1863     uint32_t kmem_type = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     1904    uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    18641905
    18651906    // allocates memory for kcm
     
    18711912    // set output argument
    18721913    xptr_t buf_xp = XPTR( local_cxy , buf_ptr );
    1873     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp );
    1874 
     1914    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp );
     1915
     1916#if DEBUG_RPC_KCM_ALLOC
     1917cycle = (uint32_t)hal_get_cycles();
     1918if( cycle > DEBUG_RPC_KCM_ALLOC )
     1919printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1920__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1921#endif
    18751922}   
    18761923
     
    18841931                          uint32_t   kmem_type )   // in
    18851932{
     1933#if DEBUG_RPC_KCM_FREE
     1934thread_t * this = CURRENT_THREAD;
     1935uint32_t cycle = (uint32_t)hal_get_cycles();
     1936if( cycle > DEBUG_RPC_KCM_FREE )
     1937printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1938__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1939#endif
     1940
    18861941    assert( (cxy != local_cxy) , "target cluster is not remote\n");
    18871942
     
    18991954    rpc_send( cxy , &rpc );
    19001955
     1956#if DEBUG_RPC_KCM_FREE
     1957cycle = (uint32_t)hal_get_cycles();
     1958if( cycle > DEBUG_RPC_KCM_FREE )
     1959printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1960__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1961#endif
    19011962}
    19021963
     
    19041965void rpc_kcm_free_server( xptr_t xp )
    19051966{
     1967#if DEBUG_RPC_KCM_FREE
     1968thread_t * this = CURRENT_THREAD;
     1969uint32_t cycle = (uint32_t)hal_get_cycles();
     1970if( cycle > DEBUG_RPC_KCM_FREE )
     1971printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     1972__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1973#endif
     1974
    19061975    // get client cluster identifier and pointer on RPC descriptor
    19071976    cxy_t        client_cxy  = GET_CXY( xp );
     
    19091978
    19101979    // get input arguments "buf" and "kmem_type" from client RPC descriptor
    1911     void     * buf = (void *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1912     uint32_t   kmem_type = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     1980    void     * buf = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     1981    uint32_t   kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    19131982
    19141983    // releases memory
     
    19181987    kmem_free( &req );
    19191988
     1989#if DEBUG_RPC_KCM_FREE
     1990cycle = (uint32_t)hal_get_cycles();
     1991if( cycle > DEBUG_RPC_KCM_FREE )
     1992printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1993__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1994#endif
    19201995}   
    19211996
     
    19752050
    19762051    // get arguments from client RPC descriptor
    1977     mapper      = (mapper_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    1978     to_buffer   =                       hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    1979     is_user     =                       hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
    1980     file_offset =                       hal_remote_lwd( XPTR( client_cxy , &desc->args[3] ) );
    1981     size        =                       hal_remote_lwd( XPTR( client_cxy , &desc->args[5] ) );
     2052    mapper      = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     2053    to_buffer   =                       hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     2054    is_user     =                       hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     2055    file_offset =                       hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
     2056    size        =                       hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) );
    19822057
    19832058    // call local kernel function
    19842059    if( is_user )
    19852060    {
    1986         user_buffer = (void *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[4] ) );
     2061        user_buffer = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
    19872062
    19882063        error = mapper_move_user( mapper,
     
    19942069    else
    19952070    {
    1996         kern_buffer = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[4] ) );
     2071        kern_buffer = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
    19972072
    19982073        error = mapper_move_kernel( mapper,
     
    20042079
    20052080    // set output argument to client RPC descriptor
    2006     hal_remote_swd( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );
     2081    hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );
    20072082
    20082083}
     
    20462121
    20472122    // get input arguments from client RPC descriptor
    2048     mapper_t * mapper = (mapper_t *)(intptr_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) );
    2049     uint32_t   index  = (uint32_t)            hal_remote_lwd( XPTR( cxy , &desc->args[1] ) );
     2123    mapper_t * mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) );
     2124    uint32_t   index  = (uint32_t)            hal_remote_l64( XPTR( cxy , &desc->args[1] ) );
    20502125   
    20512126    // call local pmem allocator
     
    20532128
    20542129    // set output arguments into client RPC descriptor
    2055     hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
     2130    hal_remote_s64( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    20562131
    20572132}
     
    21072182
    21082183    // get input arguments from client RPC descriptor
    2109     process_t * process     = (process_t *)(intptr_t)hal_remote_lwd( XPTR(cxy , &desc->args[0]));
    2110     vseg_type_t type        = (vseg_type_t)(uint32_t)hal_remote_lwd( XPTR(cxy , &desc->args[1]));
    2111     intptr_t    base        = (intptr_t)             hal_remote_lwd( XPTR(cxy , &desc->args[2]));
    2112     uint32_t    size        = (uint32_t)             hal_remote_lwd( XPTR(cxy , &desc->args[3]));
    2113     uint32_t    file_offset = (uint32_t)             hal_remote_lwd( XPTR(cxy , &desc->args[4]));
    2114     uint32_t    file_size   = (uint32_t)             hal_remote_lwd( XPTR(cxy , &desc->args[5]));
    2115     xptr_t      mapper_xp   = (xptr_t)               hal_remote_lwd( XPTR(cxy , &desc->args[6]));
    2116     cxy_t       vseg_cxy    = (cxy_t)(uint32_t)      hal_remote_lwd( XPTR(cxy , &desc->args[7]));
     2184    process_t * process     = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0]));
     2185    vseg_type_t type        = (vseg_type_t)(uint32_t)hal_remote_l64( XPTR(cxy , &desc->args[1]));
     2186    intptr_t    base        = (intptr_t)             hal_remote_l64( XPTR(cxy , &desc->args[2]));
     2187    uint32_t    size        = (uint32_t)             hal_remote_l64( XPTR(cxy , &desc->args[3]));
     2188    uint32_t    file_offset = (uint32_t)             hal_remote_l64( XPTR(cxy , &desc->args[4]));
     2189    uint32_t    file_size   = (uint32_t)             hal_remote_l64( XPTR(cxy , &desc->args[5]));
     2190    xptr_t      mapper_xp   = (xptr_t)               hal_remote_l64( XPTR(cxy , &desc->args[6]));
     2191    cxy_t       vseg_cxy    = (cxy_t)(uint32_t)      hal_remote_l64( XPTR(cxy , &desc->args[7]));
    21172192   
    21182193    // call local kernel function
     
    21272202
    21282203    // set output arguments into client RPC descriptor
    2129     hal_remote_swd( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg );
     2204    hal_remote_s64( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg );
    21302205
    21312206}
     
    21692244
    21702245    // get input arguments from client RPC descriptor
    2171     process = (process_t *)(intptr_t)hal_remote_lwd( XPTR(cxy , &desc->args[0]));
     2246    process = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0]));
    21722247   
    21732248    // call local kernel function
     
    22132288
    22142289    // get input arguments from client RPC descriptor
    2215     process  = (process_t *)(intptr_t)hal_remote_lwd( XPTR(cxy , &desc->args[0]));
    2216     detailed = (bool_t)               hal_remote_lwd( XPTR(cxy , &desc->args[1]));
     2290    process  = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0]));
     2291    detailed = (bool_t)               hal_remote_l64( XPTR(cxy , &desc->args[1]));
    22172292   
    22182293    // call local kernel function
  • trunk/kernel/kern/rpc.h

    r503 r564  
    2929#include <hal_atomic.h>
    3030#include <bits.h>
    31 #include <spinlock.h>
    3231#include <vseg.h>
    3332#include <remote_fifo.h>
     
    150149
    151150/***********************************************************************************
    152  * This function is the entry point for RPC handling on the server cluster.
    153  * It is executed by the core receiving the IPI sent by the client thread.
    154  * - If the RPC FIFO is empty, it deschedules.
    155  * - If the RPC FIFO is not empty, it checks if it exist a non-blocked RPC thread
    156  *   in the cluster, creates a new one if required, and deschedule to allow
    157  *   the RPC thead to execute.
    158  **********************************************************************************/
    159 void rpc_check( void );
    160 
    161 /***********************************************************************************
    162  * This function contains the loop to execute all pending RPCs on the server side.
    163  * It is called by the rpc_thread_func() function with irq disabled, and after
    164  * RPC_FIFO ownership acquisition.
    165  ***********************************************************************************
    166  * @ rpc_fifo  : pointer on the local RPC fifo
    167  **********************************************************************************/
    168 void rpc_execute_all( remote_fifo_t * rpc_fifo );
    169 
    170 /***********************************************************************************
    171  * This function contains the infinite loop executed by a RPC thread.
     151 * This function contains the infinite loop executed by a RPC thread,
     152 * to handle all pending RPCs registered in the RPC fifo attached to a given core.
    172153 **********************************************************************************/
    173154void rpc_thread_func( void );
     
    177158 **********************************************************************************/
    178159void __attribute__((noinline)) rpc_undefined( xptr_t xp __attribute__ ((unused)) );
    179 
    180160
    181161
  • trunk/kernel/kern/scheduler.c

    r551 r564  
    22 * scheduler.c - Core scheduler implementation.
    33 *
    4  * Author    Alain Greiner (2016)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    3636
    3737///////////////////////////////////////////////////////////////////////////////////////////
    38 // Extern global variables
     38//         global variables
    3939///////////////////////////////////////////////////////////////////////////////////////////
    4040
    41 uint32_t   idle_thread_count;
    42 uint32_t   idle_thread_count_active;
    43 
    44 extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
    45 extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file
    46 
    47 ////////////////////////////////
    48 void sched_init( core_t * core )
    49 {
    50     scheduler_t * sched = &core->scheduler;
    51 
    52     sched->u_threads_nr   = 0;
    53     sched->k_threads_nr   = 0;
    54 
    55     sched->current        = CURRENT_THREAD;
    56     sched->idle           = NULL;               // initialized in kernel_init()
    57     sched->u_last         = NULL;               // initialized in sched_register_thread()
    58     sched->k_last         = NULL;               // initialized in sched_register_thread()
    59 
    60     // initialise threads lists
    61     list_root_init( &sched->u_root );
    62     list_root_init( &sched->k_root );
    63 
    64     // init spinlock
    65     spinlock_init( &sched->lock );
    66 
    67     sched->req_ack_pending = false;             // no pending request
    68     sched->trace           = false;             // context switches trace desactivated
    69 
    70 }  // end sched_init()
    71 
    72 ////////////////////////////////////////////
    73 void sched_register_thread( core_t   * core,
    74                             thread_t * thread )
    75 {
    76     scheduler_t * sched = &core->scheduler;
    77     thread_type_t type  = thread->type;
    78 
    79     // take lock protecting sheduler lists
    80     uint32_t       irq_state;
    81     spinlock_lock_busy( &sched->lock, &irq_state );
    82 
    83     if( type == THREAD_USER )
    84     {
    85         list_add_last( &sched->u_root , &thread->sched_list );
    86         sched->u_threads_nr++;
    87         if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    88     }
    89     else // kernel thread
    90     {
    91         list_add_last( &sched->k_root , &thread->sched_list );
    92         sched->k_threads_nr++;
    93         if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    94     }
    95 
    96     // release lock
    97     hal_fence();
    98     spinlock_unlock_busy( &sched->lock, irq_state);
    99 
    100 }  // end sched_register_thread()
    101 
    102 //////////////////////////////////////////////
     41extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
     42
     43///////////////////////////////////////////////////////////////////////////////////////////
     44//         private functions
     45///////////////////////////////////////////////////////////////////////////////////////////
     46
     47
     48////////////////////////////////////////////////////////////////////////////////////////////
     49// This static function does NOT modify the scheduler state.
      50// It just selects a thread in the list of attached threads, implementing the following
      51// three-step policy:
      52// 1) It scans the list of kernel threads, from the next thread after the last executed one,
      53//    and returns the first runnable found : not IDLE, not blocked, client queue not empty.
      54//    It can be the current thread.
      55// 2) If no kernel thread is found, it scans the list of user threads, from the next thread
      56//    after the last executed one, and returns the first runnable found : not blocked.
      57//    It can be the current thread.
      58// 3) If no runnable thread is found, it returns the idle thread.
     59////////////////////////////////////////////////////////////////////////////////////////////
     60// @ sched   : local pointer on scheduler.
     61// @ returns pointer on selected thread descriptor
     62////////////////////////////////////////////////////////////////////////////////////////////
    10363thread_t * sched_select( scheduler_t * sched )
    10464{
     
    11070    uint32_t       count;
    11171
    112     // take lock protecting sheduler lists
    113     spinlock_lock( &sched->lock );
    114 
    11572    // first : scan the kernel threads list if not empty
    11673    if( list_is_empty( &sched->k_root ) == false )
     
    12481        while( done == false )
    12582        {
    126             assert( (count < sched->k_threads_nr), "bad kernel threads list" );
     83
     84// check kernel threads list
     85assert( (count < sched->k_threads_nr),
     86"bad kernel threads list" );
    12787
    12888            // get next entry in kernel list
     
    140100
    141101            // select kernel thread if non blocked and non THREAD_IDLE
    142             if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
    143             {
    144                 spinlock_unlock( &sched->lock );
    145                 return thread;
    146             }
     102            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) ) return thread;
     103
    147104        } // end loop on kernel threads
    148105    } // end kernel threads
     
    159116        while( done == false )
    160117        {
    161             assert( (count < sched->u_threads_nr), "bad user threads list" );
     118
     119// check user threads list
     120assert( (count < sched->u_threads_nr),
     121"bad user threads list" );
    162122
    163123            // get next entry in user list
     
    175135
    176136            // select thread if non blocked
    177             if( thread->blocked == 0 )
    178             {
    179                 spinlock_unlock( &sched->lock );
    180                 return thread;
    181             }
     137            if( thread->blocked == 0 )  return thread;
     138
    182139        } // end loop on user threads
    183140    } // end user threads
    184141
    185142    // third : return idle thread if no other runnable thread
    186     spinlock_unlock( &sched->lock );
    187143    return sched->idle;
    188144
    189145}  // end sched_select()
    190146
    191 ///////////////////////////////////////////
    192 void sched_handle_signals( core_t * core )
     147////////////////////////////////////////////////////////////////////////////////////////////
     148// This static function is the only function that can remove a thread from the scheduler.
     149// It is private, because it is called by the sched_yield() public function.
      150// It scans all threads attached to a given scheduler, and executes the relevant
      151// actions for pending requests:
      152// - REQ_ACK : it checks that the target thread is blocked, decrements the response counter
      153//   to acknowledge the client thread, and resets the pending request.
      154// - REQ_DELETE : it detaches the target thread from its parent if attached, detaches it
      155//   from the process, removes it from the scheduler, releases the memory allocated to the
      156//   thread descriptor, and destroys the process descriptor if it was the last thread.
     157////////////////////////////////////////////////////////////////////////////////////////////
     158// @ core    : local pointer on the core descriptor.
     159////////////////////////////////////////////////////////////////////////////////////////////
     160static void sched_handle_signals( core_t * core )
    193161{
    194162
     
    197165    thread_t     * thread;
    198166    process_t    * process;
    199     bool_t         last_thread;
     167    scheduler_t  * sched;
     168    bool_t         last;
    200169
    201170    // get pointer on scheduler
    202     scheduler_t  * sched = &core->scheduler;
     171    sched = &core->scheduler;
    203172
    204173    // get pointer on user threads root
    205174    root = &sched->u_root;
    206175
    207     // take lock protecting threads lists
    208     spinlock_lock( &sched->lock );
    209 
    210176    // We use a while to scan the user threads, to control the iterator increment,
    211     // because some threads will be destroyed, and we cannot use a LIST_FOREACH()
      177    // because some threads will be destroyed, and we do not want to use a LIST_FOREACH()
    212178
    213179    // initialise list iterator
     
    226192        if( thread->flags & THREAD_FLAG_REQ_ACK )
    227193        {
    228             // check thread blocked
    229             assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
    230             "thread not blocked" );
     194
     195// check thread blocked
     196assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
     197"thread not blocked" );
    231198 
    232199            // decrement response counter
     
    237204        }
    238205
    239         // handle REQ_DELETE
    240         if( thread->flags & THREAD_FLAG_REQ_DELETE )
     206        // handle REQ_DELETE only if target thread != calling thread
     207        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
    241208        {
    242209            // get thread process descriptor
     
    246213                if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
    247214
    248             // remove thread from scheduler (scheduler lock already taken)
      215            // take lock protecting scheduler state
     216            busylock_acquire( &sched->lock );
     217
     218            // update scheduler state
    249219            uint32_t threads_nr = sched->u_threads_nr;
    250 
    251             assert( (threads_nr != 0) , "u_threads_nr cannot be 0\n" );
    252 
    253220            sched->u_threads_nr = threads_nr - 1;
    254221            list_unlink( &thread->sched_list );
     
    269236            }
    270237
     238            // release lock protecting scheduler state
     239            busylock_release( &sched->lock );
     240
    271241            // delete thread descriptor
    272             last_thread = thread_destroy( thread );
     242            last = thread_destroy( thread );
    273243
    274244#if DEBUG_SCHED_HANDLE_SIGNALS
     
    279249#endif
    280250            // destroy process descriptor if no more threads
    281             if( last_thread )
     251            if( last )
    282252            {
    283253                // delete process   
     
    293263        }
    294264    }
     265} // end sched_handle_signals()
     266
     267////////////////////////////////////////////////////////////////////////////////////////////
      268// This static function is called by the sched_yield function when the RPC_FIFO
      269// associated with the core is not empty.
      270// It checks whether an idle (blocked) RPC thread exists for this core, and unblocks
      271// it if found. It creates a new RPC thread if no idle RPC thread is found.
     272////////////////////////////////////////////////////////////////////////////////////////////
     273// @ sched   : local pointer on scheduler.
     274////////////////////////////////////////////////////////////////////////////////////////////
     275void sched_rpc_activate( scheduler_t * sched )
     276{
     277    error_t         error;
     278    thread_t      * thread; 
     279    list_entry_t  * iter;
     280    lid_t           lid = CURRENT_THREAD->core->lid;
     281    bool_t          found = false;
     282
     283    // search one IDLE RPC thread associated to the selected core   
     284    LIST_FOREACH( &sched->k_root , iter )
     285    {
     286        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     287        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
     288        {
     289            // exit loop
     290            found = true;
     291            break;
     292        }
     293    }
     294
     295    if( found == false )     // create new RPC thread     
     296    {
     297        error = thread_kernel_create( &thread,
     298                                      THREAD_RPC,
     299                                              &rpc_thread_func,
     300                                      NULL,
     301                                          lid );
     302        // check memory
     303        if ( error )
     304        {
     305            printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
     306            __FUNCTION__, local_cxy );
     307        }
     308        else
     309        {
     310            // unblock created RPC thread
     311            thread->blocked = 0;
     312
     313            // update RPC threads counter 
     314            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );
     315
     316#if DEBUG_SCHED_RPC_ACTIVATE
     317uint32_t cycle = (uint32_t)hal_get_cycles();
     318if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
     319printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
     320__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
     321#endif
     322        }
     323    }
     324    else                 // RPC thread found => unblock it
     325    {
     326        // unblock found RPC thread
     327        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
     328
     329#if DEBUG_SCHED_RPC_ACTIVATE
     330uint32_t cycle = (uint32_t)hal_get_cycles();
     331if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
     332printk("\n[DBG] %s : idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
     333__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
     334#endif
     335
     336    }
     337
     338} // end sched_rpc_activate()
     339
     340
     341
     342///////////////////////////////////////////////////////////////////////////////////////////
     343//         public functions
     344///////////////////////////////////////////////////////////////////////////////////////////
     345
     346////////////////////////////////
     347void sched_init( core_t * core )
     348{
     349    scheduler_t * sched = &core->scheduler;
     350
     351    sched->u_threads_nr   = 0;
     352    sched->k_threads_nr   = 0;
     353
     354    sched->current        = CURRENT_THREAD;
     355    sched->idle           = NULL;               // initialized in kernel_init()
     356    sched->u_last         = NULL;               // initialized in sched_register_thread()
     357    sched->k_last         = NULL;               // initialized in sched_register_thread()
     358
     359    // initialise threads lists
     360    list_root_init( &sched->u_root );
     361    list_root_init( &sched->k_root );
     362
     363    // init lock
     364    busylock_init( &sched->lock , LOCK_SCHED_STATE );
     365
     366    sched->req_ack_pending = false;             // no pending request
     367    sched->trace           = false;             // context switches trace desactivated
     368
     369}  // end sched_init()
     370
     371////////////////////////////////////////////
     372void sched_register_thread( core_t   * core,
     373                            thread_t * thread )
     374{
     375    scheduler_t * sched = &core->scheduler;
     376    thread_type_t type  = thread->type;
     377
      378    // take lock protecting scheduler state
     379    busylock_acquire( &sched->lock );
     380
     381    if( type == THREAD_USER )
     382    {
     383        list_add_last( &sched->u_root , &thread->sched_list );
     384        sched->u_threads_nr++;
     385        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
     386    }
     387    else // kernel thread
     388    {
     389        list_add_last( &sched->k_root , &thread->sched_list );
     390        sched->k_threads_nr++;
     391        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
     392    }
    295393
    296394    // release lock
    297     hal_fence();
    298     spinlock_unlock( &sched->lock );
    299 
    300 } // end sched_handle_signals()
    301 
    302 ////////////////////////////////
     395    busylock_release( &sched->lock );
     396
     397}  // end sched_register_thread()
     398
     399//////////////////////////////////////
    303400void sched_yield( const char * cause )
    304401{
    305     thread_t    * next;
    306     thread_t    * current = CURRENT_THREAD;
    307     core_t      * core    = current->core;
    308     scheduler_t * sched   = &core->scheduler;
     402    thread_t      * next;
     403    thread_t      * current = CURRENT_THREAD;
     404    core_t        * core    = current->core;
     405    lid_t           lid     = core->lid;
     406    scheduler_t   * sched   = &core->scheduler;
     407    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];
    309408 
    310409#if (DEBUG_SCHED_YIELD & 0x1)
    311 if( sched->trace )
    312 sched_display( core->lid );
     410if( sched->trace ) sched_display( lid );
    313411#endif
    314412
    315     // delay the yield if current thread has locks
    316     if( (current->local_locks != 0) || (current->remote_locks != 0) )
    317     {
    318         current->flags |= THREAD_FLAG_SCHED;
    319         return;
    320     }
    321 
    322     // enter critical section / save SR in current thread descriptor
    323     hal_disable_irq( &CURRENT_THREAD->save_sr );
    324 
    325     // loop on threads to select next thread
     413// check current thread busylocks counter
     414assert( (current->busylocks == 0),
     415"thread cannot yield : busylocks = %d\n", current->busylocks );
     416
     417    // activate or create an RPC thread if RPC_FIFO non empty
     418    if( remote_fifo_is_empty( fifo ) == false )  sched_rpc_activate( sched );
     419
     420    // disable IRQs / save SR in current thread descriptor
     421    hal_disable_irq( &current->save_sr );
     422
      423    // take lock protecting scheduler state
     424    busylock_acquire( &sched->lock );
     425   
     426    // select next thread
    326427    next = sched_select( sched );
    327428
    328     // check next thread kernel_stack overflow
    329     assert( (next->signature == THREAD_SIGNATURE),
    330     "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, core->lid );
    331 
    332     // check next thread attached to same core as the calling thread
    333     assert( (next->core == current->core),
    334     "next core %x != current core %x\n", next->core, current->core );
    335 
    336     // check next thread not blocked when type != IDLE
    337     assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
    338     "next thread %x (%s) is blocked on core[%x,%d]\n",
    339     next->trdid , thread_type_str(next->type) , local_cxy , core->lid );
     429// check next thread kernel_stack overflow
     430assert( (next->signature == THREAD_SIGNATURE),
     431"kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, lid );
     432
     433// check next thread attached to same core as the calling thread
     434assert( (next->core == current->core),
     435"next core %x != current core %x\n", next->core, current->core );
     436
     437// check next thread not blocked when type != IDLE
     438assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
     439"next thread %x (%s) is blocked on core[%x,%d]\n",
     440next->trdid , thread_type_str(next->type) , local_cxy , lid );
    340441
    341442    // switch contexts and update scheduler state if next != current
    342443        if( next != current )
    343444    {
     445        // update scheduler
     446        sched->current = next;
     447        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
     448        else                            sched->k_last = &next->sched_list;
     449
     450        // handle FPU ownership
     451            if( next->type == THREAD_USER )
     452        {
     453                if( next == current->core->fpu_owner )  hal_fpu_enable();
     454                else                                    hal_fpu_disable();
     455        }
     456
     457        // release lock protecting scheduler state
     458        busylock_release( &sched->lock );
    344459
    345460#if DEBUG_SCHED_YIELD
     
    347462printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    348463"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
    349 __FUNCTION__, local_cxy, core->lid, cause,
     464__FUNCTION__, local_cxy, lid, cause,
    350465current, thread_type_str(current->type), current->process->pid, current->trdid,next ,
    351466thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
    352467#endif
    353468
    354         // update scheduler
    355         sched->current = next;
    356         if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
    357         else                            sched->k_last = &next->sched_list;
    358 
    359         // handle FPU ownership
    360             if( next->type == THREAD_USER )
    361         {
    362                 if( next == current->core->fpu_owner )  hal_fpu_enable();
    363                 else                                    hal_fpu_disable();
    364         }
    365 
    366469        // switch CPU from current thread context to new thread context
    367470        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
     
    369472    else
    370473    {
     474        // release lock protecting scheduler state
     475        busylock_release( &sched->lock );
    371476
    372477#if DEBUG_SCHED_YIELD
     
    374479printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    375480"      thread %x (%s) (%x,%x) continue / cycle %d\n",
    376 __FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),
     481__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
    377482current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
    378483#endif
     
    394499    list_entry_t * iter;
    395500    thread_t     * thread;
    396     uint32_t       save_sr;
    397 
    398     assert( (lid < LOCAL_CLUSTER->cores_nr), "illegal core index %d\n", lid);
     501
     502// check lid
     503assert( (lid < LOCAL_CLUSTER->cores_nr),
     504"illegal core index %d\n", lid);
    399505
    400506    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
     
    406512    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    407513
    408     // get extended pointer on remote TXT0 chdev lock
     514    // get extended pointer on remote TXT0 lock
    409515    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    410516
    411     // get TXT0 lock in busy waiting mode
    412     remote_spinlock_lock_busy( lock_xp , &save_sr );
     517    // get TXT0 lock
     518    remote_busylock_acquire( lock_xp );
    413519
    414520    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
     
    443549
    444550    // release TXT0 lock
    445     remote_spinlock_unlock_busy( lock_xp , save_sr );
     551    remote_busylock_release( lock_xp );
    446552
    447553}  // end sched_display()
     
    452558{
    453559    thread_t     * thread;
    454     uint32_t       save_sr;
    455 
    456     // check cxy
    457     bool_t undefined = cluster_is_undefined( cxy );
    458     assert( (undefined == false), "illegal cluster %x\n", cxy );
    459 
    460     // check lid
    461     uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
    462     assert( (lid < cores), "illegal core index %d\n", lid);
     560
     561// check cxy
     562assert( (cluster_is_undefined( cxy ) == false),
     563"illegal cluster %x\n", cxy );
     564
     565// check lid
     566assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
     567"illegal core index %d\n", lid );
    463568
    464569    // get local pointer on target scheduler
     
    481586    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    482587
    483     // get TXT0 lock in busy waiting mode
    484     remote_spinlock_lock_busy( lock_xp , &save_sr );
     588    // get TXT0 lock
     589    remote_busylock_acquire( lock_xp );
    485590
    486591    // display header
     
    495600
    496601        // get relevant thead info
    497         thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
    498         trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
    499         uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
    500         uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
     602        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
     603        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
     604        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
     605        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
    501606        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
    502         pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
     607        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
    503608
    504609        // display thread info
     
    529634
    530635        // get relevant thead info
    531         thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
    532         trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
    533         uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
    534         uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
     636        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
     637        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
     638        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
     639        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
    535640        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
    536         pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
     641        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
    537642
    538643        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
     
    544649
    545650    // release TXT0 lock
    546     remote_spinlock_unlock_busy( lock_xp , save_sr );
     651    remote_busylock_release( lock_xp );
    547652
    548653}  // end sched_remote_display()
    549654
     655
  • trunk/kernel/kern/scheduler.h

    r470 r564  
    22 * scheduler.h - Core scheduler definition.
    33 *
    4  * Author    Alain Greiner (2016)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2727#include <hal_kernel_types.h>
    2828#include <list.h>
    29 #include <spinlock.h>
     29#include <busylock.h>
    3030
    3131/****  Forward declarations  ****/
     
    4040typedef struct scheduler_s
    4141{
    42     spinlock_t        lock;            /*! lock protecting lists of threads                 */
     42    busylock_t        lock;            /*! lock protecting scheduler state                  */
    4343    uint16_t          u_threads_nr;    /*! total number of attached user threads            */
    4444    uint16_t          k_threads_nr;    /*! total number of attached kernel threads          */
     
    6161/*********************************************************************************************
    6262 * This function atomically registers a new thread in a given core scheduler.
     63 * Note: There is no specific sched_remove_thread(), as a thread is always deleted
     64 * by the sched_handle_signals() function, called by the sched_yield() function.
    6365 *********************************************************************************************
    6466 * @ core    : local pointer on the core descriptor.
     
    7072/*********************************************************************************************
    7173 * This function is the only method to make a context switch. It is called in case of TICK,
    72  * or when when a thread explicitely requires a scheduling.
    73  * It handles the pending signals for all threads attached to the core running the calling
    74  * thread, and calls the sched_select() function to select a new thread.
    75  * The cause argument is only used for debug by the sched_display() function, and
    76  * indicates the scheduling cause.
     74 * or when a thread explicitly requires to be descheduled.
     75 * It takes the scheduler busylock to atomically update the scheduled state.
     76 * It calls the sched_select() private function to select a new thread. After switch, it
     77 * calls the sched_handle_signals() private function to handle the pending REQ_ACK and
     78 * REQ_DELETE flags for all threads attached to the scheduler: it deletes all threads
     79 * marked for delete (and the process descriptor when the deleted thread is the main thread).
     80 * As the REQ_DELETE flag can be asynchronously set (between the select and the handle),
     81 * the sched_handle_signals() function checks that the thread to delete is not the new thread,
     82 * because a thread cannot delete itself.
     83 * The cause argument is only used for debug by the sched_display() functions, and indicates
     84 * the scheduling cause.
    7785 *********************************************************************************************
    7886 * @ cause    : character string defining the scheduling cause.
     
    8088void sched_yield( const char * cause );
    8189
    82 /*********************************************************************************************
    83  * This function scan all threads attached to a given scheduler, and executes the relevant
    84  * actions for pending THREAD_FLAG_REQ_ACK or THREAD_FLAG_REQ_DELETE requests.
    85  * It is called in by the sched_yield() function, with IRQ disabled.
    86  * - REQ_ACK : it checks that target thread is blocked, decrements the response counter
    87  *   to acknowledge the client thread, and reset the pending request.
    88  * - REQ_DELETE : it detach the target thread from parent if attached, detach it from
    89  *   the process, remove it from scheduler, release memory allocated to thread descriptor,
    90  *   and destroy the process descriptor it the target thread was the last thread.
    91  *********************************************************************************************
    92  * @ core    : local pointer on the core descriptor.
    93  ********************************************************************************************/
    94 void sched_handle_signals( struct core_s * core );
    95 
    96 /*********************************************************************************************
    97  * This function does NOT modify the scheduler state.
    98  * It just select a thread in the list of attached threads, implementing the following
    99  * three steps policy:
    100  * 1) It scan the list of kernel threads, from the next thread after the last executed one,
    101  *    and returns the first runnable found : not IDLE, not blocked, client queue not empty.
    102  *    It can be the current thread.
    103  * 2) If no kernel thread found, it scan the list of user thread, from the next thread after
    104  *    the last executed one, and returns the first runable found : not blocked.
    105  *    It can be the current thread.
    106  * 3) If no runable thread found, it returns the idle thread.
    107  *********************************************************************************************
    108  * @ core    : local pointer on scheduler.
    109  * @ returns pointer on selected thread descriptor
    110  ********************************************************************************************/
    111 struct thread_s * sched_select( struct scheduler_s * sched );
    112 
    11390/*********************************************************************************************
    11491 * This debug function displays on TXT0 the internal state of a local scheduler,
    115  * identified by the core local index <lid>.
     92 * identified by the core local index <lid>. It must be called by a local thread.
    11693 *********************************************************************************************
    11794 * @ lid      : local index of target core.
     
    123100 * identified by the target cluster identifier <cxy> and the core local index <lid>.
    124101 * It can be called by a thread running in any cluster, as it uses remote accesses,
    125  * to scan the scheduler local lists of threads.
     102 * to scan the scheduler lists of threads.
    126103 *********************************************************************************************
    127104 * @ cxy      : target cluster identifier
  • trunk/kernel/kern/thread.c

    r531 r564  
    11/*
    2  * thread.c -  implementation of thread operations (user & kernel)
     2 * thread.c -   thread operations implementation (user & kernel)
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017)
     5 *         Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4848//////////////////////////////////////////////////////////////////////////////////////
    4949
    50 extern process_t      process_zero;
     50extern process_t            process_zero;       // allocated in kernel_init.c
     51extern char               * lock_type_str[];    // allocated in kernel_init.c
     52extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
    5153
    5254//////////////////////////////////////////////////////////////////////////////////////
     
    145147        cluster_t    * local_cluster = LOCAL_CLUSTER;
    146148
    147 #if DEBUG_THREAD_USER_INIT
     149#if DEBUG_THREAD_INIT
    148150uint32_t cycle = (uint32_t)hal_get_cycles();
    149 if( DEBUG_THREAD_USER_INIT < cycle )
    150 printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n",
    151 __FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
    152 #endif
    153 
    154     // register new thread in process descriptor, and get a TRDID
    155     thread->type = type; // needed by process_register_thread.
    156     error = process_register_thread( process, thread , &trdid );
    157 
    158     if( error )
    159     {
    160         printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
    161         return EINVAL;
    162     }
     151if( DEBUG_THREAD_INIT < cycle )
     152printk("\n[DBG] %s : thread %x in process %x enter for thread %x in process %x / cycle %d\n",
     153__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     154 thread, process->pid , cycle );
     155#endif
    163156
    164157    // compute thread descriptor size without kernel stack
     
    166159
    167160        // Initialize new thread descriptor
    168     thread->trdid           = trdid;
     161        thread->type            = type;
    169162    thread->quantum         = 0;            // TODO
    170163    thread->ticks_nr        = 0;            // TODO
     
    173166        thread->process         = process;
    174167
    175     thread->local_locks     = 0;
    176     thread->remote_locks    = 0;
    177 
    178 #if CONFIG_LOCKS_DEBUG
    179     list_root_init( &thread->locks_root ); 
    180     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
     168    thread->busylocks       = 0;
     169
     170#if DEBUG_BUSYLOCK
     171    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
    181172#endif
    182173
     
    194185    thread->blocked         = THREAD_BLOCKED_GLOBAL;
    195186
    196     // reset sched list
     187    // register new thread in process descriptor, and get a TRDID
     188    error = process_register_thread( process, thread , &trdid );
     189
     190    if( error )
     191    {
     192        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
     193        return EINVAL;
     194    }
     195
     196    // initialize trdid
     197    thread->trdid           = trdid;
     198
     199    // initialize sched list
    197200    list_entry_init( &thread->sched_list );
    198201
    199     // reset thread info
     202    // initialize waiting queue entries
     203    list_entry_init( &thread->wait_list );
     204    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );
     205
     206    // initialize thread info
    200207    memset( &thread->info , 0 , sizeof(thread_info_t) );
    201208
    202     // initializes join_lock
    203     remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );
     209    // initialize join_lock
     210    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
    204211
    205212    // initialise signature
     
    216223    dqdt_update_threads( 1 );
    217224
    218 #if DEBUG_THREAD_USER_INIT
     225#if DEBUG_THREAD_INIT
    219226cycle = (uint32_t)hal_get_cycles();
    220 if( DEBUG_THREAD_USER_INIT < cycle )
    221 printk("\n[DBG] %s : thread %x exit  after init of thread %x in process %x / cycle %d\n",
    222 __FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
     227if( DEBUG_THREAD_INIT < cycle )
     228printk("\n[DBG] %s : thread %x in process %x exit for thread %x in process %x / cycle %d\n",
     229__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     230thread, process->pid , cycle );
    223231#endif
    224232
     
    436444    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
    437445    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
    438     size  = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
    439     flags =           hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags         ));
     446    size  = (uint32_t)hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
     447    flags =           hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->flags         ));
    440448    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
    441449
     
    474482    }
    475483
     484#if (DEBUG_THREAD_USER_FORK & 1)
     485if( DEBUG_THREAD_USER_FORK < cycle )
     486printk("\n[DBG] %s : thread %x in process %x / initialised thread %x in process %x\n",
     487__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     488child_ptr->trdid, child_process->pid );
     489#endif
     490
    476491    // return child pointer
    477492    *child_thread = child_ptr;
     
    502517    }
    503518
    504     // create and initialize STACK vseg
     519#if (DEBUG_THREAD_USER_FORK & 1)
     520if( DEBUG_THREAD_USER_FORK < cycle )
     521printk("\n[DBG] %s : thread %x in process %x / created CPU & FPU contexts\n",
     522__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     523#endif
     524
     525   // create and initialize STACK vseg
    505526    vseg = vseg_alloc();
    506527    vseg_init( vseg,
     
    514535
    515536    // register STACK vseg in local child VSL
    516     vseg_attach( &child_process->vmm , vseg );
     537    vmm_vseg_attach( &child_process->vmm , vseg );
     538
     539#if (DEBUG_THREAD_USER_FORK & 1)
     540if( DEBUG_THREAD_USER_FORK < cycle )
     541printk("\n[DBG] %s : thread %x in process %x / created stack vseg\n",
     542__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     543#endif
    517544
    518545    // copy all valid STACK GPT entries   
     
    530557        if( error )
    531558        {
    532             vseg_detach( vseg );
     559            vmm_vseg_detach( &child_process->vmm , vseg );
    533560            vseg_free( vseg );
    534561            thread_release( child_ptr );
     
    549576            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    550577
    551             // increment the forks counter
    552             remote_spinlock_lock( lock_xp ); 
     578            // get lock protecting page
     579            remote_busylock_acquire( lock_xp ); 
     580
     581            // increment the forks counter in page descriptor
    553582            hal_remote_atomic_add( forks_xp , 1 );
    554             remote_spinlock_unlock( lock_xp ); 
     583
     584            // release lock protecting page
     585            remote_busylock_release( lock_xp ); 
    555586
    556587#if (DEBUG_THREAD_USER_FORK & 1)
     
    559590printk("\n[DBG] %s : thread %x in process %x copied one PTE to child GPT : vpn %x / forks %d\n",
    560591__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, vpn,
    561 hal_remote_lw( XPTR( page_cxy , &page_ptr->forks) ) );
     592hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
    562593#endif
    563594
     
    596627#endif
    597628
    598         assert( (thread->type == THREAD_USER )          , "bad type" );
    599         assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
    600         assert( (thread->local_locks == 0)              , "bad local locks" );
    601         assert( (thread->remote_locks == 0)             , "bad remote locks" );
     629// check parent thread attributes
     630assert( (thread->type == THREAD_USER )          , "bad type" );
     631assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
     632assert( (thread->busylocks == 0)                , "bad busylocks" );
    602633
    603634        // re-initialize various thread descriptor fields
     
    605636    thread->ticks_nr        = 0;            // TODO
    606637    thread->time_last_check = 0;            // TODO
    607 
    608 #if CONFIG_LOCKS_DEBUG
    609     list_root_init( &thread->locks_root ); 
    610     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
    611 #endif
    612638
    613639    thread->entry_func      = entry_func;
     
    622648    thread->fork_cxy        = 0;    // not inherited
    623649
     650    // re-initialize busylocks counters
     651    thread->busylocks       = 0;
     652
    624653    // reset thread info
    625654    memset( &thread->info , 0 , sizeof(thread_info_t) );
    626655
    627     // initialize join_lock
    628     remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );
     656    // re-initialize join_lock
     657    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
    629658
    630659    // allocate an user stack vseg for main thread
     
    664693        hal_cpu_context_exec( thread );
    665694
    666     assert( false, "we should execute this code");
     695    assert( false, "we should not execute this code");
    667696 
    668697    return 0;
     
    742771                           lid_t           core_lid )
    743772{
    744     assert( (type == THREAD_IDLE) , "illegal thread type" );
    745     assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
     773
     774// check arguments
     775assert( (type == THREAD_IDLE) , "illegal thread type" );
     776assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
    746777
    747778    // initialize thread descriptor
     
    784815#endif
    785816
    786     assert( (thread->local_locks == 0) ,
    787     "local lock not released for thread %x in process %x", thread->trdid, process->pid );
    788 
    789     assert( (thread->remote_locks == 0) ,
    790     "remote lock not released for thread %x in process %x", thread->trdid, process->pid );
     817// check busylocks counter
     818assert( (thread->busylocks == 0) ,
     819"busylock not released for thread %x in process %x", thread->trdid, process->pid );
    791820
    792821    // update instrumentation values
     
    890919}  // thread_reset_req_ack()
    891920
    892 ////////////////////////////////
    893 inline bool_t thread_can_yield( void )
    894 {
    895     thread_t * this = CURRENT_THREAD;
    896     return (this->local_locks == 0) && (this->remote_locks == 0);
    897 }
    898 
    899 /////////////////////////
    900 void thread_check_sched( void )
    901 {
    902     thread_t * this = CURRENT_THREAD;
    903 
    904         if( (this->local_locks == 0) &&
    905         (this->remote_locks == 0) &&
    906         (this->flags & THREAD_FLAG_SCHED) )
    907     {
    908         this->flags &= ~THREAD_FLAG_SCHED;
    909         sched_yield( "delayed scheduling" );
    910     }
    911 
    912 }  // end thread_check_sched()
    913 
    914921//////////////////////////////////////
    915922void thread_block( xptr_t   thread_xp,
     
    930937printk("\n[DBG] %s : thread %x in process %x blocked thread %x in process %x / cause %x\n",
    931938__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    932 ptr->trdid, hal_remote_lw(XPTR( cxy , &process->pid )), cause );
     939ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
    933940#endif
    934941
     
    953960printk("\n[DBG] %s : thread %x in process %x unblocked thread %x in process %x / cause %x\n",
    954961__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    955 ptr->trdid, hal_remote_lw(XPTR( cxy , &process->pid )), cause );
     962ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
    956963#endif
    957964
     
    974981    thread_t  * target_ptr;             // pointer on target thread
    975982    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    976     uint32_t    target_flags;           // target thread <flags> value
    977983    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
    978984    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
     
    982988    thread_t  * joining_ptr;            // pointer on joining thread
    983989    cxy_t       joining_cxy;            // joining thread cluster
    984     cxy_t       owner_cxy;              // process owner cluster
    985 
    986 
    987     // get target thread pointers, identifiers, and flags
     990
     991    // get target thread cluster and local pointer
    988992    target_cxy      = GET_CXY( target_xp );
    989993    target_ptr      = GET_PTR( target_xp );
    990     target_trdid    = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
     994
     995    // get target thread identifiers, and attached flag
     996    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
    991997    target_ltid     = LTID_FROM_TRDID( target_trdid );
    992998    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    993     target_flags    = hal_remote_lw( target_flags_xp );
     999    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
    9941000
    9951001    // get killer thread pointers
     
    9981004
    9991005#if DEBUG_THREAD_DELETE
    1000 uint32_t cycle  = (uint32_t)hal_get_cycles;
     1006uint32_t cycle  = (uint32_t)hal_get_cycles();
    10011007if( DEBUG_THREAD_DELETE < cycle )
    1002 printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
    1003 __FUNCTION__, killer_ptr, target_ptr, cycle );
    1004 #endif
    1005 
    1006     // target thread cannot be the main thread, because the main thread
    1007     // must be deleted by the parent process sys_wait() function
    1008     owner_cxy = CXY_FROM_PID( pid );
    1009     assert( ((owner_cxy != target_cxy) || (target_ltid != 0)),
    1010     "tharget thread cannot be the main thread\n" );
     1008printk("\n[DBG] %s : thread %x in process %x enters / target thread %x / cycle %d\n",
     1009__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
     1010#endif
     1011
     1012// check killer thread can yield
     1013assert( (killer_ptr->busylocks == 0),
     1014"cannot yield : busylocks = %d\n", killer_ptr->busylocks );
     1015
     1016// check target thread is not the main thread, because the main thread
     1017// must be deleted by the parent process sys_wait() function
     1018assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
     1019"target thread cannot be the main thread\n" );
    10111020
    10121021    // block the target thread
    10131022    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
    10141023
    1015     // get attached from target flag descriptor
    1016     target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
    1017 
    1018     // synchronize with the joining thread if the target thread is attached
    1019     if( target_attached && (is_forced == false) )
    1020     {
     1024    // synchronize with the joining thread if attached
     1025    if( target_attached && (is_forced == false) )
     1026    {
     1027
     1028#if (DEBUG_THREAD_DELETE & 1)
     1029if( DEBUG_THREAD_DELETE < cycle )
     1030printk("\n[DBG] %s : thread %x in process %x / target thread is attached\n",
     1031__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1032#endif
    10211033        // build extended pointers on target thread join fields
    10221034        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
     
    10271039
    10281040        // take the join_lock in target thread descriptor
    1029         remote_spinlock_lock( target_join_lock_xp );
     1041        remote_busylock_acquire( target_join_lock_xp );
    10301042
    10311043        // get join_done from target thread descriptor
    1032         target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
     1044        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
    10331045   
    10341046        if( target_join_done )  // joining thread arrived first => unblock the joining thread
    10351047        {
     1048
     1049#if (DEBUG_THREAD_DELETE & 1)
     1050if( DEBUG_THREAD_DELETE < cycle )
     1051printk("\n[DBG] %s : thread %x in process %x / joining thread arrived first\n",
     1052__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1053#endif
    10361054            // get extended pointer on joining thread
    1037             joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
     1055            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
    10381056            joining_ptr = GET_PTR( joining_xp );
    10391057            joining_cxy = GET_CXY( joining_xp );
     
    10461064
    10471065            // release the join_lock in target thread descriptor
    1048             remote_spinlock_unlock( target_join_lock_xp );
     1066            remote_busylock_release( target_join_lock_xp );
     1067
     1068            // set the REQ_DELETE flag in target thread descriptor
     1069            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    10491070
    10501071            // restore IRQs
    10511072            hal_restore_irq( save_sr );
    10521073        }
    1053         else                // this thread arrived first => register flags and deschedule
     1074        else                // killer thread arrived first => register flags and deschedule
    10541075        {
     1076
     1077#if (DEBUG_THREAD_DELETE & 1)
     1078if( DEBUG_THREAD_DELETE < cycle )
     1079printk("\n[DBG] %s : thread %x in process %x / killer thread arrived first\n",
     1080__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1081#endif
    10551082            // set the kill_done flag in target thread
    10561083            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
     
    10601087
    10611088            // set extended pointer on killer thread in target thread
    1062             hal_remote_swd( target_join_xp_xp , killer_xp );
     1089            hal_remote_s64( target_join_xp_xp , killer_xp );
    10631090
    10641091            // release the join_lock in target thread descriptor
    1065             remote_spinlock_unlock( target_join_lock_xp );
    1066 
     1092            remote_busylock_release( target_join_lock_xp );
     1093
     1094#if (DEBUG_THREAD_DELETE & 1)
     1095if( DEBUG_THREAD_DELETE < cycle )
     1096printk("\n[DBG] %s : thread %x in process %x / killer thread deschedule\n",
     1097__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1098#endif
    10671099            // deschedule
    10681100            sched_yield( "killer thread wait joining thread" );
     1101
     1102#if (DEBUG_THREAD_DELETE & 1)
     1103if( DEBUG_THREAD_DELETE < cycle )
     1104printk("\n[DBG] %s : thread %x in process %x / killer thread resume\n",
     1105__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1106#endif
     1107            // set the REQ_DELETE flag in target thread descriptor
     1108            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    10691109
    10701110            // restore IRQs
    10711111            hal_restore_irq( save_sr );
    10721112        }
    1073     }  // end if attached
    1074 
    1075     // set the REQ_DELETE flag in target thread descriptor
    1076     hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1113    }
     1114    else                                                   // target thread not attached
     1115    {
     1116        // set the REQ_DELETE flag in target thread descriptor
     1117        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1118    }
    10771119
    10781120#if DEBUG_THREAD_DELETE
    10791121cycle  = (uint32_t)hal_get_cycles();
    10801122if( DEBUG_THREAD_DELETE < cycle )
    1081 printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
    1082 __FUNCTION__, killer_ptr, target_ptr, cycle );
     1123printk("\n[DBG] %s : thread %x in process %x exit / target thread %x / cycle %d\n",
     1124__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
    10831125#endif
    10841126
     
    10871129
    10881130
    1089 ///////////////////////
     1131/////////////////////////////
    10901132void thread_idle_func( void )
    10911133{
    1092 
    1093 #if DEBUG_THREAD_IDLE
    1094 uint32_t cycle;
    1095 #endif
    1096 
    10971134    while( 1 )
    10981135    {
     
    11041141        {
    11051142
    1106 #if (DEBUG_THREAD_IDLE & 1)
    1107 cycle  = (uint32_t)hal_get_cycles;
     1143#if DEBUG_THREAD_IDLE
     1144{
     1145uint32_t cycle = (uint32_t)hal_get_cycles();
    11081146if( DEBUG_THREAD_IDLE < cycle )
    11091147printk("\n[DBG] %s : idle thread on core[%x,%d] goes to sleep / cycle %d\n",
    11101148__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
     1149}
    11111150#endif
    11121151
    11131152            hal_core_sleep();
    11141153
    1115 #if (DEBUG_THREAD_IDLE & 1)
    1116 cycle  = (uint32_t)hal_get_cycles;
     1154#if DEBUG_THREAD_IDLE
     1155{
     1156uint32_t cycle = (uint32_t)hal_get_cycles();
    11171157if( DEBUG_THREAD_IDLE < cycle )
    11181158printk("\n[DBG] %s : idle thread on core[%x,%d] wake up / cycle %d\n",
    11191159__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
     1160}
    11201161#endif
    11211162
     
    11231164
    11241165#if DEBUG_THREAD_IDLE
     1166{
     1167uint32_t cycle = (uint32_t)hal_get_cycles();
     1168if( DEBUG_THREAD_IDLE < cycle )
    11251169sched_display( CURRENT_THREAD->core->lid );
     1170}
    11261171#endif     
    1127 
    11281172        // search a runable thread
    1129         sched_yield( "IDLE" );
    1130     }
     1173        sched_yield( "running idle thread" );
     1174
     1175    } // end while
     1176
    11311177}  // end thread_idle()
    11321178
     
    11341180///////////////////////////////////////////
    11351181void thread_time_update( thread_t * thread,
    1136                          uint32_t   is_user )
     1182                         bool_t     is_user )
    11371183{
    11381184    cycle_t current_cycle;   // current cycle counter value
     
    11541200    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
    11551201    else          info->sys_cycles += (current_cycle - last_cycle);
    1156 }
     1202
     1203}  // end thread_time_update()
    11571204
    11581205/////////////////////////////////////
     
    11741221
    11751222    // check trdid argument
    1176         if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) ||
     1223        if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) ||
    11771224        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
    11781225
     
    11821229                       sizeof(xlist_entry_t) );
    11831230
    1184     // get extended pointer on lock protecting the list of processes
     1231    // get extended pointer on lock protecting the list of local processes
    11851232    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
    11861233
    11871234    // take the lock protecting the list of processes in target cluster
    1188     remote_spinlock_lock( lock_xp );
    1189 
    1190     // loop on list of process in target cluster to find the PID process
     1235    remote_queuelock_acquire( lock_xp );
     1236
     1237    // scan the list of local processes in target cluster
    11911238    xptr_t  iter;
    11921239    bool_t  found = false;
     
    11951242        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
    11961243        target_process_ptr = GET_PTR( target_process_xp );
    1197         target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
     1244        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
    11981245        if( target_process_pid == pid )
    11991246        {
     
    12041251
    12051252    // release the lock protecting the list of processes in target cluster
    1206     remote_spinlock_unlock( lock_xp );
     1253    remote_queuelock_release( lock_xp );
    12071254
    12081255    // check PID found
     
    12161263
    12171264    return XPTR( target_cxy , target_thread_ptr );
     1265
     1266}  // end thread_get_xptr()
     1267
     1268///////////////////////////////////////////////////
     1269void thread_assert_can_yield( thread_t    * thread,
     1270                              const char  * func_str )
     1271{
     1272    // does nothing if thread does not hold any busylock
     1273
     1274    if( thread->busylocks )
     1275    {
     1276        // get pointers on TXT0 chdev
     1277        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1278        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1279        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1280
     1281        // get extended pointer on TXT0 lock
     1282        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1283
     1284        // get TXT0 lock
     1285        remote_busylock_acquire( txt0_lock_xp );
     1286
     1287        // display error message on TXT0
     1288        nolock_printk("\n[PANIC] in %s / thread %x in process %x [%x] cannot yield : "
     1289        "%d busylock(s) / cycle %d\n",
     1290        func_str, thread->trdid, thread->process->pid, thread,
     1291        thread->busylocks, (uint32_t)hal_get_cycles() );
     1292
     1293#if DEBUG_BUSYLOCK
     1294if( XPTR( local_cxy , thread ) == DEBUG_BUSYLOCK_THREAD_XP )
     1295{
     1296    // get root of list of taken busylocks
     1297    xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
     1298    xptr_t    iter_xp;
     1299
     1300    // scan list of busylocks
     1301    XLIST_FOREACH( root_xp , iter_xp )
     1302    {
     1303        xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
     1304        cxy_t        lock_cxy  = GET_CXY( lock_xp );
     1305        busylock_t * lock_ptr  = GET_PTR( lock_xp );
     1306        uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
     1307        nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
     1308    }
    12181309}
    1219 
     1310#endif
     1311
     1312        // release TXT0 lock
     1313        remote_busylock_release( txt0_lock_xp );
     1314
     1315        // suicide
     1316        hal_core_sleep();
     1317    }
      1318}  // end thread_assert_can_yield()
     1319
     1320#if DEBUG_BUSYLOCK
     1321
     1322////////////////////////////////////////////////////
     1323void thread_display_busylocks( uint32_t   lock_type,
     1324                               bool_t     is_acquire )
     1325{
     1326    xptr_t    iter_xp;
     1327
     1328    // get cluster and local pointer of target thread
     1329    cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
     1330    thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
     1331
     1332    // get extended pointer on root of busylocks
     1333    xptr_t    root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
     1334
     1335   // get pointers on TXT0 chdev
     1336    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1337    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1338    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1339
     1340    // get extended pointer on remote TXT0 lock
     1341    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1342
     1343    // get TXT0 lock
     1344    remote_busylock_acquire( txt0_lock_xp );
     1345
     1346    if( is_acquire )
     1347    {
     1348        nolock_printk("\n### thread [%x,%x] ACQUIRE lock %s / root %x / locks :\n",
     1349        thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) );
     1350    }
     1351    else
     1352    {
     1353        nolock_printk("\n### thread [%x,%x] RELEASE lock %s / root %x / locks :\n",
     1354        thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) );
     1355    }
     1356
     1357    int i;
     1358
     1359    XLIST_FOREACH( root_xp , iter_xp )
     1360    {
     1361        xptr_t       ilock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
     1362        cxy_t        ilock_cxy  = GET_CXY( ilock_xp );
     1363        busylock_t * ilock_ptr  = GET_PTR( ilock_xp );
     1364        uint32_t     ilock_type = hal_remote_l32( XPTR( ilock_cxy , &ilock_ptr->type ) );
     1365        nolock_printk(" - %s in cluster %x\n", lock_type_str[ilock_type] , ilock_cxy );
     1366    }
     1367
     1368    // release TXT0 lock
     1369    remote_busylock_release( txt0_lock_xp );
     1370}
     1371#endif
  • trunk/kernel/kern/thread.h

    r527 r564  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016)
     5 *         Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    3232#include <list.h>
    3333#include <hal_context.h>
    34 #include <spinlock.h>
     34#include <remote_busylock.h>
    3535#include <core.h>
    3636#include <chdev.h>
     
    9292#define THREAD_BLOCKED_ISR       0x0400  /*! thread DEV wait ISR                      */
    9393#define THREAD_BLOCKED_WAIT      0x0800  /*! thread wait child process termination    */
     94#define THREAD_BLOCKED_LOCK      0x1000  /*! thread wait queuelock or rwlock          */
    9495
    9596/***************************************************************************************
     
    119120 * This TRDID is computed by the process_register_thread() function, when the user
    120121 * thread is registered in the local copy of the process descriptor.
    121  * WARNING : Don't modify the first 4 fields order, as this order is used by the
    122  * hal_kentry assembly code for the TSAR architecture.
     122 *
     123 * WARNING (1) Don't modify the first 4 fields order, as this order is used by the
     124 *             hal_kentry assembly code for some architectures (TSAR).
     125 *
     126 * WARNING (2) Most of the thread state is private and accessed only by this thread,
     127 *             but some fields are shared, and can be modified by other threads.
     128 *             - the "blocked" bit_vector can be modified by another thread
     129 *               running in another cluster (using atomic instructions),
     130 *               to change this thread scheduling status.
     131 *             - the "flags" bit_vector can be modified by another thread
     132 *               running in another cluster (using atomic instructions),
     133 *               to register requests such as ACK or DELETE.
     134 *             - the "join_xp" field can be modified by the joining thread,
     135 *               and this rendez-vous is protected by the dedicated "join_lock".
     136 *
     137 * WARNING (3) When this thread is blocked on a shared resource (queuelock, condvar,
     138 *             or chdev), it registers in the associated waiting queue, using the
     139 *             "wait_list" (local list) or "wait_xlist" (trans-cluster list) fields.
    123140 **************************************************************************************/
    124141
     
    144161    xptr_t              parent;          /*! extended pointer on parent thread        */
    145162
    146     remote_spinlock_t   join_lock;       /*! lock protecting the join/exit            */
     163    remote_busylock_t   join_lock;       /*! lock protecting the join/exit            */
    147164    xptr_t              join_xp;         /*! joining/killer thread extended pointer   */
    148165
     
    180197        cxy_t               rpc_client_cxy;  /*! client cluster index (for a RPC thread)  */
    181198
    182     xlist_entry_t       wait_list;       /*! member of threads blocked on same cond   */
    183 
    184     list_entry_t        locks_root;      /*! root of list of locks taken              */
    185     xlist_entry_t       xlocks_root;     /*! root of xlist of remote locks taken      */
    186         uint32_t            local_locks;         /*! number of local locks owned by thread    */
    187         uint32_t            remote_locks;        /*! number of remote locks owned by thread   */
     199    list_entry_t        wait_list;       /*! member of a local waiting queue          */
     200    xlist_entry_t       wait_xlist;      /*! member of a trans-cluster waiting queue  */
     201
     202        uint32_t            busylocks;       /*! number of taken busylocks                */
     203
     204#if DEBUG_BUSYLOCK
     205    xlist_entry_t       busylocks_root;  /*! root of xlist of taken busylocks         */
     206#endif
    188207
    189208        thread_info_t       info;            /*! embedded thread_info_t                   */
     
    311330
    312331/***************************************************************************************
    313  * This function is called by the sched_handle_signals() function to releases
      332 * This low-level function is called by the sched_handle_signals() function to release
    314333 * the physical memory allocated for a thread in a given cluster, when this thread
    315334 * is marked for delete. This include the thread descriptor itself, the associated
     
    363382 **************************************************************************************/
    364383void thread_reset_req_ack( thread_t * target );
    365 
    366 /***************************************************************************************
    367  * This function checks if the calling thread can deschedule.
    368  ***************************************************************************************
    369  * @ returns true if no locks taken.
    370  **************************************************************************************/
    371 inline bool_t thread_can_yield( void );
    372 
    373 /***************************************************************************************
    374  * This function implements the delayed descheduling mechanism : It is called  by
    375  * all lock release functions, and calls the sched_yield() function when all locks
    376  * have beeen released and the calling thread THREAD_FLAG_SCHED flag is set.
    377  **************************************************************************************/
    378 void thread_check_sched( void );
    379384
    380385/***************************************************************************************
     
    417422 * thread descriptor identified by the <thread_xp> argument.
    418423 * We need an extended pointer, because the client thread of an I/O operation on a
    419  * given device is not in the same cluster as the associated device descriptor.
     424 * given device is generally not in the same cluster as the associated server thread.
    420425 * WARNING : this function does not reschedule the remote thread.
    421426 * The scheduling can be forced by sending an IPI to the core running the remote thread.
     
    432437 ***************************************************************************************
    433438 * @ thread   : local pointer on target thread.
    434  * @ is_user  : update user time if non zero / update kernel time if zero
     439 * @ is_user  : update user time if true / update kernel time if false
    435440 **************************************************************************************/
    436441void thread_time_update( thread_t * thread,
    437                          uint32_t   is_user );
     442                         bool_t     is_user );
    438443
    439444/***************************************************************************************
     
    449454                        trdid_t  trdid );
    450455
     456/***************************************************************************************
      457 * This function checks that the thread identified by the <thread> argument does not
      458 * hold any busylock (local or remote).
     459 * If the xlist of taken busylocks is not empty, it displays the set of taken locks,
      460 * and causes a kernel panic.
     461 ***************************************************************************************
     462 * @ thread    : local pointer on target thread.
     463 * @ func_str  : faulty function name.
     464 **************************************************************************************/
     465void thread_assert_can_yield( thread_t    * thread,
     466                              const char  * func_str );
     467
     468/***************************************************************************************
      469 * This debug function displays the list of busylocks currently owned by a thread
      470 * identified by the DEBUG_BUSYLOCK_THREAD_XP parameter.
      471 * It is called each time the target thread acquires or releases a busylock
      472 * (local or remote). It is never called when DEBUG_BUSYLOCK_THREAD_XP == 0.
     473 ***************************************************************************************
     474 * @ lock_type  : type of acquired / released busylock.
     475 * @ is_acquire : change is an acquire when true / change is a release when false.
     476 **************************************************************************************/
     477void thread_display_busylocks( uint32_t lock_type,
     478                               bool_t   is_acquire );
     479
     480
    451481
    452482#endif  /* _THREAD_H_ */
Note: See TracChangeset for help on using the changeset viewer.