Changeset 564 for trunk


Ignore:
Timestamp:
Oct 4, 2018, 11:47:36 PM (3 years ago)
Author:
alain
Message:

Complete restructuring of kernel locks.

Location:
trunk/kernel/kern
Files:
9 deleted
19 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/chdev.c

    r545 r564  
    3737#include <devfs.h>
    3838
    39 
    40 extern chdev_directory_t    chdev_dir;   // allocated in kernel_init.c
     39//////////////////////////////////////////////////////////////////////////////////////
     40// Extern global variables
     41//////////////////////////////////////////////////////////////////////////////////////
     42
     43extern chdev_directory_t    chdev_dir;         // allocated in kernel_init.c
     44
    4145
    4246#if (DEBUG_SYS_READ & 1)
     
    5761char * chdev_func_str( uint32_t func_type )
    5862{
    59   switch ( func_type ) {
     63    switch ( func_type )
     64    {
    6065    case DEV_FUNC_RAM: return "RAM";
    6166    case DEV_FUNC_ROM: return "ROM";
     
    9196    if( chdev == NULL ) return NULL;
    9297
    93     // initialize waiting threads queue and associated lock
    94     remote_spinlock_init( XPTR( local_cxy , &chdev->wait_lock ) );
     98    // initialize lock
     99    remote_busylock_init( XPTR( local_cxy , &chdev->wait_lock ), LOCK_CHDEV_QUEUE );
     100
     101    // initialise waiting queue
    95102    xlist_root_init( XPTR( local_cxy , &chdev->wait_root ) );
    96103
     
    130137    core_t   * core_ptr;      // local pointer on core running the server thread
    131138    uint32_t   server_lid;    // core running the server thread local index
    132     xptr_t     lock_xp;       // extended pointer on lock protecting the chdev queue
     139    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev state
    133140    uint32_t   save_sr;       // for critical section
    134141
     
    147154    chdev_t * chdev_ptr = GET_PTR( chdev_xp );
    148155
     156// check calling thread can yield
     157assert( (this->busylocks == 0),
     158"cannot yield : busylocks = %d\n", this->busylocks );
     159
    149160    // get local and extended pointers on server thread
    150161    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
     
    155166
    156167    // get server core local index
    157     server_lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
     168    server_lid = hal_remote_l32( XPTR( chdev_cxy , &core_ptr->lid ) );
    158169
    159170#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
    160 bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
     171bool_t is_rx = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
    161172#endif
    162173   
     
    185196
    186197    // build extended pointer on lock protecting chdev waiting queue
    187     lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
     198    lock_xp            = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
    188199
    189200    // critical section for the following sequence:
    190     // (1) take the lock protecting waiting queue
     201    // (1) take the lock protecting the chdev state
    191202    // (2) block the client thread
    192203    // (3) unblock the server thread if required
     
    200211    hal_disable_irq( &save_sr );
    201212
    202     // take the lock protecting chdev waiting queue
    203     remote_spinlock_lock( lock_xp );
     213    // take the lock protecting chdev queue
     214    remote_busylock_acquire( lock_xp );
    204215
    205216    // block current thread
     
    217228
    218229    // unblock server thread if required
    219     if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
     230    if( hal_remote_l32( blocked_xp ) & THREAD_BLOCKED_IDLE )
    220231    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
    221232
     
    243254#endif
    244255 
    245     // send IPI to core running the server thread when server != client
     256    // send IPI to core running the server thread when server core != client core
    246257    if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) )
    247258    {
     
    262273    }
    263274 
    264     // release lock
    265     remote_spinlock_unlock( lock_xp );
     275    // release lock protecting chdev queue
     276    remote_busylock_release( lock_xp );
    266277
    267278    // deschedule
    268     assert( thread_can_yield() , "illegal sched_yield\n" );
    269279    sched_yield("blocked on I/O");
    270280
     
    308318    server = CURRENT_THREAD;
    309319
    310     // get root and lock on command queue
     320    // build extended pointer on root of client threads queue
    311321    root_xp = XPTR( local_cxy , &chdev->wait_root );
     322
     323    // build extended pointer on lock protecting client threads queue
    312324    lock_xp = XPTR( local_cxy , &chdev->wait_lock );
    313325
     
    316328    while( 1 )
    317329    {
     330
     331#if DEBUG_CHDEV_SERVER_RX
     332uint32_t rx_cycle = (uint32_t)hal_get_cycles();
     333if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
     334printk("\n[DBG] %s : dev_thread %x start RX / cycle %d\n",
     335__FUNCTION__ , server->trdid , rx_cycle );
     336#endif
     337
     338#if DEBUG_CHDEV_SERVER_TX
     339uint32_t tx_cycle = (uint32_t)hal_get_cycles();
     340if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
     341printk("\n[DBG] %s : dev_thread %x start TX / cycle %d\n",
     342__FUNCTION__ , server->trdid , tx_cycle );
     343#endif
     344
    318345        // get the lock protecting the waiting queue
    319         remote_spinlock_lock( lock_xp );
     346        remote_busylock_acquire( lock_xp );
    320347
    321348        // check waiting queue state
    322349        if( xlist_is_empty( root_xp ) ) // waiting queue empty
    323350        {
     351
     352#if DEBUG_CHDEV_SERVER_RX
     353rx_cycle = (uint32_t)hal_get_cycles();
     354if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
     355printk("\n[DBG] %s : dev_thread %x found RX queue empty => blocks / cycle %d\n",
     356__FUNCTION__ , server->trdid , rx_cycle );
     357#endif
     358
     359#if DEBUG_CHDEV_SERVER_TX
     360tx_cycle = (uint32_t)hal_get_cycles();
     361if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
     362printk("\n[DBG] %s : dev_thread %x found TX queue empty => blocks / cycle %d\n",
     363__FUNCTION__ , server->trdid , tx_cycle );
     364#endif
     365
    324366            // release lock
    325             remote_spinlock_unlock( lock_xp );
     367            remote_busylock_release( lock_xp );
    326368
    327369            // block
    328370            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );
    329371
     372// check server thread can yield
     373assert( (server->busylocks == 0),
     374"cannot yield : busylocks = %d\n", server->busylocks );
     375
    330376            // deschedule
    331             assert( thread_can_yield() , "illegal sched_yield\n" );
    332377            sched_yield("I/O queue empty");
    333378        }
     
    335380        {
    336381            // get extended pointer on first client thread
    337             client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
     382            client_xp = XLIST_FIRST( root_xp , thread_t , wait_list );
    338383
    339384            // get client thread cluster and local pointer
     
    345390
    346391            // release lock
    347             remote_spinlock_unlock( lock_xp );
     392            remote_busylock_release( lock_xp );
    348393
    349394#if DEBUG_CHDEV_SERVER_RX
    350 uint32_t rx_cycle = (uint32_t)hal_get_cycles();
     395rx_cycle = (uint32_t)hal_get_cycles();
    351396if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    352 printk("\n[DBG] %s : server_thread %x start RX / client %x / cycle %d\n",
    353 __FUNCTION__ , server , client_ptr , rx_cycle );
     397printk("\n[DBG] %s : dev_thread %x for RX found client thread %x in process %x / cycle %d\n",
     398__FUNCTION__, server->trdid ,client_ptr->trdid ,client_ptr->process->pid, rx_cycle );
    354399#endif
    355400
    356401#if DEBUG_CHDEV_SERVER_TX
    357 uint32_t tx_cycle = (uint32_t)hal_get_cycles();
     402tx_cycle = (uint32_t)hal_get_cycles();
    358403if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    359 printk("\n[DBG] %s : server_thread %x start TX / client %x / cycle %d\n",
    360 __FUNCTION__ , server , client_ptr , tx_cycle );
     404printk("\n[DBG] %s : dev_thread %x for TX found client thread %x in process %x / cycle %d\n",
     405__FUNCTION__, server->trdid ,client_ptr->trdid ,client_ptr->process->pid, tx_cycle );
    361406#endif
    362407
     
    378423rx_cycle = (uint32_t)hal_get_cycles();
    379424if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    380 printk("\n[DBG] %s : server_thread %x completes RX / client %x / cycle %d\n",
    381 __FUNCTION__ , server , client_ptr , rx_cycle );
     425printk("\n[DBG] %s : dev_thread %x completes RX for client %x in process %x / cycle %d\n",
     426__FUNCTION__, server->trdid, client_ptr->trdid, client_ptr->process->pid, rx_cycle );
    382427#endif
    383428
     
    385430tx_cycle = (uint32_t)hal_get_cycles();
    386431if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    387 printk("\n[DBG] %s : server_thread %x completes TX / client %x / cycle %d\n",
    388 __FUNCTION__ , server , client_ptr , tx_cycle );
     432printk("\n[DBG] %s : dev_thread %x completes TX for client %x in process %x / cycle %d\n",
     433__FUNCTION__, server->trdid, client_ptr->trdid, client_ptr->process->pid, tx_cycle );
    389434#endif
    390435
     
    419464
    420465    // get inode type from file descriptor
    421     inode_type = hal_remote_lw( XPTR( file_cxy , &file_ptr->type ) );
     466    inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
    422467    inode_ptr  = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
    423468
     
    432477}  // end chdev_from_file()
    433478
    434 ////////////////////////
     479//////////////////////////////
    435480void chdev_dir_display( void )
    436481{
     
    439484    chdev_t * ptr;
    440485    uint32_t  base;
    441     reg_t     save_sr;
    442486
    443487    // get pointers on TXT0 chdev
     
    446490    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    447491
    448     // get extended pointer on remote TXT0 chdev lock
     492    // get extended pointer on TXT0 lock
    449493    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    450494
    451     // get TXT0 lock in busy waiting mode
    452     remote_spinlock_lock_busy( lock_xp , &save_sr );
     495    // get TXT0 lock
     496    remote_busylock_acquire( lock_xp );
    453497
    454498    // header
     
    456500
    457501    // IOB
    458     if (chdev_dir.iob != NULL )
     502    if (chdev_dir.iob != XPTR_NULL )
    459503    {
    460504        cxy  = GET_CXY( chdev_dir.iob );
    461505        ptr  = GET_PTR( chdev_dir.iob );
    462         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     506        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    463507        nolock_printk("  - iob       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);
    464508    }
     
    467511    cxy  = GET_CXY( chdev_dir.pic );
    468512    ptr  = GET_PTR( chdev_dir.pic );
    469     base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     513    base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    470514    nolock_printk("  - pic       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);
    471515
     
    475519        cxy = GET_CXY( chdev_dir.txt_rx[i] );
    476520        ptr = GET_PTR( chdev_dir.txt_rx[i] );
    477         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     521        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    478522        nolock_printk("  - txt_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    479523
    480524        cxy = GET_CXY( chdev_dir.txt_tx[i] );
    481525        ptr = GET_PTR( chdev_dir.txt_tx[i] );
    482         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     526        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    483527        nolock_printk("  - txt_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    484528    }
     
    489533        cxy = GET_CXY( chdev_dir.ioc[i] );
    490534        ptr = GET_PTR( chdev_dir.ioc[i] );
    491         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     535        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    492536        nolock_printk("  - ioc[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    493537    }
     
    498542        cxy  = GET_CXY( chdev_dir.fbf[i] );
    499543        ptr  = GET_PTR( chdev_dir.fbf[i] );
    500         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     544        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    501545        nolock_printk("  - fbf[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    502546    }
     
    507551        cxy = GET_CXY( chdev_dir.nic_rx[i] );
    508552        ptr = GET_PTR( chdev_dir.nic_rx[i] );
    509         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     553        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    510554        nolock_printk("  - nic_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    511555
    512556        cxy = GET_CXY( chdev_dir.nic_tx[i] );
    513557        ptr = GET_PTR( chdev_dir.nic_tx[i] );
    514         base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
     558        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    515559        nolock_printk("  - nic_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    516560    }
    517561
    518562    // release lock
    519     remote_spinlock_unlock_busy( lock_xp , save_sr );
     563    remote_busylock_release( lock_xp );
    520564
    521565}  // end chdev_dir_display()
     
    546590    hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( chdev_cxy , chdev_ptr->name ) );
    547591
     592    // get pointers on TXT0 chdev
     593    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     594    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     595    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     596
     597    // get extended pointer on TXT0 lock
     598    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     599
     600    // get TXT0 lock
     601    remote_busylock_acquire( lock_xp );
     602
    548603    // check queue empty
    549604    if( xlist_is_empty( root_xp ) )
    550605    {
    551         printk("\n***** Waiting queue empty for chdev %s\n", name );
     606        nolock_printk("\n***** Waiting queue empty for chdev %s\n", name );
    552607    }
    553608    else
    554609    {
    555         printk("\n***** Waiting queue for chdev %s\n", name );
     610        nolock_printk("\n***** Waiting queue for chdev %s\n", name );
    556611
    557612        // scan the waiting queue
     
    561616            thread_cxy = GET_CXY( thread_xp );
    562617            thread_ptr = GET_PTR( thread_xp );
    563             trdid      = hal_remote_lw ( XPTR( thread_cxy , &thread_ptr->trdid   ) );
     618            trdid      = hal_remote_l32 ( XPTR( thread_cxy , &thread_ptr->trdid   ) );
    564619            process    = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
    565                         pid        = hal_remote_lw ( XPTR( thread_cxy , &process->pid        ) );
    566 
    567             printk("- thread %X / cluster %X / trdid %X / pid %X\n",
     620                        pid        = hal_remote_l32 ( XPTR( thread_cxy , &process->pid        ) );
     621
     622            nolock_printk("- thread %X / cluster %X / trdid %X / pid %X\n",
    568623            thread_ptr, thread_cxy, trdid, pid );
    569624        }
    570625    }
     626
     627    // release TXT0 lock
     628    remote_busylock_release( lock_xp );
     629
    571630}  // end chdev_queue_display()
    572631
  • trunk/kernel/kern/chdev.h

    r485 r564  
    2828#include <hal_kernel_types.h>
    2929#include <xlist.h>
    30 #include <remote_spinlock.h>
     30#include <remote_busylock.h>
    3131#include <dev_iob.h>
    3232#include <dev_ioc.h>
     
    4343 * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev
    4444 * descriptor for each channel (and for each RX/TX direction for the NIC and TXT devices).
    45  * Each chdev contains a waiting queue, registering the "client threads" requests,
     45 * Each chdev contains a trans-clusters waiting queue, registering the "client threads",
    4646 * and an associated "server thread", handling these requests.
    4747 * These descriptors are physically distributed on all clusters to minimize contention.
     
    116116 * of client threads is associated to each chdev descriptor (not for ICU, PIC, IOB).
    117117 * For each device type ***, the specific extension is defined in the "dev_***.h" file.
     118 *
     119 * NOTE : For most chdevs, the busylock is used to protect the waiting queue changes,
     120 *        when a thread register in this queue, or is removed after service.
     121 *        This busylock is also used to protect direct access to the kernel TXT0 terminal
     122 *        (without using the server thread).
    118123 *****************************************************************************************/
    119124
     
    136141    uint32_t             irq_id;      /*! associated IRQ index in local ICU              */
    137142
    138         remote_spinlock_t    wait_lock;   /*! lock protecting exclusive access to queue      */
    139     xlist_entry_t        wait_root;   /*! root of waiting threads queue                  */
     143        xlist_entry_t        wait_root;   /*! root of client threads waiting queue           */
     144    remote_busylock_t    wait_lock;   /*! lock protecting waiting queue                  */
    140145
    141146    union
  • trunk/kernel/kern/cluster.c

    r562 r564  
    2929#include <hal_special.h>
    3030#include <hal_ppm.h>
     31#include <hal_macros.h>
    3132#include <remote_fifo.h>
    3233#include <printk.h>
    3334#include <errno.h>
    34 #include <spinlock.h>
     35#include <queuelock.h>
    3536#include <core.h>
    3637#include <chdev.h>
     
    4546#include <process.h>
    4647#include <dqdt.h>
    47 #include <cluster_info.h>
    4848
    4949/////////////////////////////////////////////////////////////////////////////////////
     
    5151/////////////////////////////////////////////////////////////////////////////////////
    5252
    53 extern process_t           process_zero;     // allocated in kernel_init.c file
    54 extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
    55 
    56 ///////////////////////////////////////////////n
    57 error_t cluster_init( struct boot_info_s * info )
    58 {
    59     error_t         error;
    60     lpid_t          lpid;     // local process_index
    61     lid_t           lid;      // local core index
    62     uint32_t        i;        // index in loop on external peripherals
     53extern process_t           process_zero;     // allocated in kernel_init.c
     54extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
     55
     56
     57
     58///////////////////////////////////////////////////
     59void cluster_info_init( struct boot_info_s * info )
     60{
    6361    boot_device_t * dev;      // pointer on external peripheral
    6462    uint32_t        func;     // external peripheral functionnal type
     63    uint32_t        x;
     64    uint32_t        y;
     65    uint32_t        i;   
    6566
    6667        cluster_t * cluster = LOCAL_CLUSTER;
     
    7576
    7677    // initialize the cluster_info[][] array
    77     int x;
    78     int y;
    79     for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++) {
    80         for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++) {
     78    for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++)
     79    {
     80        for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++)
     81        {
    8182            cluster->cluster_info[x][y] = info->cluster_info[x][y];
    8283        }
    8384    }
     85
    8486    // initialize external peripherals channels
    8587    for( i = 0 ; i < info->ext_dev_nr ; i++ )
     
    9395    }
    9496
    95     // initialize cluster local parameters
    96         cluster->cores_nr        = info->cores_nr;
     97    // initialize number of cores
     98        cluster->cores_nr  = info->cores_nr;
     99
     100}  // end cluster_info_init()
     101
     102/////////////////////////////////////////////////////////
     103error_t cluster_manager_init( struct boot_info_s * info )
     104{
     105    error_t         error;
     106    lpid_t          lpid;     // local process_index
     107    lid_t           lid;      // local core index
     108
     109        cluster_t * cluster = LOCAL_CLUSTER;
    97110
    98111    // initialize the lock protecting the embedded kcm allocator
    99         spinlock_init( &cluster->kcm_lock );
     112        busylock_init( &cluster->kcm_lock , LOCK_CLUSTER_KCM );
    100113
    101114#if DEBUG_CLUSTER_INIT
    102115uint32_t cycle = (uint32_t)hal_get_cycles();
    103116if( DEBUG_CLUSTER_INIT < cycle )
    104 printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
    105 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     117printk("\n[DBG] %s : thread %x in process %x enters for cluster %x / cycle %d\n",
     118__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, local_cxy , cycle );
    106119#endif
    107120
    108121    // initialises DQDT
    109122    cluster->dqdt_root_level = dqdt_init( info->x_size,
    110                                           info->y_size,
    111                                           info->y_width ) - 1;
     123                                          info->y_size ) - 1;
     124
     125#if( DEBUG_CLUSTER_INIT & 1 )
     126cycle = (uint32_t)hal_get_cycles();
     127if( DEBUG_CLUSTER_INIT < cycle )
     128printk("\n[DBG] %s : DQDT initialized in cluster %x / cycle %d\n",
     129__FUNCTION__ , local_cxy , cycle );
     130#endif
    112131
    113132    // initialises embedded PPM
     
    166185        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    167186    {
    168             local_fifo_init( &cluster->rpc_fifo[lid] );
     187            remote_fifo_init( &cluster->rpc_fifo[lid] );
    169188        cluster->rpc_threads[lid] = 0;
    170189    }
     
    178197
    179198    // initialise pref_tbl[] in process manager
    180         spinlock_init( &cluster->pmgr.pref_lock );
     199        queuelock_init( &cluster->pmgr.pref_lock , LOCK_CLUSTER_PREFTBL );
    181200    cluster->pmgr.pref_nr = 0;
    182201    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
     
    187206
    188207    // initialise local_list in process manager
    189         remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    190208    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    191209    cluster->pmgr.local_nr = 0;
     210        remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) ,
     211                           LOCK_CLUSTER_LOCALS );
    192212
    193213    // initialise copies_lists in process manager
    194214    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    195215    {
    196             remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
    197216        cluster->pmgr.copies_nr[lpid] = 0;
    198217        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
     218            remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ),
     219                               LOCK_CLUSTER_COPIES );
    199220    }
    200221
     
    202223cycle = (uint32_t)hal_get_cycles();
    203224if( DEBUG_CLUSTER_INIT < cycle )
    204 printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
    205 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     225printk("\n[DBG] %s : thread %x in process %x exit for cluster %x / cycle %d\n",
     226__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid , local_cxy , cycle );
    206227#endif
    207228
     
    209230
    210231        return 0;
    211 } // end cluster_init()
    212 
    213 /////////////////////////////////
     232} // end cluster_manager_init()
     233
     234///////////////////////////////////
    214235cxy_t cluster_random_select( void )
    215236{
    216     uint32_t  x_size;
    217     uint32_t  y_size;
    218     uint32_t  y_width;
    219237    uint32_t  index;
    220     uint32_t  x;
     238    uint32_t  x;   
    221239    uint32_t  y;
    222 
    223     do {
    224         x_size     = LOCAL_CLUSTER->x_size;
    225         y_size     = LOCAL_CLUSTER->y_size;
    226         y_width   = LOCAL_CLUSTER->y_width;
     240    cxy_t     cxy;
     241
     242    uint32_t  x_size    = LOCAL_CLUSTER->x_size;
     243    uint32_t  y_size    = LOCAL_CLUSTER->y_size;
     244
     245    do
     246    {
    227247        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
    228248        x         = index / y_size;
    229249        y         = index % y_size;
    230     } while ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[x][y] ) == 0 );
    231 
    232     return (x<<y_width) + y;
     250        cxy       = HAL_CXY_FROM_XY( x , y );
     251    }
     252    while ( cluster_is_active( cxy ) == false );
     253
     254    return ( cxy );
    233255}
    234256
     
    236258bool_t cluster_is_undefined( cxy_t cxy )
    237259{
    238     cluster_t * cluster = LOCAL_CLUSTER;
    239 
    240     uint32_t y_width = cluster->y_width;
    241 
    242     uint32_t x = cxy >> y_width;
    243     uint32_t y = cxy & ((1<<y_width)-1);
    244 
    245     if( x >= cluster->x_size ) return true;
    246     if( y >= cluster->y_size ) return true;
     260    uint32_t  x_size = LOCAL_CLUSTER->x_size;
     261    uint32_t  y_size = LOCAL_CLUSTER->y_size;
     262
     263    uint32_t  x      = HAL_X_FROM_CXY( cxy );
     264    uint32_t  y      = HAL_Y_FROM_CXY( cxy );
     265
     266    if( x >= x_size ) return true;
     267    if( y >= y_size ) return true;
    247268
    248269    return false;
     270}
     271
     272//////////////////////////////////////
     273bool_t cluster_is_active ( cxy_t cxy )
     274{
     275    uint32_t x = HAL_X_FROM_CXY( cxy );
     276    uint32_t y = HAL_Y_FROM_CXY( cxy );
     277
     278    return ( LOCAL_CLUSTER->cluster_info[x][y] != 0 );
    249279}
    250280
     
    304334
    305335    // take the lock protecting the list of processes
    306     remote_spinlock_lock( lock_xp );
     336    remote_queuelock_acquire( lock_xp );
    307337
    308338    // scan list of processes
     
    320350
    321351    // release the lock protecting the list of processes
    322     remote_spinlock_unlock( lock_xp );
     352    remote_queuelock_release( lock_xp );
    323353
    324354    // return extended pointer on process descriptor in owner cluster
     
    350380
    351381    // take the lock protecting the list of processes
    352     remote_spinlock_lock( lock_xp );
     382    remote_queuelock_acquire( lock_xp );
    353383
    354384    // scan list of processes in owner cluster
     
    358388        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
    359389        current_ptr = GET_PTR( current_xp );
    360         current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
     390        current_pid = hal_remote_l32( XPTR( owner_cxy , &current_ptr->pid ) );
    361391
    362392        if( current_pid == pid )
     
    368398
    369399    // release the lock protecting the list of processes
    370     remote_spinlock_unlock( lock_xp );
     400    remote_queuelock_release( lock_xp );
    371401
    372402    // return extended pointer on process descriptor in owner cluster
     
    397427    else                              // use a remote_lwd to access owner cluster
    398428    {
    399         ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
     429        ref_xp = (xptr_t)hal_remote_l64( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    400430    }
    401431
     
    419449    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
    420450
    421     // get the process manager lock
    422     spinlock_lock( &pm->pref_lock );
     451    // get the lock protecting pref_tbl
     452    queuelock_acquire( &pm->pref_lock );
    423453
    424454    // search an empty slot
     
    443473
    444474        // release the processs_manager lock
    445         spinlock_unlock( &pm->pref_lock );
     475        queuelock_release( &pm->pref_lock );
    446476
    447477        return 0;
     
    449479    else
    450480    {
    451         // release the processs_manager lock
    452         spinlock_unlock( &pm->pref_lock );
    453 
    454         return -1;
     481        // release the lock
     482        queuelock_release( &pm->pref_lock );
     483
     484        return 0xFFFFFFFF;
    455485    }
    456486
     
    488518    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
    489519
    490     // get the process manager lock
    491     spinlock_lock( &pm->pref_lock );
     520    // get the lock protecting pref_tbl
     521    queuelock_acquire( &pm->pref_lock );
    492522
    493523    // remove process from pref_tbl[]
     
    496526
    497527    // release the processs_manager lock
    498     spinlock_unlock( &pm->pref_lock );
     528    queuelock_release( &pm->pref_lock );
    499529
    500530#if DEBUG_CLUSTER_PID_RELEASE
     
    538568void cluster_process_local_link( process_t * process )
    539569{
    540     reg_t    save_sr;
    541 
    542570    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    543571
     
    546574    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
    547575
    548     // get lock protecting the process manager local list
    549     remote_spinlock_lock_busy( lock_xp , &save_sr );
     576    // get lock protecting the local list
     577    remote_queuelock_acquire( lock_xp );
    550578
    551579    // register process in local list
     
    553581    pm->local_nr++;
    554582
    555     // release lock protecting the process manager local list
    556     remote_spinlock_unlock_busy( lock_xp , save_sr );
     583    // release lock protecting the local list
     584    remote_queuelock_release( lock_xp );
    557585}
    558586
     
    560588void cluster_process_local_unlink( process_t * process )
    561589{
    562     reg_t save_sr;
    563 
    564590    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    565591
     
    567593    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
    568594
    569     // get lock protecting the process manager local list
    570     remote_spinlock_lock_busy( lock_xp , &save_sr );
     595    // get lock protecting the local list
     596    remote_queuelock_acquire( lock_xp );
    571597
    572598    // remove process from local list
     
    574600    pm->local_nr--;
    575601
    576     // release lock protecting the process manager local list
    577     remote_spinlock_unlock_busy( lock_xp , save_sr );
     602    // release lock protecting the local list
     603    remote_queuelock_release( lock_xp );
    578604}
    579605
     
    581607void cluster_process_copies_link( process_t * process )
    582608{
    583     reg_t    irq_state;
    584609    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    585610
     
    606631
    607632    // get lock protecting copies_list[lpid]
    608     remote_spinlock_lock_busy( copies_lock , &irq_state );
     633    remote_queuelock_acquire( copies_lock );
    609634
    610635    // add copy to copies_list
     
    613638
    614639    // release lock protecting copies_list[lpid]
    615     remote_spinlock_unlock_busy( copies_lock , irq_state );
     640    remote_queuelock_release( copies_lock );
    616641
    617642#if DEBUG_CLUSTER_PROCESS_COPIES
     
    627652void cluster_process_copies_unlink( process_t * process )
    628653{
    629     uint32_t irq_state;
    630654    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    631655
     
    649673
    650674    // get lock protecting copies_list[lpid]
    651     remote_spinlock_lock_busy( copies_lock , &irq_state );
     675    remote_queuelock_acquire( copies_lock );
    652676
    653677    // remove copy from copies_list
     
    656680
    657681    // release lock protecting copies_list[lpid]
    658     remote_spinlock_unlock_busy( copies_lock , irq_state );
     682    remote_queuelock_release( copies_lock );
    659683
    660684#if DEBUG_CLUSTER_PROCESS_COPIES
     
    678702    xptr_t        txt0_xp;
    679703    xptr_t        txt0_lock_xp;
    680     reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode     
    681704
    682705    assert( (cluster_is_undefined( cxy ) == false),
     
    696719
    697720    // get lock on local process list
    698     remote_spinlock_lock( lock_xp );
    699 
    700     // get TXT0 lock in busy waiting mode
    701     remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
     721    remote_queuelock_acquire( lock_xp );
     722
     723    // get TXT0 lock
     724    remote_busylock_acquire( txt0_lock_xp );
    702725     
    703726    // display header
     
    712735    }
    713736
    714     // release TXT0 lock in busy waiting mode
    715     remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
     737    // release TXT0 lock
     738    remote_busylock_release( txt0_lock_xp );
    716739
    717740    // release lock on local process list
    718     remote_spinlock_unlock( lock_xp );
     741    remote_queuelock_release( lock_xp );
    719742
    720743}  // end cluster_processes_display()
  • trunk/kernel/kern/cluster.h

    r562 r564  
    3030#include <hal_kernel_types.h>
    3131#include <bits.h>
    32 #include <spinlock.h>
    33 #include <readlock.h>
    34 #include <remote_barrier.h>
     32#include <queuelock.h>
     33#include <remote_queuelock.h>
    3534#include <list.h>
    3635#include <xlist.h>
     
    6867 * 2) The local_root is the root of the local list of all process descriptors in cluster K.
    6968 *    A process descriptor P is present in K, as soon as P has a thread in cluster K.
     69 *    We use an xlist, because this list can be traversed by remote threads.
    7070 *
    7171 * 3) The copies_root[] array is indexed by lpid. There is one entry per owned process,
    7272 *    and each entry contains the root of the xlist of copies for this process.
     73 *    We use an xlist, because process copies are distributed in all clusters.
    7374 ******************************************************************************************/
    7475
    7576typedef struct process_manager_s
    7677{
    77         xptr_t            pref_tbl[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! reference  process   */
    78         spinlock_t        pref_lock;              /*! lock protecting lpid allocation/release */
    79     uint32_t          pref_nr;                /*! number of processes owned by cluster    */
    80 
    81     xlist_entry_t     local_root;             /*! root of list of process in cluster      */
    82     remote_spinlock_t local_lock;             /*! lock protecting access to local list    */
    83     uint32_t          local_nr;               /*! number of process in cluster            */
    84 
    85     xlist_entry_t     copies_root[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! roots of lists    */
    86     remote_spinlock_t copies_lock[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! one lock per list */
    87     uint32_t          copies_nr[CONFIG_MAX_PROCESS_PER_CLUSTER];    /*! number of copies  */
     78        xptr_t             pref_tbl[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! owned  processes    */
     79        queuelock_t        pref_lock;              /*! lock protecting pref_tbl              */
     80    uint32_t           pref_nr;                /*! number of processes owned by cluster   */
     81
     82    xlist_entry_t      local_root;            /*! root of list of process in cluster      */
     83    remote_queuelock_t local_lock;            /*! lock protecting local list              */
     84    uint32_t           local_nr;              /*! number of process in cluster            */
     85
     86    xlist_entry_t      copies_root[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! roots of lists   */
     87    remote_queuelock_t copies_lock[CONFIG_MAX_PROCESS_PER_CLUSTER];  /*! one  per list    */
      88    uint32_t           copies_nr[CONFIG_MAX_PROCESS_PER_CLUSTER];    /*! number of copies */
    8889}
    8990pmgr_t;
     
    9798typedef struct cluster_s
    9899{
    99         spinlock_t      kcm_lock;          /*! local, protect creation of KCM allocators      */
    100100
    101101    // global parameters
    102         uint32_t        paddr_width;       /*! numer of bits in physical address              */
      102    uint32_t        paddr_width;       /*! number of bits in physical address             */
    103103    uint32_t        x_width;           /*! number of bits to code x_size  (can be 0)      */
    104104    uint32_t        y_width;           /*! number of bits to code y_size  (can be 0)      */
    105         uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
    106         uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
    107     uint32_t        cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y];
    108         cxy_t           io_cxy;            /*! io cluster identifier                          */
     105    uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
     106    uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
     107    cxy_t           io_cxy;            /*! io cluster identifier                          */
    109108    uint32_t        dqdt_root_level;   /*! index of root node in dqdt_tbl[]               */
    110109    uint32_t        nb_txt_channels;   /*! number of TXT channels                         */
     
    113112    uint32_t        nb_fbf_channels;   /*! number of FBF channels                         */
    114113
     114    char            cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y];
     115
    115116    // local parameters
    116         uint32_t        cores_nr;          /*! actual number of cores in cluster              */
     117    uint32_t        cores_nr;          /*! actual number of cores in cluster              */
    117118    uint32_t        ram_size;          /*! physical memory size                           */
    118119    uint32_t        ram_base;          /*! physical memory base (local address)           */
     
    120121        core_t          core_tbl[CONFIG_MAX_LOCAL_CORES];    /*! embedded cores               */
    121122
    122         list_entry_t    dev_root;          /*! root of list of devices in cluster             */
     123    list_entry_t    dev_root;          /*! root of list of devices in cluster             */
    123124
    124125    // memory allocators
    125         ppm_t           ppm;               /*! embedded kernel page manager                   */
    126         khm_t           khm;               /*! embedded kernel heap manager                   */
    127         kcm_t           kcm;               /*! embedded kernel KCMs manager                   */
     126    ppm_t           ppm;               /*! embedded kernel page manager                   */
     127    khm_t           khm;               /*! embedded kernel heap manager                   */
     128    kcm_t           kcm;               /*! embedded kernel KCMs manager                   */
    128129
    129130    kcm_t         * kcm_tbl[KMEM_TYPES_NR];              /*! pointers on allocated KCMs   */
     131    busylock_t      kcm_lock;                            /*! protect kcm_tbl[] updates    */
    130132
    131133    // RPC
    132         remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core        */
     134    remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core        */
    133135    uint32_t        rpc_threads[CONFIG_MAX_LOCAL_CORES]; /*! RPC threads per core         */
    134136
    135137    // DQDT
    136         dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];     /*! embedded DQDT nodes          */
     138    dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];     /*! embedded DQDT nodes          */
    137139
    138140    // Local process manager
     
    158160
    159161/******************************************************************************************
    160  * This generic function initialises the local cluster manager from information found
    161  * in the local boot-info structure. It initializes the following local resources:
    162  * - the global platform parameters,
    163  * - the specific cluster parameters,
    164  * - the lock protecting KCM creation,
    165  * - the local DQDT nodes,
    166  * - the PPM, KHM, and KCM allocators,
    167  * - the local core descriptors,
    168  * - the local RPC FIFO,
    169  * - the process manager.
    170  * It does NOT initialise the local device descriptors.
     162 * These two functions initialise the local cluster manager from information found
     163 * in the local boot-info structure <info> build by the boot-loader.
     164 * 1) the cluster_info_init() function is called first, to initialize the structural
     165 *    constants, and cannot use the TXT0 kernel terminal.
     166 * 2) the cluster_manager_init() function initialize various complex structures:
     167 *    - the local DQDT nodes,
     168 *    - the PPM, KHM, and KCM allocators,
     169 *    - the local core descriptors,
     170 *    - the local RPC FIFO,
     171 *    - the process manager.
     172 *    It does NOT initialise the local device descriptors.
     173 *    It can use the TXT0 kernel terminal.
    171174 ******************************************************************************************
    172175 * @ info : pointer on the local boot_info_t structure build by the bootloader.
    173176 *****************************************************************************************/
    174 error_t cluster_init( boot_info_t * info );
    175 
    176 /******************************************************************************************
    177  * This function randomly selects a cluster.
    178  ******************************************************************************************
    179  * @ returns the selected cluster identifier.
    180  *****************************************************************************************/
    181 cxy_t cluster_random_select( void );
     177void    cluster_info_init( boot_info_t * info );
     178error_t cluster_manager_init( boot_info_t * info );
    182179
    183180/******************************************************************************************
     
    189186bool_t cluster_is_undefined( cxy_t cxy );
    190187
    191 
    192 /*****************************************************************************************/
    193 /***************   Process Management Operations   ***************************************/
    194 /*****************************************************************************************/
     188/******************************************************************************************
     189 * This function uses the local cluster_info[][] array in cluster descriptor,
     190 * and returns true when the cluster identified by the <cxy> argument is active.
     191 ******************************************************************************************
     192 * @ cxy   : cluster identifier.
     193 * @ return true if cluster contains a kernel instance.
     194 *****************************************************************************************/
     195bool_t cluster_is_active( cxy_t  cxy );
     196
     197/******************************************************************************************
     198 * This function (pseudo) randomly selects a valid cluster.
     199 * It is called by the vfs_cluster_lookup() function to place a new (missing) inode.
     200 * It is called by the vmm_page_allocate() function to place a distributed vseg page.
     201 ******************************************************************************************
     202 * @ returns the selected cluster identifier.
     203 *****************************************************************************************/
     204cxy_t cluster_random_select( void );
    195205
    196206/******************************************************************************************
     
    290300void cluster_process_copies_unlink( struct process_s * process );
    291301
    292 /*********************************************************************************************
     302/******************************************************************************************
    293303 * This function displays on the kernel terminal TXT0 all user processes registered
    294304 * in the cluster defined by the <cxy> argument.
    295305 * It can be called by a thread running in any cluster, because it uses remote accesses
    296306 * to scan the xlist of registered processes.
    297  *********************************************************************************************
     307 ******************************************************************************************
    298308 * @ cxy   : cluster identifier.
    299  ********************************************************************************************/
     309 *****************************************************************************************/
    300310void cluster_processes_display( cxy_t cxy );
    301311
    302 
    303 
    304 /*****************************************************************************************/
    305 /***************   Cores Management Operations   *****************************************/
    306 /*****************************************************************************************/
    307 
    308 /******************************************************************************************
    309  * This function returns the core local index that has the lowest usage in local cluster.
     312/******************************************************************************************
      313 * This function returns the core local index that has the lowest usage in the local cluster.
    310314 *****************************************************************************************/
    311315lid_t cluster_select_local_core( void );
    312316
     317             
    313318#endif  /* _CLUSTER_H_ */
     319
  • trunk/kernel/kern/core.c

    r457 r564  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017)
     5 *         Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/kern/core.h

    r457 r564  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner (2016,2017)
     5 *          Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/kern/dqdt.c

    r562 r564  
    4040extern chdev_directory_t  chdev_dir;  // defined in chdev.h / allocated in kernel_init.c
    4141
    42 
     42/*
    4343///////////////////////////////////////////////////////////////////////////////////////////
    4444// This static recursive function traverse the DQDT quad-tree from root to bottom.
     
    6565    }
    6666}
    67 
    68 ///////////////////
     67*/
     68
     69/////////////////////////
    6970void dqdt_display( void )
    7071{
    71     /*
    72     reg_t   save_sr;
    73 
     72    return;
     73
     74/*
    7475    // build extended pointer on DQDT root node
    7576        cluster_t * cluster = LOCAL_CLUSTER;
     
    8283    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    8384
    84     // get extended pointer on remote TXT0 chdev lock
     85    // get extended pointer on remote TXT0 lock
    8586    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    8687
    87     // get TXT0 lock in busy waiting mode
    88     remote_spinlock_lock_busy( lock_xp , &save_sr );
     88    // get TXT0 lock
     89    remote_busylock_acquire( lock_xp );
    8990
    9091    // print header
     
    9596
    9697    // release lock
    97     remote_spinlock_unlock_busy( lock_xp , save_sr );
    98     */
     98    remote_busylock_release( lock_xp );
     99*/
     100
    99101}
    100102
    101103////////////////////////////////////
    102104uint32_t dqdt_init( uint32_t x_size,
    103                     uint32_t y_size,
    104                     uint32_t y_width )
     105                    uint32_t y_size )
    105106{
    106107    assert( ((x_size <= 32) && (y_size <= 32)) , "illegal mesh size\n");
    107 
     108 
     109    // compute level_max
     110    uint32_t  x_size_ext = POW2_ROUNDUP( x_size );
     111    uint32_t  y_size_ext = POW2_ROUNDUP( y_size );
     112    uint32_t  size_ext   = MAX(x_size_ext , y_size_ext);
     113    uint32_t  level_max  = (bits_log2(size_ext * size_ext) >> 1) + 1;
     114
     115return level_max;
     116
     117/*
    108118        dqdt_node_t * node;
    109119    cxy_t         p_cxy;         // cluster coordinates for parent node
     
    114124    cluster_t   * cluster;       // pointer on local cluster
    115125
    116     cluster = LOCAL_CLUSTER;
    117 
    118     // compute level_max
    119     uint32_t  x_size_ext = POW2_ROUNDUP( x_size );
    120     uint32_t  y_size_ext = POW2_ROUNDUP( y_size );
    121     uint32_t  size_ext   = MAX(x_size_ext , y_size_ext);
    122     uint32_t  level_max  = (bits_log2(size_ext * size_ext) >> 1) + 1;
    123 
    124     return level_max;
    125 
    126     /*
     126    cluster_t   * cluster = LOCAL_CLUSTER;
     127
    127128    // get cluster coordinates
    128     uint32_t    x       = local_cxy >> y_width;
    129     uint32_t    y       = local_cxy & ((1<<y_width)-1);
     129    uint32_t    x       = HAL_X_FROM_CXY( local_cxy );
     130    uint32_t    y       = HAL_Y_FROM_CXY( local_cxy );
    130131
    131132    // loop on local dqdt nodes (at most one node per level)
     
    154155        {
    155156            // set parent extended pointer
    156             p_cxy = ((x & ~pmask)<<y_width) + (y & ~pmask);
     157            p_cxy = HAL_CXY_FROM_XY( (x & ~pmask) , (y & ~pmask) );
    157158            node->parent = XPTR( p_cxy , &cluster->dqdt_tbl[level+1] );
    158159
     
    168169            if ( (level > 0) && ((y + (1<<(level-1))) < y_size) )
    169170            {
    170                 c_cxy = local_cxy + (1<<(level-1));
     171                c_cxy = local_cxy + HAL_CXY_FROM_XY( 0 , (1<<(level-1) );
    171172                node->children[1] = XPTR( c_cxy , &cluster->dqdt_tbl[level-1] );
    172173                node->arity++;
     
    176177            if ( (level > 0) && ((x + (1<<(level-1))) < x_size) )
    177178            {
    178                 c_cxy = local_cxy + ((1<<(level-1))<<y_width);
     179                c_cxy = local_cxy + HAL_CXY_FROM_XY( (1<<(level-1)) , 0 );
    179180                node->children[2] = XPTR( c_cxy , &cluster->dqdt_tbl[level-1]);
    180181                node->arity++;
     
    186187                 ((y + (1<<(level-1))) < y_size) )
    187188            {
    188                 c_cxy = local_cxy + ((1<<(level-1))<<y_width) + (1<<(level-1));
     189                c_cxy = local_cxy + HAL_CXY_FROM_XY( (1<<(level-1)) , (1<<(level-1) );
    189190                node->children[3] = XPTR( c_cxy , &cluster->dqdt_tbl[level-1]);
    190191                node->arity++;
     
    194195
    195196    return level_max;
    196     */
     197*/
    197198
    198199} // end dqdt_init()
    199200
     201/*
    200202///////////////////////////////////////////////////////////////////////////
    201203// This recursive function is called by the dqdt_update_threads() function.
     
    216218
    217219    // get extended pointer on parent node
    218     xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
     220    xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
    219221
    220222    // propagate if required
    221223    if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
    222224}
    223 
     225*/
     226
     227/*
    224228///////////////////////////////////////////////////////////////////////////
    225229// This recursive function is called by the dqdt_update_pages() function.
     
    240244
    241245    // get extended pointer on parent node
    242     xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
     246    xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
    243247
    244248    // propagate if required
    245249    if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent, increment );
    246250}
     251*/
    247252
    248253/////////////////////////////////////////////
    249 void dqdt_update_threads( int32_t increment )
    250 {
    251     return;
    252     /*
     254void dqdt_update_threads( int32_t increment __attribute__ ((__unused__)) )
     255{
     256
     257return;
     258
     259/*
    253260        cluster_t   * cluster = LOCAL_CLUSTER;
    254261    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
     
    259266    // propagate to DQDT upper levels
    260267    if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment );
    261     */
     268*/
     269
    262270}
    263271
    264272///////////////////////////////////////////
    265 void dqdt_update_pages( int32_t increment )
    266 {
    267     return;
    268     /*
     273void dqdt_update_pages( int32_t increment  __attribute__ ((__unused__)) )
     274{
     275
     276return;
     277
     278/*
    269279        cluster_t   * cluster = LOCAL_CLUSTER;
    270280    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
     
    275285    // propagate to DQDT upper levels
    276286    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment );
    277     */
    278 }
    279 
    280 
     287*/
     288
     289}
     290
     291/*
    281292////////////////////////////////////////////////////////////////////////////////
    282293// This recursive function is called by both the dqdt_get_cluster_for_process()
     
    313324            cxy  = (cxy_t)GET_CXY( child );
    314325            ptr  = (dqdt_node_t *)GET_PTR( child );
    315             if( for_memory ) load = hal_remote_lw( XPTR( cxy , &ptr->pages ) );
    316             else             load = hal_remote_lw( XPTR( cxy , &ptr->threads ) );
     326            if( for_memory ) load = hal_remote_l32( XPTR( cxy , &ptr->pages ) );
     327            else             load = hal_remote_l32( XPTR( cxy , &ptr->threads ) );
    317328            if( load < load_min )
    318329            {
     
    326337    return dqdt_select_cluster( node_copy.children[select], for_memory );
    327338}
    328 
    329 ////////////////////////////////////
     339*/
     340
     341//////////////////////////////////////////
    330342cxy_t dqdt_get_cluster_for_process( void )
    331343{
    332     return cluster_random_select();
    333     /*
     344
     345return cluster_random_select();
     346
     347/*
    334348    // build extended pointer on DQDT root node
    335349        cluster_t * cluster = LOCAL_CLUSTER;
     
    339353    // call recursive function
    340354    return dqdt_select_cluster( root_xp , false );
    341     */
    342 }
    343 
    344 ////////////////////////////////////
     355*/
     356
     357}
     358
     359/////////////////////////////////////////
    345360cxy_t dqdt_get_cluster_for_memory( void )
    346361{
    347     return cluster_random_select();
    348     /*
     362
     363return cluster_random_select();
     364 
     365/*
    349366    // build extended pointer on DQDT root node
    350367        cluster_t * cluster = LOCAL_CLUSTER;
     
    354371    // call recursive function
    355372    return dqdt_select_cluster( root_xp , true );
    356     */
    357 }
    358 
     373*/
     374
     375}
     376
  • trunk/kernel/kern/dqdt.h

    r485 r564  
    3737 *   quad-tree covering this one-dimensionnal vector. If the number of clusters
    3838 *   is not a power of 4, the tree is truncated as required.
     39 *
    3940 *   TODO : the mapping for the one dimensionnal topology is not implemented yet [AG].
    4041 *
     
    5556 *   . Level 4 nodes exist when both X and Y coordinates are multiple of 16
    5657 *   . Level 5 nodes exist when both X and Y coordinates are multiple of 32
     58 *
     59 *   TODO : the cluster_info[x][y] array is not taken into account [AG].
    5760 ***************************************************************************************/
    5861
     
    8588 * @ x_size   : number of clusters (containing memory and CPUs) in a row
    8689 * @ y_size   : number of clusters (containing memory and CPUs) in a column
    87  * @ y_width  : number of LSB used to code the Y value in CXY
    8890 * @ return the number of levels in quad-tree.
    8991 ***************************************************************************************/
    9092uint32_t dqdt_init( uint32_t x_size,
    91                     uint32_t y_size,
    92                     uint32_t y_width );
     93                    uint32_t y_size );
    9394
    9495/****************************************************************************************
  • trunk/kernel/kern/kernel_init.c

    r561 r564  
    2424
    2525#include <kernel_config.h>
    26 #include <hard_config.h> // for the USE_TXT_XXX macros
    2726#include <errno.h>
    2827#include <hal_kernel_types.h>
     
    3029#include <hal_context.h>
    3130#include <hal_irqmask.h>
     31#include <hal_macros.h>
    3232#include <hal_ppm.h>
    3333#include <barrier.h>
    34 #include <remote_barrier.h>
     34#include <xbarrier.h>
    3535#include <remote_fifo.h>
    3636#include <core.h>
     
    5959#include <devfs.h>
    6060#include <mapper.h>
    61 #include <cluster_info.h>
    6261
    6362///////////////////////////////////////////////////////////////////////////////////////////
     
    8685cluster_t            cluster_manager                         CONFIG_CACHE_LINE_ALIGNED;
    8786
    88 // This variable defines the TXT0 kernel terminal (TX only)
     87// This variable defines the TXT_TX[0] chdev
    8988__attribute__((section(".kdata")))
    90 chdev_t              txt0_chdev                              CONFIG_CACHE_LINE_ALIGNED;
    91 
    92 // This variable defines the TXT0 lock for writing characters to MTY0
     89chdev_t              txt0_tx_chdev                           CONFIG_CACHE_LINE_ALIGNED;
     90
     91// This variable defines the TXT_RX[0] chdev
    9392__attribute__((section(".kdata")))
    94 spinlock_t           txt0_lock                               CONFIG_CACHE_LINE_ALIGNED;
     93chdev_t              txt0_rx_chdev                           CONFIG_CACHE_LINE_ALIGNED;
    9594
    9695// This variables define the kernel process0 descriptor
     
    116115// This variable is used for CP0 cores synchronisation in kernel_init()
    117116__attribute__((section(".kdata")))
    118 remote_barrier_t     global_barrier                          CONFIG_CACHE_LINE_ALIGNED;
     117xbarrier_t           global_barrier                          CONFIG_CACHE_LINE_ALIGNED;
    119118
    120119// This variable is used for local cores synchronisation in kernel_init()
     
    127126
    128127// kernel_init is the entry point defined in hal/tsar_mips32/kernel.ld
    129 // It will be used by the bootloader.
     128// It is used by the bootloader.
    130129extern void kernel_init( boot_info_t * info );
    131130
    132 // these debug variables are used to analyse the sys_read() syscall timing
     131// This array is used for debug, and describes the kernel locks usage,
     132// It must be kept consistent with the defines in kernel_config.h file.
     133char * lock_type_str[] =
     134{
     135    "unused_0",              //  0
     136
     137    "CLUSTER_KCM",           //  1
     138    "PPM_FREE",              //  2
     139    "SCHED_STATE",           //  3
     140    "VMM_STACK",             //  4
     141    "VMM_MMAP",              //  5
     142    "VFS_CTX",               //  6
     143    "KCM_STATE",             //  7
     144    "KHM_STATE",             //  8
     145    "HTAB_STATE",            //  9
     146
     147    "THREAD_JOIN",           // 10
     148    "VFS_MAIN",              // 11
     149    "CHDEV_QUEUE",           // 12
     150    "CHDEV_TXT0",            // 13
     151    "CHDEV_TXTLIST",         // 14
     152    "PAGE_STATE",            // 15
     153    "MUTEX_STATE",           // 16
     154    "CONDVAR_STATE",         // 17
     155    "SEM_STATE",             // 18
     156    "XHTAB_STATE",           // 19
     157
     158    "unused_20",             // 20
     159
     160    "CLUSTER_PREFTBL",       // 21
     161    "PPM_DIRTY",             // 22
     162
     163    "CLUSTER_LOCALS",        // 23
     164    "CLUSTER_COPIES",        // 24
     165    "PROCESS_CHILDREN",      // 25
     166    "PROCESS_USERSYNC",      // 26
     167    "PROCESS_FDARRAY",       // 27
     168
     169    "MAPPER_STATE",          // 28
     170    "PROCESS_THTBL",         // 29
     171
     172    "PROCESS_CWD",           // 30
     173    "VFS_INODE",             // 31
     174    "VFS_FILE",              // 32
     175    "VMM_VSL",               // 33
     176};       
     177
     178// these debug variables are used to analyse the sys_read() and sys_write() syscalls timing
    133179
    134180#if DEBUG_SYS_READ
     
    179225uint32_t   exit_tty_isr_write;
    180226#endif
     227
      228// instrumentation variables : cumulated costs per syscall type in cluster
     229uint32_t   syscalls_cumul_cost[SYSCALLS_NR];
     230
      231// instrumentation variables : number of syscalls per syscall type in cluster
     232uint32_t   syscalls_occurences[SYSCALLS_NR];
    181233
    182234///////////////////////////////////////////////////////////////////////////////////////////
     
    201253
    202254///////////////////////////////////////////////////////////////////////////////////////////
    203 // This function initializes the TXT0 chdev descriptor, that is the "kernel terminal",
    204 // shared by all kernel instances for debug messages.
    205 // It is a global variable (replicated in all clusters), because this terminal is used
    206 // before the kmem allocator initialisation, but only the instance in cluster containing
    207 // the calling core is registered in the "chdev_dir" directory.
     255// This function initializes the TXT_TX[0] and TXT_RX[0] chdev descriptors, implementing
     256// the "kernel terminal", shared by all kernel instances for debug messages.
     257// These chdev are implemented as global variables (replicated in all clusters),
     258// because this terminal is used before the kmem allocator initialisation, but only
     259// the chdevs in cluster 0 are registered in the "chdev_dir" directory.
    208260// As this TXT0 chdev supports only the TXT_SYNC_WRITE command, we don't create
    209261// a server thread, we don't allocate a WTI, and we don't initialize the waiting queue.
     262// Note: The TXT_RX[0] chdev is created, but is not used by ALMOS-MKH (september 2018).
    210263///////////////////////////////////////////////////////////////////////////////////////////
    211264// @ info    : pointer on the local boot-info structure.
    212265///////////////////////////////////////////////////////////////////////////////////////////
    213 static void txt0_device_init( boot_info_t * info )
     266static void __attribute__ ((noinline)) txt0_device_init( boot_info_t * info )
    214267{
    215268    boot_device_t * dev_tbl;         // pointer on array of devices in boot_info
     
    237290        if (func == DEV_FUNC_TXT )
    238291        {
    239             assert( (channels > 0) , "number of TXT channels cannot be 0\n");
    240 
    241             // initializes TXT_TX[0] chdev
    242             txt0_chdev.func    = func;
    243             txt0_chdev.impl    = impl;
    244             txt0_chdev.channel = 0;
    245             txt0_chdev.base    = base;
    246             txt0_chdev.is_rx   = false;
    247 
    248             // initializes lock
    249             remote_spinlock_init( XPTR( local_cxy , &txt0_chdev.wait_lock ) );
     292            // initialize TXT_TX[0] chdev
     293            txt0_tx_chdev.func    = func;
     294            txt0_tx_chdev.impl    = impl;
     295            txt0_tx_chdev.channel = 0;
     296            txt0_tx_chdev.base    = base;
     297            txt0_tx_chdev.is_rx   = false;
     298            remote_busylock_init( XPTR( local_cxy , &txt0_tx_chdev.wait_lock ),
     299                                  LOCK_CHDEV_TXT0 );
    250300           
    251             // TXT specific initialisation:
    252             // no server thread & no IRQ routing for channel 0
    253             dev_txt_init( &txt0_chdev );                 
    254 
    255             // register the TXT0 in all chdev_dir[x][y] structures
     301            // initialize TXT_RX[0] chdev
     302            txt0_rx_chdev.func    = func;
     303            txt0_rx_chdev.impl    = impl;
     304            txt0_rx_chdev.channel = 0;
     305            txt0_rx_chdev.base    = base;
     306            txt0_rx_chdev.is_rx   = true;
     307            remote_busylock_init( XPTR( local_cxy , &txt0_rx_chdev.wait_lock ),
     308                                  LOCK_CHDEV_TXT0 );
     309           
     310            // make TXT specific initialisations
     311            dev_txt_init( &txt0_tx_chdev );                 
     312            dev_txt_init( &txt0_rx_chdev );
     313
     314            // register TXT_TX[0] & TXT_RX[0] in chdev_dir[x][y]
     315            // for all valid clusters             
    256316            for( x = 0 ; x < info->x_size ; x++ )
    257317            {
    258                 for( y = 0 ; y < info->y_size; y++ ) // [FIXME]
     318                for( y = 0 ; y < info->y_size ; y++ )
    259319                {
    260                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    261                         cxy_t  cxy = (x<<info->y_width) + y;
    262                         hal_remote_swd( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
    263                                         XPTR( local_cxy , &txt0_chdev ) );
     320                    cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     321
     322                    if( cluster_is_active( cxy ) )
     323                    {
     324                        hal_remote_s64( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
     325                                        XPTR( local_cxy , &txt0_tx_chdev ) );
     326                        hal_remote_s64( XPTR( cxy , &chdev_dir.txt_rx[0] ) ,
     327                                        XPTR( local_cxy , &txt0_rx_chdev ) );
    264328                    }
    265329                }
    266330            }
     331
     332            hal_fence();
    267333        }
    268334        } // end loop on devices
    269335}  // end txt0_device_init()
    270 
    271 ///////////////////////////////////////////////////////////////////////////////////////////
    272 // This function is the same as txt0_device_init() but uses the internal multi_tty device
    273 // attached to cluster (0,0) instead of the external tty_tsar.
    274 // This function is used instead of txt0_device_init() only for TSAR LETI.
    275 ///////////////////////////////////////////////////////////////////////////////////////////
    276 // @ info    : pointer on the local boot-info structure.
    277 ///////////////////////////////////////////////////////////////////////////////////////////
    278 static void mtty0_device_init( boot_info_t * info)
    279 {
    280     boot_device_t * dev_tbl;         // pointer on array of devices in boot_info
    281     uint32_t        dev_nr;          // actual number of devices in this cluster
    282     xptr_t          base;            // remote pointer on segment base
    283     uint32_t        func;            // device functional index
    284     uint32_t        impl;            // device implementation index
    285     uint32_t        i;               // device index in dev_tbl
    286     uint32_t        x;               // X cluster coordinate
    287     uint32_t        y;               // Y cluster coordinate
    288 
    289     dev_nr = info->int_dev_nr;
    290     dev_tbl = info->int_dev;
    291 
    292     // Initialize spinlock for writing to MTY0
    293     spinlock_init(&txt0_lock);
    294    
    295     // Loop on internal peripherals of cluster (0,0) to find MTY0
    296     for ( i = 0; i < dev_nr; i++ )
    297     {
    298         base = dev_tbl[i].base;
    299         func = FUNC_FROM_TYPE( dev_tbl[i].type );
    300         impl = IMPL_FROM_TYPE( dev_tbl[i].type );
    301 
    302         if ( func == DEV_FUNC_TXT )
    303         {
    304             txt0_chdev.func     = func;
    305             txt0_chdev.impl     = impl;
    306             txt0_chdev.channel  = 0;
    307             txt0_chdev.base     = base;
    308             txt0_chdev.is_rx    = false;
    309 
    310             // Initialize MTY0 chdev lock
    311             remote_spinlock_init( XPTR( local_cxy, &txt0_chdev.wait_lock ) );
    312 
    313             // MTY specific initialization
    314             dev_txt_init( &txt0_chdev );
    315 
    316             // register the MTY in all chdev_dir[x][y] structures
    317             for( x = 0 ; x < info->x_size ; x++ )
    318             {
    319                 for( y = 0 ; y < info->y_size; y++ ) // [FIXME]
    320                 {
    321                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    322                         cxy_t  cxy = (x<<info->y_width) + y;
    323                         hal_remote_swd( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
    324                                         XPTR( local_cxy , &txt0_chdev ) );
    325                     }
    326                 }
    327             }
    328         }
    329     } // end loop on internal devices
    330 } // end mty0_device_init()
    331336
    332337///////////////////////////////////////////////////////////////////////////////////////////
     
    338343// @ info    : pointer on the local boot-info structure.
    339344///////////////////////////////////////////////////////////////////////////////////////////
    340 static void internal_devices_init( boot_info_t * info )
     345static void __attribute__ ((noinline)) internal_devices_init( boot_info_t * info )
    341346{
    342347    boot_device_t * dev_tbl;         // pointer on array of internaldevices in boot_info
     
    367372        if( func == DEV_FUNC_MMC ) 
    368373        {
    369             assert( (channels == 1) , "MMC device must be single channel\n" );
     374
     375            // check channels
     376            if( channels != 1 )
     377            printk("\n[PANIC] in %s : MMC device must be single channel\n", __FUNCTION__ );
    370378
    371379            // create chdev in local cluster
     
    376384                                      base );
    377385
    378             assert( (chdev_ptr != NULL) ,
    379                     "cannot allocate memory for MMC chdev\n" );
     386            // check memory
     387            if( chdev_ptr == NULL )
     388            printk("\n[PANIC] in %s : cannot create MMC chdev\n", __FUNCTION__ );
    380389           
    381390            // make MMC specific initialisation
     
    385394            for( x = 0 ; x < info->x_size ; x++ )
    386395            {
    387                 for( y = 0 ; y < info->y_size; y++ ) // [FIXME]
     396                for( y = 0 ; y < info->y_size ; y++ )
    388397                {
    389                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    390                         cxy_t  cxy = (x<<info->y_width) + y;
    391                         hal_remote_swd( XPTR( cxy , &chdev_dir.mmc[local_cxy] ),
     398                    cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     399
     400                    if( cluster_is_active( cxy ) )
     401                    {
     402                        hal_remote_s64( XPTR( cxy , &chdev_dir.mmc[local_cxy] ),
    392403                                        XPTR( local_cxy , chdev_ptr ) );
    393404                    }
     
    414425                                          base );
    415426
    416                 assert( (chdev_ptr != NULL) , "cannot allocate memory for DMA chdev" );
    417 
     427                // check memory
     428                if( chdev_ptr == NULL )
     429                printk("\n[PANIC] in %s : cannot create DMA chdev\n", __FUNCTION__ );
     430           
    418431                // make DMA specific initialisation
    419432                dev_dma_init( chdev_ptr );     
     
    430443            }
    431444        }
    432 
    433         ///////////////////////////////
    434         else if ( func == DEV_FUNC_TXT && USE_TXT_MTY == 1 )
    435         {
    436             assert(impl == IMPL_TXT_MTY,
    437                 "Internal TTYs should have MTY implementation\n");
    438 
    439             for ( channel = 0; channel < channels; channel++ )
    440             {
    441                 int rx;
    442                 for ( rx = 0; rx <= 1; rx++ )
    443                 {
    444                     // skip MTY0_TX since it has already been initialized
    445                     if ( channel == 0 && rx == 0 ) continue;
    446 
    447                     // create chdev in local cluster
    448                     chdev_ptr = chdev_create( func,
    449                                               impl,
    450                                               channel,
    451                                               rx,
    452                                               base );
    453 
    454                     assert( (chdev_ptr != NULL) ,
    455                         "cannot allocate memory for MTY chdev" );
    456 
    457                     // make MTY specific initialization
    458                     dev_txt_init( chdev_ptr );
    459 
    460                     // set the MTY fields in all clusters
    461                     xptr_t *chdev_entry;
    462                     if ( rx == 1 ) {
    463                         chdev_entry = &chdev_dir.txt_rx[channel];
    464                     } else {
    465                         chdev_entry = &chdev_dir.txt_tx[channel];
    466                     }
    467                     for ( x = 0; x < info->x_size; x++ )
    468                     {
    469                         for ( y = 0; y < info->y_size; y++ )
    470                         {
    471                             if (cluster_info_is_active(info->cluster_info[x][y])) {
    472                                 cxy_t cxy = (x<<info->y_width) + y;
    473                                 hal_remote_swd( XPTR( cxy, chdev_entry ),
    474                                                 XPTR( local_cxy, chdev_ptr ) );
    475                             }
    476                         }
    477                     }
    478 #if( DEBUG_KERNEL_INIT & 0x1 )
    479 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    480 printk("\n[DBG] %s : created MTY[%d] in cluster %x / chdev = %x\n",
    481 __FUNCTION__ , channel , local_cxy , chdev_ptr );
    482 #endif
    483                 }
    484             }
    485         }
    486 
    487         ///////////////////////////////
    488         else if ( func == DEV_FUNC_IOC )
    489         {
    490             assert(impl == IMPL_IOC_SPI, __FUNCTION__,
    491                 "Internal IOC should have SPI implementation\n");
    492 
    493             for ( channel = 0; channel < channels; channel++ )
    494             {
    495                 // create chdev in local cluster
    496                 chdev_ptr = chdev_create( func,
    497                                           impl,
    498                                           channel,
    499                                           0,
    500                                           base );
    501 
    502                 assert( (chdev_ptr != NULL) , __FUNCTION__ ,
    503                     "cannot allocate memory for IOC chdev" );
    504                
    505                 // make IOC specific initialization
    506                 dev_ioc_init( chdev_ptr );
    507 
    508                 // set the IOC fields in all clusters
    509                 xptr_t *chdev_entry = &chdev_dir.ioc[channel];
    510                 for ( x = 0; x < info->x_size; x++ )
    511                 {
    512                     for ( y = 0; y < info->y_size; y++ )
    513                     {
    514                         if (cluster_info_is_active(info->cluster_info[x][y])) {
    515                             cxy_t cxy = (x<<info->y_width) + y;
    516                             hal_remote_swd( XPTR( cxy, chdev_entry ),
    517                                             XPTR( local_cxy, chdev_ptr ) );
    518                         }
    519                     }
    520     }
    521 #if( DEBUG_KERNEL_INIT & 0x1 )
    522 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    523 printk("\n[DBG] %s : created IOC[%d] in cluster %x / chdev = %x\n",
    524 __FUNCTION__ , channel , local_cxy , chdev_ptr );
    525 #endif
    526             }
    527         }
    528 
    529445    }
    530446}  // end internal_devices_init()
     
    586502
    587503        // check PIC device initialized
    588         assert( (chdev_dir.pic != XPTR_NULL ) ,
    589               "PIC device must be initialized before other devices\n" );
     504        if( chdev_dir.pic == XPTR_NULL )
     505        printk("\n[PANIC] in %s : PIC device must be initialized first\n", __FUNCTION__ );
    590506
    591507        // check external device functionnal type
    592         assert( ( (func == DEV_FUNC_IOB) ||
    593                   (func == DEV_FUNC_IOC) ||
    594                   (func == DEV_FUNC_TXT) ||
    595                   (func == DEV_FUNC_NIC) ||
    596                   (func == DEV_FUNC_FBF) ) ,
    597                   "undefined external peripheral type\n" );
     508        if( (func != DEV_FUNC_IOB) && (func != DEV_FUNC_IOC) && (func != DEV_FUNC_TXT) &&
     509            (func != DEV_FUNC_NIC) && (func != DEV_FUNC_FBF) )
     510        printk("\n[PANIC] in %s : undefined peripheral type\n", __FUNCTION__ );
    598511
    599512        // loops on channels
     
    603516            for( rx = 0 ; rx < directions ; rx++ )
    604517            {
    605                 // skip TXT_TX[0] chdev that has already been created & registered
    606                 if( USE_TXT_MTY == 0 && (func == DEV_FUNC_TXT) && (channel == 0) && (rx == 0) )
     518                // skip TXT0 that has already been initialized
     519                if( (func == DEV_FUNC_TXT) && (channel == 0) ) continue;
     520
     521                // all kernel instances compute the target cluster for all chdevs,
     522                // computing the global index ext_chdev_gid[func,channel,direction]
     523                cxy_t target_cxy;
     524                while( 1 )
    607525                {
    608                     continue;
     526                    uint32_t offset     = ext_chdev_gid % ( info->x_size * info->y_size );
     527                    uint32_t x          = offset / info->y_size;
     528                    uint32_t y          = offset % info->y_size;
     529
     530                    target_cxy = HAL_CXY_FROM_XY( x , y );
     531
     532                    // exit loop if target cluster is active
     533                    if( cluster_is_active( target_cxy ) ) break;
     534               
     535                    // increment global index otherwise
     536                    ext_chdev_gid++;
    609537                }
    610538
    611                 // skip TXT chdevs because they are initialized in internal_devices_init()
    612                 if ( USE_TXT_MTY == 1 && func == DEV_FUNC_TXT )
    613                 {
    614                     continue;
    615                 }
    616 
    617                 if ( func == DEV_FUNC_IOC && impl == IMPL_IOC_SPI )
    618                 {
    619                     continue;
    620                 }
    621 
    622                 // compute target cluster for chdev[func,channel,direction]
    623                 uint32_t offset;
    624                 uint32_t cx;
    625                 uint32_t cy;
    626                 uint32_t target_cxy;
    627                 while (1) {
    628                     offset     = ext_chdev_gid % ( info->x_size * (info->y_size) );
    629                     cx         = offset / (info->y_size);
    630                     cy         = offset % (info->y_size);
    631                     target_cxy = (cx<<info->y_width) + cy;
    632                     // ext_chdev_gid that results in empty target clusters are skipped
    633                     if ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[cx][cy] ) == 0 ) {
    634                         ext_chdev_gid++;
    635                     } else { // The ext_chdev_gid resulted in a full target cluster
    636                         break;
    637                     }
    638                 }
    639539                // allocate and initialize a local chdev
    640540                // when local cluster matches target cluster
     
    647547                                          base );
    648548
    649                     assert( (chdev != NULL),
    650                             "cannot allocate external device" );
     549                    if( chdev == NULL )
     550                    printk("\n[PANIC] in %s : cannot allocate chdev for external device\n",
     551                    __FUNCTION__ );
    651552
    652553                    // make device type specific initialisation
     
    672573                    for( x = 0 ; x < info->x_size ; x++ )
    673574                    {
    674                         for ( y = 0; y < info->y_size; y++ )
     575                        for( y = 0 ; y < info->y_size ; y++ )
    675576                        {
    676                             if (cluster_info_is_active(info->cluster_info[x][y])) {
    677                                 cxy_t  cxy = (x<<info->y_width) + y;
    678                                 hal_remote_swd( XPTR( cxy , entry ),
     577                            cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     578
     579                            if( cluster_is_active( cxy ) )
     580                            {
     581                                hal_remote_s64( XPTR( cxy , entry ),
    679582                                                XPTR( local_cxy , chdev ) );
    680583                            }
     
    706609// @ info    : pointer on the local boot-info structure.
    707610///////////////////////////////////////////////////////////////////////////////////////////
    708 static void iopic_init( boot_info_t * info )
     611static void __attribute__ ((noinline)) iopic_init( boot_info_t * info )
    709612{
    710613    boot_device_t * dev_tbl;         // pointer on boot_info external devices array
     
    723626    dev_tbl     = info->ext_dev;
    724627
     628    // avoid GCC warning
     629    base        = XPTR_NULL;
     630    impl        = 0;
     631
    725632    // loop on external peripherals to get the IOPIC 
    726633        for( i = 0 , found = false ; i < dev_nr ; i++ )
     
    737644    }
    738645
    739     assert( found , "PIC device not found\n" );
     646    // check PIC existence
     647    if( found == false )
     648    printk("\n[PANIC] in %s : PIC device not found\n", __FUNCTION__ );
    740649
    741650    // allocate and initialize the PIC chdev in cluster 0
     
    746655                          base );
    747656
    748     assert( (chdev != NULL), "no memory for PIC chdev\n" );
     657    // check memory
     658    if( chdev == NULL )
     659    printk("\n[PANIC] in %s : no memory for PIC chdev\n", __FUNCTION__ );
    749660
    750661    // make PIC device type specific initialisation
     
    757668    for( x = 0 ; x < info->x_size ; x++ )
    758669    {
    759         for ( y = 0; y < info->y_size; y++ )
     670        for( y = 0 ; y < info->y_size ; y++ )
    760671        {
    761             if (cluster_info_is_active(info->cluster_info[x][y])) {
    762                 cxy_t  cxy = (x<<info->y_width) + y;
    763                 hal_remote_swd( XPTR( cxy , entry ) ,
     672            cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     673
     674            if( cluster_is_active( cxy ) )
     675            {
     676                hal_remote_s64( XPTR( cxy , entry ) ,
    764677                                XPTR( local_cxy , chdev ) );
    765678            }
     
    773686    for( x = 0 ; x < info->x_size ; x++ )
    774687    {
    775         for ( y = 0; y < info->y_size; y++ )
     688        for( y = 0 ; y < info->y_size ; y++ )
    776689        {
    777             if (cluster_info_is_active(info->cluster_info[x][y])) {
    778                 cxy_t  cxy = (x<<info->y_width) + y;
    779                 hal_remote_memset( XPTR( cxy , &iopic_input ) , 0xFF , sizeof(iopic_input_t) );
     690            cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     691
     692            if( cluster_is_active( cxy ) )
     693            {
     694                hal_remote_memset( XPTR( cxy , &iopic_input ),
     695                                   0xFF , sizeof(iopic_input_t) );
    780696            }
    781697        }
     
    807723            else if((func == DEV_FUNC_NIC) && (is_rx != 0)) ptr = &iopic_input.nic_rx[channel];
    808724            else if( func == DEV_FUNC_IOB )                 ptr = &iopic_input.iob;
    809             else     assert( false , "illegal source device for IOPIC input" );
     725            else     printk("\n[PANIC] in %s : illegal source device for IOPIC input" );
    810726
    811727            // set one entry in all "iopic_input" structures
    812728            for( x = 0 ; x < info->x_size ; x++ )
    813729            {
    814                 for ( y = 0; y < info->y_size; y++ )
     730                for( y = 0 ; y < info->y_size ; y++ )
    815731                {
    816                     if (cluster_info_is_active(info->cluster_info[x][y])) {
    817                         cxy_t  cxy = (x<<info->y_width) + y;
    818                         hal_remote_swd( XPTR( cxy , ptr ) , id );
     732                    cxy_t cxy = HAL_CXY_FROM_XY( x , y );
     733
     734                    if( cluster_is_active( cxy ) )
     735                    {
     736                        hal_remote_s64( XPTR( cxy , ptr ) , id );
    819737                    }
    820738                }
     
    824742
    825743#if( DEBUG_KERNEL_INIT & 0x1 )
    826 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
     744if( hal_tim_stamp() > DEBUG_KERNEL_INIT )
    827745{
    828746    printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n",
     
    843761// @ info    : pointer on the local boot-info structure.
    844762///////////////////////////////////////////////////////////////////////////////////////////
    845 static void lapic_init( boot_info_t * info )
     763static void __attribute__ ((noinline)) lapic_init( boot_info_t * info )
    846764{
    847765    boot_device_t * dev_tbl;      // pointer on boot_info internal devices array
     
    896814                if     ( func == DEV_FUNC_MMC ) lapic_input.mmc = id;
    897815                else if( func == DEV_FUNC_DMA ) lapic_input.dma[channel] = id;
    898                 else if( func == DEV_FUNC_TXT ) lapic_input.mtty = id;
    899                 else if( func == DEV_FUNC_IOC ) lapic_input.sdcard = id;
    900                 else assert( false , "illegal source device for LAPIC input" );
     816                else     printk("\n[PANIC] in %s : illegal source device for LAPIC input" );
    901817            }
    902818        }
     
    913829// @ return 0 if success / return EINVAL if not found.
    914830///////////////////////////////////////////////////////////////////////////////////////////
    915 static error_t get_core_identifiers( boot_info_t * info,
    916                                      lid_t       * lid,
    917                                      cxy_t       * cxy,
    918                                      gid_t       * gid )
     831static error_t __attribute__ ((noinline)) get_core_identifiers( boot_info_t * info,
     832                                                                lid_t       * lid,
     833                                                                cxy_t       * cxy,
     834                                                                gid_t       * gid )
    919835{
    920836    uint32_t   i;
     
    989905    thread->core = &LOCAL_CLUSTER->core_tbl[core_lid];
    990906
    991     // each core initializes the idle thread lists of locks
    992     list_root_init( &thread->locks_root );
    993     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
    994     thread->local_locks = 0;
    995     thread->remote_locks = 0;
    996 
    997     // CP0 in cluster 0 initializes TXT0 chdev descriptor
    998     if( core_cxy == 0 && core_lid == 0 ) // [MODIF]
    999     {
    1000         if( USE_TXT_MTY == 1 ) {
    1001             mtty0_device_init( info );
    1002         } else {
    1003             txt0_device_init( info );
    1004         }
    1005     }
    1006 
    1007     /////////////////////////////////////////////////////////////////////////////////
    1008     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1009                                         cluster_info_nb_actives(info->cluster_info) );
     907    // each core initializes the idle thread locks counters
     908    thread->busylocks = 0;
     909
     910#if DEBUG_BUSYLOCK
     911    // each core initialise the idle thread list of busylocks
     912    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
     913#endif
     914
     915    // CP0 initializes cluster info
     916    if( core_lid == 0 ) cluster_info_init( info );
     917
     918    // CP0 in cluster 0 initialises TXT0 chdev descriptor
     919    if( (core_lid == 0) && (core_cxy == 0) ) txt0_device_init( info );
     920
     921    /////////////////////////////////////////////////////////////////////////////////
     922    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     923                                        (info->x_size * info->y_size) );
    1010924    barrier_wait( &local_barrier , info->cores_nr );
    1011925    /////////////////////////////////////////////////////////////////////////////////
    1012926
    1013927#if DEBUG_KERNEL_INIT
    1014 if( (core_lid ==  0) & (local_cxy == 0) )
    1015 printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
    1016 __FUNCTION__, (uint32_t)hal_get_cycles() );
     928// if( (core_lid ==  0) & (local_cxy == 0) )
     929printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / sr %x / cycle %d\n",
     930__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    1017931#endif
    1018932
     
    1025939    // all cores check identifiers
    1026940    if( error )
    1027     {
    1028         assert( false ,
    1029         "illegal core identifiers gid = %x / cxy = %x / lid = %d",
    1030         core_lid , core_cxy , core_lid );
    1031     }
    1032 
    1033     // CP0 initializes cluster manager
     941    printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d",
     942    __FUNCTION__, core_lid, core_cxy, core_lid );
     943
     944    // CP0 initializes cluster manager complex structures
    1034945    if( core_lid == 0 )
    1035946    {
    1036         error = cluster_init( info );
     947        error = cluster_manager_init( info );
    1037948
    1038949        if( error )
    1039         {
    1040             assert( false ,
    1041             "cannot initialise cluster %x", local_cxy );
    1042         }
     950        printk("\n[PANIC] in %s : cannot initialize cluster manager in cluster %x\n",
     951        __FUNCTION__, local_cxy );
    1043952    }
    1044953
    1045954    /////////////////////////////////////////////////////////////////////////////////
    1046     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1047                                         cluster_info_nb_actives(info->cluster_info) );
     955    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     956                                        (info->x_size * info->y_size) );
    1048957    barrier_wait( &local_barrier , info->cores_nr );
    1049958    /////////////////////////////////////////////////////////////////////////////////
     
    1051960#if DEBUG_KERNEL_INIT
    1052961if( (core_lid ==  0) & (local_cxy == 0) )
    1053 printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
    1054 __FUNCTION__, (uint32_t)hal_get_cycles() );
     962printk("\n[DBG] %s : exit barrier 1 : clusters initialised / sr %x / cycle %d\n",
     963__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    1055964#endif
    1056965
     
    1071980   
    1072981    ////////////////////////////////////////////////////////////////////////////////
    1073     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1074                                         cluster_info_nb_actives(info->cluster_info) );
     982    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     983                                        (info->x_size * info->y_size) );
    1075984    barrier_wait( &local_barrier , info->cores_nr );
    1076985    ////////////////////////////////////////////////////////////////////////////////
     
    1078987#if DEBUG_KERNEL_INIT
    1079988if( (core_lid ==  0) & (local_cxy == 0) )
    1080 printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
    1081 __FUNCTION__, (uint32_t)hal_get_cycles() );
     989printk("\n[DBG] %s : exit barrier 2 : PIC initialised / sr %x / cycle %d\n",
     990__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    1082991#endif
    1083992
     
    11041013
    11051014    /////////////////////////////////////////////////////////////////////////////////
    1106     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1107                                         cluster_info_nb_actives(info->cluster_info) );
     1015    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1016                                        (info->x_size * info->y_size) );
    11081017    barrier_wait( &local_barrier , info->cores_nr );
    11091018    /////////////////////////////////////////////////////////////////////////////////
     
    11111020#if DEBUG_KERNEL_INIT
    11121021if( (core_lid ==  0) & (local_cxy == 0) )
    1113 printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
    1114 __FUNCTION__, (uint32_t)hal_get_cycles() );
     1022printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / sr %x / cycle %d\n",
     1023__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    11151024#endif
    11161025
     
    11271036    /////////////////////////////////////////////////////////////////////////////////
    11281037
    1129     // All cores enable the shared IPI channel
     1038    // All cores enable IPI
    11301039    dev_pic_enable_ipi();
    11311040    hal_enable_irq( &status );
    1132 
    1133 #if DEBUG_KERNEL_INIT
    1134 printk("\n[DBG] %s: IPI enabled for core %d cluster %d\n", __FUNCTION__,
    1135   core_lid, local_cxy);
    1136 #endif
    11371041
    11381042    // all cores initialize the idle thread descriptor
     
    11631067            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    11641068
    1165             assert( (fatfs_ctx != NULL) ,
    1166                     "cannot create FATFS context in cluster 0\n" );
     1069            if( fatfs_ctx == NULL )
     1070            printk("\n[PANIC] in %s : cannot create FATFS context in cluster 0\n",
     1071            __FUNCTION__ );
    11671072
    11681073            // 2. access boot device to initialize FATFS context
     
    11751080            uint32_t total_clusters   = fatfs_ctx->fat_sectors_count << 7;
    11761081 
    1177             // 4. create VFS root inode in cluster 0
     1082            // 4. initialize the FATFS entry in the vfs_context[] array
     1083            vfs_ctx_init( FS_TYPE_FATFS,                               // fs type
     1084                          0,                                           // attributes: unused
     1085                              total_clusters,               
     1086                              cluster_size,
     1087                              vfs_root_inode_xp,                           // VFS root
     1088                          fatfs_ctx );                                 // extend
     1089
     1090            // 5. create VFS root inode in cluster 0
    11781091            error = vfs_inode_create( XPTR_NULL,                           // dentry_xp
    11791092                                      FS_TYPE_FATFS,                       // fs_type
     
    11851098                                      0,                                   // gid
    11861099                                      &vfs_root_inode_xp );                // return
    1187 
    1188             assert( (error == 0) ,
    1189                     "cannot create VFS root inode\n" );
    1190 
    1191             // 5. initialize VFS context for FAT in cluster 0
    1192             vfs_ctx_init( FS_TYPE_FATFS,                 // file system type
    1193                           0,                             // attributes
    1194                               total_clusters,               
    1195                               cluster_size,
    1196                               vfs_root_inode_xp,             // VFS root
    1197                           fatfs_ctx );                   // extend
    1198 
    1199             // 6. check initialisation
     1100            if( error )
     1101            printk("\n[PANIC] in %s : cannot create VFS root inode in cluster 0\n",
     1102            __FUNCTION__ );
     1103
     1104            // 6. update the FATFS entry in vfs_context[] array
     1105            fs_context[FS_TYPE_FATFS].vfs_root_xp = vfs_root_inode_xp;
     1106
     1107            // 7. check FATFS initialization
    12001108            vfs_ctx_t   * vfs_ctx = &fs_context[FS_TYPE_FATFS];
    1201             assert( (((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster == 8),
    1202              "illegal value for FATFS context in cluster %x\n", local_cxy );
     1109
     1110            if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 )
     1111            printk("\n[PANIC] in %s : illegal FATFS context in cluster 0\n",
     1112            __FUNCTION__ );
    12031113        }
    12041114        else
    12051115        {
    1206             assert( false ,
    1207             "root FS must be FATFS" );
     1116            printk("\n[PANIC] in %s : unsupported VFS type in cluster 0\n",
     1117            __FUNCTION__ );
    12081118        }
    12091119
     
    12141124
    12151125    /////////////////////////////////////////////////////////////////////////////////
    1216     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1217                                         cluster_info_nb_actives(info->cluster_info) );
     1126    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1127                                        (info->x_size * info->y_size) );
    12181128    barrier_wait( &local_barrier , info->cores_nr );
    12191129    /////////////////////////////////////////////////////////////////////////////////
     
    12211131#if DEBUG_KERNEL_INIT
    12221132if( (core_lid ==  0) & (local_cxy == 0) )
    1223 printk("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
    1224 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
     1133printk("\n[DBG] %s : exit barrier 4 : VFS root initialized in cluster 0 / sr %x / cycle %d\n",
     1134__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    12251135#endif
    12261136
     
    12411151            fatfs_ctx_t * local_fatfs_ctx = fatfs_ctx_alloc();
    12421152
    1243             assert( (local_fatfs_ctx != NULL) ,
    1244             "cannot create FATFS context in cluster %x\n", local_cxy );
     1153            // check memory
     1154            if( local_fatfs_ctx == NULL )
     1155            printk("\n[PANIC] in %s : cannot create FATFS context in cluster %x\n",
     1156            __FUNCTION__ , local_cxy );
    12451157
    12461158            // 2. get local pointer on VFS context for FATFS
     
    12611173            vfs_ctx->extend = local_fatfs_ctx;
    12621174
    1263             // 7. check initialisation
    1264             assert( (((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster == 8),
    1265             "illegal value for FATFS context in cluster %x\n", local_cxy );
     1175            if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 )
     1176            printk("\n[PANIC] in %s : illegal FATFS context in cluster %x\n",
     1177            __FUNCTION__ , local_cxy );
    12661178        }
    12671179
    12681180        // get extended pointer on VFS root inode from cluster 0
    1269         vfs_root_inode_xp = hal_remote_lwd( XPTR( 0 , &process_zero.vfs_root_xp ) );
     1181        vfs_root_inode_xp = hal_remote_l64( XPTR( 0 , &process_zero.vfs_root_xp ) );
    12701182
    12711183        // update local process_zero descriptor
     
    12751187
    12761188    /////////////////////////////////////////////////////////////////////////////////
    1277     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1278                                         cluster_info_nb_actives(info->cluster_info) );
     1189    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1190                                        (info->x_size * info->y_size) );
    12791191    barrier_wait( &local_barrier , info->cores_nr );
    12801192    /////////////////////////////////////////////////////////////////////////////////
    12811193
    12821194#if DEBUG_KERNEL_INIT
    1283 if( (core_lid ==  0) & (local_cxy == 0) )
    1284 printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster 0 / cycle %d\n",
    1285 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
    1286 #endif
    1287 
    1288     /////////////////////////////////////////////////////////////////////////////////
    1289     // STEP 6 : CP0 in cluster IO makes the global DEVFS tree initialisation:
    1290     //          It creates the DEVFS directory "dev", and the DEVFS "external"
    1291     //          directory in cluster IO and mount these inodes into VFS.
    1292     /////////////////////////////////////////////////////////////////////////////////
    1293 
    1294     if( (core_lid ==  0) && (local_cxy == 0) )  // [FIXME]
     1195if( (core_lid ==  0) & (local_cxy == 1) )
     1196printk("\n[DBG] %s : exit barrier 5 : VFS root initialized in cluster 1 / sr %x / cycle %d\n",
     1197__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
     1198#endif
     1199
     1200    /////////////////////////////////////////////////////////////////////////////////
     1201    // STEP 6 : CP0 in cluster 0 makes the global DEVFS tree initialisation:
     1202    //          It initializes the DEVFS context, and creates the DEVFS
     1203    //          "dev" and "external" inodes in cluster 0.
     1204    /////////////////////////////////////////////////////////////////////////////////
     1205
     1206    if( (core_lid ==  0) && (local_cxy == 0) )
    12951207    {
    1296         // create "dev" and "external" directories.
     1208        // 1. allocate memory for DEVFS context extension in cluster 0
     1209        devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();
     1210
     1211        if( devfs_ctx == NULL )
     1212        printk("\n[PANIC] in %s : cannot create DEVFS context in cluster 0\n",
     1213        __FUNCTION__ , local_cxy );
     1214
     1215        // 2. initialize the DEVFS entry in the vfs_context[] array
     1216        vfs_ctx_init( FS_TYPE_DEVFS,                                // fs type
     1217                      0,                                            // attributes: unused
     1218                          0,                                            // total_clusters: unused
     1219                          0,                                            // cluster_size: unused
     1220                          vfs_root_inode_xp,                            // VFS root
     1221                      devfs_ctx );                                  // extend
     1222
     1223        // 3. create "dev" and "external" inodes (directories)
    12971224        devfs_global_init( process_zero.vfs_root_xp,
    12981225                           &devfs_dev_inode_xp,
    12991226                           &devfs_external_inode_xp );
    13001227
    1301         // creates the DEVFS context in cluster IO
    1302         devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();
    1303 
    1304         assert( (devfs_ctx != NULL) ,
    1305                 "cannot create DEVFS context in cluster IO\n");
    1306 
    1307         // register DEVFS root and external directories
    1308         devfs_ctx_init( devfs_ctx, devfs_dev_inode_xp, devfs_external_inode_xp );
     1228        // 4. initializes DEVFS context extension
     1229        devfs_ctx_init( devfs_ctx,
     1230                        devfs_dev_inode_xp,
     1231                        devfs_external_inode_xp );
    13091232    }   
    13101233
    13111234    /////////////////////////////////////////////////////////////////////////////////
    1312     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1313                                         cluster_info_nb_actives(info->cluster_info) );
     1235    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1236                                        (info->x_size * info->y_size) );
    13141237    barrier_wait( &local_barrier , info->cores_nr );
    13151238    /////////////////////////////////////////////////////////////////////////////////
     
    13171240#if DEBUG_KERNEL_INIT
    13181241if( (core_lid ==  0) & (local_cxy == 0) )
    1319 printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster 0 / cycle %d\n",
    1320 __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
     1242printk("\n[DBG] %s : exit barrier 6 : DEVFS root initialized in cluster 0 / sr %x / cycle %d\n",
     1243__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    13211244#endif
    13221245
     
    13241247    // STEP 7 : All CP0s complete in parallel the DEVFS tree initialization.
    13251248    //          Each CP0 get the "dev" and "external" extended pointers from
    1326     //          values stored in cluster IO.
    1327     //          Then each CP0 in cluster(i) creates the DEVFS "internal directory,
     1249    //          values stored in cluster 0.
     1250    //          Then each CP0 in cluster(i) creates the DEVFS "internal" directory,
    13281251    //          and creates the pseudo-files for all chdevs in cluster (i).
    13291252    /////////////////////////////////////////////////////////////////////////////////
     
    13311254    if( core_lid == 0 )
    13321255    {
    1333         // get extended pointer on "extend" field of VFS context for DEVFS in cluster IO
    1334         xptr_t  extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend ); // [FIXME]
     1256        // get extended pointer on "extend" field of VFS context for DEVFS in cluster 0
     1257        xptr_t  extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend );
    13351258
    13361259        // get pointer on DEVFS context in cluster 0
    13371260        devfs_ctx_t * devfs_ctx = hal_remote_lpt( extend_xp );
    13381261       
    1339         devfs_dev_inode_xp      = hal_remote_lwd( XPTR( 0 , &devfs_ctx->dev_inode_xp ) );
    1340         devfs_external_inode_xp = hal_remote_lwd( XPTR( 0 , &devfs_ctx->external_inode_xp ) );
     1262        devfs_dev_inode_xp      = hal_remote_l64( XPTR( 0 , &devfs_ctx->dev_inode_xp ) );
     1263        devfs_external_inode_xp = hal_remote_l64( XPTR( 0 , &devfs_ctx->external_inode_xp ) );
    13411264
    13421265        // populate DEVFS in all clusters
     
    13471270
    13481271    /////////////////////////////////////////////////////////////////////////////////
    1349     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1350                                         cluster_info_nb_actives(info->cluster_info) );
     1272    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1273                                        (info->x_size * info->y_size) );
    13511274    barrier_wait( &local_barrier , info->cores_nr );
    13521275    /////////////////////////////////////////////////////////////////////////////////
     
    13541277#if DEBUG_KERNEL_INIT
    13551278if( (core_lid ==  0) & (local_cxy == 0) )
    1356 printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
    1357 __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
     1279printk("\n[DBG] %s : exit barrier 7 : DEV initialized in cluster 0 / sr %x / cycle %d\n",
     1280__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    13581281#endif
    13591282
     
    13731296
    13741297    /////////////////////////////////////////////////////////////////////////////////
    1375     if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]
    1376                                         cluster_info_nb_actives(info->cluster_info) );
     1298    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1299                                        (info->x_size * info->y_size) );
    13771300    barrier_wait( &local_barrier , info->cores_nr );
    13781301    /////////////////////////////////////////////////////////////////////////////////
     
    13801303#if DEBUG_KERNEL_INIT
    13811304if( (core_lid ==  0) & (local_cxy == 0) )
    1382 printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
    1383 __FUNCTION__ , (uint32_t)hal_get_cycles() );
     1305printk("\n[DBG] %s : exit barrier 8 : process init created / sr %x / cycle %d\n",
     1306__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
    13841307#endif
    13851308
    13861309#if (DEBUG_KERNEL_INIT & 1)
    1387 if( (core_lid ==  0) /*& (local_cxy == 0)*/ )
     1310if( (core_lid ==  0) & (local_cxy == 0) )
    13881311sched_display( 0 );
    13891312#endif
     
    13931316    /////////////////////////////////////////////////////////////////////////////////
    13941317   
    1395     if( (core_lid ==  0) && (local_cxy == 0) ) // [FIXME]
     1318    if( (core_lid == 0) && (local_cxy == 0) )
    13961319    {
    13971320        print_banner( (info->x_size * info->y_size) , info->cores_nr );
     
    14151338                   " - list item          : %d bytes\n"
    14161339                   " - xlist item         : %d bytes\n"
    1417                    " - spinlock           : %d bytes\n"
    1418                    " - remote spinlock    : %d bytes\n"
     1340                   " - busylock           : %d bytes\n"
     1341                   " - remote busylock    : %d bytes\n"
     1342                   " - queuelock          : %d bytes\n"
     1343                   " - remote queuelock   : %d bytes\n"
    14191344                   " - rwlock             : %d bytes\n"
    14201345                   " - remote rwlock      : %d bytes\n",
    1421                    sizeof( thread_t          ),
    1422                    sizeof( process_t         ),
    1423                    sizeof( cluster_t         ),
    1424                    sizeof( chdev_t           ),
    1425                    sizeof( core_t            ),
    1426                    sizeof( scheduler_t       ),
    1427                    sizeof( remote_fifo_t     ),
    1428                    sizeof( page_t            ),
    1429                    sizeof( mapper_t          ),
    1430                    sizeof( ppm_t             ),
    1431                    sizeof( kcm_t             ),
    1432                    sizeof( khm_t             ),
    1433                    sizeof( vmm_t             ),
    1434                    sizeof( gpt_t             ),
    1435                    sizeof( list_entry_t      ),
    1436                    sizeof( xlist_entry_t     ),
    1437                    sizeof( spinlock_t        ),
    1438                    sizeof( remote_spinlock_t ),
    1439                    sizeof( rwlock_t          ),
    1440                    sizeof( remote_rwlock_t   ));
     1346                   sizeof( thread_t           ),
     1347                   sizeof( process_t          ),
     1348                   sizeof( cluster_t          ),
     1349                   sizeof( chdev_t            ),
     1350                   sizeof( core_t             ),
     1351                   sizeof( scheduler_t        ),
     1352                   sizeof( remote_fifo_t      ),
     1353                   sizeof( page_t             ),
     1354                   sizeof( mapper_t           ),
     1355                   sizeof( ppm_t              ),
     1356                   sizeof( kcm_t              ),
     1357                   sizeof( khm_t              ),
     1358                   sizeof( vmm_t              ),
     1359                   sizeof( gpt_t              ),
     1360                   sizeof( list_entry_t       ),
     1361                   sizeof( xlist_entry_t      ),
     1362                   sizeof( busylock_t         ),
     1363                   sizeof( remote_busylock_t  ),
     1364                   sizeof( queuelock_t        ),
     1365                   sizeof( remote_queuelock_t ),
     1366                   sizeof( rwlock_t           ),
     1367                   sizeof( remote_rwlock_t    ));
    14411368#endif
    14421369
  • trunk/kernel/kern/printk.c

    r502 r564  
    2626#include <hal_special.h>
    2727#include <dev_txt.h>
    28 #include <remote_spinlock.h>
     28#include <remote_busylock.h>
    2929#include <cluster.h>
    3030#include <thread.h>
     
    201201// @ args      : va_list of arguments.
    202202//////////////////////////////////////////////////////////////////////////////////////
    203 static void kernel_printf( char    * format,
    204                            va_list  * args )
     203static void kernel_printf( const char * format,
     204                           va_list    * args )
    205205{
    206206
     
    352352{
    353353    va_list       args;
    354     reg_t         save_sr;
    355354
    356355    // get pointers on TXT0 chdev
     
    359358    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    360359
    361     // get extended pointer on remote TXT0 chdev lock
     360    // get extended pointer on remote TXT0 lock
    362361    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    363362
    364     // get TXT0 lock in busy waiting mode
    365     remote_spinlock_lock_busy( lock_xp , &save_sr );
    366 
    367     // call kernel_printf on TXT0, in busy waiting mode
     363    // get TXT0 lock
     364    remote_busylock_acquire( lock_xp );
     365
     366    // display format on TXT0 in busy waiting mode
    368367    va_start( args , format );
    369368    kernel_printf( format , &args );
    370369    va_end( args );
    371370
    372     // release lock
    373     remote_spinlock_unlock_busy( lock_xp , save_sr );
     371    // release TXT0 lock
     372    remote_busylock_release( lock_xp );
    374373}
    375374
     
    386385
    387386////////////////////////////////////
    388 void __panic( const char * file_name,
    389               const char * function_name,
    390               uint32_t     line,
    391               cycle_t      cycle,
    392               const char * format,
    393               ... )
     387void panic( const char * file_name,
     388            const char * function_name,
     389            uint32_t     line,
     390            cycle_t      cycle,
     391            const char * format,
     392            ... )
    394393{
    395394    // get pointers on TXT0 chdev
     
    399398
    400399    // get extended pointer on remote TXT0 lock
    401     xptr_t lock_txt0_xp = XPTR(txt0_cxy, &txt0_ptr->wait_lock);
    402 
    403     // get TXT0 lock in busy waiting mode
    404     {
    405         uint32_t save_sr;
    406         remote_spinlock_lock_busy(lock_txt0_xp, &save_sr);
    407 
    408         thread_t *current = CURRENT_THREAD;
    409         nolock_printk(
    410             "\n[PANIC] in %s: line %d | funct %s | cycle %d\n"
    411             "core[%x,%d] | thread %x in process %x\n"
    412             "            | thread_ptr %x | procress_ptr %x\n",
     400    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     401
     402    // get TXT0 lock
     403    remote_busylock_acquire( lock_xp );
     404
     405    // get calling thread
     406    thread_t * current = CURRENT_THREAD;
     407
     408    // print generic infos
     409    nolock_printk(
     410            "\n[PANIC] in %s: line %d | function %s | cycle %d\n"
     411            "core[%x,%d] | thread %x (%x) in process %x (%x)\n",
    413412            file_name, line, function_name, (uint32_t) cycle,
    414             local_cxy, current->core->lid, current->trdid, current->process->pid,
    415             current, current->process);
    416 
    417         // call kernel_printf on TXT0, in busy waiting to print format
    418         va_list args;
    419         va_start(args, format);
    420         kernel_printf(format, &args);
    421         va_end(args);
    422 
    423         // release TXT0 lock
    424         remote_spinlock_unlock_busy(lock_txt0_xp, save_sr);
    425     }
     413            local_cxy, current->core->lid,
     414            current->trdid, current,
     415            current->process->pid, current->process );
     416
     417    // call kernel_printf to print format
     418    va_list args;
     419    va_start(args, format);
     420    kernel_printf(format, &args);
     421    va_end(args);
     422
     423    // release TXT0 lock
     424    remote_busylock_release( lock_xp );
    426425
    427426    // suicide
     
    432431void puts( char * string )
    433432{
    434     uint32_t   save_sr;
    435433    uint32_t   n = 0;
    436434
     
    443441    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    444442
    445     // get extended pointer on remote TXT0 chdev lock
     443    // get extended pointer on remote TXT0 lock
    446444    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    447445
    448     // get TXT0 lock in busy waiting mode
    449     remote_spinlock_lock_busy( lock_xp , &save_sr );
     446    // get TXT0 lock
     447    remote_busylock_acquire( lock_xp );
    450448
    451449    // display string on TTY0
    452450    dev_txt_sync_write( string , n );
    453451
    454     // release TXT0 lock in busy waiting mode
    455     remote_spinlock_unlock_busy( lock_xp , save_sr );
     452    // release TXT0 lock
     453    remote_busylock_release( lock_xp );
    456454}
    457455
     
    464462    char      buf[10];
    465463    uint32_t  c;
    466     uint32_t  save_sr;
    467464
    468465    buf[0] = '0';
     
    484481    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    485482
    486     // get TXT0 lock in busy waiting mode
    487     remote_spinlock_lock_busy( lock_xp , &save_sr );
     483    // get TXT0 lock
     484    remote_busylock_acquire( lock_xp );
    488485
    489486    // display string on TTY0
    490487    dev_txt_sync_write( buf , 10 );
    491488
    492     // release TXT0 lock in busy waiting mode
    493     remote_spinlock_unlock_busy( lock_xp , save_sr );
     489    // release TXT0 lock
     490    remote_busylock_release( lock_xp );
    494491}
    495492
     
    501498    char      buf[18];
    502499    uint32_t  c;
    503     uint32_t  save_sr;
    504500
    505501    buf[0] = '0';
     
    521517    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    522518
    523     // get TXT0 lock in busy waiting mode
    524     remote_spinlock_lock_busy( lock_xp , &save_sr );
     519    // get TXT0 lock
     520    remote_busylock_acquire( lock_xp );
    525521
    526522    // display string on TTY0
    527523    dev_txt_sync_write( buf , 18 );
    528524
    529     // release TXT0 lock in busy waiting mode
    530     remote_spinlock_unlock_busy( lock_xp , save_sr );
     525    // release TXT0 lock
     526    remote_busylock_release( lock_xp );
    531527}
    532528
  • trunk/kernel/kern/printk.h

    r502 r564  
    2828// - The printk() function displays kernel messages on the kernel terminal TXT0,
    2929//   using a busy waiting policy: It calls directly the relevant TXT driver,
    30 //   after taking the TXT0 chdev lock for exclusive access to the TXT0 terminal.
     30//   after taking the TXT0 busylock for exclusive access to the TXT0 terminal.
    3131// - The user_printk() function displays messages on the calling thread private
    3232//   terminal, using a descheduling policy: it register the request in the selected
     
    6767/**********************************************************************************
    6868 * This function displays a formatted string on the kernel terminal TXT0,
    69  * using a busy waiting policy: It calls directly the relevant TXT driver,
    7069 * after taking the TXT0 lock.
     70 * It uses a busy waiting policy, calling directly the relevant TXT driver,
    7171 **********************************************************************************
    7272 * @ format     : formatted string.
     
    7676/**********************************************************************************
    7777 * This function displays a formatted string on the kernel terminal TXT0,
    78  * using a busy waiting policy: It calls directly the relevant TXT driver,
    7978 * without taking the TXT0 lock.
     79 * It uses a busy waiting policy, calling directly the relevant TXT driver,
    8080 **********************************************************************************
    8181 * @ format     : formatted string.
     
    8585
    8686/**********************************************************************************
    87  * Private function designed to be called by the assert macro (below)
     87 * This function is called in case of kernel panic. It printt a detailed message
     88 * on the TXT0 terminal after taking the TXT0 lock, and call the hal_core_sleep()
     89 * function to block the calling core.  It is used by the assert macro (below).
    8890 **********************************************************************************
    8991 * @ file_name     : File where the assert macro was invoked
     
    9698 * See assert macro documentation for information about printed information.
    9799 *********************************************************************************/
    98 void __panic( const char * file_name,
    99               const char * function_name,
    100               uint32_t     line,
    101               cycle_t      cycle,
    102               const char * format,
    103               ... )
    104 __attribute__((__noreturn__));
     100void panic( const char * file_name,
     101            const char * function_name,
     102            uint32_t     line,
     103            cycle_t      cycle,
     104            const char * format,
     105            ... ) __attribute__((__noreturn__));
    105106
    106107/**********************************************************************************
     
    134135 * @ format        : formatted string
    135136 *********************************************************************************/
    136 #define assert( expr, format, ... ) { uint32_t __line_at_expansion = __LINE__;    \
    137   const volatile cycle_t __assert_cycle = hal_get_cycles();                       \
    138   if ( ( expr ) == false ) {                                                      \
    139     __panic( __FILE__, __FUNCTION__,                                              \
    140              __line_at_expansion, __assert_cycle,                                 \
    141              ( format ), ##__VA_ARGS__ );                                         \
    142   }                                                                               \
     137#define assert( expr, format, ... )                                               \
     138{                                                                                 \
     139    uint32_t __line_at_expansion = __LINE__;                                      \
     140    const volatile cycle_t __assert_cycle = hal_get_cycles();                     \
     141    if ( ( expr ) == false )                                                      \
     142    {                                                                             \
     143        panic( __FILE__, __FUNCTION__,                                            \
     144               __line_at_expansion, __assert_cycle,                               \
     145               ( format ), ##__VA_ARGS__ );                                       \
     146    }                                                                             \
    143147}
    144148
     
    168172
    169173
    170 
    171 /*  deprecated march 2018 [AG]
    172 
    173 #if CONFIG_CHDEV_DEBUG
    174 #define chdev_dmsg(...)   if(hal_time_stamp() > CONFIG_CHDEV_DEBUG) printk(__VA_ARGS__)
    175 #else
    176 #define chdev_dmsg(...)
    177 #endif
    178 
    179 #if CONFIG_CLUSTER_DEBUG
    180 #define cluster_dmsg(...)   if(hal_time_stamp() > CONFIG_CLUSTER_DEBUG) printk(__VA_ARGS__)
    181 #else
    182 #define cluster_dmsg(...)
    183 #endif
    184 
    185 #if CONFIG_CONTEXT_DEBUG
    186 #define context_dmsg(...)   if(hal_time_stamp() > CONFIG_CONTEXT_DEBUG) printk(__VA_ARGS__)
    187 #else
    188 #define context_dmsg(...)
    189 #endif
    190 
    191 #if CONFIG_CORE_DEBUG
    192 #define core_dmsg(...)   if(hal_time_stamp() > CONFIG_CORE_DEBUG) printk(__VA_ARGS__)
    193 #else
    194 #define core_dmsg(...)
    195 #endif
    196 
    197 #if CONFIG_DEVFS_DEBUG
    198 #define devfs_dmsg(...)   if(hal_time_stamp() > CONFIG_DEVFS_DEBUG) printk(__VA_ARGS__)
    199 #else
    200 #define devfs_dmsg(...)
    201 #endif
    202 
    203 #if CONFIG_DMA_DEBUG
    204 #define dma_dmsg(...)   if(hal_time_stamp() > CONFIG_DMA_DEBUG) printk(__VA_ARGS__)
    205 #else
    206 #define dma_dmsg(...)
    207 #endif
    208 
    209 #if CONFIG_DQDT_DEBUG
    210 #define dqdt_dmsg(...)   if(hal_time_stamp() > CONFIG_DQDT_DEBUG) printk(__VA_ARGS__)
    211 #else
    212 #define dqdt_dmsg(...)
    213 #endif
    214 
    215 #if CONFIG_ELF_DEBUG
    216 #define elf_dmsg(...)   if(hal_time_stamp() > CONFIG_ELF_DEBUG) printk(__VA_ARGS__)
    217 #else
    218 #define elf_dmsg(...)
    219 #endif
    220 
    221 #if CONFIG_EXEC_DEBUG
    222 #define exec_dmsg(...)   if(hal_time_stamp() > CONFIG_EXEC_DEBUG) printk(__VA_ARGS__)
    223 #else
    224 #define exec_dmsg(...)
    225 #endif
    226 
    227 #if CONFIG_EXCP_DEBUG
    228 #define excp_dmsg(...)   if(hal_time_stamp() > CONFIG_EXCP_DEBUG) printk(__VA_ARGS__)
    229 #else
    230 #define excp_dmsg(...)
    231 #endif
    232 
    233 #if CONFIG_FATFS_DEBUG
    234 #define fatfs_dmsg(...)   if(hal_time_stamp() > CONFIG_FATFS_DEBUG) printk(__VA_ARGS__)
    235 #else
    236 #define fatfs_dmsg(...)
    237 #endif
    238 
    239 #if CONFIG_FBF_DEBUG
    240 #define fbf_dmsg(...)   if(hal_time_stamp() > CONFIG_FBF_DEBUG) printk(__VA_ARGS__)
    241 #else
    242 #define fbf_dmsg(...)
    243 #endif
    244 
    245 #if CONFIG_FORK_DEBUG
    246 #define fork_dmsg(...)   if(hal_time_stamp() > CONFIG_FORK_DEBUG) printk(__VA_ARGS__)
    247 #else
    248 #define fork_dmsg(...)
    249 #endif
    250 
    251 #if CONFIG_GPT_DEBUG
    252 #define gpt_dmsg(...)   if(hal_time_stamp() > CONFIG_GPT_DEBUG) printk(__VA_ARGS__)
    253 #else
    254 #define gpt_dmsg(...)
    255 #endif
    256 
    257 #if CONFIG_GRPC_DEBUG
    258 #define grpc_dmsg(...)   if(hal_time_stamp() > CONFIG_GRPC_DEBUG) printk(__VA_ARGS__)
    259 #else
    260 #define grpc_dmsg(...)
    261 #endif
    262 
    263 #if CONFIG_IDLE_DEBUG
    264 #define idle_dmsg(...)   if(hal_time_stamp() > CONFIG_IDLE_DEBUG) printk(__VA_ARGS__)
    265 #else
    266 #define idle_dmsg(...)
    267 #endif
    268 
    269 #if CONFIG_IOC_DEBUG
    270 #define ioc_dmsg(...)   if(hal_time_stamp() > CONFIG_IOC_DEBUG) printk(__VA_ARGS__)
    271 #else
    272 #define ioc_dmsg(...)
    273 #endif
    274 
    275 #if CONFIG_IRQ_DEBUG
    276 #define irq_dmsg(...)   if(hal_time_stamp() > CONFIG_IRQ_DEBUG) printk(__VA_ARGS__)
    277 #else
    278 #define irq_dmsg(...)
    279 #endif
    280 
    281 #if CONFIG_KCM_DEBUG
    282 #define kcm_dmsg(...)   if(hal_time_stamp() > CONFIG_KCM_DEBUG) printk(__VA_ARGS__)
    283 #else
    284 #define kcm_dmsg(...)
    285 #endif
    286 
    287 #if CONFIG_KHM_DEBUG
    288 #define khm_dmsg(...)   if(hal_time_stamp() > CONFIG_KHM_DEBUG) printk(__VA_ARGS__)
    289 #else
    290 #define khm_dmsg(...)
    291 #endif
    292 
    293 #if CONFIG_KILL_DEBUG
    294 #define kill_dmsg(...)   if(hal_time_stamp() > CONFIG_KILL_DEBUG) printk(__VA_ARGS__)
    295 #else
    296 #define kill_dmsg(...)
    297 #endif
    298 
    299 #if CONFIG_KINIT_DEBUG
    300 #define kinit_dmsg(...)   if(hal_time_stamp() > CONFIG_KINIT_DEBUG) printk(__VA_ARGS__)
    301 #else
    302 #define kinit_dmsg(...)
    303 #endif
    304 
    305 #if CONFIG_KMEM_DEBUG
    306 #define kmem_dmsg(...)   if(hal_time_stamp() > CONFIG_KMEM_DEBUG) printk(__VA_ARGS__)
    307 #else
    308 #define kmem_dmsg(...)
    309 #endif
    310 
    311 #if CONFIG_MAPPER_DEBUG
    312 #define mapper_dmsg(...)   if(hal_time_stamp() > CONFIG_MAPPER_DEBUG) printk(__VA_ARGS__)
    313 #else
    314 #define mapper_dmsg(...)
    315 #endif
    316 
    317 #if CONFIG_MMAP_DEBUG
    318 #define mmap_dmsg(...)   if(hal_time_stamp() > CONFIG_MMAP_DEBUG) printk(__VA_ARGS__)
    319 #else
    320 #define mmap_dmsg(...)
    321 #endif
    322 
    323 #if CONFIG_MMC_DEBUG
    324 #define mmc_dmsg(...)   if(hal_time_stamp() > CONFIG_MMC_DEBUG) printk(__VA_ARGS__)
    325 #else
    326 #define mmc_dmsg(...)
    327 #endif
    328 
    329 #if CONFIG_NIC_DEBUG
    330 #define nic_dmsg(...)   if(hal_time_stamp() > CONFIG_NIC_DEBUG) printk(__VA_ARGS__)
    331 #else
    332 #define nic_dmsg(...)
    333 #endif
    334 
    335 #if CONFIG_PIC_DEBUG
    336 #define pic_dmsg(...)   if(hal_time_stamp() > CONFIG_PIC_DEBUG) printk(__VA_ARGS__)
    337 #else
    338 #define pic_dmsg(...)
    339 #endif
    340 
    341 #if CONFIG_PPM_DEBUG
    342 #define ppm_dmsg(...)   if(hal_time_stamp() > CONFIG_PPM_DEBUG) printk(__VA_ARGS__)
    343 #else
    344 #define ppm_dmsg(...)
    345 #endif
    346 
    347 #if CONFIG_PROCESS_DEBUG
    348 #define process_dmsg(...)   if(hal_time_stamp() > CONFIG_PROCESS_DEBUG) printk(__VA_ARGS__)
    349 #else
    350 #define process_dmsg(...)
    351 #endif
    352 
    353 #if CONFIG_READ_DEBUG
    354 #define read_dmsg(...)   if(hal_time_stamp() > CONFIG_READ_DEBUG) printk(__VA_ARGS__)
    355 #else
    356 #define read_dmsg(...)
    357 #endif
    358 
    359 #if CONFIG_RPC_DEBUG
    360 #define rpc_dmsg(...)   if(hal_time_stamp() > CONFIG_RPC_DEBUG) printk(__VA_ARGS__)
    361 #else
    362 #define rpc_dmsg(...)
    363 #endif
    364 
    365 #if CONFIG_SCHED_DEBUG
    366 #define sched_dmsg(...)   if(hal_time_stamp() > CONFIG_SCHED_DEBUG) printk(__VA_ARGS__)
    367 #else
    368 #define sched_dmsg(...)
    369 #endif
    370 
    371 #if CONFIG_SIGACTION_DEBUG
    372 #define sigaction_dmsg(...)   if(hal_time_stamp() > CONFIG_SIGACTION_DEBUG) printk(__VA_ARGS__)
    373 #else
    374 #define sigaction_dmsg(...)
    375 #endif
    376 
    377 #if CONFIG_SYSCALL_DEBUG
    378 #define syscall_dmsg(...)   if(hal_time_stamp() > CONFIG_SYSCALL_DEBUG) printk(__VA_ARGS__)
    379 #else
    380 #define syscall_dmsg(...)
    381 #endif
    382 
    383 #if CONFIG_THREAD_DEBUG
    384 #define thread_dmsg(...)   if(hal_time_stamp() > CONFIG_THREAD_DEBUG) printk(__VA_ARGS__)
    385 #else
    386 #define thread_dmsg(...)
    387 #endif
    388 
    389 #if CONFIG_TXT_DEBUG
    390 #define txt_dmsg(...)   if(hal_time_stamp() > CONFIG_TXT_DEBUG) printk(__VA_ARGS__)
    391 #else
    392 #define txt_dmsg(...)
    393 #endif
    394 
    395 #if CONFIG_VFS_DEBUG
    396 #define vfs_dmsg(...)   if(hal_time_stamp() > CONFIG_VFS_DEBUG) printk(__VA_ARGS__)
    397 #else
    398 #define vfs_dmsg(...)
    399 #endif
    400 
    401 #if CONFIG_VMM_DEBUG
    402 #define vmm_dmsg(...)   if(hal_time_stamp() > CONFIG_VMM_DEBUG) printk(__VA_ARGS__)
    403 #else
    404 #define vmm_dmsg(...)
    405 #endif
    406 
    407 #if CONFIG_WRITE_DEBUG
    408 #define write_dmsg(...)   if(hal_time_stamp() > CONFIG_WRITE_DEBUG) printk(__VA_ARGS__)
    409 #else
    410 #define write_dmsg(...)
    411 #endif
    412 
    413 */
    414 
    415174#endif  // _PRINTK_H
    416175
  • trunk/kernel/kern/process.c

    r527 r564  
    11/*
    2  * process.c - process related management
     2 * process.c - process related functions definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     
    4343#include <string.h>
    4444#include <scheduler.h>
    45 #include <remote_spinlock.h>
     45#include <busylock.h>
     46#include <queuelock.h>
     47#include <remote_queuelock.h>
     48#include <rwlock.h>
     49#include <remote_rwlock.h>
    4650#include <dqdt.h>
    4751#include <cluster.h>
     
    114118
    115119    // get parent_pid
    116     parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     120    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    117121
    118122#if DEBUG_PROCESS_REFERENCE_INIT
     
    132136    // initialize vmm as empty
    133137    error = vmm_init( process );
    134     assert( (error == 0) , "cannot initialize VMM\n" );
     138
     139assert( (error == 0) , "cannot initialize VMM\n" );
    135140 
    136141#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    138143if( DEBUG_PROCESS_REFERENCE_INIT )
    139144printk("\n[DBG] %s : thread %x in process %x / vmm empty for process %x / cycle %d\n",
    140 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid , cycle );
     145__FUNCTION__, CURRENT_THREAD->trdid, parent_pid , pid, cycle );
    141146#endif
    142147
     
    160165__FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, txt_id, cycle );
    161166#endif
    162 
    163 
    164 
    165167        // build path to TXT_RX[i] and TXT_TX[i] chdevs
    166168        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
     
    175177                           &stdin_id );
    176178
    177         assert( (error == 0) , "cannot open stdin pseudo file" );
    178         assert( (stdin_id == 0) , "stdin index must be 0" );
     179assert( (error == 0) , "cannot open stdin pseudo file" );
     180assert( (stdin_id == 0) , "stdin index must be 0" );
    179181
    180182#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    225227    {
    226228        // get extended pointer on stdin pseudo file in parent process
    227         file_xp = (xptr_t)hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
     229        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
    228230
    229231        // get extended pointer on parent process TXT chdev
     
    234236        chdev_ptr = GET_PTR( chdev_xp );
    235237 
    236         // get TXT terminal index
    237         txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
    238 
    239         // attach process to TXT[txt_id]
     238        // get parent process TXT terminal index
     239        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
     240
     241        // attach child process to parent process TXT terminal
    240242        process_txt_attach( process , txt_id );
    241243
     
    246248
    247249    // initialize specific inodes root and cwd
    248     process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( parent_cxy,
     250    process->vfs_root_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
    249251                                                         &parent_ptr->vfs_root_xp ) );
    250     process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( parent_cxy,
     252    process->vfs_cwd_xp  = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
    251253                                                         &parent_ptr->vfs_cwd_xp ) );
    252254    vfs_inode_remote_up( process->vfs_root_xp );
    253255    vfs_inode_remote_up( process->vfs_cwd_xp );
    254256
    255     remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
     257    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
    256258
    257259#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
    258260cycle = (uint32_t)hal_get_cycles();
    259261if( DEBUG_PROCESS_REFERENCE_INIT )
    260 printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n",
    261 __FUNCTION__ , CURRENT_THREAD , pid , cycle );
     262printk("\n[DBG] %s : thread %x in process %x / set fd_array for process %x / cycle %d\n",
     263__FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid , cycle );
    262264#endif
    263265
     
    265267    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    266268    process->children_nr     = 0;
    267     remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
     269    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
    268270
    269271    // reset semaphore / mutex / barrier / condvar list roots
     
    272274    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    273275    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    274     remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );
     276    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
    275277
    276278    // register new process in the local cluster manager pref_tbl[]
     
    284286    cluster_process_copies_link( process );
    285287
    286     // reset th_tbl[] array as empty in process descriptor
     288    // initialize th_tbl[] array and associated threads
    287289    uint32_t i;
    288     for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
     290
     291    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
    289292        {
    290293        process->th_tbl[i] = NULL;
    291294    }
    292295    process->th_nr  = 0;
    293     spinlock_init( &process->th_lock );
     296    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
    294297
    295298        hal_fence();
     
    298301cycle = (uint32_t)hal_get_cycles();
    299302if( DEBUG_PROCESS_REFERENCE_INIT )
    300 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
    301 __FUNCTION__ , CURRENT_THREAD , pid , cycle );
     303printk("\n[DBG] %s : thread %x in process %x exit for process %x / cycle %d\n",
     304__FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );
    302305#endif
    303306
     
    315318
    316319    // initialize PID, REF_XP, PARENT_XP, and STATE
    317     local_process->pid        = hal_remote_lw(  XPTR( ref_cxy , &ref_ptr->pid ) );
    318     local_process->parent_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
     320    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
     321    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
    319322    local_process->ref_xp     = reference_process_xp;
    320323    local_process->owner_xp   = reference_process_xp;
    321324    local_process->term_state = 0;
    322325
    323 #if DEBUG_PROCESS_COPY_INIT
     326#if DEBUG_PROCESS_COPY_INIT
      327thread_t * this = CURRENT_THREAD;
    324328uint32_t cycle = (uint32_t)hal_get_cycles();
    325329if( DEBUG_PROCESS_COPY_INIT )
    326 printk("\n[DBG] %s : thread %x enter for process %x\n",
    327 __FUNCTION__ , CURRENT_THREAD , local_process->pid );
    328 #endif
     330printk("\n[DBG] %s : thread %x in process %x enter for process %x / cycle %d\n",
     331__FUNCTION__, this->trdid, this->process->pid, local_process->pid, cycle );
     332#endif
     333
     334// check user process
     335assert( (local_process->pid != 0), "PID cannot be 0" );
    329336
    330337    // reset local process vmm
     
    336343
    337344    // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields
    338     local_process->vfs_root_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    339     local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
     345    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
     346    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    340347    local_process->vfs_cwd_xp  = XPTR_NULL;
    341348
     
    343350    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
    344351    local_process->children_nr   = 0;
    345     remote_spinlock_init( XPTR( local_cxy , &local_process->children_lock ) );
     352    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
     353                           LOCK_PROCESS_CHILDREN );
    346354
    347355    // reset children_list (not used in a process descriptor copy)
     
    354362    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
    355363
    356     // reset th_tbl[] array as empty
     364    // initialize th_tbl[] array and associated fields
    357365    uint32_t i;
    358     for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
     366    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
    359367        {
    360368        local_process->th_tbl[i] = NULL;
    361369    }
    362370    local_process->th_nr  = 0;
    363     spinlock_init( &local_process->th_lock );
     371    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
     372
    364373
    365374    // register new process descriptor in local cluster manager local_list
     
    374383cycle = (uint32_t)hal_get_cycles();
    375384if( DEBUG_PROCESS_COPY_INIT )
    376 printk("\n[DBG] %s : thread %x exit for process %x\n",
    377 __FUNCTION__ , CURRENT_THREAD , local_process->pid );
     385printk("\n[DBG] %s : thread %x in process %x exit for process %x / cycle %d\n",
     386__FUNCTION__, this->trdid, this->process->pid, local_process->pid, cycle );
    378387#endif
    379388
     
    399408uint32_t cycle = (uint32_t)hal_get_cycles();
    400409if( DEBUG_PROCESS_DESTROY )
    401 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    402 __FUNCTION__ , CURRENT_THREAD , pid , local_cxy , cycle );
     410printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     411__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle );
    403412#endif
    404413
     
    423432
    424433        // remove process from children_list
    425         remote_spinlock_lock( children_lock_xp );
     434        remote_queuelock_acquire( children_lock_xp );
    426435        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
    427436            hal_remote_atomic_add( children_nr_xp , -1 );
    428         remote_spinlock_unlock( children_lock_xp );
    429 
    430     // release the process PID to cluster manager
    431     cluster_pid_release( pid );
    432 
    433     }
    434 
    435     // FIXME close all open files and update dirty [AG]
     437        remote_queuelock_release( children_lock_xp );
     438
     439        // release the process PID to cluster manager
     440        cluster_pid_release( pid );
     441    }
     442
     443    // FIXME close all open files and synchronize dirty [AG]
    436444
    437445    // decrease refcount for bin file, root file and cwd file
     
    449457cycle = (uint32_t)hal_get_cycles();
    450458if( DEBUG_PROCESS_DESTROY )
    451 printk("\n[DBG] %s : thread %x exit / destroyed process %x in cluster %x / cycle %d\n",
    452 __FUNCTION__ , CURRENT_THREAD , pid, local_cxy, cycle );
     459printk("\n[DBG] %s : thread %x in process %x exit / process %x in cluster %x / cycle %d\n",
     460__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle );
    453461#endif
    454462
     
    491499    remote_nr = 0;
    492500
     501// check calling thread can yield
     502assert( (client->busylocks == 0),
     503"cannot yield : busylocks = %d\n", client->busylocks );
     504
    493505#if DEBUG_PROCESS_SIGACTION
    494506uint32_t cycle = (uint32_t)hal_get_cycles();
     
    517529
    518530    // The client thread send parallel RPCs to all remote clusters containing
    519     // target process copies, wait all responses, and then handles directly the
    520     // threads in local cluster, when required.
     531    // target process copies, wait all responses, and then handles directly
     532    // the threads in local cluster, when required.
    521533    // The client thread allocates a - shared - RPC descriptor in the stack,
    522534    // because all parallel, non-blocking, server threads use the same input
     
    529541    thread_block( client_xp , THREAD_BLOCKED_RPC );
    530542
    531     // take the lock protecting the copies
    532     remote_spinlock_lock( lock_xp );
     543    // take the lock protecting process copies
     544    remote_queuelock_acquire( lock_xp );
    533545
    534546    // initialize shared RPC descriptor
     
    573585
    574586    // release the lock protecting process copies
    575     remote_spinlock_unlock( lock_xp );
     587    remote_queuelock_release( lock_xp );
    576588
    577589    // restore IRQs
     
    620632    thread_t          * target;         // pointer on target thread
    621633    thread_t          * this;           // pointer on calling thread
    622     uint32_t            ltid;           // index in process th_tbl
     634    uint32_t            ltid;           // index in process th_tbl[]
    623635    cxy_t               owner_cxy;      // target process owner cluster
    624636    uint32_t            count;          // requests counter
     
    628640    this = CURRENT_THREAD;
    629641
     642#if DEBUG_PROCESS_SIGACTION
     643pid_t pid = process->pid;
     644uint32_t cycle = (uint32_t)hal_get_cycles();
     645if( DEBUG_PROCESS_SIGACTION < cycle )
     646printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     647__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     648#endif
     649
     650// check target process is an user process
     651assert( ( process->pid != 0 ),
     652"target process must be an user process" );
     653
    630654    // get target process owner cluster
    631655    owner_cxy = CXY_FROM_PID( process->pid );
    632656
    633 #if DEBUG_PROCESS_SIGACTION
    634 uint32_t cycle = (uint32_t)hal_get_cycles();
    635 if( DEBUG_PROCESS_SIGACTION < cycle )
    636 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    637 __FUNCTION__ , this , process->pid , local_cxy , cycle );
    638 #endif
    639 
    640657    // get lock protecting process th_tbl[]
    641     spinlock_lock( &process->th_lock );
     658    rwlock_rd_acquire( &process->th_lock );
    642659
    643660    // loop on target process local threads
     
    680697
    681698    // release lock protecting process th_tbl[]
    682     spinlock_unlock( &process->th_lock );
    683 
    684     // wait acknowledges
     699    rwlock_rd_release( &process->th_lock );
     700
     701    // busy waiting acknowledges
     702    // TODO this could be improved...
    685703    while( 1 )
    686704    {
     
    695713cycle = (uint32_t)hal_get_cycles();
    696714if( DEBUG_PROCESS_SIGACTION < cycle )
    697 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    698 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     715printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     716__FUNCTION__, this, this->process->pid, pid, local_cxy , cycle );
    699717#endif
    700718
     
    719737
    720738#if DEBUG_PROCESS_SIGACTION
     739pid_t pid = process->pid;
    721740uint32_t cycle = (uint32_t)hal_get_cycles();
    722741if( DEBUG_PROCESS_SIGACTION < cycle )
    723 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    724 __FUNCTION__ , this , process->pid , local_cxy , cycle );
    725 #endif
      742printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     743__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     744#endif
     745
     746// check target process is an user process
     747assert( ( process->pid != 0 ),
     748"target process must be an user process" );
    726749
    727750    // get lock protecting process th_tbl[]
    728     spinlock_lock( &process->th_lock );
     751    rwlock_rd_acquire( &process->th_lock );
    729752
    730753    // loop on target process local threads                       
     
    739762            target_xp = XPTR( local_cxy , target );
    740763
    741             // main thread and client thread should not be blocked
     764            // main thread and client thread should not be deleted
    742765            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
    743766                (client_xp) != target_xp )                           // not client thread
     
    750773
    751774    // release lock protecting process th_tbl[]
    752     spinlock_unlock( &process->th_lock );
     775    rwlock_rd_release( &process->th_lock );
    753776
    754777#if DEBUG_PROCESS_SIGACTION
    755778cycle = (uint32_t)hal_get_cycles();
    756779if( DEBUG_PROCESS_SIGACTION < cycle )
    757 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    758 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     780printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     781__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
    759782#endif
    760783
     
    773796
    774797#if DEBUG_PROCESS_SIGACTION
     798pid_t pid = process->pid;
    775799uint32_t cycle = (uint32_t)hal_get_cycles();
    776800if( DEBUG_PROCESS_SIGACTION < cycle )
    777 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    778 __FUNCTION__ , this , process->pid , local_cxy , cycle );
    779 #endif
     801printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     802__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     803#endif
     804
     805// check target process is an user process
     806assert( ( process->pid != 0 ),
     807"target process must be an user process" );
    780808
    781809    // get lock protecting process th_tbl[]
    782     spinlock_lock( &process->th_lock );
     810    rwlock_rd_acquire( &process->th_lock );
    783811
    784812    // loop on process threads to unblock all threads
     
    798826
    799827    // release lock protecting process th_tbl[]
    800     spinlock_unlock( &process->th_lock );
     828    rwlock_rd_release( &process->th_lock );
    801829
    802830#if DEBUG_PROCESS_SIGACTION
    803831cycle = (uint32_t)hal_get_cycles();
    804832if( DEBUG_PROCESS_SIGACTION < cycle )
    805 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    806 __FUNCTION__ , this , process->pid , local_cxy , cycle );
     833printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     834__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle );
    807835#endif
    808836
     
    818846    cluster_t * cluster = LOCAL_CLUSTER;
    819847
     848#if DEBUG_PROCESS_GET_LOCAL_COPY
     849thread_t * this = CURRENT_THREAD;
     850uint32_t cycle = (uint32_t)hal_get_cycles();
     851if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
      852printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
     853__FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle );
     854#endif
     855
    820856    // get lock protecting local list of processes
    821     remote_spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
     857    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    822858
    823859    // scan the local list of process descriptors to find the process
     
    836872
    837873    // release lock protecting local list of processes
    838     remote_spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
     874    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    839875
    840876    // allocate memory for a new local process descriptor
     
    859895
    860896#if DEBUG_PROCESS_GET_LOCAL_COPY
    861 uint32_t cycle = (uint32_t)hal_get_cycles();
     897cycle = (uint32_t)hal_get_cycles();
    862898if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
    863 printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
    864 __FUNCTION__ , local_cxy , pid , process_ptr , cycle );
      899printk("\n[DBG] %s : thread %x in process %x exit in cluster %x / process %x / cycle %d\n",
     900__FUNCTION__, this->trdid, this->process->pid, local_cxy, process_ptr, cycle );
    865901#endif
    866902
     
    883919
    884920    // get pointers on parent process
    885     parent_xp  = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
     921    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    886922    parent_cxy = GET_CXY( parent_xp );
    887923    parent_ptr = GET_PTR( parent_xp );
    888924
    889     return hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     925    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    890926}
    891927
     
    899935    uint32_t fd;
    900936
    901     remote_spinlock_init( XPTR( local_cxy , &process->fd_array.lock ) );
     937    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
    902938
    903939    process->fd_array.current = 0;
     
    909945    }
    910946}
    911 
    912 //////////////////////////////
    913 bool_t process_fd_array_full( void )
    914 {
    915     // get extended pointer on reference process
    916     xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
    917 
    918     // get reference process cluster and local pointer
    919     process_t * ref_ptr = GET_PTR( ref_xp );
    920     cxy_t       ref_cxy = GET_CXY( ref_xp );
    921 
    922     // get number of open file descriptors from reference fd_array
    923     uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
    924 
    925         return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
    926 }
    927 
    928947/////////////////////////////////////////////////
    929948error_t process_fd_register( process_t * process,
     
    933952    bool_t    found;
    934953    uint32_t  id;
     954    uint32_t  count;
    935955    xptr_t    xp;
    936956
     
    941961
    942962    // take lock protecting reference fd_array
    943         remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
     963        remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
    944964
    945965    found   = false;
     
    947967    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
    948968    {
    949         xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
     969        xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
    950970        if ( xp == XPTR_NULL )
    951971        {
     972            // update reference fd_array
     973            hal_remote_s64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
     974                count = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) ) + 1;
     975            hal_remote_s32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , count );
     976
     977            // update local fd_array copy if required
     978            if( ref_cxy != local_cxy )
     979            {
     980                process->fd_array.array[id] = file_xp;
     981                process->fd_array.current   = count;
     982            }
     983
     984            // exit
     985                        *fdid = id;
    952986            found = true;
    953             hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
    954                 hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
    955                         *fdid = id;
    956987            break;
    957988        }
     
    959990
    960991    // release lock protecting reference fd_array
    961         remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
     992        remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
    962993
    963994    if ( !found ) return -1;
     
    9701001{
    9711002    xptr_t  file_xp;
     1003    xptr_t  lock_xp;
    9721004
    9731005    // access local copy of process descriptor
     
    9811013        process_t * ref_ptr = GET_PTR( ref_xp );
    9821014
     1015        // build extended pointer on lock protecting reference fd_array
     1016        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
     1017
     1018        // take lock protecting reference fd_array
     1019            remote_queuelock_acquire( lock_xp );
     1020
    9831021        // access reference process descriptor
    984         file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
     1022        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
    9851023
    9861024        // update local fd_array if found
    987         if( file_xp != XPTR_NULL )
    988         {
    989             process->fd_array.array[fdid] = file_xp;
    990         }
     1025        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
     1026       
     1027        // release lock protecting reference fd_array
     1028            remote_queuelock_release( lock_xp );
    9911029    }
    9921030
     
    10111049
    10121050    // get the remote lock protecting the src fd_array
    1013         remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );
     1051        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
    10141052
    10151053    // loop on all fd_array entries
    10161054    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
    10171055        {
    1018                 entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
     1056                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
    10191057
    10201058                if( entry != XPTR_NULL )
     
    10241062
    10251063                        // copy entry in destination process fd_array
    1026                         hal_remote_swd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
     1064                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
    10271065                }
    10281066        }
    10291067
    10301068    // release lock on source process fd_array
    1031         remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );
     1069        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
    10321070
    10331071}  // end process_fd_remote_copy()
     1072
     1073
     1074////////////////////////////////////
     1075bool_t process_fd_array_full( void )
     1076{
     1077    // get extended pointer on reference process
     1078    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
     1079
     1080    // get reference process cluster and local pointer
     1081    process_t * ref_ptr = GET_PTR( ref_xp );
     1082    cxy_t       ref_cxy = GET_CXY( ref_xp );
     1083
     1084    // get number of open file descriptors from reference fd_array
     1085    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
     1086
     1087        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
     1088}
     1089
    10341090
    10351091////////////////////////////////////////////////////////////////////////////////////
     
    10431099{
    10441100    ltid_t         ltid;
    1045     reg_t          save_sr;
    10461101    bool_t         found = false;
    10471102 
    1048 
    1049     assert( (process != NULL) , "process argument is NULL" );
    1050 
    1051     assert( (thread != NULL) , "thread argument is NULL" );
    1052 
    1053     // take lock protecting th_tbl, depending on thread type:
    1054     // we don't want to use a descheduling policy for idle thread initialisation
    1055     if ( thread->type == THREAD_IDLE ) {
    1056         spinlock_lock_busy( &process->th_lock , &save_sr );
    1057     } else {
    1058         spinlock_lock( &process->th_lock );
    1059     }
    1060 
    1061     // search a free slot in th_tbl[]
    1062     for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
     1103// check arguments
     1104assert( (process != NULL) , "process argument is NULL" );
     1105assert( (thread != NULL) , "thread argument is NULL" );
     1106
     1107    // get the lock protecting th_tbl for all threads
     1108    // but the idle thread executing kernel_init (cannot yield)
     1109    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
     1110
     1111    // scan kth_tbl
     1112    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
    10631113    {
    10641114        if( process->th_tbl[ltid] == NULL )
     
    10791129    }
    10801130
    1081     // release lock protecting th_tbl
    1082     hal_fence();
    1083     if( thread->type == THREAD_IDLE ) {
    1084         spinlock_unlock_busy( &process->th_lock , save_sr );
    1085     } else {
    1086         spinlock_unlock( &process->th_lock );
    1087     }
    1088 
    1089     return (found) ? 0 : ENOMEM;
     1131    // get the lock protecting th_tbl for all threads
     1132    // but the idle thread executing kernel_init (cannot yield)
     1133    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
     1134
     1135    return (found) ? 0 : 0xFFFFFFFF;
    10901136
    10911137}  // end process_register_thread()
     
    10961142    uint32_t count;  // number of threads in local process descriptor
    10971143
    1098     assert( (thread != NULL) , "thread argument is NULL" );
     1144// check argument
     1145assert( (thread != NULL) , "thread argument is NULL" );
    10991146
    11001147    process_t * process = thread->process;
     
    11021149    // get thread local index
    11031150    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
    1104 
    1105     // take lock protecting th_tbl
    1106     spinlock_lock( &process->th_lock );
    1107 
     1151   
     1152    // the lock depends on thread user/kernel type, because we cannot
     1153    // use a descheduling policy for the lock protecting the kth_tbl
     1154
     1155    // get the lock protecting th_tbl[]
     1156    rwlock_wr_acquire( &process->th_lock );
     1157
     1158    // get number of kernel threads
    11081159    count = process->th_nr;
    11091160
    1110     assert( (count > 0) , "process th_nr cannot be 0\n" );
     1161// check th_nr value
     1162assert( (count > 0) , "process kth_nr cannot be 0\n" );
    11111163
    11121164    // remove thread from th_tbl[]
     
    11141166    process->th_nr = count-1;
    11151167
    1116     // release lock protecting th_tbl
    1117     hal_fence();
    1118     spinlock_unlock( &process->th_lock );
     1168    // release lock protecting kth_tbl
     1169    rwlock_wr_release( &process->th_lock );
    11191170
    11201171    return (count == 1);
     
    11411192
    11421193    // get parent process PID and extended pointer on .elf file
    1143     parent_pid = hal_remote_lw (XPTR( parent_process_cxy , &parent_process_ptr->pid));
    1144     vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
    1145 
    1146     // check parent process is the reference process
    1147     ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
    1148 
    1149     assert( (parent_process_xp == ref_xp ) ,
    1150     "parent process must be the reference process\n" );
     1194    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
     1195    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
     1196
     1197    // get extended pointer on reference process
     1198    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
     1199
     1200// check parent process is the reference process
     1201assert( (parent_process_xp == ref_xp ) ,
     1202"parent process must be the reference process\n" );
    11511203
    11521204#if DEBUG_PROCESS_MAKE_FORK
     
    11951247#endif
    11961248
    1197     // give TXT ownership to child process
    1198     process_txt_set_ownership( XPTR( local_cxy , process ) );
    11991249
    12001250    // copy VMM from parent descriptor to child descriptor
     
    12181268#endif
    12191269
    1220     // parent process gives TXT ownership to child process if required
    1221     if( process_txt_is_owner(parent_process_xp) )
     1270    // if parent_process is INIT, or if parent_process is the TXT owner,
     1271    // the child_process becomes the owner of its TXT terminal
     1272    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
    12221273    {
    12231274        process_txt_set_ownership( XPTR( local_cxy , process ) );
     
    12261277cycle = (uint32_t)hal_get_cycles();
    12271278if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1228 printk("\n[DBG] %s : thread %x in process %x gives TXT from parent %x to child %x / cycle %d\n",
    1229 __FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    1230 parent_pid, new_pid, cycle );
     1279printk("\n[DBG] %s : thread %x in process %x / child takes TXT ownership / cycle %d\n",
     1280__FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle );
    12311281#endif
    12321282
     
    12491299    }
    12501300
    1251     // check main thread LTID
    1252     assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
    1253     "main thread must have LTID == 0\n" );
    1254 
    1255 //#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1256 #if DEBUG_PROCESS_MAKE_FORK
     1301// check main thread LTID
     1302assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
     1303"main thread must have LTID == 0\n" );
     1304
     1305#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    12571306cycle = (uint32_t)hal_get_cycles();
    12581307if( DEBUG_PROCESS_MAKE_FORK < cycle )
     
    12891338
    12901339    // register process in parent children list
    1291     remote_spinlock_lock( children_lock_xp );
     1340    remote_queuelock_acquire( children_lock_xp );
    12921341        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
    12931342        hal_remote_atomic_add( children_nr_xp , 1 );
    1294     remote_spinlock_unlock( children_lock_xp );
     1343    remote_queuelock_release( children_lock_xp );
    12951344
    12961345    // return success
     
    13411390    // open the file identified by <path>
    13421391    file_xp = XPTR_NULL;
    1343     file_id = -1;
     1392    file_id = 0xFFFFFFFF;
    13441393        error   = vfs_open( process,
    13451394                            path,
     
    14421491uint32_t cycle = (uint32_t)hal_get_cycles();
    14431492if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    1444 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1493printk("\n[DBG] %s : enter / cluster %x / cycle %d\n",
     1494__FUNCTION__, local_cxy, cycle );
    14451495#endif
    14461496
     
    14521502    process->term_state = 0;
    14531503
    1454     // reset th_tbl[] array as empty
     1504    // reset th_tbl[] array and associated fields
    14551505    uint32_t i;
    1456     for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
     1506    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
    14571507        {
    14581508        process->th_tbl[i] = NULL;
    14591509    }
    14601510    process->th_nr  = 0;
    1461     spinlock_init( &process->th_lock );
     1511    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
     1512
    14621513
    14631514    // reset children list as empty
    14641515    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    1465     remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
    14661516    process->children_nr = 0;
     1517    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
     1518                           LOCK_PROCESS_CHILDREN );
    14671519
    14681520        hal_fence();
     
    14711523cycle = (uint32_t)hal_get_cycles();
    14721524if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    1473 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1525printk("\n[DBG] %s : exit / cluster %x / cycle %d\n",
     1526__FUNCTION__, local_cxy, cycle );
    14741527#endif
    14751528
    14761529}  // end process_zero_init()
    14771530
    1478 //////////////////////////
     1531////////////////////////////////
    14791532void process_init_create( void )
    14801533{
     
    14981551        process = process_alloc();
    14991552       
    1500     assert( (process != NULL),
    1501     "no memory for process descriptor in cluster %x\n", local_cxy  );
     1553// check memory allocator
     1554assert( (process != NULL),
     1555"no memory for process descriptor in cluster %x\n", local_cxy  );
    15021556
    15031557    // get PID from local cluster
    15041558    error = cluster_pid_alloc( process , &pid );
    15051559
    1506     assert( (error == 0),
    1507     "cannot allocate PID in cluster %x\n", local_cxy );
    1508 
    1509     assert( (pid == 1) ,
    1510     "process INIT must be first process in cluster 0\n" );
     1560// check PID allocator
     1561assert( (error == 0),
     1562"cannot allocate PID in cluster %x\n", local_cxy );
     1563
     1564// check PID value
     1565assert( (pid == 1) ,
     1566"process INIT must be first process in cluster 0\n" );
    15111567
    15121568    // initialize process descriptor / parent is local process_zero
     
    15141570                            pid,
    15151571                            XPTR( local_cxy , &process_zero ) ); 
     1572
     1573#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1574if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1575printk("\n[DBG] %s : thread %x in process %x initialized process descriptor\n",
     1576__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1577#endif
    15161578
    15171579    // open the file identified by CONFIG_PROCESS_INIT_PATH
     
    15251587                            &file_id );
    15261588
    1527         assert( (error == 0),
    1528     "failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
    1529 
    1530     // register "code" and "data" vsegs as well as entry-point
     1589assert( (error == 0),
     1590"failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1591
     1592#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1593if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1594printk("\n[DBG] %s : thread %x in process %x open .elf file decriptor\n",
     1595__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1596#endif
     1597
     1598   // register "code" and "data" vsegs as well as entry-point
    15311599    // in process VMM, using information contained in the elf file.
    15321600        error = elf_load_process( file_xp , process );
    15331601
    1534         assert( (error == 0),
    1535     "cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1602assert( (error == 0),
     1603"cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1604
     1605#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1606if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1607printk("\n[DBG] %s : thread %x in process %x registered code/data vsegs in VMM\n",
     1608__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1609#endif
    15361610
    15371611    // get extended pointers on process_zero children_root, children_lock
     
    15391613    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
    15401614
     1615    // take lock protecting kernel process children list
     1616    remote_queuelock_acquire( children_lock_xp );
     1617
    15411618    // register process INIT in parent local process_zero
    1542     remote_spinlock_lock( children_lock_xp );
    15431619        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
    15441620        hal_atomic_add( &process_zero.children_nr , 1 );
    1545     remote_spinlock_unlock( children_lock_xp );
     1621
     1622    // release lock protecting kernel process children list
     1623    remote_queuelock_release( children_lock_xp );
     1624
     1625#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1626if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1627printk("\n[DBG] %s : thread %x in process %x registered init process in parent\n",
     1628__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1629#endif
    15461630
    15471631    // select a core in local cluster to execute the main thread
     
    15601644                                &thread );
    15611645
    1562         assert( (error == 0),
    1563     "cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
    1564 
    1565     assert( (thread->trdid == 0),
    1566     "main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1646assert( (error == 0),
     1647"cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1648
     1649assert( (thread->trdid == 0),
     1650"main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1651
     1652#if(DEBUG_PROCESS_INIT_CREATE & 1)
     1653if( DEBUG_PROCESS_INIT_CREATE < cycle )
     1654printk("\n[DBG] %s : thread %x in process %x created main thread\n",
     1655__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     1656#endif
    15671657
    15681658    // activate thread
     
    16181708
    16191709    // get PID and state
    1620     pid   = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    1621     state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) );
     1710    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     1711    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
    16221712
    16231713    // get PPID
    1624     parent_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
     1714    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    16251715    parent_cxy = GET_CXY( parent_xp );
    16261716    parent_ptr = GET_PTR( parent_xp );
    1627     ppid       = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
     1717    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    16281718
    16291719    // get number of threads
    1630     th_nr      = hal_remote_lw( XPTR( process_cxy , &process_ptr->th_nr ) );
     1720    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
    16311721
    16321722    // get pointers on owner process descriptor
    1633     owner_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->owner_xp ) );
     1723    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
    16341724    owner_cxy = GET_CXY( owner_xp );
    16351725    owner_ptr = GET_PTR( owner_xp );
    16361726
    16371727    // get extended pointer on TXT_RX file descriptor attached to process
    1638     txt_file_xp = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
     1728    txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
    16391729
    16401730    assert( (txt_file_xp != XPTR_NULL) ,
     
    16501740                       XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
    16511741   
    1652     txt_owner_xp = (xptr_t)hal_remote_lwd( XPTR( txt_chdev_cxy,
     1742    txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy,
    16531743                                                 &txt_chdev_ptr->ext.txt.owner_xp ) );
    16541744   
    16551745    // get process .elf name
    1656     elf_file_xp   = hal_remote_lwd( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
     1746    elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
    16571747    elf_file_cxy  = GET_CXY( elf_file_xp );
    16581748    elf_file_ptr  = (vfs_file_t *)GET_PTR( elf_file_xp );
     
    17181808    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    17191809
    1720     // check process is in owner cluster
    1721     assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
    1722     "process descriptor not in owner cluster" );
    1723 
    1724     // check terminal index
    1725     assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
    1726     "illegal TXT terminal index" );
     1810// check process is in owner cluster
     1811assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
     1812"process descriptor not in owner cluster" );
     1813
     1814// check terminal index
     1815assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
     1816"illegal TXT terminal index" );
    17271817
    17281818    // get pointers on TXT_RX[txt_id] chdev
     
    17351825    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
    17361826
     1827    // get lock protecting list of processes attached to TXT
     1828    remote_busylock_acquire( lock_xp );
     1829
    17371830    // insert process in attached process list
    1738     remote_spinlock_lock( lock_xp );
    17391831    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
    1740     remote_spinlock_unlock( lock_xp );
     1832
     1833    // release lock protecting list of processes attached to TXT
     1834    remote_busylock_release( lock_xp );
    17411835
    17421836#if DEBUG_PROCESS_TXT
     
    17651859    process_cxy = GET_CXY( process_xp );
    17661860    process_ptr = GET_PTR( process_xp );
    1767 
    1768     // check process descriptor in owner cluster
    1769     process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    1770     assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
    1771     "process descriptor not in owner cluster" );
     1861    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     1862
     1863// check process descriptor in owner cluster
     1864assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
     1865"process descriptor not in owner cluster" );
    17721866
    17731867    // release TXT ownership (does nothing if not TXT owner)
     
    17751869
    17761870    // get extended pointer on process stdin file
    1777     file_xp = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1871    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    17781872
    17791873    // get pointers on TXT_RX chdev
     
    17851879    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
    17861880
     1881    // get lock protecting list of processes attached to TXT
     1882    remote_busylock_acquire( lock_xp );
     1883
    17871884    // unlink process from attached process list
    1788     remote_spinlock_lock( lock_xp );
    17891885    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
    1790     remote_spinlock_unlock( lock_xp );
     1886
     1887    // release lock protecting list of processes attached to TXT
     1888    remote_busylock_release( lock_xp );
    17911889
    17921890#if DEBUG_PROCESS_TXT
    17931891uint32_t cycle  = (uint32_t)hal_get_cycles();
    1794 uint32_t txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
     1892uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
    17951893if( DEBUG_PROCESS_TXT < cycle )
    17961894printk("\n[DBG] %s : thread %x in process %x detached process %x from TXT %d / cycle %d\n",
     
    18151913    process_cxy = GET_CXY( process_xp );
    18161914    process_ptr = GET_PTR( process_xp );
    1817     process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1915    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    18181916
    18191917    // check owner cluster
     
    18221920
    18231921    // get extended pointer on stdin pseudo file
    1824     file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1922    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    18251923
    18261924    // get pointers on TXT chdev
     
    18301928
    18311929    // set owner field in TXT chdev
    1832     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
     1930    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
    18331931
    18341932#if DEBUG_PROCESS_TXT
    18351933uint32_t cycle  = (uint32_t)hal_get_cycles();
    1836 uint32_t txt_id = hal_remote_lw( XPTR( txt_cxy , &txt_ptr->channel ) );
     1934uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
    18371935if( DEBUG_PROCESS_TXT < cycle )
    18381936printk("\n[DBG] %s : thread %x in process %x give TXT %d to process %x / cycle %d\n",
     
    18681966    process_cxy = GET_CXY( process_xp );
    18691967    process_ptr = GET_PTR( process_xp );
    1870     process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
     1968    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    18711969
    18721970    // check owner cluster
     
    18751973
    18761974    // get extended pointer on stdin pseudo file
    1877     file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     1975    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    18781976
    18791977    // get pointers on TXT chdev
     
    18831981
    18841982    // get extended pointer on TXT_RX owner and TXT channel
    1885     owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    1886     txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
     1983    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
     1984    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
    18871985
    18881986    // transfer ownership only if process is the TXT owner
     
    18941992
    18951993        // get lock
    1896         remote_spinlock_lock( lock_xp );
     1994        remote_busylock_acquire( lock_xp );
    18971995
    18981996        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
     
    19082006                {
    19092007                    // release lock
    1910                     remote_spinlock_unlock( lock_xp );
     2008                    remote_busylock_release( lock_xp );
    19112009
    19122010                    // set owner field in TXT chdev
    1913                     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     2011                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    19142012
    19152013#if DEBUG_PROCESS_TXT
    19162014cycle   = (uint32_t)hal_get_cycles();
    1917 uint32_t ksh_pid = hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) );
     2015uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
    19182016if( DEBUG_PROCESS_TXT < cycle )
    19192017printk("\n[DBG] %s : thread %x in process %x release TXT %d to KSH %x / cycle %d\n",
     
    19262024 
    19272025            // release lock
    1928             remote_spinlock_unlock( lock_xp );
     2026            remote_busylock_release( lock_xp );
    19292027
    19302028            // PANIC if KSH not found
     
    19452043                {
    19462044                    // release lock
    1947                     remote_spinlock_unlock( lock_xp );
     2045                    remote_busylock_release( lock_xp );
    19482046
    19492047                    // set owner field in TXT chdev
    1950                     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     2048                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    19512049
    19522050#if DEBUG_PROCESS_TXT
    19532051cycle   = (uint32_t)hal_get_cycles();
    1954 uint32_t new_pid = hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) );
     2052uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
    19552053if( DEBUG_PROCESS_TXT < cycle )
    19562054printk("\n[DBG] %s : thread %x in process %x release TXT %d to process %x / cycle %d\n",
     
    19632061
    19642062            // release lock
    1965             remote_spinlock_unlock( lock_xp );
     2063            remote_busylock_release( lock_xp );
    19662064
    19672065            // no more owner for TXT if no other process found
    1968             hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
     2066            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
    19692067
    19702068#if DEBUG_PROCESS_TXT
     
    19932091
    19942092
    1995 //////////////////////////////////////////////////
    1996 uint32_t process_txt_is_owner( xptr_t process_xp )
     2093////////////////////////////////////////////////
     2094bool_t process_txt_is_owner( xptr_t process_xp )
    19972095{
    19982096    // get local pointer and cluster of process in owner cluster
     
    20002098    process_t * process_ptr = GET_PTR( process_xp );
    20012099
    2002     // check owner cluster
    2003     pid_t process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    2004     assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    2005     "process descriptor not in owner cluster\n" );
     2100// check calling thread execute in target process owner cluster
     2101pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     2102assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
     2103"process descriptor not in owner cluster\n" );
    20062104
    20072105    // get extended pointer on stdin pseudo file
    2008     xptr_t file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
     2106    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    20092107
    20102108    // get pointers on TXT chdev
     
    20142112
    20152113    // get extended pointer on TXT_RX owner process
    2016     xptr_t owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
     2114    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    20172115
    20182116    return (process_xp == owner_xp);
     
    20272125    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
    20282126
    2029     return (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
     2127    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
    20302128
    20312129}  // end process_txt_get_owner()
     
    20452143    xptr_t      txt0_xp;
    20462144    xptr_t      txt0_lock_xp;
    2047     reg_t       txt0_save_sr;    // save SR to take TXT0 lock in busy mode
    20482145   
    20492146    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
     
    20682165
    20692166    // get lock on attached process list
    2070     remote_spinlock_lock( lock_xp );
     2167    remote_busylock_acquire( lock_xp );
    20712168
    20722169    // get TXT0 lock in busy waiting mode
    2073     remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
     2170    remote_busylock_acquire( txt0_lock_xp );
    20742171
    20752172    // display header
     
    20852182
    20862183    // release TXT0 lock in busy waiting mode
    2087     remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
     2184    remote_busylock_release( txt0_lock_xp );
    20882185
    20892186    // release lock on attached process list
    2090     remote_spinlock_unlock( lock_xp );
     2187    remote_busylock_release( lock_xp );
    20912188
    20922189}  // end process_txt_display
  • trunk/kernel/kern/process.h

    r527 r564  
    11/*
    2  * process.h - process related management functions
     2 * process.h - process related functions definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     
    3333#include <xlist.h>
    3434#include <bits.h>
    35 #include <spinlock.h>
     35#include <busylock.h>
     36#include <queuelock.h>
     37#include <remote_queuelock.h>
     38#include <remote_rwlock.h>
    3639#include <hal_atomic.h>
    3740#include <vmm.h>
     
    6972 * A free entry in this array contains the XPTR_NULL value.
    7073 * The array size is defined by a the CONFIG_PROCESS_FILE_MAX_NR parameter.
    71  * All modifications (open/close) in this structure must be done by the reference cluster,
    72  * and reported in process copies.
     74 *
     75 * NOTE: - Only the fd_array[] in the reference process contains a complete list of open
     76 *         files, and is protected by the lock against concurrent access.
     77 *       - the fd_array[] in a process copy is simply a cache containing a subset of the
      78 *         open files to speed the fdid to xptr translation, but the "lock" and "current"
     79 *         fields should not be used.
     80 *       - all modifications made by the process_fd_remove() are done in reference cluster
     81 *         and reported in all process_copies.
    7382 ********************************************************************************************/
    7483
    7584typedef struct fd_array_s
    7685{
    77         remote_spinlock_t lock;                               /*! lock protecting fd_array      */
    78     uint32_t          current;                            /*! current number of open files  */
    79         xptr_t            array[CONFIG_PROCESS_FILE_MAX_NR];  /*! xptr on open file descriptors */
     86        remote_queuelock_t lock;                              /*! lock protecting fd_array      */
     87    uint32_t           current;                           /*! current number of open files  */
     88        xptr_t             array[CONFIG_PROCESS_FILE_MAX_NR]; /*! open file descriptors        */
    8089}
    8190fd_array_t;
     
    100109 *    complete in the reference process cluster, other copies are read-only caches.
    101110 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated
    102  *    <sync_lock>, that are dynamically allocated, are only defined in the reference cluster.
     111 *    <sync_lock>, dynamically allocated, are only defined in the reference cluster.
    103112 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only
    104113 *    defined in the reference cluster, and are undefined in other clusters.
    105  * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
    106  *    are defined in all process descriptors copies.
     114 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <u_th_lock> or <k_th_lock> fields
 115 *    are specific in each cluster, and are defined in all process descriptors copies.
    107116 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.
    108  *    The term_state format is defined in the shared_syscalls.h file.
     117 *    (The term_state format is defined in the shared_syscalls.h file ).
    109118 ********************************************************************************************/
    110119
    111120typedef struct process_s
    112121{
    113         vmm_t             vmm;              /*! embedded virtual memory manager                 */
    114 
    115         fd_array_t        fd_array;         /*! embedded open file descriptors array            */
    116 
    117         xptr_t            vfs_root_xp;      /*! extended pointer on current VFS root inode      */
    118         xptr_t            vfs_bin_xp;       /*! extended pointer on .elf file descriptor        */
    119         pid_t             pid;              /*! process identifier                              */
    120     xptr_t            ref_xp;           /*! extended pointer on reference process           */
    121     xptr_t            owner_xp;         /*! extended pointer on owner process               */
    122     xptr_t            parent_xp;        /*! extended pointer on parent process              */
    123 
    124         xptr_t            vfs_cwd_xp;       /*! extended pointer on current working dir inode   */
    125         remote_rwlock_t   cwd_lock;         /*! lock protecting working directory changes       */
    126 
    127         xlist_entry_t     children_root;    /*! root of the children process xlist              */
    128     remote_spinlock_t children_lock;    /*! lock protecting children process xlist          */
    129     uint32_t          children_nr;      /*! number of children processes                    */
    130 
    131         xlist_entry_t     children_list;    /*! member of list of children of same parent       */
    132     xlist_entry_t     local_list;       /*! member of list of process in same cluster       */
    133     xlist_entry_t     copies_list;      /*! member of list of copies of same process        */
    134     xlist_entry_t     txt_list;         /*! member of list of processes sharing same TXT    */
    135 
    136         spinlock_t        th_lock;          /*! lock protecting th_tbl[] concurrent access      */
    137         uint32_t          th_nr;            /*! number of threads in this cluster               */
    138 
    139         struct thread_s * th_tbl[CONFIG_THREAD_MAX_PER_CLUSTER]; /*! pointers on local threads  */
    140 
    141     xlist_entry_t     sem_root;         /*! root of the process semaphore list              */
    142     xlist_entry_t     mutex_root;       /*! root of the process mutex list                  */
    143     xlist_entry_t     barrier_root;     /*! root of the process barrier list                */
    144     xlist_entry_t     condvar_root;     /*! root of the process condvar list                */
    145     remote_spinlock_t sync_lock;        /*! lock protecting sem,mutex,barrier,condvar lists */
    146 
    147     uint32_t          term_state;       /*! termination status (flags & exit status)        */
     122        vmm_t              vmm;              /*! embedded virtual memory manager                 */
     123
     124        fd_array_t         fd_array;         /*! embedded open file descriptors array            */
     125
     126        xptr_t             vfs_root_xp;      /*! extended pointer on current VFS root inode      */
     127        xptr_t             vfs_bin_xp;       /*! extended pointer on .elf file descriptor        */
     128        pid_t              pid;              /*! process identifier                              */
     129    xptr_t             ref_xp;           /*! extended pointer on reference process           */
     130    xptr_t             owner_xp;         /*! extended pointer on owner process               */
     131    xptr_t             parent_xp;        /*! extended pointer on parent process              */
     132
     133        xptr_t             vfs_cwd_xp;       /*! extended pointer on current working dir inode   */
     134        remote_rwlock_t    cwd_lock;         /*! lock protecting working directory changes       */
     135
     136        xlist_entry_t      children_root;    /*! root of the children process xlist              */
     137    remote_queuelock_t children_lock;    /*! lock protecting children process xlist          */
     138    uint32_t           children_nr;      /*! number of children processes                    */
     139
     140        xlist_entry_t      children_list;    /*! member of list of children of same parent       */
     141    xlist_entry_t      local_list;       /*! member of list of process in same cluster       */
     142    xlist_entry_t      copies_list;      /*! member of list of copies of same process        */
     143    xlist_entry_t      txt_list;         /*! member of list of processes sharing same TXT    */
     144
     145        struct thread_s  * th_tbl[CONFIG_THREADS_MAX_PER_CLUSTER];       /*! local threads       */
     146        uint32_t           th_nr;            /*! number of threads in this cluster               */
 147    rwlock_t           th_lock;          /*! lock protecting th_tbl[]                        */
     148
 149    xlist_entry_t      sem_root;         /*! root of the user defined semaphore list         */
     150    xlist_entry_t      mutex_root;       /*! root of the user defined mutex list             */
     151    xlist_entry_t      barrier_root;     /*! root of the user defined barrier list           */
     152    xlist_entry_t      condvar_root;     /*! root of the user defined condvar list           */
     153    remote_queuelock_t sync_lock;        /*! lock protecting user defined synchro lists      */
     154
     155    uint32_t           term_state;       /*! termination status (flags & exit status)        */
    148156}
    149157process_t;
     
    210218
    211219/*********************************************************************************************
    212  * This function initializes a local, reference, user process descriptor from another process
    213  * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments must
    214  * be previously allocated by the caller. This function can be called by two functions:
     220 * This function initializes a reference, user process descriptor from another process
     221 * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments
     222 * are previously allocated by the caller. This function can be called by two functions:
    215223 * 1) process_init_create() : process is the INIT process; parent is process-zero.
    216224 * 2) process_make_fork() : the parent process descriptor is generally remote.
     
    411419
    412420/*********************************************************************************************
    413  * This function uses as many remote accesses as required, to reset an entry in fd_array[],
    414  * in all clusters containing a copy. The entry is identified by the <fdid> argument.
    415  * This function must be executed by a thread running reference cluster, that contains
    416  * the complete list of process descriptors copies.
    417  *********************************************************************************************
    418  * @ process  : pointer on the local process descriptor.
    419  * @ fdid     : file descriptor index in the fd_array.
    420  ********************************************************************************************/
    421 void process_fd_remove( process_t * process,
    422                         uint32_t    fdid );
    423 
    424 /*********************************************************************************************
    425  * This function returns an extended pointer on a file descriptor identified by its index
    426  * in fd_array. It can be called by any thread running in any cluster.
    427  * It accesses first the local process descriptor. In case of local miss, it uses remote
    428  * access to access the reference process descriptor.
    429  * It updates the local fd_array when the file descriptor exists in reference cluster.
    430  * The file descriptor refcount is not incremented.
    431  *********************************************************************************************
    432  * @ process  : pointer on the local process descriptor.
    433  * @ fdid     : file descriptor index in the fd_array.
    434  * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found.
    435  ********************************************************************************************/
    436 xptr_t process_fd_get_xptr( process_t * process,
    437                             uint32_t    fdid );
    438 
    439 /*********************************************************************************************
    440  * This function checks the number of open files for a given process.
    441  * It can be called by any thread in any cluster, because it uses portable remote access
    442  * primitives to access the reference process descriptor.
    443  *********************************************************************************************
    444  * @ returns true if file descriptor array full.
    445  ********************************************************************************************/
    446 bool_t process_fd_array_full( void );
    447 
    448 /*********************************************************************************************
    449421 * This function allocates a free slot in the fd_array of the reference process,
    450422 * register the <file_xp> argument in the allocated slot, and return the slot index.
    451423 * It can be called by any thread in any cluster, because it uses portable remote access
    452424 * primitives to access the reference process descriptor.
     425 * It takes the lock protecting the reference fd_array against concurrent accesses.
    453426 *********************************************************************************************
    454427 * @ file_xp  : extended pointer on the file descriptor to be registered.
     
    459432                             xptr_t      file_xp,
    460433                             uint32_t  * fdid );
     434
     435/*********************************************************************************************
     436 * This function uses as many remote accesses as required, to reset an entry in fd_array[],
     437 * in all clusters containing a copy. The entry is identified by the <fdid> argument.
     438 * This function must be executed by a thread running in reference cluster, that contains
     439 * the complete list of process descriptors copies.
     440 * It takes the lock protecting the reference fd_array against concurrent accesses.
     441 * TODO this function is not implemented yet.
     442 *********************************************************************************************
     443 * @ process  : pointer on the local process descriptor.
     444 * @ fdid     : file descriptor index in the fd_array.
     445 ********************************************************************************************/
     446void process_fd_remove( process_t * process,
     447                        uint32_t    fdid );
     448
     449/*********************************************************************************************
     450 * This function returns an extended pointer on a file descriptor identified by its index
     451 * in fd_array. It can be called by any thread running in any cluster.
     452 * It accesses first the local process descriptor. In case of local miss, it takes
     453 * the lock protecting the reference fd_array, and access the reference process descriptor.
     454 * It updates the local fd_array when the file descriptor exists in reference cluster.
     455 * It takes the lock protecting the reference fd_array against concurrent accesses.
     456 * The file descriptor refcount is not incremented.
     457 *********************************************************************************************
     458 * @ process  : pointer on the local process descriptor.
     459 * @ fdid     : file descriptor index in the fd_array.
     460 * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found.
     461 ********************************************************************************************/
     462xptr_t process_fd_get_xptr( process_t * process,
     463                            uint32_t    fdid );
    461464
    462465/*********************************************************************************************
     
    465468 * <dst_xp> fd_array, embedded in another process descriptor.
    466469 * The calling thread can be running in any cluster.
    467  * It takes the remote lock protecting the <src_xp> fd_array during the copy.
     470 * It takes the lock protecting the reference fd_array against concurrent accesses.
    468471 * For each involved file descriptor, the refcount is incremented.
    469472 *********************************************************************************************
     
    474477                             xptr_t src_xp );
    475478
     479/*********************************************************************************************
     480 * This function checks the current number of open files for a given process.
     481 * It can be called by any thread in any cluster, because it uses portable remote access
     482 * primitives to access the reference process descriptor.
     483 * It does not take the lock protecting the reference fd_array.
     484 *********************************************************************************************
     485 * @ returns true if file descriptor array full.
     486 ********************************************************************************************/
     487bool_t process_fd_array_full( void );
     488
    476489
    477490
     
    479492
    480493/*********************************************************************************************
    481  * This function registers a new thread in the local process descriptor.
    482  * It checks that there is an available slot in the local th_tbl[] array,
    483  * allocates a new LTID, and registers the new thread in the th_tbl[].
    484  * It takes the lock protecting exclusive access to the th_tbl[].
     494 * This function atomically registers a new thread in the local process descriptor.
     495 * It checks that there is an available slot in the local th_tbl[] array, and allocates
     496 * a new LTID using the relevant lock depending on the kernel/user type.
    485497 *********************************************************************************************
    486498 * @ process  : pointer on the local process descriptor.
     
    494506
    495507/*********************************************************************************************
    496  * This function removes a thread registration from the local process descriptor.
    497  * It takes the lock protecting exclusive access to the th_tbl[].
     508 * This function atomically removes a thread registration from the local process descriptor
     509 * th_tbl[] array, using the relevant lock, depending on the kernel/user type.
    498510 *********************************************************************************************
    499511 * @ thread   : local pointer on thread to be removed.
     
    541553
    542554/*********************************************************************************************
    543  * This function gives the TXT ownership to a process identified by the <process_xp> argument.
     555 * This function gives a process identified by the <process_xp> argument the exclusive
     556 * ownership of its attached TXT_RX terminal (i.e. put the process in foreground).
    544557 * It can be called by a thread running in any cluster, but the <process_xp> must be the
    545558 * owner cluster process descriptor.
     
    568581 * process_xp must be the owner cluster process descriptor.
    569582 *********************************************************************************************
    570  * @ return a non-zero value if target process is TXT owner.
    571  ********************************************************************************************/
    572 uint32_t process_txt_is_owner( xptr_t process_xp );
     583 * @ returns true if target process is TXT owner.
     584 ********************************************************************************************/
     585bool_t process_txt_is_owner( xptr_t process_xp );
    573586
    574587/*********************************************************************************************
  • trunk/kernel/kern/rpc.c

    r503 r564  
    4343
    4444/////////////////////////////////////////////////////////////////////////////////////////
    45 //      array of function pointers  (must be consistent with enum in rpc.h)
     45// Array of function pointers and array of printable strings.
     46// These arrays must be kept consistent with enum in rpc.h file.
    4647/////////////////////////////////////////////////////////////////////////////////////////
    4748
     
    8283};
    8384
    84 //////////////////////////////////////////////
     85char * rpc_str[RPC_MAX_INDEX] =
     86{
     87    "PMEM_GET_PAGES",         // 0
     88    "PMEM_RELEASE_PAGES",     // 1
     89    "undefined",              // 2
     90    "PROCESS_MAKE_FORK",      // 3
     91    "undefined",              // 4
     92    "undefined",              // 5
     93    "THREAD_USER_CREATE",     // 6
     94    "THREAD_KERNEL_CREATE",   // 7
     95    "undefined",              // 8
     96    "PROCESS_SIGACTION",      // 9
     97
     98    "VFS_INODE_CREATE",       // 10
     99    "VFS_INODE_DESTROY",      // 11
     100    "VFS_DENTRY_CREATE",      // 12
     101    "VFS_DENTRY_DESTROY",     // 13
     102    "VFS_FILE_CREATE",        // 14
     103    "VFS_FILE_DESTROY",       // 15
     104    "VFS_INODE_LOAD",         // 16
     105    "VFS_MAPPER_LOAD_ALL",    // 17
     106    "FATFS_GET_CLUSTER",      // 18
     107    "undefined",              // 19
     108
     109    "GET_VSEG",               // 20
     110    "GET_PTE",                // 21
     111    "KCM_ALLOC",              // 22
     112    "KCM_FREE",               // 23
     113    "MAPPER_MOVE_BUFFER",     // 24
     114    "MAPPER_GET_PAGE",        // 25
     115    "VMM_CREATE_VSEG",        // 26
     116    "undefined",              // 27
     117    "VMM_SET_COW",            // 28
     118    "VMM_DISPLAY",            // 29
     119};
     120
     121//////////////////////////////////////////////////////////////////////////////////
    85122void __attribute__((noinline)) rpc_undefined( xptr_t xp __attribute__ ((unused)) )
    86123{
     
    105142    client_core_lid = this->core->lid;
    106143
     144// check calling thread can yield when it is not the idle thread
     145assert( (this->busylocks == 0) || (this->type == THREAD_IDLE),
     146"cannot yield : busylocks = %d\n", this->busylocks );
     147
    107148#if DEBUG_RPC_CLIENT_GENERIC
    108149uint32_t cycle = (uint32_t)hal_get_cycles();
    109150if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    110 printk("\n[DBG] %s : thread %x in process %x enter for rpc[%d] / cycle %d\n",
    111 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
     151printk("\n[DBG] %s : thread %x in process %x enter for rpc %s / server_cxy %x / cycle %d\n",
     152__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], server_cxy, cycle );
    112153#endif
    113154
    114155    // select a server_core : use client core index if possible / core 0 otherwise
    115     if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) )
     156    if( client_core_lid < hal_remote_l32( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) )
    116157    {
    117158        server_core_lid = client_core_lid;
     
    130171
    131172    // get local pointer on rpc_fifo in remote cluster,
    132     remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    133 
    134         // post RPC in remote fifo / deschedule and retry if fifo full
     173    remote_fifo_t * rpc_fifo    = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
     174    xptr_t          rpc_fifo_xp = XPTR( server_cxy , rpc_fifo );
     175
     176        // post RPC in remote fifo / deschedule without blocking if fifo full
    135177    do
    136178    {
    137         full = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), (uint64_t )desc_xp );
     179        full = remote_fifo_put_item( rpc_fifo_xp , (uint64_t )desc_xp );
     180
    138181            if ( full )
    139182        {
     
    151194#if DEBUG_RPC_CLIENT_GENERIC
    152195cycle = (uint32_t)hal_get_cycles();
     196uint32_t items = remote_fifo_items( rpc_fifo_xp );
    153197if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    154 printk("\n[DBG] %s : thread %x in process %x / rpc[%d] / rpc_ptr %x / cycle %d\n",
    155 __FUNCTION__, this->trdid, this->process->pid, rpc->index, rpc, cycle );
     198printk("\n[DBG] %s : thread %x in process %x / rpc %s / items %d / cycle %d\n",
     199__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], items, cycle );
    156200#endif
    157201       
     
    159203   dev_pic_send_ipi( server_cxy , server_core_lid );
    160204
    161     // wait RPC completion before returning if blocking RPC
    162     // - busy waiting policy during kernel_init, or if threads cannot yield
    163     // - block and deschedule in all other cases
     205    // wait RPC completion before returning if blocking RPC :
 206    // - descheduling without blocking if thread idle (in kernel init)
     207    // - block and deschedule policy for any other thread
    164208    if ( rpc->blocking )
    165209    {
    166         if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting
     210        if( this->type == THREAD_IDLE )  // deschedule without blocking policy
    167211        {
    168 
     212 
    169213#if DEBUG_RPC_CLIENT_GENERIC
    170214cycle = (uint32_t)hal_get_cycles();
    171215if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    172 printk("\n[DBG] %s : thread %x in process %x busy waiting for rpc[%d] / cycle %d\n",
    173 __FUNCTION__, this->trdid, this->process->pid, rpc->index , cycle );
    174 #endif
    175 
    176             while( rpc->responses ) hal_fixed_delay( 100 );
     216printk("\n[DBG] %s : thread %x in process %x enter waiting loop for rpc %s / cycle %d\n",
     217__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
     218#endif
     219
     220             while( rpc->responses ) sched_yield( "busy waiting on RPC");
    177221   
    178222#if DEBUG_RPC_CLIENT_GENERIC
    179223cycle = (uint32_t)hal_get_cycles();
    180224if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    181 printk("\n[DBG] %s : thread %x in process %x resumes for rpc[%d] / cycle %d\n",
    182 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
    183 #endif
    184         }
    185         else                                                         // block & deschedule
     225printk("\n[DBG] %s : thread %x in process %x received response for rpc %s / cycle %d\n",
     226__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
     227#endif
     228 
     229        }
     230        else                            // block and deschedule policy
    186231        {
    187232
     
    189234cycle = (uint32_t)hal_get_cycles();
    190235if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    191 printk("\n[DBG] %s : thread %x in process %x blocks & deschedules for rpc[%d] / cycle %d\n",
    192 __FUNCTION__, this->trdid, this->process->pid, rpc->index , cycle );
    193 #endif
    194             thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
    195             sched_yield("blocked on RPC");
     236printk("\n[DBG] %s : thread %x in process %x blocks & deschedules for rpc %s / cycle %d\n",
     237__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
     238#endif
     239
     240        // block client thread
     241        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
     242
     243        // deschedule
     244        sched_yield("blocked on RPC");
    196245
    197246#if DEBUG_RPC_CLIENT_GENERIC
    198247cycle = (uint32_t)hal_get_cycles();
    199248if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    200 printk("\n[DBG] %s : thread %x in process %x resumes for rpc[%d] / cycle %d\n",
    201 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
     249printk("\n[DBG] %s : thread %x in process %x resumes for rpc %s / cycle %d\n",
     250__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
    202251#endif
    203252        }
    204253
    205         // check response available
    206         assert( (rpc->responses == 0) , "illegal RPC response\n" );
     254// response must be available for a blocking RPC
     255assert( (rpc->responses == 0) , "illegal response for RPC %s\n", rpc_str[rpc->index] );
     256
    207257    }
    208     else  // non blocking RPC
     258    else       // non blocking RPC
    209259    {
    210260
     
    212262cycle = (uint32_t)hal_get_cycles();
    213263if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    214 printk("\n[DBG] %s : thread %x in process %x returns for non blocking rpc[%d] / cycle %d\n",
    215 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle );
     264printk("\n[DBG] %s : thread %x in process %x returns for non blocking rpc %s / cycle %d\n",
     265__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle );
    216266#endif
    217267
     
    224274/***************************************************************************************/
    225275
    226 ////////////////
    227 void rpc_check( void )
    228 {
    229     error_t         error;
    230     thread_t      * thread; 
    231     uint32_t        sr_save;
    232 
    233 #if DEBUG_RPC_SERVER_GENERIC
    234 uint32_t cycle;
    235 #endif
    236 
    237     bool_t          found    = false;
    238         thread_t      * this     = CURRENT_THREAD;
    239     core_t        * core     = this->core;
    240     scheduler_t   * sched    = &core->scheduler;
    241         remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid];
    242 
    243     // interrupted thread not preemptable during RPC chek
    244         hal_disable_irq( &sr_save );
    245 
    246     // activate (or create) RPC thread if RPC FIFO not empty and no acive RPC thread 
    247         if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    248     {
    249 
    250 #if DEBUG_RPC_SERVER_GENERIC
    251 cycle = (uint32_t)hal_get_cycles();
    252 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    253 printk("\n[DBG] %s : RPC FIFO non empty for core[%x,%d] / cycle %d\n",
    254 __FUNCTION__, local_cxy, core->lid, cycle );
    255 #endif
    256 
    257         // search one IDLE RPC thread associated to the selected core   
    258         list_entry_t * iter;
    259         LIST_FOREACH( &sched->k_root , iter )
    260         {
    261             thread = LIST_ELEMENT( iter , thread_t , sched_list );
    262             if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    263             {
    264                 // unblock found RPC thread
    265                 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
    266 
    267                 // exit loop
    268                 found = true;
    269                 break;
    270             }
    271         }
    272 
    273         // create new RPC thread for the selected core if not found   
    274         if( found == false )                   
    275         {
    276             error = thread_kernel_create( &thread,
    277                                           THREAD_RPC,
    278                                                       &rpc_thread_func,
    279                                           NULL,
    280                                                       core->lid );
    281                  
    282             assert( (error == 0),
    283             "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    284 
    285             // unblock created RPC thread
    286             thread->blocked = 0;
    287 
    288             // update RRPC threads counter 
    289             hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 );
    290 
    291 #if DEBUG_RPC_SERVER_GENERIC
    292 cycle = (uint32_t)hal_get_cycles();
    293 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    294 printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
    295 __FUNCTION__, thread, local_cxy, core->lid, cycle );
    296 #endif
    297         }
    298     }
    299 
    300 #if DEBUG_RPC_SERVER_GENERIC
    301 cycle = (uint32_t)hal_get_cycles();
    302 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    303 printk("\n[DBG] %s : interrupted thread %x deschedules on core[%x,%d] / cycle %d\n",
    304 __FUNCTION__, this, local_cxy, core->lid, cycle );
    305 #endif
    306 
    307     // interrupted thread always deschedule         
    308         sched_yield("IPI received");
    309 
    310 #if DEBUG_RPC_SERVER_GENERIC
    311 cycle = (uint32_t)hal_get_cycles();
    312 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    313 printk("\n[DBG] %s : interrupted thread %x resumes on core[%x,%d] / cycle %d\n",
    314 __FUNCTION__, this, local_cxy, core->lid, cycle );
    315 #endif
    316 
    317     // interrupted thread restore IRQs after resume
    318         hal_restore_irq( sr_save );
    319 
    320 } // end rpc_check()
    321 
    322 
    323 //////////////////////
     276////////////////////////////
    324277void rpc_thread_func( void )
    325278{
     
    345298        rpc_fifo        = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    346299
    347     // two embedded loops:
    348     // - external loop : "infinite" RPC thread
    349     // - internal loop : handle one RPC request per iteration
    350  
    351         while(1)  // infinite loop
     300    // "infinite" RPC thread loop
     301        while(1)
    352302        {
    353303        // try to take RPC_FIFO ownership
    354         if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )
     304        if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 
    355305        {
    356306
     
    358308uint32_t cycle = (uint32_t)hal_get_cycles();
    359309if( DEBUG_RPC_SERVER_GENERIC < cycle )
    360 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
    361 __FUNCTION__, server_ptr, local_cxy, cycle );
    362 #endif
    363                 while( 1 )  //  one RPC request per iteration
     310printk("\n[DBG] %s : RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n",
     311__FUNCTION__, server_ptr->trdid, server_core_lid, cycle );
     312#endif
     313                // try to consume one RPC request 
     314                empty = remote_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
     315
     316            // release RPC_FIFO ownership
     317            rpc_fifo->owner = 0;
     318
     319            // handle RPC request if success
     320                if ( empty == 0 )   
    364321            {
    365                     empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
    366 
    367                 // exit when FIFO empty or FIFO ownership lost (in case of descheduling)
    368                     if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) )
     322                // get client cluster and pointer on RPC descriptor
     323                desc_cxy = GET_CXY( desc_xp );
     324                desc_ptr = GET_PTR( desc_xp );
     325
     326                    index    = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) );
     327                blocking = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->blocking ) );
     328
     329#if DEBUG_RPC_SERVER_GENERIC
     330cycle = (uint32_t)hal_get_cycles();
     331uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) );
     332if( DEBUG_RPC_SERVER_GENERIC < cycle )
     333printk("\n[DBG] %s : RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n",
     334__FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle );
     335#endif
     336                // call the relevant server function
     337                rpc_server[index]( desc_xp );
     338
     339#if DEBUG_RPC_SERVER_GENERIC
     340cycle = (uint32_t)hal_get_cycles();
     341if( DEBUG_RPC_SERVER_GENERIC < cycle )
     342printk("\n[DBG] %s : RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n",
     343__FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle );
     344#endif
     345                // decrement response counter in RPC descriptor if blocking RPC
     346                if( blocking )
    369347                {
    370                     // get client cluster and pointer on RPC descriptor
    371                     desc_cxy = GET_CXY( desc_xp );
    372                     desc_ptr = GET_PTR( desc_xp );
    373 
    374                         index    = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
    375                     blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );
     348                    // decrement responses counter in RPC descriptor
     349                    hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
     350
     351                    // get client thread pointer and client core lid from RPC descriptor
     352                    client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     353                    client_core_lid = hal_remote_l32 ( XPTR( desc_cxy , &desc_ptr->lid ) );
     354
     355                    // unblock client thread
     356                    thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
     357
     358                    hal_fence();
    376359
    377360#if DEBUG_RPC_SERVER_GENERIC
    378361cycle = (uint32_t)hal_get_cycles();
    379362if( DEBUG_RPC_SERVER_GENERIC < cycle )
    380 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
    381 __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr );
    382 #endif
    383                     // call the relevant server function
    384                     rpc_server[index]( desc_xp );
    385 
    386 #if DEBUG_RPC_SERVER_GENERIC
    387 cycle = (uint32_t)hal_get_cycles();
    388 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    389 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
    390 __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle );
    391 #endif
    392                     // decrement response counter in RPC descriptor if blocking
    393                     if( blocking )
    394                     {
    395                         // decrement responses counter in RPC descriptor
    396                         hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
    397 
    398                         // get client thread pointer and client core lid from RPC descriptor
    399                         client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
    400                         client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    401 
    402                         // unblock client thread
    403                         thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
    404 
    405                         hal_fence();
    406 
    407 #if DEBUG_RPC_SERVER_GENERIC
    408 cycle = (uint32_t)hal_get_cycles();
    409 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    410 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
    411 __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle );
    412 #endif
    413                         // send IPI to client core
    414                             // dev_pic_send_ipi( desc_cxy , client_core_lid );
    415                     }
    416                         }
    417                 else
    418                 {
    419                     break;
    420                 }
    421                 } // end internal loop
    422 
    423             // release rpc_fifo ownership if not lost
    424             if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0;
    425 
    426         }  // end if RPC fifo
    427 
    428         // RPC thread blocks on IDLE
    429         thread_block( server_xp , THREAD_BLOCKED_IDLE );
    430 
    431         // sucide if too many RPC threads / simply deschedule otherwise
     363printk("\n[DBG] %s : RPC thread %x unblocked client thread %x / cycle %d\n",
     364__FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle );
     365#endif
     366                    // send IPI to client core
     367                    dev_pic_send_ipi( desc_cxy , client_core_lid );
     368
     369                }  // end if blocking RPC
     370            }  // end RPC handling if fifo non empty
     371        }  // end if RPC_fIFO ownership successfully taken and released
     372
     373        // sucide if too many RPC threads
    432374        if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
    433375            {
     
    436378uint32_t cycle = (uint32_t)hal_get_cycles();
    437379if( DEBUG_RPC_SERVER_GENERIC < cycle )
    438 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
    439 __FUNCTION__, server_ptr, local_cxy, cycle );
     380printk("\n[DBG] %s : RPC thread %x suicides / cycle %d\n",
     381__FUNCTION__, server_ptr->trdid, cycle );
    440382#endif
    441383            // update RPC threads counter
    442                 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
     384                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[server_core_lid] , -1 );
    443385
    444386            // RPC thread blocks on GLOBAL
     
    448390            hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );
    449391            }
     392        // block and deschedule otherwise
    450393        else
    451394        {
     
    454397uint32_t cycle = (uint32_t)hal_get_cycles();
    455398if( DEBUG_RPC_SERVER_GENERIC < cycle )
    456 printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n",
    457 __FUNCTION__, server_ptr, local_cxy, cycle );
    458 #endif
     399printk("\n[DBG] %s : RPC thread %x block IDLE & deschedules / cycle %d\n",
     400__FUNCTION__, server_ptr->trdid, cycle );
     401#endif
     402            // RPC thread blocks on IDLE
     403            thread_block( server_xp , THREAD_BLOCKED_IDLE );
    459404
    460405            // RPC thread deschedules
    461             assert( thread_can_yield() , "illegal sched_yield\n" );
    462             sched_yield("RPC fifo empty");
     406            sched_yield("RPC_FIFO empty");
    463407        }
    464 
    465408        } // end infinite loop
    466 
    467409} // end rpc_thread_func()
    468410
     
    478420{
    479421#if DEBUG_RPC_PMEM_GET_PAGES
     422thread_t * this = CURRENT_THREAD;
    480423uint32_t cycle = (uint32_t)hal_get_cycles();
    481424if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    482 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    483 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     425printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     426__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    484427#endif
    485428
     
    504447cycle = (uint32_t)hal_get_cycles();
    505448if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    506 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    507 __FUNCTION__ , CURRENT_THREAD , cycle );
     449printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     450__FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle );
    508451#endif
    509452}
     
    513456{
    514457#if DEBUG_RPC_PMEM_GET_PAGES
     458thread_t * this = CURRENT_THREAD;
    515459uint32_t cycle = (uint32_t)hal_get_cycles();
    516460if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
    517 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
    518 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     461printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
     462__FUNCTION__, this->trdid, this->pr