Changeset 563 for trunk


Ignore:
Timestamp:
Oct 4, 2018, 11:16:13 PM (5 years ago)
Author:
alain
Message:

Complete restructuration of kernel spinlocks.

Location:
trunk/kernel/libk
Files:
10 added
4 deleted
21 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/libk/barrier.c

    r457 r563  
    11/*
    2  * barrier.c - kernel barrier implementaion
     2 * barrier.c - Busy-waiting, local, kernel barrier implementation
    33 *
    4  * Author   Alain Greiner (2016)
     4 * Author   Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/libk/barrier.h

    r457 r563  
    11/*
    2  * _barrier.h - local kernel barrier definition
     2 * barrier.h - Busy-waiting, local, kernel barrier definition
    33 *
    4  * Author  Alain Greiner (2016)
     4 * Author  Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3131 * This structure defines a "rendez-vous" barrier, that can be used
    3232 * to synchronise several kernel threads running in the same  cluster.
    33  * It is used in the kernel_init phase.
     33 * As it is used in the kernel_init phase, it implements a busy-waiting policy.
    3434 * It does not need to be initialised, but it must be statically allocated
    3535 * in the KDATA segment to be properly initialised by the compiler/loader.
     
    4040    uint32_t            current;            // number of arrived threads
    4141    volatile uint32_t   sense;              // barrier state (toggle)
    42     uint32_t            pad[(CONFIG_CACHE_LINE_SIZE>>2)-2];
     42    uint32_t            padding[(CONFIG_CACHE_LINE_SIZE>>2)-2];
    4343}
    4444barrier_t;
     
    4848 * expected threads reach the barrier. It can be used several times without
    4949 * specific initialisation.
    50  * It is portable, as it uses the remote_lw() & remote_sw() access functions.
     50 *****************************************************************************************
    5151 * @ barrier : pointer on barrier
    5252 * @ count   : number of expected thread
  • trunk/kernel/libk/htab.c

    r492 r563  
    2626#include <hal_special.h>
    2727#include <htab.h>
    28 #include <rwlock.h>
     28#include <busylock.h>
    2929#include <list.h>
    3030#include <printk.h>
    3131#include <vfs.h>
    3232
     33
    3334///////////////////////////////////////////////////////////////////////////////////////////
    3435//    Item type specific (static) functions (two functions for each item type).
     
    4243// @ return the index value, from 0 to (HASHTAB_SIZE - 1)
    4344///////////////////////////////////////////////////////////////////////////////////////////
    44 
    4545static uint32_t htab_inode_index( void * key )
    4646{
     
    5959// @ return pointer on item if found / return NULL if not found.
    6060///////////////////////////////////////////////////////////////////////////////////////
    61 
    6261static void * htab_inode_scan( htab_t  * htab,
    6362                               uint32_t  index,
     
    8887
    8988    // initialize readlock
    90     rwlock_init( &htab->lock );
     89    busylock_init( &htab->lock , LOCK_HTAB_STATE );
    9190
    9291    htab->items = 0;
     
    117116    uint32_t index = htab->index( key );
    118117
    119     // take the lock in write mode
    120     rwlock_wr_lock( &htab->lock );
     118    // take the lock
     119    busylock_acquire( &htab->lock );
    121120
    122121    // scan sub-list to check if item exist
     
    126125    {
    127126        // release lock
    128         rwlock_wr_unlock( &htab->lock );
    129 
    130         return -1;
     127        busylock_release( &htab->lock );
     128
     129        return 0xFFFFFFFF;
    131130    }
    132131    else               // item doesn't exist => register
     
    139138
    140139        // release lock
    141         rwlock_wr_unlock( &htab->lock );
     140        busylock_release( &htab->lock );
    142141
    143142        return 0;
     
    153152    uint32_t index = htab->index( key );
    154153
    155     // take the lock in write mode
    156     rwlock_wr_lock( &htab->lock );
     154    // take the lock
     155    busylock_acquire( &htab->lock );
    157156
    158157    // scan sub-list to check if item exists
     
    162161    {
    163162        // release lock
    164         rwlock_wr_unlock( &htab->lock );
    165 
    166         return -1;
     163        busylock_release( &htab->lock );
     164
     165        return 0xFFFFFFFF;
    167166    }
    168167    else               // item exist => remove it
     
    175174
    176175        // release lock
    177         rwlock_wr_unlock( &htab->lock );
     176        busylock_release( &htab->lock );
    178177
    179178        return 0;
     
    188187    uint32_t index = htab->index( key );
    189188
    190     // take the lock in read mode
    191     rwlock_rd_lock( &htab->lock );
     189    // take the lock
     190    busylock_acquire( &htab->lock );
    192191
    193192    // scan sub-list
     
    195194
    196195    // release lock
    197     rwlock_rd_unlock( &htab->lock );
     196    busylock_release( &htab->lock );
    198197
    199198        return item;
  • trunk/kernel/libk/htab.h

    r459 r563  
    8484        htab_scan_t     * scan;                 /*! item type specific function            */
    8585    uint32_t          items;                /*! number of registered items             */
    86     rwlock_t          lock;                 /*! lock protecting hash table accesses    */
     86    busylock_t        lock;                 /*! lock protecting hash table accesses    */
    8787}
    8888htab_t;
  • trunk/kernel/libk/remote_barrier.c

    r457 r563  
    11/*
    2  * remote_barrier.c - Access a POSIX barrier.
    3  *
    4  * Author   Alain Greiner (2016,2017)
     2 * remote_barrier.c -  POSIX barrier implementation.
     3 *
     4 * Author   Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2525#include <hal_remote.h>
    2626#include <hal_irqmask.h>
    27 #include <remote_spinlock.h>
     27#include <remote_busylock.h>
    2828#include <thread.h>
    2929#include <kmem.h>
     
    3333#include <remote_barrier.h>
    3434
    35 /////////////////////////////////////////////////
    36 inline void remote_barrier( xptr_t    barrier_xp,
    37                             uint32_t  count )
    38 {
    39     uint32_t  expected;
    40 
    41     remote_barrier_t * ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
    42     cxy_t              cxy = GET_CXY( barrier_xp );
    43 
    44     // get barrier sense value
    45     uint32_t sense = hal_remote_lw( XPTR( cxy , &ptr->sense ) );
    46 
    47     // compute expected value
    48     if ( sense == 0 ) expected = 1;
    49     else              expected = 0;
    50 
    51     // atomically increment current
    52     uint32_t current = hal_remote_atomic_add( XPTR( cxy , &ptr->current ) , 1 );
    53 
    54     // last task reset current and toggle sense
    55     if( current == (count-1) )
    56     {
    57         hal_remote_sw( XPTR( cxy , &ptr->current) , 0 );
    58         hal_remote_sw( XPTR( cxy , &ptr->sense  ) , expected );
    59     }
    60     else   // other tasks poll the sense
    61     {
    62         while( hal_remote_lw( XPTR( cxy , &ptr->sense ) ) != expected ) asm volatile ("nop");
    63     }
    64 }
    6535
    6636///////////////////////////////////////////////////
     
    12090    // get reference process cluster and local pointer
    12191    cxy_t       ref_cxy = GET_CXY( ref_xp );
    122     process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     92    process_t * ref_ptr = GET_PTR( ref_xp );
    12393
    12494    // allocate memory for barrier descriptor
     
    140110
    141111    // initialise barrier
    142     hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->nb_threads ) , count );
    143     hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->current    ) , 0 );
    144     hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->sense      ) , 0 );
     112    hal_remote_s32 ( XPTR( ref_cxy , &barrier_ptr->nb_threads ) , count );
     113    hal_remote_s32 ( XPTR( ref_cxy , &barrier_ptr->current    ) , 0 );
     114    hal_remote_s32 ( XPTR( ref_cxy , &barrier_ptr->sense      ) , 0 );
    145115    hal_remote_spt( XPTR( ref_cxy , &barrier_ptr->ident      ) , (void*)ident );
    146116
     
    151121    xptr_t entry_xp = XPTR( ref_cxy , &barrier_ptr->list );
    152122
    153     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     123    remote_busylock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    154124    xlist_add_first( root_xp , entry_xp );
    155     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     125    remote_busylock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    156126
    157127    return 0;
     
    176146
    177147    // remove barrier from reference process xlist
    178     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     148    remote_busylock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    179149    xlist_unlink( XPTR( barrier_cxy , &barrier_ptr->list ) );
    180     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     150    remote_busylock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    181151
    182152    // release memory allocated for barrier descriptor
     
    208178    thread_t         * thread_ptr = CURRENT_THREAD;
    209179
     180// check calling thread can yield
     181assert( (thread_ptr->busylocks == 0),
     182"cannot yield : busylocks = %d\n", thread_ptr->busylocks );
     183
    210184    // get cluster and local pointer on remote barrier
    211185    remote_barrier_t * barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
     
    213187
    214188    // get count and root fields from barrier descriptor
    215     count   = hal_remote_lw ( XPTR( barrier_cxy , &barrier_ptr->nb_threads ) );
    216     root_xp = hal_remote_lwd( XPTR( barrier_cxy , &barrier_ptr->root ) );
     189    count   = hal_remote_l32 ( XPTR( barrier_cxy , &barrier_ptr->nb_threads ) );
     190    root_xp = hal_remote_l64( XPTR( barrier_cxy , &barrier_ptr->root ) );
    217191
    218192    // get barrier sense value
    219     sense = hal_remote_lw( XPTR( barrier_cxy , &barrier_ptr->sense ) );
     193    sense = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->sense ) );
    220194
    221195    // compute expected value
     
    231205    if( current == (count-1) )                       // last thread
    232206    {
    233         hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->current) , 0 );
    234         hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->sense  ) , expected );
     207        hal_remote_s32( XPTR( barrier_cxy , &barrier_ptr->current) , 0 );
     208        hal_remote_s32( XPTR( barrier_cxy , &barrier_ptr->sense  ) , expected );
    235209
    236210        // activate waiting threads if required
     
    248222
    249223                // remove waiting thread from queue
    250                 remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) );
     224                remote_busylock_acquire( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    251225                xlist_unlink( XPTR( barrier_cxy , &barrier_ptr->list ) );
    252                 remote_spinlock_unlock( XPTR( barrier_cxy , &barrier_ptr->lock ) );
     226                remote_busylock_release( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    253227
    254228                // unblock waiting thread
     
    268242        xptr_t entry_xp = XPTR( thread_cxy , &thread_ptr->wait_list );
    269243
    270         remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) );
     244        remote_busylock_acquire( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    271245        xlist_add_last( root_xp , entry_xp );
    272         remote_spinlock_unlock( XPTR( barrier_cxy , &barrier_ptr->lock ) );
     246        remote_busylock_release( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    273247
    274248        // block & deschedule the calling thread
  • trunk/kernel/libk/remote_barrier.h

    r457 r563  
    11/*
    2  * remote_barrier.h - Access a POSIX barrier.               
     2 * remote_barrier.h - POSIX barrier definition.               
    33 *
    4  * Author  Alain Greiner (2016)
     4 * Author  Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2727#include <kernel_config.h>
    2828#include <hal_kernel_types.h>
    29 #include <remote_spinlock.h>
     29#include <remote_busylock.h>
    3030#include <xlist.h>
    3131
     
    3333 *          This file defines a POSIX compliant barrier.
    3434 *
    35  * It is used by multi-threaded applications to synchronise threads running in
    36  * different clusters, as all access functions uses hal_remote_lw() / hal_remote_sw()
    37  * portable remote access primitives.
     35 * It is used by multi-threaded user applications to synchronise threads running in
     36 * different clusters, as all access functions uses hal_remote_l32() / hal_remote_s32()
     37 * remote access primitives.
    3838 *
    3939 * A barrier is declared by a given user process as a "pthread_barrier_t" global variable.
     
    5050 * is blocked on the THREAD_BLOCKED_USERSYNC condition. The last arrived thread
    5151 * unblocks all registered waiting threads.
    52  *
    53  * Implementation note:
    54  * This barrier is also used by the kernel in the parallel kernel_init phase, as the
    55  * remote_barrier() function does not require barrier initialisation, when the barrier
    56  * is statically allocated by the compiler in the kdata segment.
    5752 * **************************************************************************************/
    5853
     
    6661typedef struct remote_barrier_s
    6762{
    68     remote_spinlock_t  lock;          /*! lock protecting list of arrived threads       */
     63    remote_busylock_t  lock;          /*! lock protecting xlist of arrived threads      */
    6964    intptr_t           ident;         /*! virtual address in user space == identifier   */
    7065    uint32_t           current;       /*! number of arrived threads                     */
     
    7570}
    7671remote_barrier_t;
    77 
    78 /*****************************************************************************************
    79  * This function is directly used by the kernel in the kernel_init phase,
    80  * because it does not require barrier state initialisation.
    81  * It returns only when the <count> expected threads reach the barrier.
    82  *****************************************************************************************
    83  * @ barrier_xp  : extended pointer on barrier descriptor.
    84  * @ count       : number of expected threads.
    85  ****************************************************************************************/
    86 inline void remote_barrier( xptr_t   barrier_xp, 
    87                             uint32_t count );
    8872
    8973
  • trunk/kernel/libk/remote_condvar.c

    r457 r563  
    11/*
    2  * remote_condvar.c - distributed kernel condvar implementaion
    3  * 
    4  * Author   Alain Greiner (2016)
     2 * remote_condvar.c - remote kernel condition variable implementation.
     3 *
     4 * Authors     Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2222 */
    2323
     24#include <kernel_config.h>
    2425#include <hal_kernel_types.h>
    25 #include <hal_remote.h>
    26 #include <hal_irqmask.h>
    2726#include <thread.h>
    28 #include <kmem.h>
    29 #include <printk.h>
    30 #include <process.h>
    31 #include <vmm.h>
     27#include <scheduler.h>
    3228#include <xlist.h>
    3329#include <remote_mutex.h>
     30#include <remote_busylock.h>
    3431#include <remote_condvar.h>
     32
    3533
    3634///////////////////////////////////////////////////
     
    4543    // get cluster and local pointer on reference process
    4644    cxy_t          ref_cxy = GET_CXY( ref_xp );
    47     process_t    * ref_ptr = (process_t *)GET_PTR( ref_xp );
    48 
    49     // get extended pointer on root of condvars list
     45    process_t    * ref_ptr = GET_PTR( ref_xp );
     46
     47    // get extended pointer on condvars list
    5048    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->condvar_root );
    51    
    52     // scan reference process condvars list
     49    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );
     50
     51    // get lock protecting synchro lists
     52    remote_queuelock_acquire( lock_xp );
     53 
     54    // scan reference process condvar list
    5355    xptr_t             iter_xp;
    5456    xptr_t             condvar_xp;
    5557    cxy_t              condvar_cxy;
    56     struct remote_condvar_s * condvar_ptr;
     58    remote_condvar_t * condvar_ptr;
    5759    intptr_t           current;
    5860    bool_t             found = false;
     
    6264        condvar_xp  = XLIST_ELEMENT( iter_xp , remote_condvar_t , list );
    6365        condvar_cxy = GET_CXY( condvar_xp );
    64         condvar_ptr = (remote_condvar_t *)GET_PTR( condvar_xp );
    65         current     = (intptr_t)hal_remote_lpt( XPTR( condvar_cxy , &condvar_ptr->ident ) );   
    66         if( ident == current )
     66        condvar_ptr = GET_PTR( condvar_xp );
     67        current = (intptr_t)hal_remote_lpt( XPTR( condvar_cxy , &condvar_ptr->ident ) );   
     68
     69        if( current == ident )
    6770        {
    6871            found = true;
     
    7174    }
    7275
     76    // release lock protecting synchro lists
     77    remote_queuelock_release( lock_xp );
     78 
    7379    if( found == false )  return XPTR_NULL;
    7480    else                  return condvar_xp;
     
    7682}  // end remote_condvar_from_ident()
    7783
    78 ///////////////////////////////////////////////
    79 error_t remote_condvar_create( intptr_t ident )
    80 {
     84/////////////////////////////////////////////////
     85error_t remote_condvar_create( intptr_t   ident )
     86{
     87    remote_condvar_t * condvar_ptr;
    8188    xptr_t             condvar_xp;
    82     remote_condvar_t * condvar_ptr;
    8389
    8490    // get pointer on local process descriptor
     
    9298    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    9399
    94     // allocate memory for condvar descriptor
    95     if( ref_cxy == local_cxy )                  // local cluster is the reference
     100    // allocate memory for new condvar in reference cluster
     101    if( ref_cxy == local_cxy )                              // local cluster is the reference
    96102    {
    97103        kmem_req_t req;   
    98         req.type      = KMEM_CONDVAR;
    99         req.flags     = AF_ZERO;
    100         condvar_ptr   = kmem_alloc( &req );
    101         condvar_xp    = XPTR( local_cxy , condvar_ptr );
    102     }
    103     else                                       // reference is remote
     104        req.type    = KMEM_CONDVAR;
     105        req.flags   = AF_ZERO;
     106        condvar_ptr = kmem_alloc( &req );
     107        condvar_xp  = XPTR( local_cxy , condvar_ptr );
     108    }
     109    else                                                   // reference cluster is remote
    104110    {
    105111        rpc_kcm_alloc_client( ref_cxy , KMEM_CONDVAR , &condvar_xp );
    106         condvar_ptr = (remote_condvar_t *)GET_PTR( condvar_xp );
    107     }
    108 
    109     if( condvar_ptr == NULL ) return ENOMEM;
     112        condvar_ptr = GET_PTR( condvar_xp );
     113    }
     114
     115    if( condvar_xp == XPTR_NULL ) return 0xFFFFFFFF;
    110116
    111117    // initialise condvar
    112     hal_remote_spt( XPTR( ref_cxy , &condvar_ptr->ident      ) , (void*)ident );
    113     xlist_entry_init( XPTR( ref_cxy , &condvar_ptr->list ) );
    114     xlist_root_init( XPTR( ref_cxy , &condvar_ptr->root ) );
    115 
    116     // register  condvar in reference process xlist
     118        hal_remote_spt( XPTR( ref_cxy , &condvar_ptr->ident ) , (void *)ident );
     119        xlist_root_init( XPTR( ref_cxy , &condvar_ptr->root ) );
     120        xlist_entry_init( XPTR( ref_cxy , &condvar_ptr->list ) );
     121    remote_busylock_init( XPTR( ref_cxy , &condvar_ptr->lock ), LOCK_CONDVAR_STATE );
     122
     123    // register condvar in reference process xlist
    117124    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->condvar_root );
    118     xptr_t xp_list = XPTR( ref_cxy , &condvar_ptr->list );
    119 
    120     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    121     xlist_add_first( root_xp , xp_list );
    122     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     125    xptr_t list_xp = XPTR( ref_cxy , &condvar_ptr->list );
     126
     127    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     128    xlist_add_first( root_xp , list_xp );
     129    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    123130
    124131    return 0;
     
    137144    // get reference process cluster and local pointer
    138145    cxy_t       ref_cxy = GET_CXY( ref_xp );
    139     process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    140 
    141     // get condvar cluster and local pointer
    142     cxy_t              condvar_cxy = GET_CXY( condvar_xp );
    143     remote_condvar_t * condvar_ptr = (remote_condvar_t *)GET_PTR( condvar_xp );
     146    process_t * ref_ptr = GET_PTR( ref_xp );
     147
     148    // get condvar cluster and local pointer
     149    cxy_t              condvar_cxy = GET_CXY( condvar_xp );
     150    remote_condvar_t * condvar_ptr = GET_PTR( condvar_xp );
     151
     152    // get remote pointer on waiting queue root
     153    xptr_t root_xp = XPTR( condvar_cxy , &condvar_ptr->root );
     154 
     155    if( !xlist_is_empty( root_xp ) )   // user error
     156    {
     157        printk("WARNING in %s for thread %x in process %x : "
     158               "destroy condvar, but  waiting threads queue not empty\n",
     159               __FUNCTION__ , CURRENT_THREAD->trdid , CURRENT_THREAD->process->pid );
     160    }
    144161
    145162    // remove condvar from reference process xlist
    146     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     163    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    147164    xlist_unlink( XPTR( condvar_cxy , &condvar_ptr->list ) );
    148     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    149 
    150     // release memory allocated for condvaraphore descriptor
     165    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     166
     167    // release memory allocated for condvar descriptor
    151168    if( condvar_cxy == local_cxy )                            // reference is local
    152169    {
    153170        kmem_req_t  req;
    154         req.type = KMEM_BARRIER;
     171        req.type = KMEM_SEM;
    155172        req.ptr  = condvar_ptr;
    156173        kmem_free( &req );
     
    158175    else                                                  // reference is remote
    159176    {
    160         rpc_kcm_free_client( condvar_cxy , condvar_ptr , KMEM_BARRIER );
    161     }
    162 
    163 }  // end remote_condvar_destroy()
     177        rpc_kcm_free_client( condvar_cxy , condvar_ptr , KMEM_CONDVAR );
     178    }
     179
     180}  // end remote_condvar_destroy()
    164181
    165182////////////////////////////////////////////
     
    167184                          xptr_t mutex_xp )
    168185{
    169     // unlock the mutex
     186    thread_t * this = CURRENT_THREAD;
     187
     188// check calling thread can yield
     189assert( (this->busylocks == 0),
     190"cannot yield : busylocks = %d\n", this->busylocks );
     191
     192    // get condvar cluster and local pointer
     193    remote_condvar_t * condvar_ptr = GET_PTR( condvar_xp );
     194    cxy_t              condvar_cxy = GET_CXY( condvar_xp );
     195
     196    // register the calling thread in condvar waiting queue
     197    xlist_add_last( XPTR( condvar_cxy , &condvar_ptr->root ),
     198                    XPTR( local_cxy   , &this->wait_xlist ) );
     199
     200    // block the calling thread
     201    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );
     202
     203    // release the mutex
    170204    remote_mutex_unlock( mutex_xp );
    171205
    172     thread_t * this = CURRENT_THREAD;
    173 
    174     // get condvar cluster an local pointer
    175     remote_condvar_t * condvar_ptr = (remote_condvar_t *)GET_PTR( condvar_xp );
    176     cxy_t              condvar_cxy = GET_CXY( condvar_xp );
    177 
    178     // get extended pointer on condvar waiting queue
    179     xptr_t root_xp  = XPTR( condvar_cxy , &condvar_ptr->root );
    180 
    181     // get extended pointer on calling thread xlist_entry
    182     xptr_t entry_xp = XPTR( local_cxy , &this->wait_list );
    183 
    184     // register the calling thread in the condvar waiting queue
    185     remote_spinlock_lock( XPTR( condvar_cxy , &condvar_ptr->lock ) );
    186     xlist_add_last( root_xp , entry_xp );
    187     remote_spinlock_unlock( XPTR( condvar_cxy , &condvar_ptr->lock ) );
    188 
    189     // block the calling thread
    190     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_USERSYNC );
     206    // deschedule
    191207    sched_yield("blocked on condvar");
    192208
    193     // lock the mutex before return
    194     remote_mutex_unlock( mutex_xp );
     209    // re-acquire the mutex
     210    remote_mutex_lock( mutex_xp );
    195211
    196212}  // end remote_condvar_wait()
     
    199215void remote_condvar_signal( xptr_t condvar_xp )
    200216{
    201     reg_t     irq_state;
    202 
    203     // get condvar cluster an local pointer
    204     remote_condvar_t * condvar_ptr = (remote_condvar_t *)GET_PTR( condvar_xp );
    205     cxy_t              condvar_cxy = GET_CXY( condvar_xp );
    206 
    207     // get extended pointer on condvar waiting queue
    208     xptr_t root_xp  = XPTR( condvar_cxy , &condvar_ptr->root );
    209 
    210     if( xlist_is_empty( root_xp ) ) return;
    211 
    212     // disable interrupts
    213         hal_disable_irq( &irq_state );
    214  
    215     // get extended pointer on the first waiting thread
    216     xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    217 
    218     // remove the first waiting thread from queue
    219     remote_spinlock_lock( XPTR( condvar_cxy , &condvar_ptr->lock ) );
    220     xlist_unlink( XPTR( condvar_cxy , &condvar_ptr->list ) );
    221     remote_spinlock_unlock( XPTR( condvar_cxy , &condvar_ptr->lock ) );
    222 
    223     // unblock first waiting thread
    224     thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    225 
    226     // restore interrupts
    227         hal_restore_irq( irq_state );
     217    // get condvar cluster and local pointer
     218    remote_condvar_t * condvar_ptr = GET_PTR( condvar_xp );
     219    cxy_t              condvar_cxy = GET_CXY( condvar_xp );
     220
     221    // does nothing if waiting queue empty
     222    if( xlist_is_empty( XPTR( condvar_cxy, &condvar_ptr->root ) ) == false )
     223    {
     224         // get first waiting thread
     225         xptr_t thread_xp = XLIST_FIRST( XPTR( condvar_cxy , &condvar_ptr->root ),
     226                                         thread_t , wait_xlist );
     227
     228         // remove this waiting thread from queue
     229         thread_t * thread_ptr = GET_PTR( thread_xp );
     230         cxy_t      thread_cxy = GET_CXY( thread_xp );
     231         xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     232
     233         // unblock this waiting thread
     234         thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
     235    }
    228236
    229237}  // end remote_condvar_signal()
     
    232240void remote_condvar_broadcast( xptr_t condvar_xp )
    233241{
    234     reg_t    irq_state;
    235 
    236     // get condvar cluster an local pointer
    237     remote_condvar_t * condvar_ptr = (remote_condvar_t *)GET_PTR( condvar_xp );
    238     cxy_t              condvar_cxy = GET_CXY( condvar_xp );
    239 
    240     // get extended pointer on condvar waiting queue
    241     xptr_t root_xp  = XPTR( condvar_cxy , &condvar_ptr->root );
    242 
    243     if( xlist_is_empty( root_xp ) ) return;
    244 
    245     // disable interrupts
    246     hal_disable_irq( &irq_state );
    247  
    248     // loop on waiting threads
    249     xptr_t  iter_xp;
    250     xptr_t  thread_xp;
    251     XLIST_FOREACH( root_xp , iter_xp )
    252     {
    253         // get extended pointer on waiting thread
    254         thread_xp = XLIST_ELEMENT( iter_xp , thread_t , wait_list );
    255        
    256         // remove waiting thread from queue
    257         remote_spinlock_lock( XPTR( condvar_cxy , &condvar_ptr->lock ) );
    258         xlist_unlink( XPTR( condvar_cxy , &condvar_ptr->list ) );
    259         remote_spinlock_unlock( XPTR( condvar_cxy , &condvar_ptr->lock ) );
    260 
    261         // unblock waiting thread
    262         thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    263     }
    264 
    265     // restore interrupts
    266     hal_restore_irq( irq_state );
    267 
     242    // get condvar cluster and local pointer
     243    remote_condvar_t * condvar_ptr = GET_PTR( condvar_xp );
     244    cxy_t              condvar_cxy = GET_CXY( condvar_xp );
     245
     246    // does nothing if waiting queue empty
     247    while( xlist_is_empty( XPTR( condvar_cxy , &condvar_ptr->root ) ) == false )
     248    {
     249         // get first waiting thread
     250         xptr_t thread_xp = XLIST_FIRST( XPTR( condvar_cxy , &condvar_ptr->root ),
     251                                         thread_t , wait_xlist );
     252
     253         // remove this waiting thread from queue
     254         thread_t * thread_ptr = GET_PTR( thread_xp );
     255         cxy_t      thread_cxy = GET_CXY( thread_xp );
     256         xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     257
     258         // unblock this waiting thread
     259         thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
     260    }
    268261}  // end remote_condvar_broadcast()
     262
  • trunk/kernel/libk/remote_condvar.h

    r457 r563  
    11/*
    2  * remote_condvar.h - distributed kernel condvar definition
     2 * remote_condvar.h: POSIX condition variable definition.     
    33 *
    4  * Author  Alain Greiner (2016,2017)
     4 * Authors  Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    1818 *
    1919 * You should have received a copy of the GNU General Public License
    20  * along with ALMOS-MKH; if not, write to the Free Software Foundation,
     20 * along with ALMOS-kernel; if not, write to the Free Software Foundation,
    2121 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
    2222 */
    2323
    24 #ifndef _REMOTE_CONDVAR_H_
    25 #define _REMOTE_CONDVAR_H_
     24#ifndef _CONDVAR_H_
     25#define _CONDVAR_H_
    2626
    2727#include <kernel_config.h>
    2828#include <hal_kernel_types.h>
    29 #include <remote_spinlock.h>
     29#include <remote_busylock.h>
    3030#include <xlist.h>
    3131
    32 /*****************************************************************************************
    33  *          This file defines a POSIX compliant condvar.
     32/*******************************************************************************************
      33 *     This file defines a user-level POSIX compliant condition variable.
    3434 *
    35  * It is used by multi-threaded applications to synchronise threads running in
    36  * different clusters, as all access functions uses hal_remote_lw() / hal_remote_sw()
    37  * portable remote access primitives.
      35 * It can be used by multi-threaded user applications to synchronise user threads
     36 * running in different clusters.
    3837 *
    3938 * A condvar is declared by a given user process as a "pthread_cond_t" global variable.
    4039 * This user type is implemented as an unsigned long, but the value is not used by the
    41  * kernel. ALMOS-MKH uses only the condvar virtual address as an identifier.
      40 * kernel. ALMOS-MKH uses only the condvar virtual address as an identifier.
    4241 * For each user condvar, ALMOS-MKH creates a kernel "remote_condvar_t" structure,
    4342 * dynamically allocated in the reference cluster by the remote_condvar_create() function,
    44  * and destroyed by the remote_condvar_destroy() function, using RPC if the calling
    45  * thread is not running in the reference cluster. The synchronisation is done by the
    46  * remote_condvar_wait(), remote_condvar_signal(), remote_convar_broadcast() functions.
    47  ****************************************************************************************/
     43 * and destroyed by the remote_condvar_destroy() function, using RPC if the calling thread
     44 * is not running in the reference cluster.
     45 *
      46 * The blocking remote_condvar_wait() function allows the calling thread to efficiently
      47 * wait for a change in a shared user object. The calling thread blocks and registers in
      48 * a waiting queue attached to the condvar. The blocked thread is unblocked by another
      49 * thread calling the remote_condvar_signal() or remote_condvar_broadcast() functions.
     50 * The three associated methods wait(), signal() and broadcast() must be called
     51 * by a thread holding the mutex associated to the condvar.
     52 ******************************************************************************************/
    4853
    49 /*****************************************************************************************
    50  * This structure defines the condvar descriptor.
    51  * - It contains an xlist of all condvars dynamically created by a given process,
    52  *   rooted in the reference process descriptor.
    53  * - It contains also the root of another xlist of all threads waiting on the condvar,
    54  *   resumed by a remote_condvar_signal(), or remote_condvar_broadcast().
    55  ****************************************************************************************/
     54/*******************************************************************************************
     55 * This structure defines the kernel implementation of a condvar.
     56 ******************************************************************************************/
    5657
    5758typedef struct remote_condvar_s
    5859{
    59     remote_spinlock_t  lock;     /*! lock protecting the waiting threads list           */
    60     intptr_t           ident;    /*! virtual address in user space == identifier        */
    61     xlist_entry_t      list;     /*! member of list of condvars in same process         */
    62     xlist_entry_t      root;     /*! root of list of waiting threads                    */
     60    remote_busylock_t lock;         /*! lock protecting the condvar state                 */
     61    intptr_t          ident;        /*! virtual address in user space == identifier       */
     62    xlist_entry_t     root;         /*! root of waiting threads queue                     */
     63    xlist_entry_t     list;         /*! member of list of condvars in same process        */
    6364}
    6465remote_condvar_t;
    6566
    66 /*****************************************************************************************
     67/*********************************************************************************************
    6768 * This function returns an extended pointer on the remote condvar identified
    6869 * by its virtual address in a given user process. It makes an associative search,
    69  * scanning the list of condvars rooted in the reference process descriptor.
    70  *****************************************************************************************
    71  * @ ident    : condvar virtual address, used as identifier.
    72  * @ returns extended pointer on condvar if success / returns XPTR_NULL if not found.
    73  ****************************************************************************************/
     70 * scanning the list of user condvars rooted in the reference process descriptor.
     71 *********************************************************************************************
      72 * @ ident    : condvar virtual address, used as identifier.
      73 * @ returns extended pointer on condvar if success / returns XPTR_NULL if not found.
     74 ********************************************************************************************/
    7475xptr_t remote_condvar_from_ident( intptr_t  ident );
    7576
    76 /*****************************************************************************************
    77  * This function implement the pthread_condvar_init() syscall.
    78  * It allocates memory for the condvar descriptor in the reference cluster for
    79  * the calling process, it initializes the condvar state, and register it in the
    80  * list of condvars owned by the reference process.
    81  *****************************************************************************************
    82  * @ ident       : condvar identifier (virtual address in user space).
    83  * @ return 0 if success / return ENOMEM if failure.
    84  ****************************************************************************************/
    85 error_t remote_condvar_create( intptr_t ident );
     77/*******************************************************************************************
      78 * This function implements the CONDVAR_INIT operation.
     79 * This function creates and initializes a remote_condvar, identified by its virtual
     80 * address <vaddr> in the client process reference cluster, using RPC if required.
     81 * It registers this user condvar in the reference process descriptor.
     82 *******************************************************************************************
      83 * @ vaddr         : [in]  condvar virtual address, used as identifier.
     84 ******************************************************************************************/
     85error_t remote_condvar_create( intptr_t   vaddr );
    8686
    87 /*****************************************************************************************
    88  * This function implement the pthread_condvar_destroy() syscall.
    89  * It releases the memory allocated for the condvar descriptor, and remove the condvar
    90  * from the list of condvars owned by the reference process.
    91  *****************************************************************************************
    92  * @ condvar_xp  : extended pointer on condvar descriptor.
    93  ****************************************************************************************/
    94 void remote_condvar_destroy( xptr_t   condvar_xp );
     87/*******************************************************************************************
      88 * This function implements the CONDVAR_DESTROY operation.
      89 * This function releases the memory allocated for the remote_condvar descriptor,
      90 * and removes the condvar from the list of condvars owned by the reference process.
      91 *******************************************************************************************
      92 * @ condvar_xp : [in] extended pointer on the condvar to destroy.
     93 ******************************************************************************************/
     94void remote_condvar_destroy( xptr_t condvar_xp );
    9595
    96 /*****************************************************************************************
    97  * This function implement the pthread_condvar_wait() syscall.
    98  * It unlock the mutex.
    99  * It register the calling thread in the condvar waiting queue, block the calling thread
    100  * on the THREAD_BLOCKED_CONDVAR condition and deschedule.
    101  * it lock the mutex.
    102  *****************************************************************************************
    103  * @ condvar_xp   : extended pointer on condvar descriptor.
    104  * @ mutex_xp     : extended pointer on associated mutex descriptor.
    105  ****************************************************************************************/
    106 void remote_condvar_wait( xptr_t   condvar_xp, 
    107                           xptr_t   mutex_xp );
     96/*******************************************************************************************
     97 * This function implements the CONDVAR_WAIT operation.
     98 * It atomically releases the mutex identified by the <mutex_xp> argument,
     99 * registers the calling thread in the condvar waiting queue identified by the
     100 * <condvar_xp> argument, blocks and deschedules this calling thread.
      101 * Later, when the calling thread resumes, this function re-acquires the mutex and returns.
     102 * WARNING: the calling thread must hold the mutex associated to the condvar.
     103 *******************************************************************************************
     104 * @ condvar_xp : [in] extended pointer on condvar.
     105 * @ mutex_xp   : [in] extended pointer on mutex.
     106 ******************************************************************************************/
     107void remote_condvar_wait( xptr_t condvar_xp,
     108                          xptr_t mutex_xp );
    108109
    109 /*****************************************************************************************
    110  * This function implement the pthread_condvar_signal() syscall.
    111  * It unblocks the first waiting thread in the condvar waiting queue.
    112  *****************************************************************************************
    113  * @ condvar_xp  : extended pointer on condvar descriptor.
    114  ****************************************************************************************/
    115 void remote_condvar_signal( xptr_t   condvar_xp );
     110/*******************************************************************************************
     111 * This function implements the CONDVAR_SIGNAL operation.
      112 * It removes one waiting thread from a remote_condvar waiting queue identified by the
     113 * <condvar_xp> argument and unblocks this thread.
     114 * It does nothing if the queue is empty.
     115 * WARNING: the calling thread must hold the mutex associated to the condvar.
     116 *******************************************************************************************
     117 * @ condvar_xp : extended pointer on remote_condvar.
     118 ******************************************************************************************/
     119void remote_condvar_signal( xptr_t condvar_xp );
    116120
    117 /*****************************************************************************************
    118  * This function implement the pthread_condvar_broadcast() syscall.
    119  * It unblocks all waiting threads in the condvar waiting queue.
    120  *****************************************************************************************
    121  * @ condvar_xp  : extended pointer on condvar descriptor.
    122  ****************************************************************************************/
    123 void remote_condvar_broadcast( xptr_t   condvar_xp );
     121/*******************************************************************************************
     122 * This function implements the CONDVAR_BROADCAST operation.
     123 * It removes all threads from a remote_condvar waiting queue identified by the
     124 * <condvar_xp> argument, and unblocks all these threads.
     125 * It does nothing if the queue is empty.
     126 * WARNING: the calling thread must hold the mutex associated to the condvar.
     127 *******************************************************************************************
     128 * @ condvar_xp : extended pointer on remote_condvar.
     129 ******************************************************************************************/
     130void remote_condvar_broadcast( xptr_t condvar_xp );
    124131
    125 
    126 #endif  /* _REMOTE_BARRIER_H_ */
     132#endif  /* _CONDVAR_H_ */
  • trunk/kernel/libk/remote_fifo.c

    r457 r563  
    3434#include <remote_fifo.h>
    3535
    36 ////////////////////////////////////////////
    37 void local_fifo_init( remote_fifo_t * fifo )
     36/////////////////////////////////////////////
     37void remote_fifo_init( remote_fifo_t * fifo )
    3838{
    3939    uint32_t  slot;
     
    5959
    6060    // get remote cluster identifier and pointer on FIFO
    61     cxy_t           fifo_cxy = (cxy_t)GET_CXY( fifo_xp );
    62     remote_fifo_t * fifo_ptr = (remote_fifo_t *)GET_PTR( fifo_xp );
     61    cxy_t           fifo_cxy = GET_CXY( fifo_xp );
     62    remote_fifo_t * fifo_ptr = GET_PTR( fifo_xp );
    6363
    6464    // initialise watchdog for contention detection
     
    7777
    7878        // read remote rd_id value
    79         rd_id = hal_remote_lw( XPTR( fifo_cxy , &fifo_ptr->rd_id ) );
     79        rd_id = hal_remote_l32( XPTR( fifo_cxy , &fifo_ptr->rd_id ) );
    8080
    8181        // compute number of full slots
     
    8989        // - deschedule without blocking if possible
    9090        // - wait ~1000 cycles otherwise
    91         if( thread_can_yield() ) sched_yield( "wait RPC fifo" );
    92         else                     hal_fixed_delay( 1000 );
     91        if( CURRENT_THREAD->busylocks == 0 ) sched_yield( "wait RPC fifo" );
     92        else                                 hal_fixed_delay( 1000 );
    9393
    9494        // increment watchdog
     
    100100
    101101    // copy item to fifo
    102         hal_remote_swd( XPTR( fifo_cxy , &fifo_ptr->data[ptw] ), item );
     102        hal_remote_s64( XPTR( fifo_cxy , &fifo_ptr->data[ptw] ), item );
    103103        hal_fence();
    104104
    105105    // set the slot valid flag
    106         hal_remote_sw( XPTR( fifo_cxy , &fifo_ptr->valid[ptw] ) , 1 );
     106        hal_remote_s32( XPTR( fifo_cxy , &fifo_ptr->valid[ptw] ) , 1 );
    107107        hal_fence();
    108108
     
    111111} // end remote_fifo_put_item()
    112112
    113 //////////////////////////////////////////////////
    114 error_t local_fifo_get_item( remote_fifo_t * fifo,
    115                              uint64_t      * item )
     113///////////////////////////////////////////////////
     114error_t remote_fifo_get_item( remote_fifo_t * fifo,
     115                              uint64_t      * item )
    116116{
    117117    // get fifo state
     
    138138
    139139        return 0;
    140 } // end local_fifo_get_item()
     140
     141} // end remote_fifo_get_item()
    141142
    142143/////////////////////////////////////////
     
    146147
    147148    // get remote cluster identifier and pointer on FIFO
    148     cxy_t           cxy = (cxy_t)GET_CXY( fifo );
    149     remote_fifo_t * ptr = (remote_fifo_t *)GET_PTR( fifo );
     149    cxy_t           cxy = GET_CXY( fifo );
     150    remote_fifo_t * ptr = GET_PTR( fifo );
    150151   
    151152    // get read and write pointers
    152         uint32_t wr_id = hal_remote_lw( XPTR( cxy , &ptr->wr_id ) );
    153         uint32_t rd_id = hal_remote_lw( XPTR( cxy , &ptr->rd_id ) );
     153        uint32_t wr_id = hal_remote_l32( XPTR( cxy , &ptr->wr_id ) );
     154        uint32_t rd_id = hal_remote_l32( XPTR( cxy , &ptr->rd_id ) );
    154155
    155156    // compute number of full slots
     
    160161}
    161162
    162 //////////////////////////////////////////////////
    163 bool_t local_fifo_is_empty( remote_fifo_t * fifo )
     163///////////////////////////////////////////////////
     164bool_t remote_fifo_is_empty( remote_fifo_t * fifo )
    164165{
    165166    return ( fifo->wr_id == fifo->rd_id );
     
    172173
    173174    // get remote cluster identifier and pointer on FIFO
    174     cxy_t           cxy = (cxy_t)GET_CXY( fifo );
    175     remote_fifo_t * ptr = (remote_fifo_t *)GET_PTR( fifo );
     175    cxy_t           cxy = GET_CXY( fifo );
     176    remote_fifo_t * ptr = GET_PTR( fifo );
    176177   
    177178    // get read and write pointers
    178         uint32_t wr_id = hal_remote_lw( XPTR( cxy , &ptr->wr_id ) );
    179         uint32_t rd_id = hal_remote_lw( XPTR( cxy , &ptr->rd_id ) );
     179        uint32_t wr_id = hal_remote_l32( XPTR( cxy , &ptr->wr_id ) );
     180        uint32_t rd_id = hal_remote_l32( XPTR( cxy , &ptr->rd_id ) );
    180181
    181182    // compute number of full slots
  • trunk/kernel/libk/remote_fifo.h

    r457 r563  
    6262 * It can only initialise a local FIFO.
    6363 ************************************************************************************
    64  * @ fifo    : pointer to the local fifo.
     64 * @ fifo    : local pointer to the local fifo.
    6565 ***********************************************************************************/
    66 void local_fifo_init( remote_fifo_t * fifo );
     66void remote_fifo_init( remote_fifo_t * fifo );
    6767
    6868/************************************************************************************
     
    7171 * The read slot index is incremented.
    7272 ************************************************************************************
    73  * @ fifo    : pointer to the local fifo.
     73 * @ fifo    : local pointer to the local fifo.
    7474 * @ item    : [out] pointer on buffer for extracted item.
    7575 * @ return  0 on success, EAGAIN if the buffer is empty.
    7676 ***********************************************************************************/
    77 error_t local_fifo_get_item( remote_fifo_t * fifo,
    78                              uint64_t      * item );
     77error_t remote_fifo_get_item( remote_fifo_t * fifo,
     78                              uint64_t      * item );
    7979
    8080/************************************************************************************
     
    8686 * registered, or after CONFIG_REMOTE_FIFO_MAX_ITERATIONS failures.   
    8787 ************************************************************************************
    88  * @ fifo    : extended pointer to the fifo in remote cluster.
     88 * @ fifo    : extended pointer to the remote fifo.
    8989 * @ item    : item to be stored.
    9090 * @ return  0 on success / EBUSY if a contention has been detected.
     
    9696 * Query if local fifo is empty
    9797 ************************************************************************************
    98  * @ fifo    : pointer to the fifo.
     98 * @ fifo    : local pointer to the local fifo.
    9999 * @ return true if the fifo is empty, false otherwise.
    100100 ***********************************************************************************/
    101 bool_t local_fifo_is_empty( remote_fifo_t * fifo );
     101bool_t remote_fifo_is_empty( remote_fifo_t * fifo );
    102102
    103103/************************************************************************************
    104104 * Query if remote fifo is full
    105105 ************************************************************************************
    106  * @ fifo    : pointer to the fifo in remote cluster.
    107  * @ cxy         : remote cluster index.
     106 * @ fifo    : extended pointer to the remote fifo.
    108107 * @ return true if the fifo is full, false otherwise.
    109108 ***********************************************************************************/
     
    113112 * Query number ot items in remote fifo.
    114113 ************************************************************************************
    115  * @ fifo     : pointer to the fifo in remote cluster.
    116  * @ cxy          : remote cluster index.
     114 * @ fifo     : extended pointer to the remote fifo.
    117115 * @ return  number of items.
    118116 ***********************************************************************************/
  • trunk/kernel/libk/remote_mutex.c

    r457 r563  
    11/*
    2  * remote_mutex.c - Access a POSIX mutex.
     2 * remote_mutex.c - POSIX mutex implementation.
    33 *
    4  * Authors   Alain   Greiner (2016)
     4 * Authors   Alain   Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2222 */
    2323
     24#include <kernel_config.h>
    2425#include <hal_kernel_types.h>
    2526#include <hal_remote.h>
    26 #include <hal_special.h>
    27 #include <hal_irqmask.h>
    2827#include <thread.h>
    29 #include <cluster.h>
     28#include <xlist.h>
    3029#include <scheduler.h>
     30#include <remote_busylock.h>
    3131#include <remote_mutex.h>
     32
    3233
    3334/////////////////////////////////////////////////
     
    4445    process_t    * ref_ptr = (process_t *)GET_PTR( ref_xp );
    4546
    46     // get extended pointer on root of mutex list
     47    // get extended pointers on mutexes list
    4748    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
    48    
     49    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );
     50
     51    // get lock protecting synchro lists
     52    remote_queuelock_acquire( lock_xp );
     53 
    4954    // scan reference process mutex list
    5055    xptr_t           iter_xp;
     
    6873    }
    6974
      75    // release lock protecting synchros lists
     76    remote_queuelock_release( lock_xp );
     77 
    7078    if( found == false )  return XPTR_NULL;
    7179    else                  return mutex_xp;
     
    8997    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    9098
    91     // allocate memory for barrier descriptor
     99    // allocate memory for mutex descriptor
    92100    if( ref_cxy == local_cxy )                  // local cluster is the reference
    93101    {
     
    101109    {
    102110        rpc_kcm_alloc_client( ref_cxy , KMEM_MUTEX , &mutex_xp );
    103         mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
    104     }
    105 
    106     if( mutex_ptr == NULL ) return ENOMEM;
     111        mutex_ptr = GET_PTR( mutex_xp );
     112    }
     113
     114    if( mutex_ptr == NULL ) return 0xFFFFFFFF;
    107115
    108116    // initialise mutex
    109     hal_remote_sw ( XPTR( ref_cxy , &mutex_ptr->value )   , 0 );
     117    hal_remote_s32 ( XPTR( ref_cxy , &mutex_ptr->taken )   , 0 );
    110118    hal_remote_spt( XPTR( ref_cxy , &mutex_ptr->ident )   , (void *)ident );
    111     hal_remote_swd( XPTR( ref_cxy , &mutex_ptr->owner )   , XPTR_NULL );
    112 
    113119    xlist_entry_init( XPTR( ref_cxy , &mutex_ptr->list ) );
    114120    xlist_root_init( XPTR( ref_cxy , &mutex_ptr->root ) );
    115     remote_spinlock_init( XPTR( ref_cxy , &mutex_ptr->lock ) );
    116 
    117     // register mutex in reference process xlist
     121    hal_remote_s64( XPTR( ref_cxy , &mutex_ptr->owner ) , XPTR_NULL );
     122    remote_busylock_init( XPTR( ref_cxy , &mutex_ptr->lock ), LOCK_MUTEX_STATE );
     123
     124    // get root of mutexes list in process, and list_entry in mutex
    118125    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
    119126    xptr_t xp_list = XPTR( ref_cxy , &mutex_ptr->list );
    120127
    121     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     128    // get lock protecting user synchros lists
     129    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     130
     131    // register mutex in process descriptor
    122132    xlist_add_first( root_xp , xp_list );
    123     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     133
     134    // release lock protecting user synchros lists
     135    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     136
     137#if DEBUG_MUTEX
     138thread_t * this = CURRENT_THREAD;
      139if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
     140printk("\n[DBG] %s : thread %x in %x process / mutex(%x,%x)\n",
     141__FUNCTION__, this->trdid, this->process->pid, local_cxy, mutex_ptr );
     142#endif
     143
    124144
    125145    return 0;
     
    144164    remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
    145165
     166    // get lock protecting user synchros lists
     167    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     168
    146169    // remove mutex from reference process xlist
    147     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    148170    xlist_unlink( XPTR( mutex_cxy , &mutex_ptr->list ) );
    149     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     171
     172    // release lock protecting user synchros lists
     173    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    150174
    151175    // release memory allocated for mutexaphore descriptor
     
    167191void remote_mutex_lock( xptr_t mutex_xp )
    168192{
    169     bool_t    success;
    170     reg_t     irq_state;
    171 
    172     // get cluster and local pointer on remote mutex
    173     remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
     193    // get cluster and local pointer on mutex
     194    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
    174195    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
    175196
    176     // get cluster and local pointer on calling thread
    177     cxy_t            thread_cxy = local_cxy;
    178     thread_t       * thread_ptr = CURRENT_THREAD;
    179 
    180     // get extended pointers on mutex value
    181     xptr_t           value_xp = XPTR( mutex_cxy , &mutex_ptr->value );
    182 
    183     // Try to take the mutex
    184     success = hal_remote_atomic_cas( value_xp , 0 , 1 );
    185 
    186     if( success )  // take the lock
    187     {
    188         // register calling thread as mutex owner
    189         xptr_t owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    190         hal_remote_swd( owner_xp , XPTR( thread_cxy , thread_ptr ) );
    191 
    192         // increment calling thread remote_locks
    193         hal_remote_atomic_add( XPTR( thread_cxy , &thread_ptr->remote_locks ) , 1 );
    194     }
    195     else           // deschedule and register calling thread in queue
    196     {
    197         // disable interrupts
    198             hal_disable_irq( &irq_state );
    199  
    200         // register calling thread in mutex waiting queue
    201         xptr_t root_xp  = XPTR( mutex_cxy  , &mutex_ptr->root );
    202         xptr_t entry_xp = XPTR( thread_cxy , &thread_ptr->wait_list );
    203 
    204         remote_spinlock_lock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
    205         xlist_add_last( root_xp , entry_xp );
    206         remote_spinlock_unlock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
    207 
    208         // block & deschedule the calling thread   
    209         thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_USERSYNC );
    210         sched_yield("blocked on mutex");
    211 
    212         // restore interrupts
    213         hal_restore_irq( irq_state );
    214     } 
    215 
    216     hal_fence();
    217 
    218 }  // end remote_mutex_lock()
    219 
    220 ///////////////////////////////////////////
    221 void remote_mutex_unlock( xptr_t mutex_xp )
    222 {
    223         reg_t               irq_state;
    224 
    225     // get cluster and local pointer on remote mutex
    226     remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
    227     cxy_t            mutex_cxy = GET_CXY( mutex_xp );
    228 
    229     // get cluster and local pointer on calling thread
    230     cxy_t            thread_cxy = local_cxy;
    231     thread_t       * thread_ptr = CURRENT_THREAD;
    232 
    233     // get extended pointers on mutex value, root, lock & owner fields
    234     xptr_t           value_xp = XPTR( mutex_cxy , &mutex_ptr->value );
     197    // get extended pointers on mutex fields
     198    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
    235199    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    236200    xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
     201    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
     202
     203    // get cluster and pointers on calling thread
     204    cxy_t            caller_cxy = local_cxy;
     205    thread_t       * caller_ptr = CURRENT_THREAD;
     206    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
     207
     208// check calling thread can yield
     209assert( (caller_ptr->busylocks == 0),
     210"cannot yield : busylocks = %d\n", caller_ptr->busylocks );
     211
     212    while( 1 )
     213    {
     214        // get busylock protecting mutex state
     215        remote_busylock_acquire( lock_xp );
     216
     217        // test mutex state
     218        if( hal_remote_l32( taken_xp ) == 0 )                 // success
     219        {
     220            // register calling thread as mutex owner
     221            hal_remote_s64( owner_xp , caller_xp );
     222
     223            // update mutex state
     224            hal_remote_s32( taken_xp , 1 );
     225
     226#if DEBUG_MUTEX
     227thread_t * this = CURRENT_THREAD;
     228if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
     229printk("\n[DBG] %s : thread %x in process %x SUCCESS on mutex(%x,%x)\n",
     230__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
     231#endif
     232
     233            // release busylock protecting mutex state
     234            remote_busylock_release( lock_xp );
     235
     236             return;
     237        }
     238        else                                                 //  already taken
     239        {
     240            // block the calling thread   
     241            thread_block( caller_xp , THREAD_BLOCKED_USERSYNC );
     242
     243            // register calling thread in mutex waiting queue
     244            xptr_t entry_xp = XPTR( caller_cxy , &caller_ptr->wait_xlist );
     245            xlist_add_last( root_xp , entry_xp );
     246
     247#if DEBUG_MUTEX
     248thread_t * this = CURRENT_THREAD;
     249if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
     250printk("\n[DBG] %s : thread %x in process %x BLOCKED on mutex(%x,%x)\n",
     251__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
     252#endif
     253
     254            // release busylock protecting mutex state
     255            remote_busylock_release( lock_xp );
     256
     257            // deschedule calling thread
     258            sched_yield("blocked on mutex");
     259        }
     260    } 
     261}  // end remote_mutex_lock()
     262
     263//////////////////////////////////////////////
     264error_t remote_mutex_unlock( xptr_t mutex_xp )
     265{
     266    // memory barrier before mutex release
     267    hal_fence();
     268
     269    // get cluster and local pointer on mutex
     270    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
     271    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
     272
     273    // get cluster and pointers on calling thread
     274    cxy_t            caller_cxy = local_cxy;
     275    thread_t       * caller_ptr = CURRENT_THREAD;
     276    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
     277
     278    // get extended pointers on mutex fields
     279    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
     280    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
     281    xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
     282    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
     283
     284    // get busylock protecting mutex state
     285    remote_busylock_acquire( lock_xp );
    237286   
    238     // disable interrupts
    239         hal_disable_irq( &irq_state );
    240  
    241     // unregister owner thread,
    242     hal_remote_swd( owner_xp , XPTR_NULL );
    243 
    244     // decrement calling thread remote_locks
    245         hal_remote_atomic_add( XPTR( thread_cxy , &thread_ptr->remote_locks ) , -1 );
    246 
    247     // activate first waiting thread if required
    248     if( xlist_is_empty( root_xp ) == false )        // one waiiting thread
     287    // check calling thread is mutex owner
     288    if( hal_remote_l64( owner_xp ) != caller_xp )
     289    {
     290        // release busylock protecting mutex state
     291        remote_busylock_release( lock_xp );
     292
     293        return 0xFFFFFFFF;
     294    }
     295
     296#if DEBUG_MUTEX
     297thread_t * this = CURRENT_THREAD;
     298if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
     299printk("\n[DBG] %s : thread %x in %x process EXIT / mutex(%x,%x)\n",
     300__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
     301#endif
     302
     303    // update owner field,
     304    hal_remote_s64( owner_xp , XPTR_NULL );
     305
     306    // update taken field
     307    hal_remote_s32( taken_xp , 0 );
     308
     309    // unblock first waiting thread if waiting list non empty
     310    if( xlist_is_empty( root_xp ) == false )
    249311    {
    250312        // get extended pointer on first waiting thread
    251         xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    252 
    253         // remove first waiting thread from queue
    254         remote_spinlock_lock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
    255         xlist_unlink( XPTR( mutex_cxy , &mutex_ptr->list ) );
    256         remote_spinlock_unlock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
     313        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
     314        thread_t * thread_ptr = GET_PTR( thread_xp );
     315        cxy_t      thread_cxy = GET_CXY( thread_xp );
     316
     317#if DEBUG_MUTEX
     318if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
     319{
     320trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     321process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
     322pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
     323printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %d / mutex(%x,%x)\n",
     324__FUNCTION__, this->trdid, this->process->pid, trdid, pid, mutex_cxy, mutex_ptr );
     325}
     326#endif
     327
     328        // remove this thread from waiting queue
     329        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
    257330
    258331        // unblock first waiting thread
    259332        thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    260333    }
    261     else                                            // no waiting thread
    262     {
    263         // release mutex
    264         hal_remote_sw( value_xp , 0 );
    265     }
    266 
    267     // restore interrupts
    268         hal_restore_irq( irq_state );
     334   
     335    // release busylock protecting mutex state
     336    remote_busylock_release( lock_xp );
     337
     338    return 0;
    269339
    270340}  // end remote_mutex_unlock()
    271341
     342///////////////////////////////////////////////
     343error_t remote_mutex_trylock( xptr_t mutex_xp )
     344{
     345    // get cluster and local pointer on mutex
     346    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
     347    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
     348
     349    // get cluster and pointers on calling thread
     350    cxy_t            caller_cxy = local_cxy;
     351    thread_t       * caller_ptr = CURRENT_THREAD;
     352    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
     353
     354    // get extended pointers on mutex fields
     355    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
     356    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
     357    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
     358
     359    // get busylock protecting mutex state
     360    remote_busylock_acquire( lock_xp );
     361
     362    // test mutex state
     363    if( hal_remote_l32( taken_xp ) == 0 )                 // success
     364    {
     365        // register calling thread as mutex owner
     366        hal_remote_s64( owner_xp , caller_xp );
     367
     368        // update mutex state
     369        hal_remote_s32( taken_xp , 1 );
     370
     371#if DEBUG_MUTEX
     372thread_t * this = CURRENT_THREAD;
     373if( (uint32_t)hal_get_cycles() > DEBUG_QUEUELOCK )
     374printk("\n[DBG] %s : SUCCESS for thread %x in process %x / mutex(%x,%x)\n",
     375__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
     376#endif
     377        // release busylock protecting mutex state
     378        remote_busylock_release( lock_xp );
     379
     380        return 0;
     381    }
     382    else                                                 //  already taken
     383    {
     384
     385#if DEBUG_MUTEX
     386thread_t * this = CURRENT_THREAD;
     387if( (uint32_t)hal_get_cycles() > DEBUG_QUEUELOCK )
     388printk("\n[DBG] %s : FAILURE for thread %x in process %x / mutex(%x,%x)\n",
     389__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
     390#endif
     391        // release busylock protecting mutex state
     392        remote_busylock_release( lock_xp );
     393
     394        return 0xFFFFFFFF;
     395    }
     396}  // end remote_mutex_trylock()
  • trunk/kernel/libk/remote_mutex.h

    r457 r563  
    11/*
    2  * remote_mutex.h -  remote_mutex operations definition.
     2 * remote_mutex.h -  POSIX mutex definition.
    33 *
    4  * Authors   Alain Greiner   (2016)
     4 * Authors   Alain Greiner   (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3030
    3131/***************************************************************************************
    32  *    This file defines a POSIX compliant mutex.
     32 *    This file defines an user level POSIX compliant mutex.
    3333 *
    34  * It is used by muti-threaded applications to synchronise threads running in
    35  * different clusters, as all access functions uses hal_remote_lw() / hal_remote_sw()
    36  * portable remote access primitives.
      34 * It can be used by multi-threaded user applications to synchronise user threads
     35 * running in different clusters.
    3736 *
    3837 * A mutex is declared by a given user process as a "pthread_mutex_t" global variable.
     
    4140 * For each user mutex, ALMOS-MKH creates a kernel "remote_mutex_t" structure,
    4241 * dynamically allocated in the reference cluster by the remote_mutex_create() function,
    43  * and destroyed by the remote_barrier_destroy() function, using RPC if the calling thread
     42 * and destroyed by the remote_mutex_destroy() function, using RPC if the calling thread
    4443 * is not running in the reference cluster.
    4544 *
     
    5352
    5453/*****************************************************************************************
    55  * This structure defines the mutex descriptor.
    56  * - It contains an xlist of all mutex dynamically created by a given process,
    57  *   rooted in the reference process descriptor.
    58  * - It contains the root of another xlist to register all waiting threads.
     54 * This structure defines the kernel implementation of an user level mutex.
    5955 ****************************************************************************************/
    6056
    6157typedef struct remote_mutex_s
    6258{
    63     remote_spinlock_t  lock;            /*! lock protecting list of waiting threads   */
     59    remote_busylock_t  lock;            /*! lock protecting the mutex state           */
    6460    intptr_t           ident;           /*! mutex identifier (vaddr in user space)    */
    65     uint32_t           value;           /*! mutex non allocated if 0                  */
    66     xptr_t             owner;           /*! extended pointer on owner thread          */
     61    uint32_t           taken;           /*! mutex non allocated if 0                  */
    6762    xlist_entry_t      list;            /*! member of list of mutex in same process   */
    6863    xlist_entry_t      root;            /*! root of list of waiting threads           */
     64    xptr_t             owner;           /*! extended pointer on owner thread          */
    6965}
    7066remote_mutex_t;
     
    8177
    8278/***************************************************************************************
    83  * This function implement the pthread_mutex_init() syscall.
     79 * This function implements the pthread_mutex_init() syscall.
    8480 * It allocates memory for the mutex descriptor in the reference cluster for
    8581 * the calling process, it initializes the mutex state, and register it in the
     
    8783 ***************************************************************************************
    8884 * @ ident       : mutex identifier (virtual address in user space).
    89  * @ return 0 if success / return ENOMEM if failure.
     85 * @ return 0 if success / ENOMEM if no memory / EINVAL if invalid argument.
    9086 **************************************************************************************/
    9187error_t remote_mutex_create( intptr_t ident );
    9288
    9389/***************************************************************************************
    94  * This function implement the pthread_mutex_destroy() syscall.
     90 * This function implements the pthread_mutex_destroy() syscall.
    9591 * It releases the memory allocated for the mutex descriptor, and removes the mutex
    9692 * from the list of mutex owned by the reference process.
     
    10197
    10298/***************************************************************************************
    103  * This blocking function get ownership of a remote mutex.
     99 * This blocking function implements the pthread_mutex_lock() syscall.
     100 * It returns only when the ownership of the mutex identified by the <mutex_xp>
     101 * argument has been obtained by the calling thread. It register in the mutex waiting
     102 * queue when the mutex is already taken by another thread.
    104103 ***************************************************************************************
    105104 * @ mutex_xp  : extended pointer on mutex descriptor.
     
    108107
    109108/***************************************************************************************
    110  * This function releases a remote mutex.
     109 * This function implements the pthread_mutex_unlock() syscall.
      110 * It checks that the calling thread is actually the mutex owner.
     111 * It reset the "taken" & "owner" fields for the mutex identified by <mutex_xp>.
     112 * It unblocks the first thread registered in the mutex waiting queue, when the
     113 * queue is not empty.
    111114 ***************************************************************************************
    112115 * @ mutex_xp  : extended pointer on mutex descriptor.
     116 * @ return 0 if success / return non zero if calling thread is not mutex owner.
    113117 **************************************************************************************/
    114 void remote_mutex_unlock( xptr_t  mutex_xp );
     118error_t remote_mutex_unlock( xptr_t  mutex_xp );
     119
     120/***************************************************************************************
      121 * This non-blocking function attempts to lock a mutex without blocking.
     122 ***************************************************************************************
     123 * @ mutex_xp  : extended pointer on mutex descriptor.
     124 * @ return 0 if success / return non zero if already taken.
     125 **************************************************************************************/
     126error_t remote_mutex_trylock( xptr_t  mutex_xp );
    115127
    116128
  • trunk/kernel/libk/remote_rwlock.c

    r457 r563  
    3131#include <remote_rwlock.h>
    3232
    33 ///////////////////////////////////////////
    34 void remote_rwlock_init( xptr_t lock_xp )
     33//////////////////////////////////////////////////////////////////////////////
     34//                Extern global variables
     35//////////////////////////////////////////////////////////////////////////////
     36
     37extern char * lock_type_str[];          // allocated in kernel_init.c
     38
     39
     40//////////////////////////////////////////
     41void remote_rwlock_init( xptr_t   lock_xp,
     42                         uint32_t type )
    3543{
    3644    remote_rwlock_t * lock_ptr = GET_PTR( lock_xp );
    3745    cxy_t             lock_cxy = GET_CXY( lock_xp );
    3846
    39     hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->ticket )  , 0 );
    40     hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->current ) , 0 );
    41     hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count )   , 0 );
    42 
    43 #if DEBUG_REMOTE_RWLOCKS
    44 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
    45 xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
    46 #endif
    47 
    48 }
    49 
    50 //////////////////////////////////////////////
    51 void remote_rwlock_rd_lock( xptr_t lock_xp )
     47    hal_remote_s32 ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
     48    hal_remote_s32 ( XPTR( lock_cxy , &lock_ptr->count ) , 0 );
     49
     50    xlist_root_init( XPTR( lock_cxy , &lock_ptr->rd_xroot ) );
     51    xlist_root_init( XPTR( lock_cxy , &lock_ptr->wr_xroot ) );
     52
     53    remote_busylock_init( XPTR( lock_cxy , &lock_ptr->lock ) , type );
     54}
     55
     56///////////////////////////////////////////////
     57void remote_rwlock_rd_acquire( xptr_t lock_xp )
    5258{
    53         reg_t      mode;
    54     uint32_t   ticket;
     59    thread_t * this = CURRENT_THREAD;
     60
     61    // check calling thread can yield
     62    thread_assert_can_yield( this , __FUNCTION__ );
    5563
    5664    // get cluster and local pointer on remote_rwlock
     
    5866    cxy_t             lock_cxy = GET_CXY( lock_xp );
    5967
    60     // get local pointer on local thread
    61     thread_t          * thread_ptr = CURRENT_THREAD;
    62 
    63     // extended pointers on ticket, current, count
    64     xptr_t              ticket_xp  = XPTR( lock_cxy   , &lock_ptr->ticket );
    65     xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    66     xptr_t              count_xp   = XPTR( lock_cxy   , &lock_ptr->count );
    67 
    68     // disable interrupts
    69     hal_disable_irq( &mode );
    70 
    71     // get next free ticket
    72     ticket = hal_remote_atomic_add( ticket_xp , 1 );
    73 
    74     // busy waiting loop to take the lock
    75         while( ticket != hal_remote_lw( current_xp ) )
    76         {
    77         hal_fixed_delay( CONFIG_RWLOCK_DELAY );
    78         }
    79 
    80     ////////// From here we have the lock  ////////////
    81 
    82     // increment count
     68    // build useful extended pointers
     69    xptr_t busylock_xp = XPTR( lock_cxy , &lock_ptr->lock );
     70    xptr_t taken_xp    = XPTR( lock_cxy , &lock_ptr->taken );
     71    xptr_t count_xp    = XPTR( lock_cxy , &lock_ptr->count );
     72    xptr_t rd_root_xp  = XPTR( lock_cxy , &lock_ptr->rd_xroot );
     73
     74    // get busylock
     75    remote_busylock_acquire( busylock_xp );
     76
     77    // block and deschedule if lock taken
     78    while( hal_remote_l32( taken_xp ) )
     79    {
     80
     81#if DEBUG_RWLOCK
     82if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     83{
     84    uint32_t   type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     85    printk("\n[DBG] %s : thread %x (%s) READ BLOCK on rwlock %s [%x,%x] / cycle %d\n",
     86    __FUNCTION__, this->trdid, thread_type_str(this->type),
     87    lock_type_str[type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     88}
     89#endif
     90        // get pointer on calling thread
     91        thread_t * this = CURRENT_THREAD;
     92
     93        // register reader thread in waiting queue
     94        xlist_add_last( rd_root_xp , XPTR( local_cxy , &this->wait_xlist ) );
     95
     96        // block reader thread
     97        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_LOCK );
     98
     99        // release busylock
     100        remote_busylock_release( busylock_xp );
     101
     102        // deschedule
     103        sched_yield("reader wait remote_rwlock");
     104
     105        // get busylock
     106        remote_busylock_acquire( busylock_xp );
     107    }
     108
     109#if DEBUG_RWLOCK
     110if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     111{
     112    uint32_t   type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     113    printk("\n[DBG] %s : thread %x (%s) READ ACQUIRE on rwlock %s [%x,%x] / cycle %d\n",
     114    __FUNCTION__, this->trdid, thread_type_str(this->type),
     115    lock_type_str[type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     116}
     117#endif
     118
     119    // increment number of readers
    83120    hal_remote_atomic_add( count_xp , 1 );
    84121
    85     // increment thread.remote_locks
    86     thread_ptr->remote_locks++;
    87 
    88 #if DEBUG_REMOTE_RWLOCKS
    89 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    90                  XPTR( lock_cxy ,  &lock_ptr->list ) );
    91 #endif
    92 
    93     // sync
    94     hal_fence();
    95 
    96     // release lock to allow several simultaneous readers
    97     hal_remote_atomic_add( current_xp , 1 );
    98 
    99     // enable interrupts
    100         hal_restore_irq( mode );
    101 
    102 }  // end remote_rwlock_rd_lock()
    103 
    104 ////////////////////////////////////////////////
    105 void remote_rwlock_rd_unlock( xptr_t lock_xp )
    106 {
    107         reg_t               mode;
     122    // release busylock
     123    remote_busylock_release( busylock_xp );
     124
     125}  // end remote_rwlock_rd_acquire()
     126
     127///////////////////////////////////////////////
     128void remote_rwlock_wr_acquire( xptr_t lock_xp )
     129{
     130    thread_t * this = CURRENT_THREAD;
     131
     132    // check calling thread can yield
     133    thread_assert_can_yield( this , __FUNCTION__ );
    108134
    109135    // get cluster and local pointer on remote_rwlock
     
    111137    cxy_t             lock_cxy = GET_CXY( lock_xp );
    112138
    113     // get cluster and local pointer on local thread
    114     thread_t          * thread_ptr = CURRENT_THREAD;
    115 
    116     // extended pointers on lock->count
    117     xptr_t              count_xp = XPTR( lock_cxy   , &lock_ptr->count );
    118 
    119     // disable interrupts
    120         hal_disable_irq( &mode );
    121  
    122     // decrement count
    123     hal_remote_atomic_add( count_xp , -1 );
    124 
    125     // decrement thread.remote_locks
    126         thread_ptr->remote_locks--;
    127 
    128 #if DEBUG_REMOTE_RWLOCKS
    129 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    130 #endif
    131 
    132     // enable interrupts
    133         hal_restore_irq( mode );
    134    
    135     // deschedule if pending request
    136     thread_check_sched();
    137 
    138 }  // end remote_rwlock_rd_unlock()
    139 
    140 //////////////////////////////////////////////
    141 void remote_rwlock_wr_lock( xptr_t lock_xp )
    142 {
    143         reg_t      mode;
    144     uint32_t   ticket;
     139    // build useful extended pointers
     140    xptr_t busylock_xp = XPTR( lock_cxy , &lock_ptr->lock );
     141    xptr_t taken_xp    = XPTR( lock_cxy , &lock_ptr->taken );
     142    xptr_t count_xp    = XPTR( lock_cxy , &lock_ptr->count );
     143    xptr_t wr_root_xp  = XPTR( lock_cxy , &lock_ptr->wr_xroot );
     144
     145    // get busylock
     146    remote_busylock_acquire( busylock_xp );
     147
     148    // block and deschedule if lock already taken or current readers
     149    while( hal_remote_l32( taken_xp ) || hal_remote_l32( count_xp ) )
     150    {
     151
     152#if DEBUG_RWLOCK
     153if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     154{
     155    uint32_t   type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     156    printk("\n[DBG] %s : thread %x (%s) WRITE BLOCK on rwlock %s [%x,%x] / cycle %d\n",
     157    __FUNCTION__, this->trdid, thread_type_str(this->type),
     158    lock_type_str[type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     159}
     160#endif
     161        // get local pointer on calling thread
     162        thread_t * this = CURRENT_THREAD;
     163
     164        // register writer thread in waiting queue
     165        xlist_add_last( wr_root_xp , XPTR( local_cxy , &this->wait_xlist ) );
     166
     167        // block writer thread
     168        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_LOCK );
     169
     170        // release busylock
     171        remote_busylock_release( busylock_xp );
     172
     173        // deschedule
     174        sched_yield("writer wait remote_rwlock");
     175
     176        // get busylock
     177        remote_busylock_acquire( busylock_xp );
     178    }
     179
     180#if DEBUG_RWLOCK
     181if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     182{
     183    uint32_t   type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     184    printk("\n[DBG] %s : thread %x (%s) WRITE ACQUIRE on rwlock %s [%x,%x] / cycle %d\n",
     185    __FUNCTION__, this->trdid, thread_type_str(this->type),
     186    lock_type_str[type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     187}
     188#endif
     189
     190    // take rwlock
     191    hal_remote_s32( taken_xp , 1 );
     192
     193    // release busylock
     194    remote_busylock_release( busylock_xp );
     195
     196}  // end remote_rwlock_wr_acquire()
     197
     198
     199///////////////////////////////////////////////
     200void remote_rwlock_rd_release( xptr_t lock_xp )
     201{
     202    // memory barrier before lock release
     203    hal_fence();
    145204
    146205    // get cluster and local pointer on remote_rwlock
     
    148207    cxy_t             lock_cxy = GET_CXY( lock_xp );
    149208
    150     // get local pointer on local thread
    151     thread_t          * thread_ptr = CURRENT_THREAD;
    152 
    153     // compute extended pointers on lock->ticket, lock->owner
    154     xptr_t              ticket_xp  = XPTR( lock_cxy   , &lock_ptr->ticket );
    155     xptr_t              count_xp   = XPTR( lock_cxy   , &lock_ptr->count );
    156     xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    157 
    158     // disable interrupts
    159     hal_disable_irq( &mode );
    160 
    161     // get next free ticket
    162     ticket = hal_remote_atomic_add( ticket_xp , 1 );
    163 
    164     // loop to take the lock
    165         while( ticket != hal_remote_lw( current_xp ) )
    166         {
    167         hal_fixed_delay( CONFIG_RWLOCK_DELAY );
    168         }
    169 
    170     ////////// From here we have the lock  ////////////
    171 
    172     // wait completion of read accesses
    173     while( hal_remote_lw( count_xp ) != 0 )
    174     {
    175         hal_fixed_delay( CONFIG_RWLOCK_DELAY );
    176     }
    177 
    178 #if DEBUG_REMOTE_RWLOCKS
    179 hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    180                 XPTR( local_cxy , thread_ptr ) );
    181 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    182                  XPTR( lock_cxy  , &lock_ptr->list ) );
    183 #endif   
    184 
    185     // increment thread.remote_locks
    186     thread_ptr->remote_locks++;
    187 
    188     // enable interrupts
    189         hal_restore_irq( mode );
    190 
    191 }  // end remote_rwlock_wr_lock()
    192 
    193 //////////////////////////////////////////////
    194 void remote_rwlock_wr_unlock( xptr_t lock_xp )
    195 {
    196         reg_t               mode;
     209    // build useful extended pointers
     210    xptr_t busylock_xp = XPTR( lock_cxy , &lock_ptr->lock );
     211    xptr_t count_xp    = XPTR( lock_cxy , &lock_ptr->count );
     212    xptr_t rd_root_xp  = XPTR( lock_cxy , &lock_ptr->rd_xroot );
     213    xptr_t wr_root_xp  = XPTR( lock_cxy , &lock_ptr->wr_xroot );
     214
     215    // get busylock
     216    remote_busylock_acquire( busylock_xp );
     217
     218#if DEBUG_RWLOCK
     219if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     220{
     221    thread_t * this = CURRENT_THREAD;
     222    uint32_t   type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     223    printk("\n[DBG] %s : thread %x (%s) READ RELEASE on rwlock %s [%x,%x] / cycle %d\n",
     224    __FUNCTION__, this->trdid, thread_type_str(this->type),
     225    lock_type_str[type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     226}
     227#endif
     228
     229        // decrement number of readers
     230    hal_remote_atomic_add( count_xp , -1 );
     231
     232    // release first writer in waiting queue if no current readers
     233    // and writers waiting queue non empty
     234    if( (hal_remote_l32( count_xp ) == 0) && (xlist_is_empty( wr_root_xp ) == false) )
     235    {
     236        // get first writer thread
     237        xptr_t      thread_xp  = XLIST_FIRST( wr_root_xp , thread_t, wait_xlist );
     238        cxy_t       thread_cxy = GET_CXY( thread_xp );
     239        thread_t *  thread_ptr = GET_PTR( thread_xp );
     240
     241        // remove this waiting thread from waiting list
     242        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     243
     244        // unblock this waiting thread
     245        thread_unblock( thread_xp , THREAD_BLOCKED_LOCK );
     246
     247#if DEBUG_RWLOCK
     248if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK )
     249{
     250    thread_t  * this        = CURRENT_THREAD;
     251    uint32_t    lock_type   = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     252    trdid_t     trdid       = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     253    uint32_t    thread_type = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->type ) );
     254    printk("\n[DBG] %s : thread %x (%s) UNBLOCK thread %x (%s)"
     255    " / rwlock %s [%x,%x] / cycle %d\n",
     256    __FUNCTION__, this->trdid, thread_type_str(this->type), trdid, thread_type_str(thread_type),
     257    lock_type_str[lock_type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     258}
     259#endif
     260
     261    }
     262
     263    // release all readers in waiting queue if writers waiting queue empty
     264    // and readers waiting queue non empty
     265    else if( xlist_is_empty( wr_root_xp ) && (xlist_is_empty( rd_root_xp ) == false) )
     266    {
     267        while( xlist_is_empty( rd_root_xp ) == false )
     268        {
     269            // get first writer thread
     270            xptr_t      thread_xp  = XLIST_FIRST( wr_root_xp , thread_t, wait_xlist );
     271            cxy_t       thread_cxy = GET_CXY( thread_xp );
     272            thread_t *  thread_ptr = GET_PTR( thread_xp );
     273
     274            // remove this waiting thread from waiting list
     275            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     276
     277            // unblock this waiting thread
     278            thread_unblock( thread_xp , THREAD_BLOCKED_LOCK );
     279
     280#if DEBUG_RWLOCK
     281if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK )
     282{
     283    thread_t  * this        = CURRENT_THREAD;
     284    uint32_t    lock_type   = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     285    trdid_t     trdid       = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     286    uint32_t    thread_type = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->type ) );
     287    printk("\n[DBG] %s : thread %x (%s) UNBLOCK thread %x (%s)"
     288    " / rwlock %s [%x,%x] / cycle %d\n",
     289    __FUNCTION__, this->trdid, thread_type_str(this->type), trdid, thread_type_str(thread_type),
     290    lock_type_str[lock_type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     291}
     292#endif
     293
     294        }
     295    }
     296
     297    // release busylock
     298    remote_busylock_release( busylock_xp );
     299
     300}  // end remote_rwlock_rd_release()
     301
     302///////////////////////////////////////////////
     303void remote_rwlock_wr_release( xptr_t lock_xp )
     304{
     305    // memory barrier before lock release
     306    hal_fence();
    197307
    198308    // get cluster and local pointer on remote_rwlock
     
    200310    cxy_t             lock_cxy = GET_CXY( lock_xp );
    201311
    202     // get cluster and local pointer on local thread
    203     thread_t          * thread_ptr = CURRENT_THREAD;
    204 
    205     // compute extended pointer on lock->ticket
    206     xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    207 
    208     // disable interrupts
    209         hal_disable_irq( &mode );
    210  
    211 #if CONFIG_LOCKS_OWNER
    212 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    213 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    214 #endif
    215 
    216     // release lock
    217     hal_remote_atomic_add( current_xp , 1 );
    218 
    219     // decrement thread.remote_locks
    220         thread_ptr->remote_locks--;
    221 
    222     // enable interrupts
    223         hal_restore_irq( mode );
    224    
    225     // deschedule if pending request
    226     thread_check_sched();
    227 
    228 }  // end remote_rwlock_wr_unlock()
    229 
    230 ///////////////////////////////////////////
    231 void remote_rwlock_print( xptr_t   lock_xp,
    232                           char   * comment )
    233 {
    234     uint32_t     ticket;                // first free ticket index
    235     uint32_t     current;               // ticket index of current owner
    236     uint32_t     count;                 // current number of reader threads
    237 
    238     // get cluster and local pointer on remote_rwlock
    239     remote_rwlock_t * lock_ptr = GET_PTR( lock_xp );
    240     cxy_t             lock_cxy = GET_CXY( lock_xp );
    241 
    242     ticket  = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->ticket ) );
    243     current = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->current ) );
    244     count   = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->count ) );
    245 
    246     printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d\n",
    247            lock_xp , comment , ticket , current , count );
    248 
    249 }  // end remote_rwlock_print()
    250 
     312    // build useful extended pointers
     313    xptr_t busylock_xp = XPTR( lock_cxy , &lock_ptr->lock );
     314    xptr_t taken_xp    = XPTR( lock_cxy , &lock_ptr->taken );
     315    xptr_t rd_root_xp  = XPTR( lock_cxy , &lock_ptr->rd_xroot );
     316    xptr_t wr_root_xp  = XPTR( lock_cxy , &lock_ptr->wr_xroot );
     317
     318    // get busylock
     319    remote_busylock_acquire( busylock_xp );
     320
     321#if DEBUG_RWLOCK
     322if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     323{
     324    thread_t * this = CURRENT_THREAD;
     325    uint32_t   type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     326    printk("\n[DBG] %s : thread %x (%s) WRITE RELEASE on rwlock %s [%x,%x] / cycle %d\n",
     327    __FUNCTION__, this->trdid, thread_type_str(this->type),
     328    lock_type_str[type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     329}
     330#endif
     331
     332    // release rwlock
     333    hal_remote_s32( taken_xp , 0 );
     334
     335    // unblock first waiting writer thread if writers waiting queue non empty
     336    if( xlist_is_empty( wr_root_xp ) == false )
     337    {
     338        // get first writer thread
     339        xptr_t      thread_xp  = XLIST_FIRST( wr_root_xp , thread_t, wait_xlist );
     340        cxy_t       thread_cxy = GET_CXY( thread_xp );
     341        thread_t *  thread_ptr = GET_PTR( thread_xp );
     342
     343        // remove this waiting thread from waiting list
     344        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     345
     346        // unblock this waiting thread
     347        thread_unblock( thread_xp , THREAD_BLOCKED_LOCK );
     348
     349#if DEBUG_RWLOCK
     350if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK )
     351{
     352    thread_t  * this        = CURRENT_THREAD;
     353    uint32_t    lock_type   = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     354    trdid_t     trdid       = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     355    uint32_t    thread_type = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->type ) );
     356    printk("\n[DBG] %s : thread %x (%s) UNBLOCK thread %x (%s)"
     357    " / rwlock %s [%x,%x] / cycle %d\n",
     358    __FUNCTION__, this->trdid, thread_type_str(this->type), trdid, thread_type_str(thread_type),
     359    lock_type_str[lock_type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     360}
     361#endif
     362
     363    }
     364
     365    // check readers waiting queue and unblock all if writers waiting queue empty
     366    else
     367    {
     368        while( xlist_is_empty( rd_root_xp ) == false )
     369        {
     370            // get first reader thread
     371            xptr_t      thread_xp  = XLIST_FIRST( rd_root_xp , thread_t, wait_xlist );
     372            cxy_t       thread_cxy = GET_CXY( thread_xp );
     373            thread_t *  thread_ptr = GET_PTR( thread_xp );
     374
     375            // remove this waiting thread from waiting list
     376            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     377
     378            // unblock this waiting thread
     379            thread_unblock( thread_xp , THREAD_BLOCKED_LOCK );
     380
     381#if DEBUG_RWLOCK
     382if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK )
     383{
     384    thread_t  * this        = CURRENT_THREAD;
     385    uint32_t    lock_type   = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
     386    trdid_t     trdid       = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     387    uint32_t    thread_type = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->type ) );
     388    printk("\n[DBG] %s : thread %x (%s) UNBLOCK thread %x (%s)"
     389    " / rwlock %s [%x,%x] / cycle %d\n",
     390    __FUNCTION__, this->trdid, thread_type_str(this->type), trdid, thread_type_str(thread_type),
     391    lock_type_str[lock_type], lock_cxy, lock_ptr, (uint32_t)hal_get_cycles() );
     392}
     393#endif
     394
     395        }
     396    }
     397
     398    // release busylock
     399    remote_busylock_release( busylock_xp );
     400
     401}  // end remote_rwlock_wr_release()
     402
     403
     404
  • trunk/kernel/libk/remote_rwlock.h

    r457 r563  
    11/*
    2  * remote_rwlock.h - kernel remote_rwlock definition.
     2 * remote_rwlock.h - kernel remote read/writelock definition.
    33 *
    44 * Authors   Alain Greiner   (2016,2017,2018)
     
    2727#include <kernel_config.h>
    2828#include <hal_kernel_types.h>
     29#include <remote_busylock.h>
    2930#include <xlist.h>
    3031
    31 /***************************************************************************************
    32  * This file defines a remote kernel lock, that supports several simultaneous read
    33  * accesses, but only one write access. It implements a ticket based allocation policy.
    34  * It can be used to synchronize threads running in different clusters, because
    35  * all access functions use remote pointers.
    36  * - A reader take the lock to atomically increments the registered readers count.
    37  *   Then it release the lock and access the protected structure. It atomically
    38  *   decrements the readers count without taking the lock when access is completed.
    39  * - A writer take the lock and keep it, but must wait completion of all current read
    40  *   accesses before starting its own access.
    41  * When the lock is taken by another thread, the new-comers use a busy waiting policy.
    42  **************************************************************************************/
     32/*******************************************************************************************
     33 * This structure defines a kernel, global, read/write lock, supporting several simultaneous
     34 * read accesses, but only one write access to a globally shared object, that can be
     35 * accessed by threads running in any cluster.
     36 * Both readers and writers take the associated busylock before accessing or updating
     37 * the rwlock state, and releases the busylock after rwlock state update.
      38 * - when a reader tries to access the object, it increments the readers "count" when the
      39 *   lock is not "taken" by a writer. It registers in the "rd_root" waiting queue, blocks,
      40 *   and deschedules when the lock is taken.
      41 * - when a writer tries to take the rwlock, it checks the "taken" field. If the lock is
      42 *   already taken, or if the number of readers is non zero, it registers in the "wr_root"
      43 *   waiting queue, blocks, and deschedules. It sets "taken" otherwise.
      44 * - when a reader completes its access, it decrements the readers "count", unblocks
      45 *   the first waiting writer if there is no other reader, and unblocks all waiting
      46 *   readers if there is no write request.
      47 * - when a writer completes its access, it resets the "taken" field, releases the first
      48 *   waiting writer if queue non empty, or releases all waiting readers if no writer.
     49 ******************************************************************************************/
     50
     51
     52/*******************************************************************************************
     53 * This structure defines a remote rwlock.
     54 ******************************************************************************************/
    4355
    4456typedef struct remote_rwlock_s
    4557{
    46     uint32_t       ticket;          /*! first free ticket index                       */
    47     uint32_t       current;         /*! ticket index of current owner                 */
    48     uint32_t       count;           /*! current number of reader threads              */
     58    remote_busylock_t   lock;        /*! busylock protecting the rwlock state             */
     59        volatile uint32_t   taken;       /*! lock taken by an exclusive writer if non zero    */
     60    volatile uint32_t   count;       /*! current number of simultaneous readers threads   */
     61    xlist_entry_t       rd_xroot;    /*! root of list of waiting readers                  */
     62    xlist_entry_t       wr_xroot;    /*! root of list of waiting writers                  */
     63}
     64remote_rwlock_t;
    4965
    50 #if DEBUG_REMOTE_RWLOCKS
    51     xptr_t         owner;           /*! extended pointer on writer thread             */
    52     xlist_entry_t  list;            /*! member of list of remote locks taken by owner */
    53 #endif
    54 
    55 }
    56 remote_rwlock_t;
    5766
    5867/***************************************************************************************
    5968 * This function initializes a remote rwlock.
     69 * The <type> argument defines the lock usage and is only used for debug.
     70 * This type is actually stored in the associated busylock descriptor.
     71 ***************************************************************************************
     72 * @ lock_xp    : extended pointer on the remote rwlock
     73 * @ type       : lock usage for debug.
     74 **************************************************************************************/
     75void remote_rwlock_init( xptr_t   lock_xp,
     76                         uint32_t type );
     77
     78/***************************************************************************************
     79 * This blocking function get access to a remote rwlock for a reader.
    6080 ***************************************************************************************
    6181 * @ lock_xp    : extended pointer on the remote rwlock
    6282 **************************************************************************************/
    63 void remote_rwlock_init( xptr_t lock_xp );
     83void remote_rwlock_rd_acquire( xptr_t lock_xp );
    6484
    6585/***************************************************************************************
    66  * This blocking function get access to a remote rwlock for a reader.
    67  * It increments the calling thread locks count when the lock has been taken.
     86 * This function releases a remote rwlock for a reader.
    6887 ***************************************************************************************
    6988 * @ lock_xp    : extended pointer on the remote rwlock
    7089 **************************************************************************************/
    71 void remote_rwlock_rd_lock( xptr_t lock_xp );
     90void remote_rwlock_rd_release( xptr_t lock_xp );
    7291
    7392/***************************************************************************************
    74  * This function releases a remote rwlock for a reader.
    75  * It decrements the calling thread locks count when the lock has been released.
     93 * This blocking function get access to a remote rwlock for a writer.
    7694 ***************************************************************************************
    7795 * @ lock_xp    : extended pointer on the remote rwlock
    7896 **************************************************************************************/
    79 void remote_rwlock_rd_unlock( xptr_t lock_xp );
     97void remote_rwlock_wr_acquire( xptr_t lock_xp );
    8098
    8199/***************************************************************************************
    82  * This blocking function get access to a remote rwlock for a writer.
    83  * It increments the calling thread locks count when the lock has been taken.
     100 * This function releases a remote rwlock for a writer.
    84101 ***************************************************************************************
    85102 * @ lock_xp    : extended pointer on the remote rwlock
    86103 **************************************************************************************/
    87 void remote_rwlock_wr_lock( xptr_t lock_xp );
    88 
    89 /***************************************************************************************
    90  * This function releases a remote rwlock for a writer.
    91  * It decrements the calling thread locks count when the lock has been released.
    92  ***************************************************************************************
    93  * @ lock_xp    : extended pointer on the remote rwlock
    94  **************************************************************************************/
    95 void remote_rwlock_wr_unlock( xptr_t lock_xp );
    96 
    97 /***************************************************************************************
    98  * Display the lock state on kernel TTY.
    99  ***************************************************************************************
    100  * @ lock_xp    : extended pointer on the remote rwlock
    101  * @ comment    : comment to be printed.
    102  **************************************************************************************/
    103 void remote_rwlock_print( xptr_t   lock_xp,
    104                           char   * comment );
     104void remote_rwlock_wr_release( xptr_t lock_xp );
    105105
    106106#endif
  • trunk/kernel/libk/remote_sem.c

    r469 r563  
    11/*
    2  * remote_sem.c - Kernel function implementing the semaphore related syscalls.
     2 * remote_sem.c - POSIX unnamed semaphore implementation.
    33 *
    4  * Author   Alain Greiner  (2016)
     4 * Author   Alain Greiner  (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3333
    3434///////////////////////////////////////////////
    35 xptr_t remote_sem_from_vaddr( intptr_t  vaddr )
     35xptr_t remote_sem_from_ident( intptr_t  ident )
    3636{
    3737    // get pointer on local process_descriptor
     
    4545    process_t    * ref_ptr = GET_PTR( ref_xp );
    4646
    47     // get extended pointer on root of semaphores list
     47    // get extended pointer on semaphores list
    4848    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->sem_root );
     49    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );
    4950   
     51    // get lock protecting synchro lists
     52    remote_queuelock_acquire( lock_xp );
     53 
    5054    // scan reference process semaphores list
    5155    xptr_t         iter_xp;
     
    5357    cxy_t          sem_cxy;
    5458    remote_sem_t * sem_ptr;
    55     intptr_t       ident;
     59    intptr_t       current;
    5660    bool_t         found = false;
    5761           
     
    6165        sem_cxy = GET_CXY( sem_xp );
    6266        sem_ptr = GET_PTR( sem_xp );
    63         ident  = (intptr_t)hal_remote_lpt( XPTR( sem_cxy , &sem_ptr->ident ) );   
    64 
    65         if( ident == vaddr )
     67        current = (intptr_t)hal_remote_lpt( XPTR( sem_cxy , &sem_ptr->ident ) );   
     68
     69        if( current == ident )
    6670        {
    6771            found = true;
     
    7074    }
    7175
      76    // release lock protecting synchro lists
     77    remote_queuelock_release( lock_xp );
     78 
    7279    if( found == false )  return XPTR_NULL;
    7380    else                  return sem_xp;
    7481
    75 }  // end remote_sem_from_vaddr()
     82}  // end remote_sem_from_ident()
    7683
    7784///////////////////////////////////////////
    7885error_t remote_sem_create( intptr_t   vaddr,
    79                            uint32_t   value,
    80                            xptr_t     sem_xp_xp )
     86                           uint32_t   value )
    8187{
    8288    remote_sem_t * sem_ptr;
     
    108114    }
    109115
    110     if( sem_xp == XPTR_NULL ) return -1;
     116    if( sem_xp == XPTR_NULL ) return 0xFFFFFFFF;
    111117
    112118    // initialise semaphore
    113     hal_remote_sw ( XPTR( ref_cxy , &sem_ptr->count ) , value );
     119    hal_remote_s32 ( XPTR( ref_cxy , &sem_ptr->count ) , value );
    114120        hal_remote_spt( XPTR( ref_cxy , &sem_ptr->ident ) , (void *)vaddr );
    115     remote_spinlock_init( XPTR( ref_cxy , &sem_ptr->lock ) );
    116121        xlist_root_init( XPTR( ref_cxy , &sem_ptr->root ) );
    117122        xlist_entry_init( XPTR( ref_cxy , &sem_ptr->list ) );
    118 
    119     // register semaphore in reference process xlist
     123    remote_busylock_init( XPTR( ref_cxy , &sem_ptr->lock ), LOCK_SEM_STATE );
     124
    120125    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->sem_root );
    121     xptr_t xp_list = XPTR( ref_cxy , &sem_ptr->list );
    122     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    123     xlist_add_first( root_xp , xp_list );
    124     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    125 
    126     // write extended pointer on semaphore in calling thread buffer
    127     hal_remote_swd( sem_xp_xp , sem_xp );
     126    xptr_t list_xp = XPTR( ref_cxy , &sem_ptr->list );
     127
     128    // get lock protecting user synchro lists
     129    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     130
     131    // register semaphore in reference process list of semaphores
     132    xlist_add_first( root_xp , list_xp );
     133
     134    // release lock protecting user synchro lists
     135    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     136
     137#if DEBUG_SEM
     138thread_t * this = CURRENT_THREAD;
     139if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
     140printk("\n[DBG] %s : thread %x in process %x INITIALIZE sem(%x,%x) / value %d\n",
     141__FUNCTION__, this->trdid, this->process->pid, local_cxy, sem_ptr, value );
     142#endif
    128143
    129144    return 0;
    130145
    131 }  // en remote_sem_create()
     146}  // end remote_sem_create()
    132147 
    133148////////////////////////////////////////
     
    146161    // get semaphore cluster and local pointer
    147162    cxy_t          sem_cxy = GET_CXY( sem_xp );
    148     remote_sem_t * sem_ptr = (remote_sem_t *)GET_PTR( sem_xp );
    149 
    150     // get lock protecting semaphore
    151     remote_spinlock_lock( XPTR( sem_cxy , &sem_ptr->lock ) );
    152  
     163    remote_sem_t * sem_ptr = GET_PTR( sem_xp );
     164
    153165    // get remote pointer on waiting queue root
    154166    xptr_t root_xp = XPTR( sem_cxy , &sem_ptr->root );
     
    161173    }
    162174
    163     // reset semaphore count
    164     hal_remote_sw( XPTR( sem_cxy , &sem_ptr->count ) , 0 );
    165 
    166175    // remove semaphore from reference process xlist
    167     remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     176    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    168177    xlist_unlink( XPTR( sem_cxy , &sem_ptr->list ) );
    169     remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    170 
    171     // release lock
    172     remote_spinlock_unlock( XPTR( sem_cxy , &sem_ptr->lock ) );
     178    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    173179
    174180    // release memory allocated for semaphore descriptor
     
    190196void remote_sem_wait( xptr_t sem_xp )
    191197{
     198    thread_t * this = CURRENT_THREAD;
     199
     200// check calling thread can yield
     201assert( (this->busylocks == 0),
     202"cannot yield : busylocks = %d\n", this->busylocks );
     203
     204
    192205    // get semaphore cluster and local pointer
    193206    cxy_t          sem_cxy = GET_CXY( sem_xp );
    194207    remote_sem_t * sem_ptr = GET_PTR( sem_xp );
    195208
    196     // get lock protecting semaphore     
    197         remote_spinlock_lock( XPTR( sem_cxy , &sem_ptr->lock ) );
     209    // get extended pointers on sem fields
     210    xptr_t           count_xp = XPTR( sem_cxy , &sem_ptr->count );
     211    xptr_t           root_xp  = XPTR( sem_cxy , &sem_ptr->root );
     212    xptr_t           lock_xp  = XPTR( sem_cxy , &sem_ptr->lock );
     213
     214    while( 1 )
     215    {
     216        // get busylock protecting semaphore     
     217            remote_busylock_acquire( lock_xp );
    198218 
    199     // get semaphore current value
    200     uint32_t count = hal_remote_lw( XPTR( sem_cxy , &sem_ptr->count ) );
    201 
    202         if( count > 0 )       // success
    203         {
    204         // decrement semaphore value
    205         hal_remote_sw( XPTR( sem_cxy , &sem_ptr->count ) , count - 1 );
    206 
    207         // release lock
    208             remote_spinlock_unlock( XPTR( sem_cxy , &sem_ptr->lock ) );
    209         }
    210         else                 // failure
    211         {
    212         thread_t * this = CURRENT_THREAD;
    213 
    214         // register thread in waiting queue
    215         xptr_t root_xp = XPTR( sem_cxy   , &sem_ptr->root );
    216         xptr_t list_xp = XPTR( local_cxy , &this->wait_list );
    217                 xlist_add_last( root_xp , list_xp );
    218 
    219         // release lock
    220             remote_spinlock_unlock( XPTR( sem_cxy , &sem_ptr->lock ) );
    221 
    222         // block and deschedule
    223         thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_SEM ); 
    224         sched_yield("blocked on semaphore");
     219        // get semaphore current value
     220        uint32_t count = hal_remote_l32( count_xp );
     221
     222            if( count > 0 )                     // success
     223            {
     224            // decrement semaphore value
     225            hal_remote_s32( count_xp , count - 1 );
     226
     227#if DEBUG_SEM
     228if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
     229printk("\n[DBG] %s : thread %x in process %x DECREMENT sem(%x,%x) / value %d\n",
     230__FUNCTION__, this->trdid, this->process->pid, sem_cxy, sem_ptr, count-1 );
     231#endif
     232            // release busylock protecting semaphore
     233                remote_busylock_release( XPTR( sem_cxy , &sem_ptr->lock ) );
     234
     235            return;
     236        }
     237            else                               // failure
     238            {
     239            // get cluster and pointers on calling thread
     240            cxy_t            caller_cxy = local_cxy;
     241            thread_t       * caller_ptr = CURRENT_THREAD;
     242            xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
     243
     244            // block the calling thread
     245            thread_block( caller_xp , THREAD_BLOCKED_SEM ); 
     246
     247            // register calling thread in waiting queue
     248            xptr_t entry_xp = XPTR( caller_cxy , &caller_ptr->wait_xlist );
     249                    xlist_add_last( root_xp , entry_xp );
     250
     251#if DEBUG_SEM
     252if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
     253printk("\n[DBG] %s : thread %x in process %x BLOCK on sem(%x,%x) / value %d\n",
     254__FUNCTION__, this->trdid, this->process->pid, sem_cxy, sem_ptr, count );
     255#endif
     256            // release busylock protecting semaphore
     257                remote_busylock_release( XPTR( sem_cxy , &sem_ptr->lock ) );
     258
     259            // deschedule calling thread
     260            sched_yield("blocked on semaphore");
     261        }
    225262        }
    226263}  // end remote_sem_wait()
     
    229266void remote_sem_post( xptr_t sem_xp )
    230267{
     268    // memory barrier before sem release
     269    hal_fence();
     270
    231271    // get semaphore cluster and local pointer
    232272    cxy_t          sem_cxy = GET_CXY( sem_xp );
    233273    remote_sem_t * sem_ptr = GET_PTR( sem_xp );
    234274
    235     // get lock protecting semaphore
    236         remote_spinlock_lock( XPTR( sem_cxy , &sem_ptr->lock ) );
     275    // get extended pointers on sem fields
     276    xptr_t           count_xp = XPTR( sem_cxy , &sem_ptr->count );
     277    xptr_t           root_xp  = XPTR( sem_cxy , &sem_ptr->root );
     278    xptr_t           lock_xp  = XPTR( sem_cxy , &sem_ptr->lock );
     279
     280    // get busylock protecting semaphore
     281        remote_busylock_acquire( lock_xp );
    237282 
    238     // get semaphore current value
    239     uint32_t count = hal_remote_lw( XPTR( sem_cxy , &sem_ptr->count ) );
    240 
    241     // get remote pointer on waiting queue root
    242     xptr_t root_xp = XPTR( sem_cxy , &sem_ptr->root );
    243  
    244         if( xlist_is_empty( root_xp ) )   // no waiting thread
    245     {
    246         // increment semaphore value
    247         hal_remote_sw( XPTR( sem_cxy , &sem_ptr->count ) , count + 1 );
    248     }
    249     else
     283    // increment semaphore value
     284    hal_remote_atomic_add( count_xp , 1 );
     285
     286#if DEBUG_SEM
     287uint32_t count = hal_remote_l32( count_xp );
     288thread_t * this = CURRENT_THREAD;
     289if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
     290printk("\n[DBG] %s : thread %x in process %x INCREMENT sem(%x,%x) / value %d\n",
     291__FUNCTION__, this->trdid, this->process->pid, sem_cxy, sem_ptr, count );
     292#endif
     293
     294    // scan waiting queue to unblock all waiting threads
     295        while( xlist_is_empty( root_xp ) == false )   // waiting queue non empty
    250296    {
    251297        // get first waiting thread from queue
    252         xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    253 
    254         // get thread cluster and local poiner
     298        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
    255299        cxy_t      thread_cxy = GET_CXY( thread_xp );
    256300        thread_t * thread_ptr = GET_PTR( thread_xp );
    257301
    258         // remove this thread from the waiting queue, and unblock it
    259         xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );
     302        // remove this thread from the waiting queue
     303        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
     304
     305        // unblock this waiting thread
    260306                thread_unblock( thread_xp , THREAD_BLOCKED_SEM );
    261     }
    262 
    263     // release lock
    264         remote_spinlock_unlock( XPTR( sem_cxy , &sem_ptr->lock ) );
     307
     308#if DEBUG_SEM
     309if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
     310{
     311trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     312process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
     313pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
     314printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %x / sem(%x,%x)\n",
     315__FUNCTION__, this->trdid, this->process->pid, trdid, pid, sem_cxy, sem_ptr );
     316}
     317#endif
     318
     319    }
     320
     321    // release busylock protecting the semaphore
     322        remote_busylock_release( XPTR( sem_cxy , &sem_ptr->lock ) );
    265323
    266324}  // end remote_sem_post()
     
    275333    remote_sem_t * sem_ptr = GET_PTR( sem_xp );
    276334
    277     *data = hal_remote_lw( XPTR( sem_cxy , &sem_ptr->count ) );
     335    *data = hal_remote_l32( XPTR( sem_cxy , &sem_ptr->count ) );
    278336
    279337}  // end remote_sem_get_value()
  • trunk/kernel/libk/remote_sem.h

    r457 r563  
    11/*
    2  * remote_sem.h - POSIX unnammed semaphore definition.
     2 * remote_sem.h - POSIX unnamed semaphore definition.
    33 *
    4  * Author   Alain Greiner    (2016)
     4 * Author   Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    1010 * ALMOS-MKH is free software; you can redistribute it and/or modify it
    1111 * under the terms of the GNU General Public License as published by
    12  * the Free Software Foundation; version 2.0 of the License.
      12 * the Free Software Foundation; version 2.0 of the License.
    1313 *
    1414 * ALMOS-MKH is distributed in the hope that it will be useful, but
     
    2727#include <hal_kernel_types.h>
    2828#include <xlist.h>
    29 #include <remote_spinlock.h>
     29#include <remote_busylock.h>
    3030
    31 /*********************************************************************************************
    32  *      This file defines a POSIX compliant unnamed semaphore.
      31/********************************************************************************************
     32 * This is the kernel representation of an user level, POSIX compliant unnamed semaphore.
    3333 *
    34  * A semaphore is declared by a given user process as a "sem_t" global variable.
      34 * It can be used by multi-threaded user applications to synchronise user threads
     35 * running in different clusters.
     36 *
      37 * A semaphore is declared by a given user process as a "sem_t" global variable.
    3538 * The user "sem_t" structure is implemented as an unsigned long, but the value is not
    3639 * used by the kernel. ALMOS-MKH uses only the sem_t virtual address as an identifier.
    37  * For each user semaphore, ALMOS-MKH creates a kernel "remote_sem_t" structure.
    38  * - This structure is allocated in the reference cluster by the sem_init() syscall,
    39  *   and destroyed by the sem_destroy() syscall, using RPC if the calling thread is not
    40  *   running in the reference cluster.
    41  * - The threads accessing the semaphore with the sem_get_value(), sem_wait(), sem_post()
    42  *   syscalls can be running in any cluster, as these syscalls access the "remote_sem_t"
    43  *   structure with remote_read / remote_write access functions.
     40 * For each user semaphore, ALMOS-MKH creates a kernel "remote_sem_t" structure,
     41 * dynamically allocated in the reference cluster by the remote_sem_create() function,
     42 * and destroyed by the remote_sem_destroy() function, using RPC if the calling thread is not
     43 * running in the reference cluster.
     44 *
     45 * The threads accessing the semaphore with the sem_get_value(), sem_wait(), sem_post()
     46 * syscalls can be running in any cluster, as these syscalls access the "remote_sem_t"
     47 * structure with remote_read / remote_write access functions.
    4448 ********************************************************************************************/
    4549
    4650
    4751/*********************************************************************************************
    48  * This structure defines the kernel semaphore descriptor.
    49  * - It contains the root of a waiting threads queue, implemented as a distributed xlist.
    50  * - It contains an xlist of all semaphores, rooted in the reference process descriptor.
    51  * - It contains a lock protecting both the semaphore value and the waiting queue.
     52 * This structure defines the kernel implementation of an user level semaphore.
    5253 ********************************************************************************************/
    5354
    5455typedef struct remote_sem_s
    5556{
    56         remote_spinlock_t lock;          /*! lock protecting both count and wait_queue          */
     57        remote_busylock_t lock;          /*! lock protecting the semaphore state                */
    5758        uint32_t          count;         /*! current value                                      */
    5859    intptr_t          ident;         /*! virtual address in user space == identifier        */
     
    6667 * This function returns an extended pointer on the remote semaphore identified
    6768 * by its virtual address in a given user process. It makes an associative search,
    68  * scanning the list of semaphores rooted in the reference process descriptor.
     69 * scanning the list of user semaphores rooted in the reference process descriptor.
    6970 *********************************************************************************************
    7071 * @ process  : pointer on local process descriptor.
    71  * @ vaddr    : semaphore virtual address, used as identifier.
     72 * @ ident    : semaphore virtual address, used as identifier.
    7273 * @ returns extended pointer on semaphore if success / returns XPTR_NULL if not found.
    7374 ********************************************************************************************/
    74 xptr_t remote_sem_from_vaddr( intptr_t  vaddr );
     75xptr_t remote_sem_from_ident( intptr_t  ident );
    7576
    7677/*********************************************************************************************
     
    7879 * It allocates memory for a remote semaphore in reference cluster, using a RPC if required,
    7980 * and initializes it, using remote accesses from values defined by the <vaddr> and <value>
    80  * arguments. It uses also a remote access to return the extended pointer on the semaphore
    81  * in the buffer identified by the <buf_xp> argument.
     81 * arguments. It registers this semaphore in the calling process's list of user semaphores.
    8282 *********************************************************************************************
    8383 * @ vaddr     : [in] semaphore virtual address, used as identifier.
    8484 * @ value     : [in] semaphore initial value.
    85  * @ sem_xp_xp : [out] extended pointer on buffer to store extended pointer on semaphore.
    8685 * @ returns 0 if success / returns -1 if no memory.
    8786 ********************************************************************************************/
    8887error_t remote_sem_create( intptr_t  vaddr,
    89                            uint32_t  value,
    90                            xptr_t    sem_xp_xp );
     88                           uint32_t  value );
    9189 
    9290/****************************yy***************************************************************
     
    10199/****************************yy***************************************************************
    102100 * This blocking function implements the SEM_WAIT operation.
    103  * - It returns only when the remote semaphore has a non-zero value,
    104  *   and has been atomically decremented.
    105  * -  if the semaphore has a zero value, it register the calling thread in the semaphore
    106  *    waiting queue, block the thread, and yield.
     101 * - It returns only when the remote semaphore has a non-zero value, and has been
     102 *   atomically decremented.
     103 * - If the semaphore has a zero value, it registers the calling thread in the semaphore
     104 *   waiting queue, blocks the thread, and yields.
    107105 *********************************************************************************************
    108106 * @ sem_xp   : [in] extended pointer on semaphore.
     
    112110/****************************yy***************************************************************
    113111 * This function implements the SEM_POST operation.
    114  * - It atomically increments the remote semaphore if the semaphore waiting queue is empty.
    115  * - If the waiting queue is not empty, it wakes up the first waiting thread.
     112 * - It atomically increments the remote semaphore.
     113 * - If the waiting queue is not empty, it wakes up all waiting threads.
    116114 *********************************************************************************************
    117115 * @ sem_xp   : [in] extended pointer on semaphore.
  • trunk/kernel/libk/rwlock.c

    r457 r563  
    11/*
    2  * rwlock.c - kernel read/write lock synchronization.
     2 * rwlock.c - kernel local read/write lock implementation.
    33 *
    44 * Author  Alain Greiner     (2016,2017,2018)
     
    3131#include <rwlock.h>
    3232
    33 ///////////////////////////////////////
    34 void rwlock_init( rwlock_t * lock )
     33//////////////////////////////////////////////////////////////////////////////
     34//                Extern global variables
     35//////////////////////////////////////////////////////////////////////////////
     36
     37extern char * lock_type_str[];          // allocated in kernel_init.c
     38
     39
     40//////////////////////////////////
     41void rwlock_init( rwlock_t * lock,
     42                  uint32_t   type )
    3543
    36         lock->ticket  = 0;
    37     lock->current = 0;
     44        lock->taken   = 0;
    3845    lock->count   = 0;
    3946
    40 #if DEBUG_RWLOCKS
    41 lock->owner   = NULL;
    42 list_entry_init( &lock->list );
    43 #endif
    44 
    45 }
    46 
    47 //////////////////////////////////////
    48 void rwlock_rd_lock( rwlock_t * lock )
    49 {
    50         reg_t               mode;
    51         uint32_t            ticket;
    52         thread_t          * this = CURRENT_THREAD;
    53 
    54     // disable IRQs
    55         hal_disable_irq( &mode );
    56 
    57     // get next free ticket
    58     ticket = hal_atomic_add( &lock->ticket , 1 );
    59  
    60     // poll the current ticket value
    61         while( lock->current != ticket )
    62     {
    63         hal_fixed_delay( CONFIG_RWLOCK_DELAY );
    64     }
    65 
    66     ////////// From here we have the lock  ////////////
     47    list_root_init( &lock->rd_root );
     48    list_root_init( &lock->wr_root );
     49
     50    busylock_init( &lock->lock , type );
     51}
     52
     53/////////////////////////////////////////
     54void rwlock_rd_acquire( rwlock_t * lock )
     55{
     56    thread_t * this = CURRENT_THREAD;
     57
     58    // check calling thread can yield
     59    thread_assert_can_yield( this , __FUNCTION__ );
     60
     61    // get busylock
     62    busylock_acquire( &lock->lock );
     63
     64    // block and deschedule if lock already taken
     65    while( lock->taken )
     66    {
     67
     68#if DEBUG_RWLOCK
     69if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     70{
     71    printk("\n[DBG] %s : thread %x in process %x READ BLOCK on rwlock %s [%x,%x]\n",
     72    __FUNCTION__, this->trdid, this->process->pid,
     73    lock_type_str[lock->lock.type], local_cxy, lock );
     74}
     75#endif
     76        // get pointer on calling thread
     77        thread_t * this = CURRENT_THREAD;
     78
     79        // register reader thread in waiting queue
     80        list_add_last( &lock->rd_root , &this->wait_list );
     81
     82        // block reader thread
     83        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_LOCK );
     84       
     85        // release busylock
     86        busylock_release( &lock->lock );
     87
     88        // deschedule
     89        sched_yield("reader wait rwlock");
     90       
     91        // get busylock
     92        busylock_acquire( &lock->lock );
     93    }
     94
     95#if DEBUG_RWLOCK
     96if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     97{
     98    printk("\n[DBG] %s : thread %x in process %x READ ACQUIRE on rwlock %s [%x,%x]\n",
     99    __FUNCTION__, this->trdid, this->process->pid,
     100    lock_type_str[lock->lock.type], local_cxy, lock );
     101}
     102#endif
    67103
    68104    // increment number of readers
    69105    lock->count++;
    70     this->local_locks++;
    71 
    72 #if DEBUG_RWLOCKS
    73 list_add_first( &this->locks_root , &lock->list );
    74 #endif
    75 
    76     // consistency
     106
     107    // release busylock
     108    busylock_release( &lock->lock );
     109
     110}  // end rwlock_rd_acquire()
     111
     112/////////////////////////////////////////
     113void rwlock_wr_acquire( rwlock_t * lock )
     114{
     115    thread_t * this = CURRENT_THREAD;
     116
     117    // check calling thread can yield
     118    thread_assert_can_yield( this , __FUNCTION__ );
     119
     120    // get busylock
     121    busylock_acquire( &lock->lock );
     122
     123    // block and deschedule if lock already taken or existing read access
     124    while( lock->taken || lock->count )
     125    {
     126
     127#if DEBUG_RWLOCK
     128if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     129{
     130    printk("\n[DBG] %s : thread %x in process %x WRITE BLOCK on rwlock %s [%x,%x]\n",
     131    __FUNCTION__, this->trdid, this->process->pid,
     132    lock_type_str[lock->lock.type], local_cxy, lock );
     133}
     134#endif
     135        // get pointer on calling thread
     136        thread_t * this = CURRENT_THREAD;
     137
     138        // register writer in waiting queue
     139        list_add_last( &lock->wr_root , &this->wait_list );
     140
     141        // block writer thread
     142        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_LOCK );
     143       
     144        // release busylock
     145        busylock_release( &lock->lock );
     146
     147        // deschedule
     148        sched_yield("writer wait rwlock");
     149       
     150        // get busylock
     151        busylock_acquire( &lock->lock );
     152    }
     153
     154#if DEBUG_RWLOCK
     155if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     156{
     157    printk("\n[DBG] %s : thread %x in process WRITE ACQUIRE on rwlock %s [%x,%x]\n",
     158    __FUNCTION__, this->trdid, this->process->pid,
     159    lock_type_str[lock->lock.type], local_cxy, lock );
     160}
     161#endif
     162
     163    // take the rwlock
     164    lock->taken = 1;
     165
     166    // release busylock
     167    busylock_release( &lock->lock );
     168
     169}  // end rwlock_wr_acquire()
     170
     171/////////////////////////////////////////
     172void rwlock_rd_release( rwlock_t * lock )
     173{
     174    // synchronize memory before lock release
    77175    hal_fence();
    78176
    79     // release the lock to allow several simultaneous readers
    80     lock->current++;
    81 
    82     // enable IRQs
    83         hal_restore_irq( mode );
    84 
    85 }  // end  rwlock_rd_lock()
    86 
    87 ////////////////////////////////////////
    88 void rwlock_rd_unlock( rwlock_t * lock )
    89 {
    90     reg_t      mode;
    91         thread_t * this = CURRENT_THREAD;
    92 
    93     // disable IRQs
    94         hal_disable_irq( &mode );
    95  
     177    // get busylock
     178    busylock_acquire( &lock->lock );
     179
     180#if DEBUG_RWLOCK
     181if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     182{
     183    thread_t * this = CURRENT_THREAD;
     184    printk("\n[DBG] %s : thread %x in process %x READ RELEASE on rwlock %s [%x,%x]\n",
     185    __FUNCTION__, this->trdid, this->process->pid,
     186    lock_type_str[lock->lock.type], local_cxy, lock );
     187}
     188#endif
     189
    96190    // decrement number of readers
    97     hal_atomic_add( &lock->count , -1 );
    98     this->local_locks--;
    99 
    100 #if DEBUG_RWLOCKS
    101 list_unlink( &lock->list );
    102 #endif
    103 
    104     // enable IRQs
    105         hal_restore_irq( mode );
    106 
    107     // deschedule if pending request
    108     thread_check_sched();
    109 }
    110 
    111 //////////////////////////////////////
    112 void rwlock_wr_lock( rwlock_t * lock )
    113 {
    114         reg_t              mode;
    115     uint32_t           ticket;
    116         thread_t         * this = CURRENT_THREAD;
    117 
    118     // disable IRQs
    119         hal_disable_irq( &mode );
    120  
    121     // get next free ticket
    122     ticket = hal_atomic_add( &lock->ticket , 1 );
    123  
    124     // poll the current ticket value
    125         while( lock->current != ticket )
    126     {
    127         hal_fixed_delay( CONFIG_RWLOCK_DELAY );
    128     }
    129 
    130     ////////// From here we have the lock  ////////////
    131 
    132     // wait completion of existing read access
    133     while( lock->count != 0 )
    134     {
    135         hal_fixed_delay( CONFIG_RWLOCK_DELAY );
    136     }
    137 
    138     this->local_locks++;
    139 
    140 #if DEBUG_RWLOCKS
    141 lock->owner = this;
    142 list_add_first( &this->locks_root , &lock->list );
    143 #endif
    144 
    145     // enable IRQs
    146         hal_restore_irq( mode );
    147 
    148 }  // end rwlock_wr_lock()
    149 
    150 ////////////////////////////////////////////
    151 void rwlock_wr_unlock( rwlock_t * lock )
    152 {
    153     reg_t      mode;
    154         thread_t * this = CURRENT_THREAD;
    155 
    156     // disable IRQs
    157         hal_disable_irq( &mode );
    158  
    159 #if DEBUG_RWLOCKS
    160 lock->owner = NULL;
    161 list_unlink( &lock->list );
    162 #endif
    163 
    164     // release lock
    165     lock->current++;
    166     this->local_locks--;
    167 
    168     // enable IRQs
    169         hal_restore_irq( mode );
    170    
    171     // deschedule if pending request
    172     thread_check_sched();
    173 }
    174 
     191    lock->count--;
     192
     193    // release first writer in waiting queue if no current readers
     194    // and writers waiting queue non empty
     195    if( (lock->count == 0) && (list_is_empty( &lock->wr_root ) == false) )
     196    {
     197        // get first writer thread
     198        thread_t * thread = LIST_FIRST( &lock->wr_root , thread_t , wait_list );
     199
     200#if DEBUG_RWLOCK
     201if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     202{
     203    thread_t * this = CURRENT_THREAD;
     204    printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %d"
     205    " / rwlock %s [%x,%x]\n",
     206    __FUNCTION__, this->trdid, this->process->pid, thread->trdid, thread->process->pid,
     207    lock_type_str[lock->lock.type], local_cxy, lock );
     208}
     209#endif
     210
     211        // remove this waiting thread from waiting list
     212        list_unlink( &thread->wait_list );
     213
     214        // unblock this waiting thread
     215        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_LOCK );
     216    }
     217    // release all readers in waiting queue if writers waiting queue empty
     218    // and readers waiting queue non empty
     219    else if( list_is_empty( &lock->wr_root ) && (list_is_empty( &lock->rd_root ) == false) )
     220    {
     221        while( list_is_empty( &lock->rd_root ) == false )
     222        {
     223            // get first reader thread
     224            thread_t * thread = LIST_FIRST( &lock->rd_root , thread_t , wait_list );
     225
     226#if DEBUG_RWLOCK
     227if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     228{
     229    thread_t * this = CURRENT_THREAD;
     230    printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %d"
     231    " / rwlock %s [%x,%x]\n",
     232    __FUNCTION__, this->trdid, this->process->pid, thread->trdid, thread->process->pid,
     233    lock_type_str[lock->lock.type], local_cxy, lock );
     234}
     235#endif
     236   
     237            // remove this waiting thread from waiting list
     238            list_unlink( &thread->wait_list );
     239
     240            // unblock this waiting thread
     241            thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_LOCK );
     242        }
     243    }
     244
     245    // release busylock
     246    busylock_release( &lock->lock );
     247
     248}  // end rwlock_rd_release()
     249
     250/////////////////////////////////////////
     251void rwlock_wr_release( rwlock_t * lock )
     252{
     253    // synchronize memory before lock release
     254    hal_fence();
     255
     256    // get busylock
     257    busylock_acquire( &lock->lock );
     258
     259#if DEBUG_RWLOCK
     260if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     261{
     262    thread_t * this = CURRENT_THREAD;
     263    printk("\n[DBG] %s : thread %x in process %x WRITE RELEASE on rwlock %s [%x,%x]\n",
     264    __FUNCTION__, this->trdid, this->process->pid,
     265    lock_type_str[lock->lock.type], local_cxy, lock );
     266}
     267#endif
     268
     269    // release the rwlock
     270    lock->taken = 0;
     271
     272    // release first waiting writer thread if writers waiting queue non empty
     273    if( list_is_empty( &lock->wr_root ) == false )
     274    {
     275        // get first writer thread
     276        thread_t * thread = LIST_FIRST( &lock->wr_root , thread_t , wait_list );
     277
     278#if DEBUG_RWLOCK
     279if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     280{
     281    thread_t * this = CURRENT_THREAD;
     282    printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %d"
     283    " / rwlock %s [%x,%x]\n",
     284    __FUNCTION__, this->trdid, this->process->pid, thread->trdid, thread->process->pid,
     285    lock_type_str[lock->lock.type], local_cxy, lock );
     286}
     287#endif
     288        // remove this waiting thread from waiting list
     289        list_unlink( &thread->wait_list );
     290
     291        // unblock this waiting thread
     292        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_LOCK );
     293    }
     294
     295    // check readers waiting queue and release all if writers waiting queue empty
     296    else
     297    {
     298        while( list_is_empty( &lock->rd_root ) == false )
     299        {
     300            // get first reader thread
     301            thread_t * thread = LIST_FIRST( &lock->rd_root , thread_t , wait_list );
     302
     303#if DEBUG_RWLOCK
     304if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() )
     305{
     306    thread_t * this = CURRENT_THREAD;
     307    printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %d"
     308    " / rwlock %s [%x,%x]\n",
     309    __FUNCTION__, this->trdid, this->process->pid, thread->trdid, thread->process->pid,
     310    lock_type_str[lock->lock.type], local_cxy, lock );
     311}
     312#endif
     313            // remove this waiting thread from waiting list
     314            list_unlink( &thread->wait_list );
     315
     316            // unblock this waiting thread
     317            thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_LOCK );
     318        }
     319    }
     320
     321    // release busylock
     322    busylock_release( &lock->lock );
     323
     324}  // end rwlock_wr_release()
     325
     326
  • trunk/kernel/libk/rwlock.h

    r457 r563  
    11/*
    2  * rwlock.h - kernel read/write lock definition.
     2 * rwlock.h - kernel local read/write lock definition.
    33 *
    44 * Author   Alain Greiner    (2016,2017,2018)
     
    2727#include <kernel_config.h>
    2828#include <hal_kernel_types.h>
     29#include <busylock.h>
    2930#include <list.h>
    3031
    31 /*******************************************************************************************
    32  * This structure defines a local rwlock, that supports several simultaneous read
    33  * accesses, but only one write access. It implements a ticket based allocation policy.
    34  * Both readers and writers must take a ticket before doing anything else, and access
    35  * are done in same order as requests (for both read an write ).
    36  * - A reader take the lock to atomically increments the registered readers count.
    37  *   Then it release the lock and access the protected structure. It atomically decrement
    38  *   the readers count without taking the lock when access is completed.
    39  * - A writer take the lock and keep it, but must wait completion of all current read
    40  *   accesses before starting its own access.
    41  * As this local lock is only accessed by the local threads, if the lock is taken,
    42  * the new-comers use a busy waiting policy with a delay between retry.
    43  * TODO : Introduce the rwlocks in the list of locks taken by a given thread for debug.
     32/******************************************************************************************
     33 * This structure defines a kernel, local, read/write lock, supporting several simultaneous
     34 * read accesses, but only one write access to a given locally shared object in a cluster.
     35 * Both readers and writers take the associated busylock before accessing or updating
     36 * the rwlock state, and release the busylock after the rwlock state update.
     37 * - when a reader tries to access the object, it increments the readers "count" when the
     38 *   lock is not "taken" by a writer. It registers in the "rd_root" waiting queue, blocks,
     39 *   and deschedules when the lock is taken.
     40 * - when a writer tries to take the rwlock, it checks the "taken" field. If the lock is
     41 *   already taken, or if the number of readers is non zero, it registers in the "wr_root"
     42 *   waiting queue, blocks, and deschedules. It sets "taken" otherwise.
     43 * - when a reader completes its access, it decrements the readers "count", unblocks the
     44 *   first waiting writer if there are no other readers, and unblocks all waiting
     45 *   readers if there is no write request.
     46 * - when a writer completes its access, it resets the "taken" field, releases the first
     47 *   waiting writer if that queue is non empty, or releases all waiting readers otherwise.
    4448 ******************************************************************************************/
    45 
    46 /****     Forward declarations    ****/
    47 
    48 struct thread_s;
    4949
    5050/*******************************************************************************************
    5151 * This structure defines a local rwlock.
    52  * The "owner" and "list" fields are used for debug.
    5352 ******************************************************************************************/
    5453
    5554typedef struct rwlock_s
    5655{
    57         uint32_t            ticket;           /*! first free ticket index                     */
    58     uint32_t            current;          /*! ticket index of current owner               */
    59     uint32_t            count;            /*! number of simultaneous readers threads      */
    60 
    61 #if DEBUG_RWLOCKS
    62         struct thread_s   * owner;            /*! pointer on curent writer thread             */
    63     list_entry_t        list;             /*! member of list of locks taken by owner      */
    64 #endif
    65 
     56    busylock_t          lock;        /*! busylock protecting the rwlock state             */
     57        volatile uint32_t   taken;       /*! lock taken by an exclusive writer if non zero    */
     58    volatile uint32_t   count;       /*! current number of simultaneous readers threads   */
     59    list_entry_t        rd_root;     /*! root of list of waiting readers                  */
     60    list_entry_t        wr_root;     /*! root of list of waiting writers                  */
    6661}
    6762rwlock_t;
     
    6964/*******************************************************************************************
    7065 * This function initializes a local rwlock.
     66 * The <type> argument defines the lock usage and is only used for debug.
     67 * This type is actually stored in the associated busylock descriptor.
    7168 *******************************************************************************************
    72  * @ lock       : pointer on rwlock
     69 * @ lock       : pointer on rwlock.
     70 * @ type       : lock usage for debug.
    7371 ******************************************************************************************/
    74 void rwlock_init( rwlock_t * lock );
     72void rwlock_init( rwlock_t * lock,
     73                  uint32_t   type );
    7574
    7675/*******************************************************************************************
     
    7978 * @ lock       : pointer on rwlock
    8079 ******************************************************************************************/
    81 void rwlock_rd_lock( rwlock_t * lock );
     80void rwlock_rd_acquire( rwlock_t * lock );
    8281
    8382/*******************************************************************************************
     
    8685 * @ lock       : pointer on rwlock
    8786 ******************************************************************************************/
    88 void rwlock_wr_lock( rwlock_t * lock );
     87void rwlock_wr_acquire( rwlock_t * lock );
    8988
    9089/*******************************************************************************************
     
    9392 * @ lock       : pointer on rwlock
    9493 ******************************************************************************************/
    95 void rwlock_rd_unlock( rwlock_t * lock );
     94void rwlock_rd_release( rwlock_t * lock );
    9695
    9796/*******************************************************************************************
     
    10099 * @ lock       : pointer on rwlock
    101100 ******************************************************************************************/
    102 void rwlock_wr_unlock( rwlock_t * lock );
     101void rwlock_wr_release( rwlock_t * lock );
    103102
    104103
  • trunk/kernel/libk/xhtab.c

    r492 r563  
    2727#include <hal_remote.h>
    2828#include <xlist.h>
    29 #include <remote_rwlock.h>
     29#include <remote_busylock.h>
    3030#include <string.h>
    3131#include <printk.h>
     
    3535
    3636///////////////////////////////////////////////////////////////////////////////////////////
    37 // Item type specific functions (three functions for each item type).
    38 ///////////////////////////////////////////////////////////////////////////////////////////
    39 
    40 ///////////////////////////////////////////////////////////////////////////////////////////
    41 // This functions compute the hash index from the key when item is a vfs_dentry_t.
    42 // The key is the directory entry name.
     37// Item type specific functions (four functions for each item type).
     38// Example below is for <vfs_dentry_t> where the identifier is the dentry name.
     39///////////////////////////////////////////////////////////////////////////////////////////
     40
     41///////////////////////////////////////////////////////////////////////////////////////////
     42// vfs_dentry_t
     43// This functions compute the hash index from the key, that is the directory entry name.
    4344///////////////////////////////////////////////////////////////////////////////////////////
    4445// @ key      : local pointer on name.
     
    5758
    5859///////////////////////////////////////////////////////////////////////////////////////////
     60// vfs_dentry_t
    5961// This functions returns the extended pointer on the item, from the extended pointer
    60 // on xlist contained in the item, when the item is a vfs_entry_t.
     62// on xlist contained in the item.
    6163///////////////////////////////////////////////////////////////////////////////////////////
    6264// @ xlist_xp      : extended pointer on embedded xlist entry.
     
    6971
    7072////////////////////////////////////////////////////////////////////////////////////////////
    71 // This function compare the identifier of an item to a given <key>. For a vfs_entry_t,
     73// vfs_dentry_t
     74// This function compares the identifier of an item to a given <key>.
    7275// it returns true when the directory name matches the name pointed by the <key> argument.
    7376////////////////////////////////////////////////////////////////////////////////////////////
     
    8386    // get dentry cluster and local pointer
    8487    cxy_t          dentry_cxy = GET_CXY( item_xp );
    85     vfs_dentry_t * dentry_ptr = (vfs_dentry_t *)GET_PTR( item_xp );
     88    vfs_dentry_t * dentry_ptr = GET_PTR( item_xp );
    8689
    8790    // make a local copy of directory entry name
     
    9396
    9497////////////////////////////////////////////////////////////////////////////////////////////
    95 // This function print the item key, that is the name for a vfs_entry_t,
     98// vfs_dentry_t
     99// This function prints the item key, that is the name for a vfs_dentry_t.
    96100////////////////////////////////////////////////////////////////////////////////////////////
    97101// @ item_xp   : extended pointer on item.
     
    103107    // get dentry cluster and local pointer
    104108    cxy_t          dentry_cxy = GET_CXY( item_xp );
    105     vfs_dentry_t * dentry_ptr = (vfs_dentry_t *)GET_PTR( item_xp );
     109    vfs_dentry_t * dentry_ptr = GET_PTR( item_xp );
    106110   
    107111    // make a local copy of directory entry name
     
    124128
    125129    // initialize readlock
    126     remote_rwlock_init( XPTR( local_cxy , &xhtab->lock) );
     130    remote_busylock_init( XPTR( local_cxy , &xhtab->lock), LOCK_XHTAB_STATE );
    127131
    128132    xhtab->items            = 0;
     
    163167    // get hash table cluster and local pointer
    164168    xhtab_cxy = GET_CXY( xhtab_xp );
    165     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     169    xhtab_ptr = GET_PTR( xhtab_xp );
    166170
    167171    // get pointer on "item_from_xlist" function
     
    198202    index_from_key_t * index_from_key;     // function pointer
    199203   
    200     // get xhtab cluster and local pointer
    201     xhtab_cxy = GET_CXY( xhtab_xp );
    202     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     204#if DEBUG_XHTAB
     205printk("\n[DBG] %s : enter / %s\n", __FUNCTION__, key );
     206#endif
     207
     208    // get xhtab cluster and local pointer
     209    xhtab_cxy = GET_CXY( xhtab_xp );
     210    xhtab_ptr = GET_PTR( xhtab_xp );
    203211
    204212    // get pointer on "index_from_key" function
     
    209217
    210218    // take the lock protecting hash table
    211     remote_rwlock_wr_lock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     219    remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    212220
    213221    // search a matching item
     
    217225    {
    218226        // release the lock protecting hash table
    219         remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     227        remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    220228
    221229        return EINVAL;
     
    230238
    231239        // release the lock protecting hash table
    232         remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     240        remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     241   
     242#if DEBUG_XHTAB
     243printk("\n[DBG] %s : success / %s\n", __FUNCTION__, key );
     244#endif
    233245
    234246        return 0;
     
    249261    // get xhtab cluster and local pointer
    250262    xhtab_cxy = GET_CXY( xhtab_xp );
    251     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     263    xhtab_ptr = GET_PTR( xhtab_xp );
    252264
    253265    // get pointer on "index_from_key" function
     
    258270
    259271    // take the lock protecting hash table
    260     remote_rwlock_wr_lock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     272    remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    261273
    262274    // get extended pointer on item to remove
     
    266278    {
    267279        // release the lock protecting hash table
    268         remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     280        remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    269281
    270282        return EINVAL;
     
    279291
    280292        // release the lock protecting hash table
    281         remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     293        remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    282294
    283295        return 0;
     
    297309    // get xhtab cluster and local pointer
    298310    xhtab_cxy = GET_CXY( xhtab_xp );
    299     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     311    xhtab_ptr = GET_PTR( xhtab_xp );
    300312
    301313    // get pointer on "index_from_key" function
     
    304316    // compute index from key
    305317        index = index_from_key( key );
     318   
     319#if DEBUG_XHTAB
     320printk("\n[DBG] %s : enter / %s\n", __FUNCTION__, key );
     321#endif
    306322
    307323    // take the lock protecting hash table
    308     remote_rwlock_rd_lock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     324    remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     325   
     326#if DEBUG_XHTAB
     327printk("\n[DBG] %s : after lock acquire / %s\n", __FUNCTION__, key );
     328#endif
    309329
    310330    // scan sub-list
    311331    item_xp = xhtab_scan( xhtab_xp , index , key );
    312332
     333#if DEBUG_XHTAB
     334printk("\n[DBG] %s : after xhtab scan / %s\n", __FUNCTION__, key );
     335#endif
     336
    313337    // release the lock protecting hash table
    314     remote_rwlock_rd_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     338    remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     339
     340#if DEBUG_XHTAB
     341printk("\n[DBG] %s : after lock release / %s\n", __FUNCTION__, key );
     342#endif
    315343
    316344    return item_xp;
     
    318346}  // end xhtab_lookup()
    319347
    320 ///////////////////////////////////////
    321 void xhtab_read_lock( xptr_t xhtab_xp )
     348//////////////////////////////////
     349void xhtab_lock( xptr_t xhtab_xp )
    322350{
    323351    // get xhtab cluster and local pointer
    324352    cxy_t     xhtab_cxy = GET_CXY( xhtab_xp );
    325     xhtab_t * xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     353    xhtab_t * xhtab_ptr = GET_PTR( xhtab_xp );
    326354
    327355    // take the lock protecting hash table
    328     remote_rwlock_rd_lock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     356    remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    329357}
    330358
    331 /////////////////////////////////////////
    332 void xhtab_read_unlock( xptr_t xhtab_xp )
     359////////////////////////////////////
     360void xhtab_unlock( xptr_t xhtab_xp )
    333361{
    334362    // get xhtab cluster and local pointer
    335363    cxy_t     xhtab_cxy = GET_CXY( xhtab_xp );
    336     xhtab_t * xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     364    xhtab_t * xhtab_ptr = GET_PTR( xhtab_xp );
    337365
    338366    // release the lock protecting hash table
    339     remote_rwlock_rd_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     367    remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    340368}
    341369
     
    353381    // get xhtab cluster and local pointer
    354382    xhtab_cxy = GET_CXY( xhtab_xp );
    355     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     383    xhtab_ptr = GET_PTR( xhtab_xp );
    356384
    357385    // get pointer on "item_from_xlist" function
     
    373401
    374402            // register item in hash table header
    375             hal_remote_sw ( XPTR( xhtab_cxy , &xhtab_ptr->current_index ) , index );
    376             hal_remote_swd( XPTR( xhtab_cxy , &xhtab_ptr->current_xlist_xp ) , xlist_xp );
     403            hal_remote_s32 ( XPTR( xhtab_cxy , &xhtab_ptr->current_index ) , index );
     404            hal_remote_s64( XPTR( xhtab_cxy , &xhtab_ptr->current_xlist_xp ) , xlist_xp );
    377405
    378406            return item_xp;
     
    401429    // get xhtab cluster and local pointer
    402430    xhtab_cxy = GET_CXY( xhtab_xp );
    403     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     431    xhtab_ptr = GET_PTR( xhtab_xp );
    404432
    405433    // get current item pointers
    406     current_index    = hal_remote_lw ( XPTR( xhtab_cxy , &xhtab_ptr->current_index ) );
    407     current_xlist_xp = hal_remote_lwd( XPTR( xhtab_cxy , &xhtab_ptr->current_xlist_xp ) );
     434    current_index    = hal_remote_l32 ( XPTR( xhtab_cxy , &xhtab_ptr->current_index ) );
     435    current_xlist_xp = hal_remote_l64( XPTR( xhtab_cxy , &xhtab_ptr->current_xlist_xp ) );
    408436
    409437    // get pointer on "item_from_xlist" function
     
    426454
    427455            // register item in hash table header
    428             hal_remote_sw ( XPTR( xhtab_cxy , &xhtab_ptr->current_index ) , index );
    429             hal_remote_swd( XPTR( xhtab_cxy , &xhtab_ptr->current_xlist_xp ) , xlist_xp );
     456            hal_remote_s32 ( XPTR( xhtab_cxy , &xhtab_ptr->current_index ) , index );
     457            hal_remote_s64( XPTR( xhtab_cxy , &xhtab_ptr->current_xlist_xp ) , xlist_xp );
    430458
    431459            return item_xp;
     
    452480    // get xhtab cluster and local pointer
    453481    xhtab_cxy = GET_CXY( xhtab_xp );
    454     xhtab_ptr = (xhtab_t *)GET_PTR( xhtab_xp );
     482    xhtab_ptr = GET_PTR( xhtab_xp );
    455483
    456484    // get pointer on "item_from_xlist" function
  • trunk/kernel/libk/xhtab.h

    r459 r563  
    3838// The main goal is to speedup search by key in a large number of items of same type.
    3939// For this purpose the set of all registered items is split in several subsets.
    40 // Each subset is organised as an embedded double linked lists.
     40// Each subset is organised as an embedded double linked xlists.
    4141// - an item is uniquely identified by a <key>, that is a single uint32_t value.
    4242// - From the <key> value, the hash table uses an item type specific xhtab_index()
     
    9393    item_print_key_t  * item_print_key;        /*! item specific function pointer        */
    9494    uint32_t            items;                 /*! number of registered items            */
    95     remote_rwlock_t     lock;                  /*! lock protecting hash table accesses   */
     95    remote_busylock_t   lock;                  /*! lock protecting hash table accesses   */
    9696    uint32_t            current_index;         /*! current item subset index             */
    9797    xptr_t              current_xlist_xp;      /*! xptr on current item xlist entry      */
     
    149149 * @ xhtab_xp  : extended pointer on hash table.
    150150 *****************************************************************************************/
    151 void xhtab_read_lock( xptr_t xhtab_xp );
     151void xhtab_lock( xptr_t xhtab_xp );
    152152
    153153/******************************************************************************************
     
    157157 * @ xhtab_xp  : extended pointer on hash table.
    158158 *****************************************************************************************/
    159 void xhtab_read_unlock( xptr_t xhtab_xp );
     159void xhtab_unlock( xptr_t xhtab_xp );
    160160
    161161/******************************************************************************************
  • trunk/kernel/libk/xlist.h

    r457 r563  
    11/*
     2    // check calling thread can yield
     3    thread_assert_can_yield( this , __FUNCTION__ );
     4
    25 * xlist.h - Double Circular Linked lists, using extended pointers.
    36 *
     
    7881 **************************************************************************/
    7982
    80 #define XLIST_FIRST_ELEMENT( root_xp , type , member ) \
    81     ({ xptr_t __first = hal_remote_lwd( root_xp );     \
     83#define XLIST_FIRST( root_xp , type , member ) \
     84    ({ xptr_t __first = hal_remote_l64( root_xp );     \
    8285           XLIST_ELEMENT( __first , type , member ); })
    8386
     
    9295 **************************************************************************/
    9396
    94 #define XLIST_LAST_ELEMENT( root_xp , type , member )  \
    95     ({ xptr_t __last = hal_remote_lwd( root_xp + 8 );  \
     97#define XLIST_LAST( root_xp , type , member )  \
     98    ({ xptr_t __last = hal_remote_l64( root_xp + 8 );  \
    9699           XLIST_ELEMENT( __last , type , member ); })
    97100
    98101/***************************************************************************
    99102 * This macro traverses an extended double linked list in forward order.
    100  * The iter variable should NOT be deleted during traversal.
     103 * WARNING : the iter variable should NOT be deleted during traversal.
    101104 * @ root_xp  : extended pointer on the root xlist_entry_t
    102105 * @ iter_xp  : current extended pointer on a xlist_entry_t
     
    104107
    105108#define XLIST_FOREACH( root_xp , iter_xp )    \
    106 for( (iter_xp) = hal_remote_lwd( root_xp ) ;  \
     109for( (iter_xp) = hal_remote_l64( root_xp ) ;  \
    107110     (iter_xp) != (root_xp) ;                 \
    108      (iter_xp) = hal_remote_lwd( iter_xp ) )
     111     (iter_xp) = hal_remote_l64( iter_xp ) )
    109112
    110113/***************************************************************************
    111114 * This macro traverses an extended double linked list in backward order.
    112  * The iter variable should NOT be deleted during traversal.
     115 * WARNING : the iter variable should NOT be deleted during traversal.
    113116 * @ root_xp  : extended pointer on the root xlist_entry_t
    114117 * @ iter_xp  : current extended pointer on a xlist_entry_t
     
    116119
    117120#define XLIST_FOREACH_BACKWARD( root_xp , iter_xp )  \
    118 for( (iter_xp) = hal_remote_lwd( (root_xp) + 8 ) ;   \
     121for( (iter_xp) = hal_remote_l64( (root_xp) + 8 ) ;   \
    119122     (iter_xp) != (root_xp) ;                        \
    120      (iter_xp) = hal_remote_lwd( (iter_xp) + 8 ) )
     123     (iter_xp) = hal_remote_l64( (iter_xp) + 8 ) )
    121124
    122125/***************************************************************************
     
    130133{
    131134    // get root->next
    132     xptr_t root_next = (xptr_t)hal_remote_lwd( root );
     135    xptr_t root_next = (xptr_t)hal_remote_l64( root );
    133136
    134137    // get ref->next
    135     xptr_t ref_next  = (xptr_t)hal_remote_lwd( ref );
     138    xptr_t ref_next  = (xptr_t)hal_remote_l64( ref );
    136139
    137140    // test if list is empty or ref is the last element 
     
    150153{
    151154    // get root->next
    152     xptr_t root_next = (xptr_t)hal_remote_lwd( root );
     155    xptr_t root_next = (xptr_t)hal_remote_l64( root );
    153156
    154157    // get ref->pred
    155     xptr_t ref_pred  = (xptr_t)hal_remote_lwd( ref + 8 );
     158    xptr_t ref_pred  = (xptr_t)hal_remote_l64( ref + 8 );
    156159
    157160    // test if list is empty or ref is the first element 
     
    165168 * The root can be located in any cluster.
    166169 * @ root_xp   :  extended pointer on the root xlist_entry_t
    167  **************************************************************************/
     170 **************************************************************************/
    168171static inline void xlist_root_init( xptr_t root_xp )
    169172{
    170     hal_remote_swd(  root_xp   , root_xp );
    171     hal_remote_swd(  root_xp+8 , root_xp );
     173    hal_remote_s64(  root_xp   , root_xp );
     174    hal_remote_s64(  root_xp+8 , root_xp );
    172175}
    173176
     
    179182static inline void xlist_entry_init( xptr_t entry_xp )
    180183{
    181     hal_remote_swd(  entry_xp   , 0 );
    182     hal_remote_swd(  entry_xp+8 , 0 );
     184    hal_remote_s64(  entry_xp   , 0 );
     185    hal_remote_s64(  entry_xp+8 , 0 );
    183186}
    184187
     
    194197{
    195198    // get the extended pointer on the first element in list
    196     xptr_t first = (xptr_t)hal_remote_lwd( root );
     199    xptr_t first = (xptr_t)hal_remote_l64( root );
    197200
    198201    // update root.next <= entry
    199     hal_remote_swd( root , (uint64_t)entry );
     202    hal_remote_s64( root , (uint64_t)entry );
    200203
    201204    // update entry.next <= first
    202     hal_remote_swd( entry , (uint64_t)first );
     205    hal_remote_s64( entry , (uint64_t)first );
    203206
    204207    // entry.pred <= root
    205     hal_remote_swd( entry + 8 , (uint64_t)root );
     208    hal_remote_s64( entry + 8 , (uint64_t)root );
    206209   
    207210    // first.pred <= new
    208     hal_remote_swd( first + 8 , (uint64_t)entry );
     211    hal_remote_s64( first + 8 , (uint64_t)entry );
    209212}
    210213
     
    220223{
    221224    // get the extended pointer on the last element in list
    222     xptr_t last = (xptr_t)hal_remote_lwd( root + 8 );
     225    xptr_t last = (xptr_t)hal_remote_l64( root + 8 );
    223226
    224227    // update root.pred <= entry
    225     hal_remote_swd( root + 8 , (uint64_t)entry );
     228    hal_remote_s64( root + 8 , (uint64_t)entry );
    226229
    227230    // update entry.pred <= last
    228     hal_remote_swd( entry + 8 , (uint64_t)last );
     231    hal_remote_s64( entry + 8 , (uint64_t)last );
    229232
    230233    // entry.next <= root
    231     hal_remote_swd( entry , (uint64_t)root );
     234    hal_remote_s64( entry , (uint64_t)root );
    232235   
    233236    // last.next <= entry
    234     hal_remote_swd( last , (uint64_t)entry );
     237    hal_remote_s64( last , (uint64_t)entry );
    235238}
    236239
     
    243246{
    244247    // get the extended pointer root.next value
    245     xptr_t next = (xptr_t)hal_remote_lwd( root );
     248    xptr_t next = (xptr_t)hal_remote_l64( root );
    246249
    247250    return ( root == next );
     
    266269
    267270    // update pred.next <= next
    268     hal_remote_swd( pred , (uint64_t)next );
     271    hal_remote_s64( pred , (uint64_t)next );
    269272
    270273    // update next.pred <= pred
    271     hal_remote_swd( next + 8 , (uint64_t)pred );
     274    hal_remote_s64( next + 8 , (uint64_t)pred );
    272275}
    273276
     
    292295
    293296        // update new.next <= next
    294     hal_remote_swd( new , (uint64_t)next );
     297    hal_remote_s64( new , (uint64_t)next );
    295298
    296299    // update new.pred <= pred
    297     hal_remote_swd( new + 8 , (uint64_t)pred );
     300    hal_remote_s64( new + 8 , (uint64_t)pred );
    298301
    299302        // update pred.next <= new
    300     hal_remote_swd( pred , (uint64_t)new );
     303    hal_remote_s64( pred , (uint64_t)new );
    301304
    302305    // update next.pred <= new
    303     hal_remote_swd( next + 8 , (uint64_t)new );
     306    hal_remote_s64( next + 8 , (uint64_t)new );
    304307}
    305308
Note: See TracChangeset for help on using the changeset viewer.