Timestamp:
Oct 4, 2018, 11:16:13 PM
Author:
alain
Message:

Complete restructuring of kernel spinlocks.

File:
1 edited

Legend:

Unmodified  (leading space)
Added  (+)
Removed  (-)
  • trunk/kernel/libk/remote_mutex.c
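In this file the restructuring boils down to two substitutions: the untyped remote_spinlock_* primitives protecting the mutex internal state are replaced by a typed remote busylock, and the spinlock protecting the per-process list of user synchros is replaced by a remote queuelock. A minimal before/after sketch of the call pattern, assuming a hypothetical structure foo holding a lock field in cluster cxy; LOCK_MUTEX_STATE is the type constant used in this file, and its exact role (identifying the lock for debug/instrumentation) is an assumption, not something this changeset states:

    /* r457 style : untyped remote spinlock (sketch, hypothetical foo structure) */
    remote_spinlock_init  ( XPTR( cxy , &foo->lock ) );
    remote_spinlock_lock  ( XPTR( cxy , &foo->lock ) );
    /* ... short critical section on foo state ... */
    remote_spinlock_unlock( XPTR( cxy , &foo->lock ) );

    /* r563 style : typed remote busylock (sketch, hypothetical foo structure) */
    remote_busylock_init   ( XPTR( cxy , &foo->lock ) , LOCK_MUTEX_STATE );
    remote_busylock_acquire( XPTR( cxy , &foo->lock ) );
    /* ... short critical section on foo state ... */
    remote_busylock_release( XPTR( cxy , &foo->lock ) );

The queuelock substitution follows the same acquire/release naming, as the remote_queuelock_acquire() / remote_queuelock_release() pairs in the diff below show.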

r457 → r563

 /*
- * remote_mutex.c - Access a POSIX mutex.
+ * remote_mutex.c - POSIX mutex implementation.
  *
- * Authors   Alain   Greiner (2016)
+ * Authors   Alain   Greiner (2016,2017,2018)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  */
 
+#include <kernel_config.h>
 #include <hal_kernel_types.h>
 #include <hal_remote.h>
-#include <hal_special.h>
-#include <hal_irqmask.h>
 #include <thread.h>
-#include <cluster.h>
+#include <xlist.h>
 #include <scheduler.h>
+#include <remote_busylock.h>
 #include <remote_mutex.h>
+
 
 /////////////////////////////////////////////////
…
     process_t    * ref_ptr = (process_t *)GET_PTR( ref_xp );
 
-    // get extended pointer on root of mutex list
+    // get extended pointers on mutexes list
     xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
-
+    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );
+
+    // get lock protecting synchro lists
+    remote_queuelock_acquire( lock_xp );
+
     // scan reference process mutex list
     xptr_t           iter_xp;
…
     }
 
+    // release lock protecting synchros lists
+    remote_queuelock_release( lock_xp );
+
     if( found == false )  return XPTR_NULL;
     else                  return mutex_xp;
…
     process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
 
-    // allocate memory for barrier descriptor
+    // allocate memory for mutex descriptor
     if( ref_cxy == local_cxy )                  // local cluster is the reference
     {
…
     {
         rpc_kcm_alloc_client( ref_cxy , KMEM_MUTEX , &mutex_xp );
-        mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
-    }
-
-    if( mutex_ptr == NULL ) return ENOMEM;
+        mutex_ptr = GET_PTR( mutex_xp );
+    }
+
+    if( mutex_ptr == NULL ) return 0xFFFFFFFF;
 
     // initialise mutex
-    hal_remote_sw ( XPTR( ref_cxy , &mutex_ptr->value )   , 0 );
+    hal_remote_s32 ( XPTR( ref_cxy , &mutex_ptr->taken )   , 0 );
     hal_remote_spt( XPTR( ref_cxy , &mutex_ptr->ident )   , (void *)ident );
-    hal_remote_swd( XPTR( ref_cxy , &mutex_ptr->owner )   , XPTR_NULL );
-
     xlist_entry_init( XPTR( ref_cxy , &mutex_ptr->list ) );
     xlist_root_init( XPTR( ref_cxy , &mutex_ptr->root ) );
-    remote_spinlock_init( XPTR( ref_cxy , &mutex_ptr->lock ) );
-
-    // register mutex in reference process xlist
+    hal_remote_s64( XPTR( ref_cxy , &mutex_ptr->owner ) , XPTR_NULL );
+    remote_busylock_init( XPTR( ref_cxy , &mutex_ptr->lock ), LOCK_MUTEX_STATE );
+
+    // get root of mutexes list in process, and list_entry in mutex
     xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
     xptr_t xp_list = XPTR( ref_cxy , &mutex_ptr->list );
 
-    remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
+    // get lock protecting user synchros lists
+    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
+
+    // register mutex in process descriptor
     xlist_add_first( root_xp , xp_list );
-    remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
+
+    // release lock protecting user synchros lists
+    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
+
+#if DEBUG_MUTEX
+thread_t * this = CURRENT_THREAD;
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
     140printk("\n[DBG] %s : thread %x in %x process / mutex(%x,%x)\n",
     141__FUNCTION__, this->trdid, this->process->pid, local_cxy, mutex_ptr );
     142#endif
     143
    124144
    125145    return 0;
     
     remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
 
+    // get lock protecting user synchros lists
+    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
+
     // remove mutex from reference process xlist
-    remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
     xlist_unlink( XPTR( mutex_cxy , &mutex_ptr->list ) );
-    remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
+
+    // release lock protecting user synchros lists
+    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
 
     // release memory allocated for mutex descriptor
…
 void remote_mutex_lock( xptr_t mutex_xp )
 {
-    bool_t    success;
-    reg_t     irq_state;
-
-    // get cluster and local pointer on remote mutex
-    remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
+    // get cluster and local pointer on mutex
+    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
     cxy_t            mutex_cxy = GET_CXY( mutex_xp );
 
-    // get cluster and local pointer on calling thread
-    cxy_t            thread_cxy = local_cxy;
-    thread_t       * thread_ptr = CURRENT_THREAD;
-
-    // get extended pointers on mutex value
-    xptr_t           value_xp = XPTR( mutex_cxy , &mutex_ptr->value );
-
-    // Try to take the mutex
-    success = hal_remote_atomic_cas( value_xp , 0 , 1 );
-
-    if( success )  // take the lock
-    {
-        // register calling thread as mutex owner
-        xptr_t owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
-        hal_remote_swd( owner_xp , XPTR( thread_cxy , thread_ptr ) );
-
-        // increment calling thread remote_locks
-        hal_remote_atomic_add( XPTR( thread_cxy , &thread_ptr->remote_locks ) , 1 );
-    }
-    else           // deschedule and register calling thread in queue
-    {
-        // disable interrupts
-        hal_disable_irq( &irq_state );
-
-        // register calling thread in mutex waiting queue
-        xptr_t root_xp  = XPTR( mutex_cxy  , &mutex_ptr->root );
-        xptr_t entry_xp = XPTR( thread_cxy , &thread_ptr->wait_list );
-
-        remote_spinlock_lock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
-        xlist_add_last( root_xp , entry_xp );
-        remote_spinlock_unlock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
-
-        // block & deschedule the calling thread
-        thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_USERSYNC );
-        sched_yield("blocked on mutex");
-
-        // restore interrupts
-        hal_restore_irq( irq_state );
-    }
-
-    hal_fence();
-
-}  // end remote_mutex_lock()
-
-///////////////////////////////////////////
-void remote_mutex_unlock( xptr_t mutex_xp )
-{
-    reg_t               irq_state;
-
-    // get cluster and local pointer on remote mutex
-    remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
-    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
-
-    // get cluster and local pointer on calling thread
-    cxy_t            thread_cxy = local_cxy;
-    thread_t       * thread_ptr = CURRENT_THREAD;
-
-    // get extended pointers on mutex value, root, lock & owner fields
-    xptr_t           value_xp = XPTR( mutex_cxy , &mutex_ptr->value );
+    // get extended pointers on mutex fields
+    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
     xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
     xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
+    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
+
+    // get cluster and pointers on calling thread
+    cxy_t            caller_cxy = local_cxy;
+    thread_t       * caller_ptr = CURRENT_THREAD;
+    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
+
+// check calling thread can yield
+assert( (caller_ptr->busylocks == 0),
+"cannot yield : busylocks = %d\n", caller_ptr->busylocks );
+
+    while( 1 )
+    {
+        // get busylock protecting mutex state
+        remote_busylock_acquire( lock_xp );
+
+        // test mutex state
+        if( hal_remote_l32( taken_xp ) == 0 )                 // success
+        {
+            // register calling thread as mutex owner
+            hal_remote_s64( owner_xp , caller_xp );
+
+            // update mutex state
+            hal_remote_s32( taken_xp , 1 );
+
+#if DEBUG_MUTEX
+thread_t * this = CURRENT_THREAD;
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
+printk("\n[DBG] %s : thread %x in process %x SUCCESS on mutex(%x,%x)\n",
+__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
+#endif
+
+            // release busylock protecting mutex state
+            remote_busylock_release( lock_xp );
+
+            return;
+        }
+        else                                                 //  already taken
+        {
+            // block the calling thread
+            thread_block( caller_xp , THREAD_BLOCKED_USERSYNC );
+
+            // register calling thread in mutex waiting queue
+            xptr_t entry_xp = XPTR( caller_cxy , &caller_ptr->wait_xlist );
+            xlist_add_last( root_xp , entry_xp );
+
+#if DEBUG_MUTEX
+thread_t * this = CURRENT_THREAD;
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
+printk("\n[DBG] %s : thread %x in process %x BLOCKED on mutex(%x,%x)\n",
+__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
+#endif
+
+            // release busylock protecting mutex state
+            remote_busylock_release( lock_xp );
+
+            // deschedule calling thread
+            sched_yield("blocked on mutex");
+        }
+    }
+}  // end remote_mutex_lock()
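The new remote_mutex_lock() never returns an error: it loops, blocking and descheduling the calling thread until the mutex can be taken, so only the unlock side reports a failure (caller is not the owner). A minimal caller-side sketch, assuming a hypothetical kernel function; it only illustrates the calling convention visible above and is not code from this changeset:

    // hypothetical caller, for illustration only
    void example_protected_update( xptr_t mutex_xp )
    {
        // blocks (and deschedules) until the calling thread owns the mutex
        remote_mutex_lock( mutex_xp );

        /* ... access the resource protected by the user mutex ... */

        // returns 0 on success, non-zero if the caller does not own the mutex
        if( remote_mutex_unlock( mutex_xp ) )
        {
            printk("\n[ERROR] %s : calling thread is not the mutex owner\n", __FUNCTION__ );
        }
    }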
+
+//////////////////////////////////////////////
+error_t remote_mutex_unlock( xptr_t mutex_xp )
+{
+    // memory barrier before mutex release
+    hal_fence();
+
+    // get cluster and local pointer on mutex
+    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
+    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
+
+    // get cluster and pointers on calling thread
+    cxy_t            caller_cxy = local_cxy;
+    thread_t       * caller_ptr = CURRENT_THREAD;
+    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
+
+    // get extended pointers on mutex fields
+    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
+    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
+    xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
+    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
+
+    // get busylock protecting mutex state
+    remote_busylock_acquire( lock_xp );
 
-    // disable interrupts
-    hal_disable_irq( &irq_state );
-
-    // unregister owner thread,
-    hal_remote_swd( owner_xp , XPTR_NULL );
-
-    // decrement calling thread remote_locks
-    hal_remote_atomic_add( XPTR( thread_cxy , &thread_ptr->remote_locks ) , -1 );
-
-    // activate first waiting thread if required
-    if( xlist_is_empty( root_xp ) == false )        // one waiting thread
+    // check calling thread is mutex owner
+    if( hal_remote_l64( owner_xp ) != caller_xp )
+    {
+        // release busylock protecting mutex state
+        remote_busylock_release( lock_xp );
+
+        return 0xFFFFFFFF;
+    }
+
+#if DEBUG_MUTEX
+thread_t * this = CURRENT_THREAD;
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
+printk("\n[DBG] %s : thread %x in %x process EXIT / mutex(%x,%x)\n",
+__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
+#endif
+
+    // update owner field,
+    hal_remote_s64( owner_xp , XPTR_NULL );
+
+    // update taken field
+    hal_remote_s32( taken_xp , 0 );
+
+    // unblock first waiting thread if waiting list non empty
+    if( xlist_is_empty( root_xp ) == false )
     {
         // get extended pointer on first waiting thread
-        xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
-
-        // remove first waiting thread from queue
-        remote_spinlock_lock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
-        xlist_unlink( XPTR( mutex_cxy , &mutex_ptr->list ) );
-        remote_spinlock_unlock( XPTR( mutex_cxy , &mutex_ptr->lock ) );
+        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
+        thread_t * thread_ptr = GET_PTR( thread_xp );
+        cxy_t      thread_cxy = GET_CXY( thread_xp );
+
+#if DEBUG_MUTEX
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
+{
+trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
+process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
+pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
+printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %d / mutex(%x,%x)\n",
+__FUNCTION__, this->trdid, this->process->pid, trdid, pid, mutex_cxy, mutex_ptr );
+}
+#endif
+
+        // remove this thread from waiting queue
+        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
 
         // unblock first waiting thread
         thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
     }
-    else                                            // no waiting thread
-    {
-        // release mutex
-        hal_remote_sw( value_xp , 0 );
-    }
-
-    // restore interrupts
-    hal_restore_irq( irq_state );
+
+    // release busylock protecting mutex state
+    remote_busylock_release( lock_xp );
+
+    return 0;
 
 }  // end remote_mutex_unlock()
 
+///////////////////////////////////////////////
+error_t remote_mutex_trylock( xptr_t mutex_xp )
+{
+    // get cluster and local pointer on mutex
+    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
+    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
+
+    // get cluster and pointers on calling thread
+    cxy_t            caller_cxy = local_cxy;
+    thread_t       * caller_ptr = CURRENT_THREAD;
+    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
+
+    // get extended pointers on mutex fields
+    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
+    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
+    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
+
+    // get busylock protecting mutex state
+    remote_busylock_acquire( lock_xp );
+
+    // test mutex state
+    if( hal_remote_l32( taken_xp ) == 0 )                 // success
+    {
+        // register calling thread as mutex owner
+        hal_remote_s64( owner_xp , caller_xp );
+
+        // update mutex state
+        hal_remote_s32( taken_xp , 1 );
+
+#if DEBUG_MUTEX
+thread_t * this = CURRENT_THREAD;
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
+printk("\n[DBG] %s : SUCCESS for thread %x in process %x / mutex(%x,%x)\n",
+__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
+#endif
+        // release busylock protecting mutex state
+        remote_busylock_release( lock_xp );
+
+        return 0;
+    }
+    else                                                 //  already taken
+    {
+
+#if DEBUG_MUTEX
+thread_t * this = CURRENT_THREAD;
+if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
+printk("\n[DBG] %s : FAILURE for thread %x in process %x / mutex(%x,%x)\n",
+__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
+#endif
+        // release busylock protecting mutex state
+        remote_busylock_release( lock_xp );
+
+        return 0xFFFFFFFF;
+    }
+}  // end remote_mutex_trylock()
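remote_mutex_trylock() is the non-blocking variant of the same state machine: it returns 0 when the mutex was free and has just been taken, and 0xFFFFFFFF when it is already owned, without registering the caller in the waiting queue. A minimal usage sketch, again with a hypothetical caller that is not part of this changeset:

    // hypothetical caller, for illustration only
    error_t example_try_update( xptr_t mutex_xp )
    {
        // attempt to take the mutex without descheduling
        if( remote_mutex_trylock( mutex_xp ) )  return 0xFFFFFFFF;   // already taken : give up

        /* ... short update of the protected resource ... */

        // release ownership (0 on success)
        return remote_mutex_unlock( mutex_xp );
    }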