Changeset 581 for trunk/kernel


Timestamp:
Oct 10, 2018, 3:11:53 PM
Author:
alain
Message:

1) Improve the busylock debug infrastructure.
2) Introduce a non-distributed but portable implementation of the pthread_barrier.

Location:
trunk/kernel
Files:
16 edited

  • trunk/kernel/kern/chdev.c

    r564 r581  
    154154    chdev_t * chdev_ptr = GET_PTR( chdev_xp );
    155155
    156 // check calling thread can yield
    157 assert( (this->busylocks == 0),
    158 "cannot yield : busylocks = %d\n", this->busylocks );
     156    // check calling thread can yield
     157    thread_assert_can_yield( this , __FUNCTION__ );
    159158
    160159    // get local and extended pointers on server thread
     
    197196    // build extended pointer on lock protecting chdev waiting queue
    198197    lock_xp            = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
     198
     199    // TODO the hal_disable_irq() / hal_restore_irq()
     200    // in the sequence below is probably useless, as it is
     201    // already done by the busylock_acquire() / busylock_release()
     202    // => remove it [AG] october 2018
    199203
    200204    // critical section for the following sequence:
     
    206210    // (6) release the lock protecting waiting queue
    207211    // (7) deschedule
    208     // ... in this order
    209212
    210213    // enter critical section
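
Editor's note: the open-coded busylock assertions removed in this file (and in rpc.c, remote_condvar.c, remote_mutex.c and remote_barrier.c below) are replaced by a single thread_assert_can_yield() helper. Its definition is not part of this changeset; the following is only a hedged reconstruction, assuming it wraps the same busylocks check and adds the caller's name to the message:

    // Hypothetical reconstruction of the thread_assert_can_yield() helper
    // introduced by this changeset (its real definition is not shown in this diff).
    // It simply mirrors the open-coded assert it replaces.
    static inline void thread_assert_can_yield( thread_t   * thread,
                                                const char * func_str )
    {
        // a thread holding one or more busylocks must not deschedule
        assert( (thread->busylocks == 0),
        "in %s : cannot yield : busylocks = %d\n", func_str, thread->busylocks );
    }

In the real kernel the helper presumably also dumps the held locks when DEBUG_BUSYLOCK is set (see the thread_display_busylocks() changes in thread.c below).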
  • trunk/kernel/kern/process.c

    r580 r581  
    112112    cxy_t       chdev_cxy;
    113113    pid_t       parent_pid;
    114     lpid_t      process_lpid;
    115     lpid_t      parent_lpid;
    116114
    117115    // get parent process cluster and local pointer
     
    121119    // get parent_pid
    122120    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
    123 
    124     // get process and parent lpid
    125     process_lpid = LPID_FROM_PID( pid );
    126     parent_lpid  = LPID_FROM_PID( parent_pid );
    127121
    128122#if DEBUG_PROCESS_REFERENCE_INIT
     
    156150
    157151    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
    158     if( (process_lpid == 1) ||     // INIT process
    159         (parent_lpid  == 1) )      // KSH  process
    160     {
    161         // allocate a TXT channel
    162         if( process_lpid == 1 )  txt_id = 0;                     // INIT
    163         else                     txt_id = process_txt_alloc();   // KSH
     152    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
     153    {
     154        // select a TXT channel
     155        if( pid == 1 )  txt_id = 0;                     // INIT
     156        else            txt_id = process_txt_alloc();   // KSH
    164157
    165158        // attach process to TXT
     
    17911784    if( txt_owner_xp == process_xp )
    17921785    {
    1793         nolock_printk("PID %X | PPID %X | TS %X | %s (FG) | %X | %d | %s\n",
    1794         pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
     1786        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n",
     1787        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    17951788    }
    17961789    else
    17971790    {
    1798         nolock_printk("PID %X | PPID %X | TS %X | %s (BG) | %X | %d | %s\n",
    1799         pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
     1791        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n",
     1792        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    18001793    }
    18011794}  // end process_display()
     
    18061799////////////////////////////////////////////////////////////////////////////////////////
    18071800
    1808 ////////////////////////////
     1801//////////////////////////////////
    18091802uint32_t process_txt_alloc( void )
    18101803{
  • trunk/kernel/kern/rpc.c

    r564 r581  
    142142    client_core_lid = this->core->lid;
    143143
    144 // check calling thread can yield when it is not the idle thread
    145 assert( (this->busylocks == 0) || (this->type == THREAD_IDLE),
    146 "cannot yield : busylocks = %d\n", this->busylocks );
     144    // check calling thread can yield when client thread is not the IDLE thread
     145    // RPCs executed by the IDLE thread during kernel_init do not deschedule
     146    if( this->type != THREAD_IDLE ) thread_assert_can_yield( this , __FUNCTION__ );
    147147
    148148#if DEBUG_RPC_CLIENT_GENERIC
     
    204204
    205205    // wait RPC completion before returning if blocking RPC :
    206     // - descheduling without blocking if thread idle (in lernel init)
     206    // - descheduling without blocking if thread idle (in kernel init)
    207207    // - block and deschedule policy for any other thread
    208208    if ( rpc->blocking )
  • trunk/kernel/kern/scheduler.c

    r564 r581  
    411411#endif
    412412
    413 // check current thread busylocks counter
     413// This assert should never be false, as this check must be
     414// done before by any function that can possibly deschedule...
    414415assert( (current->busylocks == 0),
    415 "thread cannot yield : busylocks = %d\n", current->busylocks );
     416"unexpected descheduling of thread holding %d busylocks = %d\n", current->busylocks );
    416417
    417418    // activate or create an RPC thread if RPC_FIFO non empty
  • trunk/kernel/kern/thread.c

    r580 r581  
    190190    if( error )
    191191    {
    192         printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
     192        printk("\n[ERROR] in %s : thread %x in process %x cannot get TRDID in cluster %x\n"
     193        "    for thread %s in process %x / cycle %d\n",
     194        __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     195        local_cxy, thread_type_str(type), process->pid, (uint32_t)hal_get_cycles() );
    193196        return EINVAL;
    194197    }
     
    710713
    711714    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
    712         "illegal thread type" );
     715    "illegal thread type" );
    713716
    714717    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
    715             "illegal core_lid" );
     718    "illegal core_lid" );
    716719
    717720#if DEBUG_THREAD_KERNEL_CREATE
     
    725728    thread = thread_alloc();
    726729
    727     if( thread == NULL ) return ENOMEM;
     730    if( thread == NULL )
     731    {
     732        printk("\n[ERROR] in %s : thread %x in process %x\n"
     733        "   no memory for thread descriptor\n",
     734        __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     735        return ENOMEM;
     736    }
    728737
    729738    // initialize thread descriptor
     
    738747    if( error ) // release allocated memory for thread descriptor
    739748    {
     749        printk("\n[ERROR] in %s : thread %x in process %x\n"
     750        "   cannot initialize thread descriptor\n",
     751        __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
    740752        thread_release( thread );
    741753        return ENOMEM;
     
    744756    // allocate & initialize CPU context
    745757        error = hal_cpu_context_alloc( thread );
     758
    746759    if( error )
    747760    {
     761        printk("\n[ERROR] in %s : thread %x in process %x\n"
     762        "   cannot cannot create CPU context\n",
     763        __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
    748764        thread_release( thread );
    749765        return EINVAL;
    750766    }
     767
    751768    hal_cpu_context_init( thread );
    752769
     
    13641381void thread_display_busylocks( xptr_t  thread_xp )
    13651382{
    1366     if( DEBUG_BUSYLOCK )
    1367     {
    1368         xptr_t    iter_xp;
    1369 
    1370         // get cluster and local pointer of target thread
    1371         cxy_t      thread_cxy = GET_CXY( thread_xp );
    1372         thread_t * thread_ptr = GET_PTR( thread_xp );
    1373 
    1374         // get target thread TRDID and busylocks
    1375         trdid_t  trdid = hal_remote_l32(XPTR( thread_cxy , &thread_ptr->trdid ));
    1376         uint32_t locks = hal_remote_l32(XPTR( thread_cxy , &thread_ptr->busylocks ));
    1377 
    1378         // get target thread process and PID;
    1379         process_t * process = hal_remote_lpt(XPTR( thread_cxy , &thread_ptr->process ));
    1380         pid_t       pid     = hal_remote_l32(XPTR( thread_cxy , &process->pid ));
    1381 
    1382         // get extended pointer on root of busylocks
    1383         xptr_t    root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
    1384 
    1385         // get pointers on TXT0 chdev
    1386         xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    1387         cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    1388         chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    1389 
    1390         // get extended pointer on remote TXT0 lock
    1391         xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    1392 
    1393         // get TXT0 lock
    1394         remote_busylock_acquire( txt0_lock_xp );
    1395 
    1396         // display header
    1397         nolock_printk("\n***** thread %x in process %x : %d busylocks at cycle %d\n",
    1398         trdid, pid, locks, (uint32_t)hal_get_cycles() );
    1399 
    1400         // scan the xlist of busylocks when required
    1401         if( locks )
     1383    // get cluster and local pointer of target thread
     1384    cxy_t      thread_cxy = GET_CXY( thread_xp );
     1385    thread_t * thread_ptr = GET_PTR( thread_xp );
     1386
     1387#if( DEBUG_BUSYLOCK )
     1388
     1389    xptr_t    iter_xp;
     1390
     1391    // get target thread TRDID and busylocks
     1392    trdid_t  trdid = hal_remote_l32(XPTR( thread_cxy , &thread_ptr->trdid ));
     1393    uint32_t locks = hal_remote_l32(XPTR( thread_cxy , &thread_ptr->busylocks ));
     1394
     1395    // get target thread process and PID;
     1396    process_t * process = hal_remote_lpt(XPTR( thread_cxy , &thread_ptr->process ));
     1397    pid_t       pid     = hal_remote_l32(XPTR( thread_cxy , &process->pid ));
     1398
     1399    // get extended pointer on root of busylocks
     1400    xptr_t    root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
     1401
     1402    // get pointers on TXT0 chdev
     1403    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1404    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1405    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1406
     1407    // get extended pointer on remote TXT0 lock
     1408    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1409
     1410    // get TXT0 lock
     1411    remote_busylock_acquire( txt0_lock_xp );
     1412
     1413    // display header
     1414    nolock_printk("\n***** thread %x in process %x : %d busylocks at cycle %d\n",
     1415    trdid, pid, locks, (uint32_t)hal_get_cycles() );
     1416
     1417    // scan the xlist of busylocks when required
     1418    if( locks )
     1419    {
     1420        XLIST_FOREACH( root_xp , iter_xp )
    14021421        {
    1403             XLIST_FOREACH( root_xp , iter_xp )
    1404             {
    1405                 xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
    1406                 cxy_t        lock_cxy  = GET_CXY( lock_xp );
    1407                 busylock_t * lock_ptr  = GET_PTR( lock_xp );
    1408                 uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
    1409                 nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
    1410             }
     1422            xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
     1423            cxy_t        lock_cxy  = GET_CXY( lock_xp );
     1424            busylock_t * lock_ptr  = GET_PTR( lock_xp );
     1425            uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
     1426            nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
    14111427        }
    1412 
    1413         // release TXT0 lock
    1414         remote_busylock_release( txt0_lock_xp );
    1415     }
    1416     else
    1417     {
    1418         // display a warning
    1419         printk("\n[WARNING] set the DEBUG_BUSYLOCK parmeter in kernel_config.h"
    1420         " to use the %s function\n", __FUNCTION__ );
    1421     }
     1428    }
     1429
     1430    // release TXT0 lock
     1431    remote_busylock_release( txt0_lock_xp );
     1432
     1433    return;
     1434
     1435#endif
     1436
     1437    // display a warning
     1438    printk("\n[WARNING] set the DEBUG_BUSYLOCK parmeter in kernel_config.h"
     1439    " to display busylocks for thread %x/%x\n", thread_cxy, thread_ptr );
     1440
    14221441}  // end thread_display_busylock()
     1442
  • trunk/kernel/kernel_config.h

    r580 r581  
    2727#define _KERNEL_CONFIG_H_
    2828
    29 #define CONFIG_ALMOS_VERSION           "Version 1.0 / August 2018"
     29#define CONFIG_ALMOS_VERSION           "Version 1.1 / October 2018"
    3030
    3131////////////////////////////////////////////////////////////////////////////////////////////
     
    3636////////////////////////////////////////////////////////////////////////////////////////////
    3737
     38#define DEBUG_BARRIER                  0
     39
    3840#define DEBUG_BUSYLOCK                 1
    39 #define DEBUG_BUSYLOCK_THREAD_XP       0xBC000ULL    // selected thread_xp
     41#define DEBUG_BUSYLOCK_THREAD_XP       0x11000CC000ULL  // selected thread extended pointer (ULL)
    4042                 
    4143#define DEBUG_CHDEV_CMD_RX             0
     
    132134
    133135#define DEBUG_SCHED_HANDLE_SIGNALS     0
    134 #define DEBUG_SCHED_YIELD              1    // must be activated by the trace() syscall
     136#define DEBUG_SCHED_YIELD              2    // must be activated by the trace() syscall
    135137#define DEBUG_SCHED_RPC_CHECK          0
    136138
     
    139141#define DEBUG_SYSCALLS_ERROR           2
    140142
     143#define DEBUG_SYS_BARRIER              0
    141144#define DEBUG_SYS_CLOSE                0
    142145#define DEBUG_SYS_CONDVAR              0
     
    175178#define DEBUG_THREAD_DELETE            0
    176179#define DEBUG_THREAD_DESTROY           0
    177 #define DEBUG_THREAD_GET_XPTR          1
     180#define DEBUG_THREAD_GET_XPTR          0
    178181#define DEBUG_THREAD_IDLE              0
    179182#define DEBUG_THREAD_INIT              0
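
Editor's note: DEBUG_BUSYLOCK_THREAD_XP selects one thread for verbose busylock tracing, identified by its extended pointer (presumably the cluster identifier in the upper bits and the local pointer in the lower bits). The instrumentation that consumes this value is not shown in this changeset; the fragment below is a hypothetical illustration of how such a filter is typically checked, reusing only constructs visible elsewhere in this diff:

    #if DEBUG_BUSYLOCK
    // Hypothetical filter check (not part of this diff): trace only the thread
    // selected by DEBUG_BUSYLOCK_THREAD_XP in kernel_config.h.
    thread_t * this    = CURRENT_THREAD;
    xptr_t     this_xp = XPTR( local_cxy , this );   // extended pointer on calling thread

    if( this_xp == DEBUG_BUSYLOCK_THREAD_XP )
    {
        printk("\n[DBG] thread %x in process %x now holds %d busylocks\n",
        this->trdid, this->process->pid, this->busylocks );
    }
    #endif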
  • trunk/kernel/libk/remote_barrier.c

    r563 r581  
    8383
    8484    // get pointer on local process descriptor
    85     process_t * process = CURRENT_THREAD->process;
     85    thread_t  * this    = CURRENT_THREAD;
     86    process_t * process = this->process;
     87
     88#if DEBUG_BARRIER
     89uint32_t cycle = (uint32_t)hal_get_cycles();
     90if( cycle > DEBUG_BARRIER )
     91printk("\n[DBG] %s : thread %x in process %x enter / count %d / cycle %d\n",
     92__FUNCTION__, this->trdid, process->pid, count, cycle );
     93#endif
    8694
    8795    // get extended pointer on reference process
     
    110118
    111119    // initialise barrier
    112     hal_remote_s32 ( XPTR( ref_cxy , &barrier_ptr->nb_threads ) , count );
    113     hal_remote_s32 ( XPTR( ref_cxy , &barrier_ptr->current    ) , 0 );
    114     hal_remote_s32 ( XPTR( ref_cxy , &barrier_ptr->sense      ) , 0 );
     120    hal_remote_s32( XPTR( ref_cxy , &barrier_ptr->nb_threads ) , count );
     121    hal_remote_s32( XPTR( ref_cxy , &barrier_ptr->current    ) , 0 );
     122    hal_remote_s32( XPTR( ref_cxy , &barrier_ptr->sense      ) , 0 );
    115123    hal_remote_spt( XPTR( ref_cxy , &barrier_ptr->ident      ) , (void*)ident );
    116124
    117     xlist_entry_init( XPTR( ref_cxy , &barrier_ptr->list ) );
     125    xlist_root_init( XPTR( ref_cxy , &barrier_ptr->root ) );
    118126
    119127    // register  barrier in reference process xlist
     
    125133    remote_busylock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    126134
     135#if DEBUG_BARRIER
     136cycle = (uint32_t)hal_get_cycles();
     137if( cycle > DEBUG_BARRIER )
     138printk("\n[DBG] %s : thread %x in process %x exit / barrier %x in cluster %x / cycle %d\n",
     139__FUNCTION__, this->trdid, process->pid, barrier_ptr, ref_cxy, cycle );
     140#endif
     141
    127142    return 0;
    128 }
     143
     144}  // end remote_barrier_create()
    129145
    130146////////////////////////////////////////////////
     
    162178        rpc_kcm_free_client( barrier_cxy , barrier_ptr , KMEM_BARRIER );
    163179    }
    164 }
     180}  // end remote_barrier_destroy()
    165181
    166182/////////////////////////////////////////////
     
    168184{
    169185    uint32_t  expected;
     186    uint32_t  sense;
    170187    uint32_t  current;
    171     uint32_t  count;
    172     uint32_t  sense;
    173     reg_t     irq_state;
     188    uint32_t  nb_threads;
    174189    xptr_t    root_xp;
    175 
    176     // get cluster and local pointer on calling thread
    177     cxy_t              thread_cxy = local_cxy;
    178     thread_t         * thread_ptr = CURRENT_THREAD;
    179 
    180 // check calling thread can yield
    181 assert( (thread_ptr->busylocks == 0),
    182 "cannot yield : busylocks = %d\n", thread_ptr->busylocks );
     190    xptr_t    lock_xp;
     191    xptr_t    current_xp;
     192    xptr_t    sense_xp;
     193    xptr_t    nb_threads_xp;
     194
     195    // get pointer on calling thread
     196    thread_t * this = CURRENT_THREAD;
     197
     198    // check calling thread can yield
     199    thread_assert_can_yield( this , __FUNCTION__ );
    183200
    184201    // get cluster and local pointer on remote barrier
    185     remote_barrier_t * barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
     202    remote_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
    186203    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
    187204
    188     // get count and root fields from barrier descriptor
    189     count   = hal_remote_l32 ( XPTR( barrier_cxy , &barrier_ptr->nb_threads ) );
    190     root_xp = hal_remote_l64( XPTR( barrier_cxy , &barrier_ptr->root ) );
    191 
    192     // get barrier sense value
    193     sense = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->sense ) );
     205#if DEBUG_BARRIER
     206uint32_t cycle = (uint32_t)hal_get_cycles();
     207if( cycle > DEBUG_BARRIER )
     208printk("\n[DBG] %s : thread %x in process %x enter / barrier %x in cluster %x / cycle %d\n",
     209__FUNCTION__, this->trdid, this->process->pid, barrier_ptr, barrier_cxy, cycle );
     210#endif
     211
     212    // compute extended pointers on various barrier fields
     213    lock_xp       = XPTR( barrier_cxy , &barrier_ptr->lock );
     214    root_xp       = XPTR( barrier_cxy , &barrier_ptr->root );
     215    current_xp    = XPTR( barrier_cxy , &barrier_ptr->current );
     216    sense_xp      = XPTR( barrier_cxy , &barrier_ptr->sense );
     217    nb_threads_xp = XPTR( barrier_cxy , &barrier_ptr->nb_threads );
     218
     219    // take busylock protecting the remote_barrier
     220    remote_busylock_acquire( lock_xp );
     221
     222#if (DEBUG_BARRIER & 1)
     223cycle = (uint32_t)hal_get_cycles();
     224if( cycle > DEBUG_BARRIER )
     225printk("\n[DBG] %s : thread %x in process %x get lock / cycle %d\n",
     226__FUNCTION__, this->trdid, this->process->pid, cycle );
     227#endif
     228
     229    // get sense and nb_threads values from barrier descriptor
     230    sense      = hal_remote_l32( sense_xp );
     231    nb_threads = hal_remote_l32( nb_threads_xp );
    194232
    195233    // compute expected value
     
    197235    else              expected = 0;
    198236
    199     // atomically increment current
    200     current = hal_remote_atomic_add( XPTR( barrier_cxy , &barrier_ptr->current ) , 1 );
     237#if (DEBUG_BARRIER & 1)
     238cycle = (uint32_t)hal_get_cycles();
     239if( cycle > DEBUG_BARRIER )
     240printk("\n[DBG] %s : thread %x in process %x / count %d / sense %d / cycle %d\n",
     241__FUNCTION__, this->trdid, this->process->pid, nb_threads, sense, cycle );
     242#endif
     243
     244    // atomically increment current, and get value before increment
     245    current = hal_remote_atomic_add( current_xp , 1 );
    201246
    202247    // last thread reset current, toggle sense, and activate all waiting threads
    203248    // other threads block, register in queue, and deschedule
    204249
    205     if( current == (count-1) )                       // last thread
    206     {
    207         hal_remote_s32( XPTR( barrier_cxy , &barrier_ptr->current) , 0 );
    208         hal_remote_s32( XPTR( barrier_cxy , &barrier_ptr->sense  ) , expected );
    209 
    210         // activate waiting threads if required
    211         if( xlist_is_empty( root_xp ) == false )
     250    if( current == (nb_threads-1) )                       // last thread
     251    {
     252        hal_remote_s32( current_xp , 0 );
     253        hal_remote_s32( sense_xp , expected );
     254
     255        // unblock all waiting threads
     256        while( xlist_is_empty( root_xp ) == false )
    212257        {
    213             // disable interrupts
    214             hal_disable_irq( &irq_state );
    215 
    216             xptr_t  iter_xp;
    217             xptr_t  thread_xp;
    218             XLIST_FOREACH( root_xp , iter_xp )
    219             {
    220                 // get extended pointer on waiting thread
    221                 thread_xp = XLIST_ELEMENT( iter_xp , thread_t , wait_list );
    222 
    223                 // remove waiting thread from queue
    224                 remote_busylock_acquire( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    225                 xlist_unlink( XPTR( barrier_cxy , &barrier_ptr->list ) );
    226                 remote_busylock_release( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    227 
    228                 // unblock waiting thread
    229                 thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    230             }
    231 
    232             // restore interrupts
    233             hal_restore_irq( irq_state );
     258            // get pointers on first waiting thread
     259            xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
     260            cxy_t      thread_cxy = GET_CXY( thread_xp );
     261            thread_t * thread_ptr = GET_PTR( thread_xp );
     262
     263#if (DEBUG_BARRIER & 1)
     264cycle = (uint32_t)hal_get_cycles();
     265if( cycle > DEBUG_BARRIER )
     266printk("\n[DBG] %s : thread %x in process %x / unblock thread %x / cycle %d\n",
     267__FUNCTION__, this->trdid, this->process->pid, thread_ptr, cycle );
     268#endif
     269
     270            // remove waiting thread from queue
     271            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );
     272
     273            // unblock waiting thread
     274            thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    234275        }
     276
     277        // release busylock protecting the remote_barrier
     278        remote_busylock_release( lock_xp );
    235279    }
    236280    else                                             // not the last thread
    237281    {
    238         // disable interrupts
    239         hal_disable_irq( &irq_state );
     282
     283#if (DEBUG_BARRIER & 1)
     284cycle = (uint32_t)hal_get_cycles();
     285if( cycle > DEBUG_BARRIER )
     286printk("\n[DBG] %s : thread %x in process %x / blocked / cycle %d\n",
     287__FUNCTION__, this->trdid, this->process->pid, cycle );
     288#endif
    240289
    241290        // register calling thread in barrier waiting queue
    242         xptr_t entry_xp = XPTR( thread_cxy , &thread_ptr->wait_list );
    243 
    244         remote_busylock_acquire( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    245         xlist_add_last( root_xp , entry_xp );
    246         remote_busylock_release( XPTR( barrier_cxy , &barrier_ptr->lock ) );
    247 
    248         // block & deschedule the calling thread
    249         thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_USERSYNC );
     291        xlist_add_last( root_xp , XPTR( local_cxy , &this->wait_list ) );
     292
     293        // block calling thread
     294        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );
     295
     296        // release busylock protecting the remote_barrier
     297        remote_busylock_release( lock_xp );
     298
     299        // deschedule
    250300        sched_yield("blocked on barrier");
    251 
    252         // restore interrupts
    253         hal_restore_irq( irq_state );
    254     }
    255 }
     301    }
     302
     303#if DEBUG_BARRIER
     304cycle = (uint32_t)hal_get_cycles();
     305if( cycle > DEBUG_BARRIER )
     306printk("\n[DBG] %s : thread %x in process %x exit / barrier %x in cluster %x / cycle %d\n",
     307__FUNCTION__, this->trdid, this->process->pid, barrier_ptr, barrier_cxy, cycle );
     308#endif
     309
     310}  // end remote_barrier_wait()
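
Editor's note: the wait sequence added above is a classic centralized sense-reversing barrier. Stripped of the remote-access, busylock and scheduling machinery, the algorithm reduces to the following shared-memory sketch (illustration only: it spins instead of blocking and descheduling, and it assumes a single cluster):

    // Minimal sense-reversing barrier, equivalent in spirit to remote_barrier_wait().
    typedef struct
    {
        volatile unsigned int sense;        // toggles at each barrier round
        volatile unsigned int current;      // number of threads already arrived
        unsigned int          nb_threads;   // number of expected threads
    }
    toy_barrier_t;

    void toy_barrier_wait( toy_barrier_t * b )
    {
        // compute the sense value expected at the end of this round
        unsigned int expected = (b->sense == 0) ? 1 : 0;

        // atomically increment "current", getting the value before increment
        if( __sync_fetch_and_add( &b->current , 1 ) == (b->nb_threads - 1) )
        {
            // last arrived thread: reset counter, then toggle sense to release the others
            b->current = 0;
            b->sense   = expected;
        }
        else
        {
            // other threads wait until the last one toggles the sense
            while( b->sense != expected ) { /* spin; the kernel version blocks here */ }
        }
    }

The kernel version replaces the spin loop with thread_block() / sched_yield(), and releases the waiters by walking the xlist of blocked threads under the barrier busylock.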
  • trunk/kernel/libk/remote_barrier.h

    r563 r581  
    3333 *          This file defines a POSIX compliant barrier.
    3434 *
    35  * It is used by multi-threaded iuser applications to synchronise threads running in
     35 * It is used by multi-threaded user applications to synchronise threads running in
    3636 * different clusters, as all access functions uses hal_remote_l32() / hal_remote_s32()
    3737 * remote access primitives.
     
    6161typedef struct remote_barrier_s
    6262{
    63     remote_busylock_t  lock;          /*! lock protecting xlist of arrived threads      */
     63    remote_busylock_t  lock;          /*! lock protecting list of waiting threads       */
    6464    intptr_t           ident;         /*! virtual address in user space == identifier   */
    6565    uint32_t           current;       /*! number of arrived threads                     */
     
    6767    uint32_t           nb_threads;    /*! number of expected threads                    */
    6868    xlist_entry_t      list;          /*! member of list of barriers in same process    */
    69     xlist_entry_t      root;          /*! root of list of arrived threads               */
     69    xlist_entry_t      root;          /*! root of list of waiting threads               */
    7070}
    7171remote_barrier_t;
  • trunk/kernel/libk/remote_condvar.c

    r563 r581  
    186186    thread_t * this = CURRENT_THREAD;
    187187
    188 // check calling thread can yield
    189 assert( (this->busylocks == 0),
    190 "cannot yield : busylocks = %d\n", this->busylocks );
     188    // check calling thread can yield
     189    thread_assert_can_yield( this , __FUNCTION__ );
    191190
    192191    // get condvar cluster and local pointer
  • trunk/kernel/libk/remote_condvar.h

    r563 r581  
    3131
    3232/*******************************************************************************************
    33  *     This file define an user level POSIX compliant condition variable.
     33 * This file defines the ALMOS-MKH implentation of an user level, POSIX compliant condvar.
    3434 *
    3535 * It can be used by muti-threaded user applications to synchronise user threads
     
    4444 * is not running in the reference cluster.
    4545 *
    46  * The blocking "remote_condvar_wait() function allowx the calling thread to efficiently
     46 * The blocking "remote_condvar_wait() function allows the calling thread to efficiently
    4747 * wait for a change in a shared user object. The calling thread blocks and register in
    4848 * a waiting queue attached to the condvar. The blocked thread is unblocked by another
  • trunk/kernel/libk/remote_mutex.c

    r563 r581  
    191191void remote_mutex_lock( xptr_t mutex_xp )
    192192{
     193    // get cluster and pointers on calling thread
     194    cxy_t            caller_cxy = local_cxy;
     195    thread_t       * caller_ptr = CURRENT_THREAD;
     196    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
     197
     198    // check calling thread can yield
     199    thread_assert_can_yield( caller_ptr , __FUNCTION__ );
     200
    193201    // get cluster and local pointer on mutex
    194202    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
     
    200208    xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
    201209    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
    202 
    203     // get cluster and pointers on calling thread
    204     cxy_t            caller_cxy = local_cxy;
    205     thread_t       * caller_ptr = CURRENT_THREAD;
    206     xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
    207 
    208 // check calling thread can yield
    209 assert( (caller_ptr->busylocks == 0),
    210 "cannot yield : busylocks = %d\n", caller_ptr->busylocks );
    211210
    212211    while( 1 )
  • trunk/kernel/libk/remote_mutex.h

    r563 r581  
    2929#include <xlist.h>
    3030
    31 /***************************************************************************************
    32  *    This file defines an user level POSIX compliant mutex.
     31/*****************************************************************************************
     32 * This file defines the ALMOS-MKH implementation of an user level POSIX compliant mutex.
    3333 *
    3434 * It can be used by muti-threaded user applications to synchronise user threads
     
    4949 * The "remote_mutex_unlock()" function unblocks the first waiting thread in the queue
    5050 * without releasing the mutex if queue is not empty.
    51  **************************************************************************************/
     51 ****************************************************************************************/
    5252
    5353/*****************************************************************************************
     
    5757typedef struct remote_mutex_s
    5858{
    59     remote_busylock_t  lock;            /*! lock protecting the mutex state           */
    60     intptr_t           ident;           /*! mutex identifier (vaddr in user space)    */
    61     uint32_t           taken;           /*! mutex non allocated if 0                  */
    62     xlist_entry_t      list;            /*! member of list of mutex in same process   */
    63     xlist_entry_t      root;            /*! root of list of waiting threads           */
    64     xptr_t             owner;           /*! extended pointer on owner thread          */
     59    remote_busylock_t  lock;            /*! lock protecting the mutex state             */
     60    intptr_t           ident;           /*! mutex identifier (vaddr in user space)      */
     61    uint32_t           taken;           /*! mutex non allocated if 0                    */
     62    xlist_entry_t      list;            /*! member of list of mutex in same process     */
     63    xlist_entry_t      root;            /*! root of list of waiting threads             */
     64    xptr_t             owner;           /*! extended pointer on owner thread            */
    6565}
    6666remote_mutex_t;
    6767
    68 /***************************************************************************************
     68/*****************************************************************************************
    6969 * This function returns an extended pointer on the remote mutex, identified
    7070 * by its virtual address in a given user process. It makes an associative search,
    7171 * scanning the list of mutex rooted in the reference process descriptor.
    72  ***************************************************************************************
     72 *****************************************************************************************
    7373 * @ ident    : mutex virtual address, used as identifier.
    7474 * @ returns extended pointer on mutex if success / returns XPTR_NULL if not found.
    75  **************************************************************************************/
     75 ****************************************************************************************/
    7676xptr_t remote_mutex_from_ident( intptr_t  ident );
    7777
    78 /***************************************************************************************
     78/*****************************************************************************************
    7979 * This function implements the pthread_mutex_init() syscall.
    8080 * It allocates memory for the mutex descriptor in the reference cluster for
    8181 * the calling process, it initializes the mutex state, and register it in the
    8282 * list of mutex owned by the reference process.
    83  ***************************************************************************************
     83 *****************************************************************************************
    8484 * @ ident       : mutex identifier (virtual address in user space).
    8585 * @ return 0 if success / ENOMEM if no memory / EINVAL if invalid argument.
    86  **************************************************************************************/
     86 ****************************************************************************************/
    8787error_t remote_mutex_create( intptr_t ident );
    8888
    89 /***************************************************************************************
     89/*****************************************************************************************
    9090 * This function implements the pthread_mutex_destroy() syscall.
    9191 * It releases thr memory allocated for the mutex descriptor, and remove the mutex
    9292 * from the list of mutex owned by the reference process.
    93  ***************************************************************************************
     93 *****************************************************************************************
    9494 * @ mutex_xp  : extended pointer on mutex descriptor.
    95  **************************************************************************************/
     95 ****************************************************************************************/
    9696void remote_mutex_destroy( xptr_t  mutex_xp );
    9797
    98 /***************************************************************************************
     98/*****************************************************************************************
    9999 * This blocking function implements the pthread_mutex_lock() syscall.
    100100 * It returns only when the ownership of the mutex identified by the <mutex_xp>
    101101 * argument has been obtained by the calling thread. It register in the mutex waiting
    102102 * queue when the mutex is already taken by another thread.
    103  ***************************************************************************************
     103 *****************************************************************************************
    104104 * @ mutex_xp  : extended pointer on mutex descriptor.
    105  **************************************************************************************/
     105 ****************************************************************************************/
    106106void remote_mutex_lock( xptr_t  mutex_xp );
    107107
    108 /***************************************************************************************
     108/*****************************************************************************************
    109109 * This function implements the pthread_mutex_unlock() syscall.
    110110 * It cheks that the calling thread is actually the mutex owner.
     
    112112 * It unblocks the first thread registered in the mutex waiting queue, when the
    113113 * queue is not empty.
    114  ***************************************************************************************
     114 *****************************************************************************************
    115115 * @ mutex_xp  : extended pointer on mutex descriptor.
    116116 * @ return 0 if success / return non zero if calling thread is not mutex owner.
    117  **************************************************************************************/
     117 ****************************************************************************************/
    118118error_t remote_mutex_unlock( xptr_t  mutex_xp );
    119119
    120 /***************************************************************************************
     120/*****************************************************************************************
    121121 * This non blocking function function attempts to lock a mutex without blocking.
    122  ***************************************************************************************
     122 *****************************************************************************************
    123123 * @ mutex_xp  : extended pointer on mutex descriptor.
    124124 * @ return 0 if success / return non zero if already taken.
    125  **************************************************************************************/
     125 ****************************************************************************************/
    126126error_t remote_mutex_trylock( xptr_t  mutex_xp );
    127127
  • trunk/kernel/libk/remote_sem.h

    r563 r581  
    109109
    110110/****************************yy***************************************************************
    111  * This function mplements the SEM_POST operation.
     111 * This function implements the SEM_POST operation.
    112112 * - It atomically increments the remote semaphore.
    113113 * - If the waiting queue is not empty, it wakes up all waiting thread.
  • trunk/kernel/mm/mapper.c

    r567 r581  
    154154    thread_t * this = CURRENT_THREAD;
    155155
     156    // check thread can yield
     157    thread_assert_can_yield( this , __FUNCTION__ );
     158
    156159    // take mapper lock in READ_MODE
    157160    rwlock_rd_acquire( &mapper->lock );
  • trunk/kernel/syscalls/shared_include/shared_pthread.h

    r566 r581  
    3232
    3333/*******************************************************************************************
    34  * These typedef define the POSIX thread related types.
     34 * These typedef and enum define the shared information related to the POSIX thread.
    3535 ******************************************************************************************/
    3636
    37 typedef unsigned int      pthread_mutex_t;
    38 typedef unsigned int      pthread_mutexattr_t;         // TODO not implemented
    39 
    40 typedef unsigned int      pthread_cond_t;
    41 typedef unsigned int      pthread_condattr_t;          // TODO not implemented
    42 
    43 typedef unsigned int      pthread_rwlock_t;            // TODO not implemented
    44 typedef unsigned int      pthread_rwlockattr_t;        // TODO not implemented
    45 
    46 /*******************************************************************************************
    47  * This structure and enum define the attributes for the pthread_create() syscall.
    48  ******************************************************************************************/
    49 
    50 typedef unsigned int  pthread_t;               
     37typedef unsigned int    pthread_t;               
    5138
    5239typedef struct pthread_attr_s
    5340{
    54         unsigned int      attributes;      /*! user defined attributes bit vector             */
    55         unsigned int      cxy;             /*! target cluster identifier                      */
    56         unsigned int      lid;             /*! target core local index                        */
     41        unsigned int        attributes;      /*! user defined attributes bit vector           */
     42        unsigned int        cxy;             /*! target cluster identifier                    */
     43        unsigned int        lid;             /*! target core local index                      */
    5744}
    5845pthread_attr_t;
     
    6047enum
    6148{
    62     PT_ATTR_DETACH          = 0x0001,  /*! user defined not joinable                      */
    63     PT_ATTR_CLUSTER_DEFINED = 0x0002,  /*! user defined target cluster                    */
    64     PT_ATTR_CORE_DEFINED    = 0x0004,  /*! user defined core index in cluster             */
     49    PT_ATTR_DETACH          = 0x0001,    /*! user defined not joinable                    */
     50    PT_ATTR_CLUSTER_DEFINED = 0x0002,    /*! user defined target cluster                  */
     51    PT_ATTR_CORE_DEFINED    = 0x0004,    /*! user defined core index in cluster           */
    6552};
    6653
    67 /******************************************************************************************* 
    68  * This enum defines the operation mnemonics for operations on POSIX condition variables.
     54/*******************************************************************************************
     55 * These typedef and enum define the shared informations related to the POSIX mutex.
    6956 ******************************************************************************************/
     57
     58typedef unsigned int    pthread_mutex_t;
     59
     60typedef unsigned int    pthread_mutexattr_t;         // TODO not implemented
     61
     62typedef enum
     63{
     64        MUTEX_INIT,
     65        MUTEX_DESTROY,
     66        MUTEX_LOCK,
     67        MUTEX_UNLOCK,
     68    MUTEX_TRYLOCK,
     69}
     70mutex_operation_t;
     71
     72/*******************************************************************************************
     73 * These typedef and enum define the shared informations related to the POSIX condvar.
     74 ******************************************************************************************/
     75
     76typedef unsigned int    pthread_cond_t;
     77
     78typedef unsigned int    pthread_condattr_t;          // TODO not implemented
    7079
    7180typedef enum
     
    7988condvar_operation_t;
    8089
     90/*******************************************************************************************
     91 * These typedef define and enum the shared informations related to the POSIX rwlock.
     92 ******************************************************************************************/
     93
     94typedef unsigned int    pthread_rwlock_t;            // TODO not implemented
     95
     96typedef unsigned int    pthread_rwlockattr_t;        // TODO not implemented
     97
    8198/*******************************************************************************************
    82  * This enum defines the operation mnemonics for operations on POSIX barriers.
     99 * These typedef and enum define the shared informations related to POSIX barriers.
    83100 ******************************************************************************************/
     101
     102typedef unsigned int    pthread_barrier_t;
     103
     104typedef struct pthread_barrierattr_s
     105{
     106    unsigned int        x_size;         /*! number of clusters in a row                   */
     107    unsigned int        y_size;         /*! number of clusters in a column                */
     108    unsigned int        nthreads;       /*! number of expected threads in a cluster       */
     109}
     110pthread_barrierattr_t;
    84111
    85112typedef enum
     
    91118barrier_operation_t;
    92119
    93 /*******************************************************************************************
    94  * This enum defines the operation mnemonics for operations on POSIX mutex.
    95  ******************************************************************************************/
     120/*********************************************************************************************
     121 * These structures define another implementation for the POSIX barrier:
     122 * It is implemented as a hierarchical, physically distributed quad-tree,
     123 * covering all clusters specified, with the following constraints:
     124 *   . The involved clusters form a mesh [x_size * y_size]
     125 *   . The lower left involved cluster is cluster(0,0) 
     126 *   . The number of threads per cluster is the same in all clusters.
     127 *
     128 * Implementation note:
     129 * - The quad three is implemented as a three dimensions array of node[x][y][l]
     130 *   . [x][y] are the cluster coordinates / max values are (QDT_XMAX-1), (QDT_YMAX-1)
     131 *   . [l] is the node level / 0 for terminal nodes / (QDT_LMAX-1) for the root node
     132 ********************************************************************************************/
    96133
    97 typedef enum
     134/*
     135
     136#define  QDT_XMAX    16                // max number of clusters in a row
     137#define  QDT_YMAX    16                // max number of clusters in a column
     138#define  QDT_LMAX    5                 // max depth of the quad tree
     139#define  QDT_YWIDTH  4                 // Y field in cxy, for cxy <=> (x,y) translation
     140#define  QDT_YMASK   0xF               // Y field in cxy, for cxy <=> (x,y) translation
     141
     142typedef struct sqt_node_s
    98143{
    99         MUTEX_INIT,
    100         MUTEX_DESTROY,
    101         MUTEX_LOCK,
    102         MUTEX_UNLOCK,
    103     MUTEX_TRYLOCK,
    104 }
    105 mutex_operation_t;
     144    volatile unsigned int sense;       // barrier state (toggle)
     145    volatile unsigned int count;       // number of not arrived tasks
     146    unsigned int          arity;       // number of locally expected tasks
     147    unsigned int          level;       // hierarchical level (0 is bottom)
     148    struct sqt_node_s   * parent;      // pointer on parent node (NULL for root)
     149    struct sqt_node_s   * child[4];    // pointer on children node (NULL for bottom)
     150}
     151sqt_node_t;
    106152
     153typedef struct pthread_barrier_s
     154{
     155    sqt_node_t          * node[QDT_XMAX][QDT_YMAX][QDT_LMAX];
     156}
     157pthread_barrier_t;
    107158
     159*/
    108160
    109161#endif  // _PTHREAD_H_
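
Editor's note: the commented-out quad-tree declarations above rely on a fixed encoding of the cluster identifier cxy into (x,y) mesh coordinates through QDT_YWIDTH / QDT_YMASK. The helpers below are a small sketch of that translation, assuming the Y coordinate occupies the low QDT_YWIDTH bits of cxy; that convention is implied by the constants but not spelled out in this changeset:

    // Hypothetical cxy <=> (x,y) translation for the (currently disabled) quad-tree
    // barrier, assuming cxy = (x << QDT_YWIDTH) | y with QDT_YWIDTH = 4, QDT_YMASK = 0xF.
    static inline unsigned int qdt_cxy_to_x( unsigned int cxy )  { return cxy >> QDT_YWIDTH; }
    static inline unsigned int qdt_cxy_to_y( unsigned int cxy )  { return cxy &  QDT_YMASK;  }
    static inline unsigned int qdt_xy_to_cxy( unsigned int x, unsigned int y )
    {
        return (x << QDT_YWIDTH) | y;
    }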
  • trunk/kernel/syscalls/sys_barrier.c

    r508 r581  
    3131#include <remote_barrier.h>
    3232
     33#if DEBUG_SYS_BARRIER
     34//////////////////////////////////////////////////////
     35static char * sys_barrier_op_str( uint32_t operation )
     36{
     37        if     ( operation == BARRIER_INIT    ) return "INIT";
     38        else if( operation == BARRIER_DESTROY ) return "DESTROY";
     39        else if( operation == BARRIER_WAIT    ) return "WAIT";
     40        else                                    return "undefined";
     41}
     42#endif
     43
    3344//////////////////////////////////
    3445int sys_barrier( void     * vaddr,
     
    4152    thread_t   * this    = CURRENT_THREAD;
    4253    process_t  * process = this->process;
     54
     55#if DEBUG_SYS_BARRIER
     56uint64_t   tm_start;
     57uint64_t   tm_end;
     58tm_start = hal_get_cycles();
     59if( DEBUG_SYS_BARRIER < tm_start )
     60printk("\n[DBG] %s : thread %x in process %x enter for %s / count %d / cycle %d\n",
     61__FUNCTION__, this->trdid, process->pid, sys_barrier_op_str(operation), count,
     62(uint32_t)tm_start );
     63#endif
    4364
    4465    // check vaddr in user vspace
     
    125146        }  // end switch
    126147
     148#if DEBUG_SYS_BARRIER
     149tm_end = hal_get_cycles();
     150if( DEBUG_SYS_BARRIER < tm_end )
     151printk("\n[DBG] %s : thread %x in process %x exit for %s / cost %d / cycle %d\n",
     152__FUNCTION__, this->trdid, process->pid, sys_barrier_op_str(operation),
     153(uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     154#endif
     155
    127156        return 0;
    128157
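
Editor's note: the new DEBUG_SYS_BARRIER trace follows the usual kernel_config.h convention: the value is a cycle threshold, and the printk fires only once the current cycle exceeds it (0 leaves the whole block compiled out by the #if guard). For example, with an illustrative threshold:

    // In kernel_config.h : trace sys_barrier() calls after cycle 1000000
    // (the threshold value is only an example; 0 disables the trace).
    #define DEBUG_SYS_BARRIER              1000000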