Timestamp: Nov 7, 2017, 3:08:12 PM
Author: alain
Message: First implementation of fork/exec.

File: 1 edited
  • trunk/kernel/kern/scheduler.c

--- trunk/kernel/kern/scheduler.c (r406)
+++ trunk/kernel/kern/scheduler.c (r407)

 #include <kernel_config.h>
 #include <hal_types.h>
+#include <hal_switch.h>
 #include <hal_irqmask.h>
 #include <hal_context.h>
…

 extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
-
+extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file

 ////////////////////////////////
…
 }  // end sched_remove()

-///////////////////////////////////////////
-void sched_kill_thread( thread_t * thread )
-{
-    // check thread locks
-    if( thread_can_yield() == false )
-    {
-        panic("thread %x in process %x on core[%x][%d]"
-              " did not released all locks",
-              thread->trdid , thread->process->pid,
-              local_cxy , thread->core->lid );
-    }
-
-    // remove thread from scheduler
-    sched_remove_thread( thread );
-
-    // reset the THREAD_SIG_KILL signal
-    thread_reset_signal( thread , THREAD_SIG_KILL );
-
-}  // end sched_kill_thread()
-
 ////////////////////////////////////////
 thread_t * sched_select( core_t * core )
…
     scheduler_t * sched = &core->scheduler;

-    sched_dmsg("\n[DMSG] %s : enter core[%x,%d] / cycle %d\n",
-    __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );
-
     // take lock protecting sheduler lists
     spinlock_lock( &sched->lock );
…
     list_entry_t * last;

-    // first : scan the kernel threads list if not empty
+    // first loop : scan the kernel threads list if not empty
     if( list_is_empty( &sched->k_root ) == false )
     {
…
             thread = LIST_ELEMENT( current , thread_t , sched_list );

-            // return thread if not idle_thread and runnable
-            if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) )
+            // analyse kernel thread type
+            switch( thread->type )
             {
-                // release lock
-                spinlock_unlock( &sched->lock );
-
-                sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
-                __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
-
-                return thread;
-            }
+                case THREAD_IDLE: // skip IDLE thread
+                break;
+
+                case THREAD_RPC:  // RPC thread if non blocked and FIFO non-empty
+                if( (thread->blocked == 0) &&
+                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
+                {
+                    spinlock_unlock( &sched->lock );
+                    return thread;
+                }
+                break;
+
+                default:          // DEV thread if non blocked
+                if( thread->blocked == 0 )
+                {
+                    spinlock_unlock( &sched->lock );
+                    return thread;
+                }
+                break;
+            }  // end switch type
         }
         while( current != last );
     }

-    // second : scan the user threads list if not empty
+    // second loop : scan the user threads list if not empty
     if( list_is_empty( &sched->u_root ) == false )
     {
…
             if( thread->blocked == 0 )
             {
-                // release lock
                 spinlock_unlock( &sched->lock );
-
-                sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
-                __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
                 return thread;
             }
…
     }

-    // release lock
+    // third : return idle thread if no runnable thread
     spinlock_unlock( &sched->lock );
-
-    sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
-    __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );
-
-    // third : return idle thread if no runnable thread
     return sched->idle;

 }  // end sched_select()
+
+///////////////////////////////////////////
+void sched_kill_thread( thread_t * thread )
+{
+    // check locks
+    if( thread_can_yield() == false )
+    {
+        panic("locks not released for thread %x in process %x on core[%x][%d]",
+        thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
+    }
+
+    // remove thread from scheduler
+    sched_remove_thread( thread );
+
+    // reset the THREAD_SIG_KILL signal
+    thread_reset_signal( thread , THREAD_SIG_KILL );
+
+    // detached thread can suicide
+    if( thread->signals & THREAD_SIG_SUICIDE )
+    {
+        assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
+        "thread must be detached in case of suicide\n" );
+
+        // remove thread from process
+        process_remove_thread( thread );
+
+        // release memory for thread descriptor
+        thread_destroy( thread );
+    }
+}  // end sched_kill_thread()

 //////////////////////////////////////////
…
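For readers scanning the hunk above: the rewritten sched_select() now walks the kernel-thread list first and decides per thread type (the IDLE thread is always skipped, an RPC thread is eligible only when it is not blocked and the local RPC FIFO is non-empty, any other kernel thread only when it is not blocked), then falls back to the user-thread list, and finally returns the idle thread. The standalone C sketch below reproduces only that selection order; the thread_info type, the rpc_fifo_empty flag and pick_next() are simplified, hypothetical stand-ins for the kernel's lists, locks and FIFO, not names from the changeset.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified thread descriptor (not the kernel's thread_t). */
typedef enum { T_IDLE, T_RPC, T_DEV, T_USER } ttype_t;

typedef struct { ttype_t type; bool blocked; const char *name; } thread_info;

/* Selection order sketched from the hunk above:
 * 1) runnable kernel threads (RPC only if the RPC FIFO is non-empty),
 * 2) runnable user threads,
 * 3) the idle thread as a last resort.                                  */
static const char * pick_next( thread_info *threads, size_t nr,
                               bool rpc_fifo_empty, const char *idle_name )
{
    for( size_t i = 0; i < nr; i++ )           /* kernel threads first */
    {
        thread_info *t = &threads[i];
        if( t->type == T_IDLE || t->type == T_USER || t->blocked ) continue;
        if( t->type == T_RPC && rpc_fifo_empty ) continue;
        return t->name;
    }
    for( size_t i = 0; i < nr; i++ )           /* then user threads */
    {
        thread_info *t = &threads[i];
        if( t->type == T_USER && !t->blocked ) return t->name;
    }
    return idle_name;                          /* nothing runnable */
}

int main( void )
{
    thread_info list[] = {
        { T_RPC , false, "rpc"  },
        { T_DEV , true , "dev"  },
        { T_USER, false, "user" },
    };
    size_t nr = sizeof(list) / sizeof(list[0]);

    /* RPC FIFO empty: the RPC thread is skipped and the user thread wins. */
    printf( "%s\n", pick_next( list, nr, true , "idle" ) );   /* prints "user" */
    printf( "%s\n", pick_next( list, nr, false, "idle" ) );   /* prints "rpc"  */
    return 0;
}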
     scheduler_t  * sched = &core->scheduler;

-    sched_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d]\n",
-    __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
-
     // take lock protecting threads lists
     spinlock_lock( &sched->lock );
…
     {
         thread = LIST_ELEMENT( iter , thread_t , sched_list );
-        if( thread->signals & THREAD_SIG_KILL ) sched_kill_thread( thread );
+        if( thread->signals ) sched_kill_thread( thread );
     }

…
     {
         thread = LIST_ELEMENT( iter , thread_t , sched_list );
-        if( thread->signals & THREAD_SIG_KILL ) sched_kill_thread( thread );
+        if( thread->signals ) sched_kill_thread( thread );
     }

…
     spinlock_unlock( &sched->lock );

-    sched_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d]\n",
-    __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
-
 } // end sched_handle_signals()

-///////////////////////////////////
-void sched_yield( thread_t * next )
-{
-    reg_t         sr_save;
-
+//////////////////////////////////////
+void sched_update( thread_t * current,
+                   thread_t * next )
+{
+    scheduler_t * sched = &current->core->scheduler;
+
+    if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
+    else                               sched->k_last = &current->sched_list;
+
+    sched->current = next;
+}
+
+//////////////////
+void sched_yield()
+{
+    thread_t    * next;
     thread_t    * current = CURRENT_THREAD;
-    core_t      * core    = current->core;
-    scheduler_t * sched   = &core->scheduler;
-
-    sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] enter / cycle %d\n",
-    __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+
+#if( CONFIG_SCHED_DEBUG & 0x1 )
+if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
+#endif

     // delay the yield if current thread has locks
-    if( thread_can_yield() == false )
+    if( (current->local_locks != 0) || (current->remote_locks != 0) )
     {
         current->flags |= THREAD_FLAG_SCHED;
…
     }

-    // first loop on all threads to handle pending signals
-    sched_handle_signals( core );
-
-    // second loop on threads to select next thread if required
-    if( next == NULL ) next = sched_select( core );
+    // loop on threads to select next thread
+    next = sched_select( current->core );

     // check next thread attached to same core as the calling thread
-    assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");
-
-    // check next thread not blocked
-    assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");
+    assert( (next->core == current->core), __FUNCTION__ ,
+    "next core != current core\n");
+
+    // check next thread not blocked when type != IDLE
+    assert( (next->blocked == 0) || (next->type == THREAD_IDLE) , __FUNCTION__ ,
+    "next thread %x (%s) is blocked on core[%x,%d]\n",
+    next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );

     // switch contexts and update scheduler state if next != current
         if( next != current )
     {
-        sched_dmsg("\n[DMSG] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
-        __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid,
-        next->trdid, thread_type_str(next->type), hal_time_stamp() );
-
-        // calling thread desactivate IRQs
-        hal_disable_irq( &sr_save );
+        // current thread desactivate IRQs
+        hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] );
+
+sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n",
+__FUNCTION__, local_cxy, current->core->lid,
+current, thread_type_str(current->type), current->process->pid, current->trdid,
+next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
+hal_time_stamp() );

         // update scheduler
-        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
-        else                               sched->k_last = &current->sched_list;
-        sched->current = next;
-
-        // handle FPU
+        sched_update( current , next );
+
+        // handle FPU ownership
             if( next->type == THREAD_USER )
         {
-                if( next == core->fpu_owner )  hal_fpu_enable();
-                else                           hal_fpu_disable();
+                if( next == current->core->fpu_owner )  hal_fpu_enable();
+                else                                    hal_fpu_disable();
         }

-        // switch contexts
-        hal_cpu_context_switch( current , next );
-
-        // restore IRQs when calling thread resume
-        hal_restore_irq( sr_save );
+        // switch CPU from calling thread context to new thread context
+        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
+
+        // restore IRQs when next thread resume
+        hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );
     }
     else
     {
-        sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] continue / cycle %d\n",
-        __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+
+sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n",
+__FUNCTION__, local_cxy, current->core->lid, current->trdid,
+thread_type_str(current->type) ,hal_time_stamp() );
+
     }
 }  // end sched_yield()

-////////////////////
-void sched_display()
+
+///////////////////////////////
+void sched_display( lid_t lid )
 {
     list_entry_t * iter;
…
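One point worth calling out in the sched_yield() rewrite above: the saved status register no longer lives in a stack local (sr_save) but in the global switch_save_sr[] array, indexed by the core's local index, so the value written before hal_do_cpu_switch() is still reachable after execution resumes on this core in another thread's context. The user-space C sketch below illustrates only that save-in-a-per-core-slot-around-a-switch pattern, under loose assumptions: ucontext_t and swapcontext() stand in for the kernel's cpu_context and hal_do_cpu_switch(), and the stored value is a plain integer rather than a real status register; none of these stand-ins come from the changeset.

#include <stdio.h>
#include <ucontext.h>

#define CORES_NR 4

/* Stand-in for the kernel's switch_save_sr[]: one saved "status register"
 * slot per core, surviving the stack change done by the context switch.   */
static int switch_save_sr[CORES_NR];

static ucontext_t main_ctx, next_ctx;
static char next_stack[16 * 1024];

static void next_thread( void )
{
    puts( "next thread runs; the saved value waits in the per-core slot" );
    /* returning ends this context; uc_link brings execution back to main_ctx */
}

int main( void )
{
    int lid = 0;                 /* local core index in this sketch */

    /* analogous to hal_disable_irq( &switch_save_sr[lid] ): park the old
     * "SR" in the per-core array instead of a stack local                */
    switch_save_sr[lid] = 1;

    getcontext( &next_ctx );
    next_ctx.uc_stack.ss_sp   = next_stack;
    next_ctx.uc_stack.ss_size = sizeof( next_stack );
    next_ctx.uc_link          = &main_ctx;
    makecontext( &next_ctx, next_thread, 0 );

    /* stand-in for hal_do_cpu_switch( current->cpu_context, next->cpu_context ) */
    swapcontext( &main_ctx, &next_ctx );

    /* back on this "core": read the slot back, as sched_yield() does with
     * hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] )         */
    printf( "restored value = %d\n", switch_save_sr[lid] );
    return 0;
}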
     uint32_t       save_sr;

-    thread_t     * current = CURRENT_THREAD;
-    core_t       * core    = current->core;
+    if( lid >= LOCAL_CLUSTER->cores_nr )
+    {
+        printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
+        __FUNCTION__ , lid , local_cxy );
+        return;
+    }
+
+    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
     scheduler_t  * sched   = &core->scheduler;

     // get pointers on TXT0 chdev
-    xptr_t    txt0_xp  = chdev_dir.txt[0];
+    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     chdev_t * txt0_ptr = GET_PTR( txt0_xp );
…
     remote_spinlock_lock_busy( lock_xp , &save_sr );

-    nolock_printk("\n***** scheduler state for core[%x,%d]\n"
-           "kernel_threads = %d / user_threads = %d / current = %x\n",
-            local_cxy , core->lid,
-            sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
+    nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
+           "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",
+            local_cxy , core->lid, hal_time_stamp(),
+            sched->k_threads_nr, sched->u_threads_nr,
+            sched->current->trdid , sched->idle->trdid );

     // display kernel threads
…
     {
         thread = LIST_ELEMENT( iter , thread_t , sched_list );
-        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
         thread_type_str( thread->type ), thread->trdid, thread->process->pid,
         thread->entry_func, thread->blocked );
…
     {
         thread = LIST_ELEMENT( iter , thread_t , sched_list );
-        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
         thread_type_str( thread->type ), thread->trdid, thread->process->pid,
         thread->entry_func, thread->blocked );
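Finally, sched_display() now takes an explicit lid argument and rejects indexes that are out of range for the local cluster, so the scheduler state of any local core can be dumped (sched_yield() uses this under CONFIG_SCHED_DEBUG, as shown above). A minimal sketch of that check-then-dump pattern follows; MAX_CORES, the per-core counters and dump_core_state() are illustrative placeholders, not names from the kernel sources.

#include <stdio.h>
#include <stdint.h>

#define MAX_CORES 4

/* Illustrative per-core counters, standing in for the scheduler state
 * that the real sched_display() walks and prints under its lock.      */
static uint32_t kernel_threads_nr[MAX_CORES] = { 2, 3, 1, 4 };
static uint32_t user_threads_nr  [MAX_CORES] = { 5, 0, 2, 1 };

/* Same shape as the new sched_display( lid_t lid ): validate the index
 * first, then print the state of the selected core.                    */
static void dump_core_state( uint32_t lid )
{
    if( lid >= MAX_CORES )
    {
        fprintf( stderr, "[ERROR] illegal local core index %u\n", lid );
        return;
    }
    printf( "core[%u] : kernel_threads = %u / user_threads = %u\n",
            lid, kernel_threads_nr[lid], user_threads_nr[lid] );
}

int main( void )
{
    dump_core_state( 1 );     /* valid index: prints the core state */
    dump_core_state( 9 );     /* out of range: rejected with an error */
    return 0;
}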