source: trunk/kernel/kern/scheduler.c @ 444

Last change on this file since 444 was 443, checked in by alain, 6 years ago

Fix a few bugs while debugging the sort multi-thread application.

File size: 13.2 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>


///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

uint32_t   idle_thread_count;
uint32_t   idle_thread_count_active;

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;               // initialized in kernel_init()
    sched->u_last         = NULL;               // initialized in sched_register_thread()
    sched->k_last         = NULL;               // initialized in sched_register_thread()

    // initialize the threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    sched->req_ack_pending = false;             // no pending request
    sched->trace           = false;             // context switches trace deactivated

}  // end sched_init()
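
// For context: a minimal sketch of how sched_init() might be driven at boot.
// This is an illustration only. The real call site is in kernel_init.c (not
// shown here) and may differ; the init_local_schedulers() wrapper below is
// hypothetical, while LOCAL_CLUSTER, core_tbl and cores_nr are taken from
// their usage in sched_display() at the end of this file.
static void init_local_schedulers( void )
{
    lid_t lid;
    for( lid = 0 ; lid < LOCAL_CLUSTER->cores_nr ; lid++ )
    {
        sched_init( &LOCAL_CLUSTER->core_tbl[lid] );
    }
}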

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()
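
// Usage illustration: a hedged sketch of the registration step in a
// thread-creation path. Everything around the sched_register_thread()
// call is assumed for the example (the real creation code lives in
// thread.c and may differ); example_attach_thread() is a hypothetical name.
static void example_attach_thread( core_t   * core,
                                   thread_t * thread )
{
    // bind the thread to its core, then make it visible to sched_select()
    thread->core = core;
    sched_register_thread( core , thread );
}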

//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select kernel thread if not blocked and not IDLE
            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on kernel threads
    } // end if kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if not blocked
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on user threads
    } // end if user threads

    // third : return idle thread if no other runnable thread
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()
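
// The scan above implements a round-robin policy over a circular list :
// start just after the last selected entry, skip the list root, wrap
// around, and stop after a full turn. The sketch below demonstrates the
// same idiom on a minimal hand-rolled circular list; rr_node_t and
// rr_select() are illustrative names, not part of ALMOS-MKH.
typedef struct rr_node_s
{
    struct rr_node_s * next;      // next entry in circular list
    uint32_t           blocked;   // 0 if runnable
}
rr_node_t;

static rr_node_t * rr_select( rr_node_t * root,
                              rr_node_t * last )
{
    rr_node_t * current = last;
    bool_t      done    = false;

    while( done == false )
    {
        current = current->next;              // advance first
        if( current == last ) done = true;    // full turn completed
        if( current == root ) continue;       // root carries no payload
        if( current->blocked == 0 ) return current;
    }
    return NULL;                              // no runnable entry found
}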

///////////////////////////////////////////
void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;
    bool_t         last_thread;

    // get pointer on scheduler
    scheduler_t  * sched = &core->scheduler;

    // get pointer on user threads root
    root = &sched->u_root;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // We use a while loop to scan the user threads, in order to control the
    // iterator increment ourselves, because some threads will be destroyed
    // during the scan, and we cannot use a LIST_FOREACH()

    // initialise list iterator
    iter = root->next;

    // scan all user threads
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator before the thread can be destroyed
        iter = iter->next;

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {
            // check thread blocked
            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
            __FUNCTION__ , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {
            // get thread process descriptor
            process = thread->process;

            // release FPU if required
            if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;

            // remove thread from scheduler (scheduler lock already taken)
            uint32_t threads_nr = sched->u_threads_nr;

            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );

            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
            if( threads_nr == 1 ) sched->u_last = NULL;

#if DEBUG_SCHED_HANDLE_SIGNALS
// trace before thread_destroy(), because the thread descriptor
// must not be referenced after it has been released
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , thread->trdid , process->pid , local_cxy , thread->core->lid , cycle );
#endif
            // delete thread
            last_thread = thread_destroy( thread );

            // destroy process descriptor if no more threads
            if( last_thread )
            {

#if DEBUG_SCHED_HANDLE_SIGNALS
// trace before process_destroy(), because the process descriptor
// must not be referenced after it has been released
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : process %x in cluster %x deleted / cycle %d\n",
__FUNCTION__ , process->pid , local_cxy , cycle );
#endif
                // delete process
                process_destroy( process );
            }
        }
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

} // end sched_handle_signals()
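
// The scan above relies on a delete-safe traversal idiom : the iterator is
// advanced *before* the current entry can be unlinked and destroyed, so the
// loop never dereferences freed memory. A minimal standalone sketch of the
// idiom (reap_list(), item_t and the "doomed" flag are illustrative names,
// not ALMOS-MKH code):
typedef struct item_s
{
    list_entry_t list;     // chaining in a circular list
    uint32_t     doomed;   // non-zero if the item must be removed
}
item_t;

static void reap_list( list_entry_t * root )
{
    list_entry_t * iter = root->next;
    while( iter != root )
    {
        item_t * item = LIST_ELEMENT( iter , item_t , list );
        iter = iter->next;               // advance before any removal
        if( item->doomed )
        {
            list_unlink( &item->list );  // safe : iter no longer points here
            // ... release the item's memory here ...
        }
    }
}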

////////////////////////////////
void sched_yield( char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

#if (DEBUG_SCHED_YIELD & 0x1)
if( sched->trace )
sched_display( core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread descriptor
    hal_disable_irq( &CURRENT_THREAD->save_sr );

    // loop on threads to select next thread
    next = sched_select( sched );

    // check next thread kernel_stack overflow
    assert( (next->signature == THREAD_SIGNATURE), __FUNCTION__ ,
    "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, core->lid );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ ,
    "next core %x != current core %x\n", next->core, current->core );

    // check next thread not blocked when type != IDLE
    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

#if DEBUG_SCHED_YIELD
if( sched->trace )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, next,
thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
#endif

        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

#if DEBUG_SCHED_YIELD
if( sched->trace )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),
current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()
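
// Usage illustration: a hedged sketch of a typical sched_yield() caller.
// An idle-thread body can simply keep offering the core back to the
// scheduler. idle_thread_body() is a hypothetical name (the real idle loop
// lives elsewhere in ALMOS-MKH and may also wait for interrupts), and the
// cause string only shows up in the DEBUG_SCHED_YIELD trace.
static void idle_thread_body( void )
{
    while( 1 )
    {
        sched_yield( "IDLE" );
    }
}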


///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid);

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if (thread->type == THREAD_DEV)
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags, thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked, thread->flags );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()
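
// The TXT0 access in sched_display() relies on the ALMOS-MKH extended
// pointer idiom : an xptr_t packs a cluster identifier and a local pointer,
// and XPTR / GET_CXY / GET_PTR convert between the two views. A minimal
// sketch of the idiom, assuming the hal_remote_lw() remote read primitive
// from the HAL API (example_remote_read() itself is a hypothetical helper):
static uint32_t example_remote_read( xptr_t word_xp )
{
    cxy_t      cxy = GET_CXY( word_xp );        // cluster owning the word
    uint32_t * ptr = GET_PTR( word_xp );        // pointer local to that cluster
    return hal_remote_lw( XPTR( cxy , ptr ) );  // read the remote word
}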