source: trunk/kernel/kern/scheduler.c @ 408

Last change on this file since 408 was 408, checked in by alain, 6 years ago

Fix several bugs in the fork() syscall.

File size: 13.5 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file

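///////////////////////////////////////////////////////////////////////////////////////////
// For reference, a minimal sketch of the scheduler_t descriptor as this file uses it.
// The actual definition lives in scheduler.h; the field order and exact types shown
// here are reconstructed from the accesses below, not copied from that header.
//
//     typedef struct scheduler_s
//     {
//         spinlock_t        lock;          // protects the two threads lists
//         uint32_t          u_threads_nr;  // number of attached user threads
//         uint32_t          k_threads_nr;  // number of attached kernel threads
//         list_entry_t      u_root;        // root of the user threads list
//         list_entry_t      k_root;        // root of the kernel threads list
//         list_entry_t    * u_last;        // last scheduled entry in user list
//         list_entry_t    * k_last;        // last scheduled entry in kernel list
//         struct thread_s * idle;          // pointer on the idle thread
//         struct thread_s * current;       // pointer on the running thread
//     } scheduler_t;
///////////////////////////////////////////////////////////////////////////////////////////
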
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialize the threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

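///////////////////////////////////////////////////////////////////////////////////////////
// Hedged usage sketch : sched_init() leaves sched->idle as NULL, so the boot code is
// expected to create the idle thread and patch this field afterwards. The kernel_init()
// fragment below only illustrates that contract; it is not a copy of the actual code.
//
//     core_t * core = &LOCAL_CLUSTER->core_tbl[lid];
//     sched_init( core );                     // lists empty / idle == NULL
//     thread_t * idle = ...;                  // idle thread creation elided
//     core->scheduler.idle = idle;            // fulfil the "initialized in
//                                             // kernel_init()" promise above
///////////////////////////////////////////////////////////////////////////////////////////
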
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take the lock protecting the scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialize u_last field if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialize k_last field if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release the lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

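///////////////////////////////////////////////////////////////////////////////////////////
// The threads lists are intrusive : each thread embeds a list_entry_t (sched_list) that
// list_add_last() links into u_root or k_root, so registration allocates nothing.
// LIST_ELEMENT(), used by sched_select() below, is assumed to follow the classic
// container_of pattern to map an entry back to its enclosing thread; a sketch of that
// assumption (the real macro is defined in list.h):
//
//     #define LIST_ELEMENT( entry , type , member ) \
//         ( (type *)( (char *)(entry) - offsetof( type , member ) ) )
//
//     thread_t * t = LIST_ELEMENT( sched->u_root.next , thread_t , sched_list );
///////////////////////////////////////////////////////////////////////////////////////////
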
/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t       * core  = thread->core;
    scheduler_t  * sched = &core->scheduler;
    thread_type_t  type  = thread->type;

    // take the lock protecting the scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;

        // reset the u_last field if list empty
        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    }
    else // kernel thread
    {
        // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;

        // reset the k_last field if list empty
        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    }

    // release the lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

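///////////////////////////////////////////////////////////////////////////////////////////
// Selection policy : kernel threads have absolute priority over user threads, and each
// list is scanned round-robin, starting from the entry that follows the last scheduled
// one (k_last / u_last), so every runnable thread is eventually visited.
///////////////////////////////////////////////////////////////////////////////////////////
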
//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;

    // take the lock protecting the scheduler lists
    spinlock_lock( &sched->lock );

    // first loop : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the root entry, which does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // analyse kernel thread type
            switch( thread->type )
            {
                case THREAD_IDLE: // skip the IDLE thread
                break;

                case THREAD_RPC:  // select RPC thread if not blocked and RPC FIFO not empty
                if( (thread->blocked == 0) &&
                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                default:          // select DEV thread if not blocked and waiting queue not empty
                if( (thread->blocked == 0) &&
                    (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root ) ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;
            }  // end switch type
        }
        while( current != last );
    }

    // second loop : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the root entry, which does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return this thread if runnable
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // third : return the idle thread if no runnable thread was found
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

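///////////////////////////////////////////////////////////////////////////////////////////
// Worked example (hypothetical state) : with the kernel list holding, in order,
// { IDLE , RPC , DEV } and k_last pointing on the IDLE entry, the scan visits RPC
// first; if the RPC FIFO is empty it moves on to DEV, which is returned when its wait
// queue is not empty; otherwise the user list is scanned in the same round-robin way,
// and sched->idle is the final fallback.
///////////////////////////////////////////////////////////////////////////////////////////
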
///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check that the thread does not hold any lock
    if( thread_can_yield() == false )
    {
        panic("locks not released for thread %x in process %x on core[%x][%d]",
        thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

    // a detached thread can commit suicide
    if( thread->signals & THREAD_SIG_SUICIDE )
    {
        assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
        "thread must be detached in case of suicide\n" );

        // remove thread from process
        process_remove_thread( thread );

        // release memory allocated for the thread descriptor
        thread_destroy( thread );
    }
}  // end sched_kill_thread()

//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;
    scheduler_t  * sched = &core->scheduler;

// signal_dmsg("\n@@@ %s enter at cycle %d\n",
// __FUNCTION__ , hal_time_stamp() );

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // handle user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) // sched_kill_thread( thread );
        {
            printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
            __FUNCTION__, thread, thread->signals, hal_time_stamp() );
        }
    }

    // handle kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) // sched_kill_thread( thread );
        {
            printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
            __FUNCTION__, thread, thread->signals, hal_time_stamp() );
        }
    }

    // release lock
    spinlock_unlock( &sched->lock );

// signal_dmsg("\n@@@ %s exit at cycle %d\n",
// __FUNCTION__ , hal_time_stamp() );

} // end sched_handle_signals()

////////////////////////////////
void sched_yield( char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    scheduler_t * sched   = &current->core->scheduler;

#if( CONFIG_SCHED_DEBUG & 0x1 )
if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread context
    hal_disable_irq( &current->save_sr );

    // loop on threads to select the next thread
    next = sched_select( sched );

    // check that the next thread is attached to the same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ ,
    "next core != current core\n");

    // check that the next thread is not blocked when type != IDLE
    assert( (next->blocked == 0) || (next->type == THREAD_IDLE) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
(uint32_t)hal_get_cycles() );

        // update scheduler state
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from the calling thread context to the new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
(uint32_t)hal_get_cycles() );

    }

    // exit critical section / restore SR from next thread context
    hal_restore_irq( next->save_sr );

}  // end sched_yield()

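///////////////////////////////////////////////////////////////////////////////////////////
// Hedged call-site sketch : a blocking primitive is expected to mark the current thread
// blocked, then call sched_yield() with a human-readable cause for the debug trace.
// The thread_block() signature and THREAD_BLOCKED_IO flag below are illustrative
// assumptions, not taken from this file.
//
//     thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );  // mark thread not runnable
//     sched_yield( "waiting IO completion" );              // hand the core to another
//                                                          // thread (or to idle)
///////////////////////////////////////////////////////////////////////////////////////////
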
///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    if( lid >= LOCAL_CLUSTER->cores_nr )
    {
        printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
        __FUNCTION__ , lid , local_cxy );
        return;
    }

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
           "kernel_threads = %d / user_threads = %d / current = (%x,%x)\n",
            local_cxy , core->lid, hal_time_stamp(),
            sched->k_threads_nr, sched->u_threads_nr,
            sched->current->process->pid , sched->current->trdid );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->type == THREAD_DEV )
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked , thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()