source: trunk/kernel/kern/scheduler.c @ 408

Last change on this file since 408 was 408, checked in by alain, 6 years ago

Fix several bugs in the fork() syscall.

File size: 13.5 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file

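///////////////////////////////////////////////////////////////////////////////////////////
// This function initializes the scheduler attached to one core: it resets the user
// and kernel threads counters, registers the calling thread as current, and
// initializes the roots of the user and kernel threads lists. The idle, u_last,
// and k_last pointers are set later, as noted below.
///////////////////////////////////////////////////////////////////////////////////////////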
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialize threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

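///////////////////////////////////////////////////////////////////////////////////////////
// This function registers a new thread in the user or kernel threads list of the
// scheduler attached to the owner core, depending on the thread type, and updates
// the associated threads counter. Both lists are protected by the scheduler lock.
///////////////////////////////////////////////////////////////////////////////////////////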
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialize u_last field if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialize k_last field if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

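///////////////////////////////////////////////////////////////////////////////////////////
// This function removes a thread from the user or kernel threads list of the
// scheduler attached to the thread owner core, and resets the corresponding
// "last executed" pointer when the list becomes empty.
///////////////////////////////////////////////////////////////////////////////////////////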
/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t       * core  = thread->core;
    scheduler_t  * sched = &core->scheduler;
    thread_type_t  type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;

        // reset the u_last field if list empty
        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    }
    else // kernel thread
    {
        // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;

        // reset the k_last field if list empty
        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

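///////////////////////////////////////////////////////////////////////////////////////////
// This function implements the scheduling policy: kernel threads have priority over
// user threads. It scans the kernel threads list in round-robin order, starting from
// the entry following k_last, and returns the first runnable kernel thread: an RPC
// thread when the local RPC FIFO is not empty, or a DEV thread when its waiting
// queue is not empty (the IDLE thread is skipped). If no kernel thread is runnable,
// it scans the user threads list the same way, starting from u_last, and returns
// the first non-blocked user thread. If no thread at all is runnable, it returns
// the idle thread.
///////////////////////////////////////////////////////////////////////////////////////////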
//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first loop : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the root entry, that does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // analyse kernel thread type
            switch( thread->type )
            {
                case THREAD_IDLE: // skip IDLE thread
                break;

                case THREAD_RPC:  // select RPC thread if not blocked and RPC FIFO non-empty
                if( (thread->blocked == 0) &&
                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                default:          // select DEV thread if not blocked and waiting queue non-empty
                if( (thread->blocked == 0) &&
                    (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root ) ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;
            }  // end switch type
        }
        while( current != last );
    }

    // second loop : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the root entry, that does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // third step : return the idle thread if no runnable thread was found
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

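///////////////////////////////////////////////////////////////////////////////////////////
// This function handles the actual deletion of a thread: it checks that all locks
// have been released, removes the thread from the scheduler, and resets the
// THREAD_SIG_KILL signal. When the thread requested its own deletion (suicide),
// it must be detached, and the thread descriptor itself is released.
///////////////////////////////////////////////////////////////////////////////////////////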
///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check that all locks have been released
    if( thread_can_yield() == false )
    {
        panic("locks not released for thread %x in process %x on core[%x][%d]",
        thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

    // a detached thread can commit suicide
    if( thread->signals & THREAD_SIG_SUICIDE )
    {
        assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
        "thread must be detached in case of suicide\n" );

        // remove thread from process
        process_remove_thread( thread );

        // release memory for thread descriptor
        thread_destroy( thread );
    }
}  // end sched_kill_thread()

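///////////////////////////////////////////////////////////////////////////////////////////
// This function scans all threads attached to a given core, looking for pending
// signals. In the current implementation the call to sched_kill_thread() is
// disabled: a thread with a pending signal is only reported by a warning message.
///////////////////////////////////////////////////////////////////////////////////////////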
//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;
    scheduler_t  * sched = &core->scheduler;

// signal_dmsg("\n@@@ %s enter at cycle %d\n",
// __FUNCTION__ , hal_time_stamp() );

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // handle user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) // sched_kill_thread( thread );
        {
            printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
            __FUNCTION__, thread, thread->signals, hal_time_stamp() );
        }
    }

    // handle kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals )  // sched_kill_thread( thread );
        {
            printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
            __FUNCTION__, thread, thread->signals, hal_time_stamp() );
        }
    }

    // release lock
    spinlock_unlock( &sched->lock );

// signal_dmsg("\n@@@ %s exit at cycle %d\n",
// __FUNCTION__ , hal_time_stamp() );

} // end sched_handle_signals()

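///////////////////////////////////////////////////////////////////////////////////////////
// This function is the main scheduling point. It proceeds in five steps:
// 1) if the calling thread still holds local or remote locks, the yield is delayed:
//    the THREAD_FLAG_SCHED flag is set and the function returns immediately;
// 2) interrupts are disabled and the SR is saved in the calling thread context;
// 3) sched_select() is called to choose the next thread;
// 4) if the selected thread differs from the calling thread, the scheduler state
//    and the FPU ownership are updated, and the CPU context is switched;
// 5) interrupts are restored from the next thread context.
//
// Typical usage (illustrative sketch only: the thread_block() helper and the
// THREAD_BLOCKED_IO cause are assumptions, not defined in this file):
//
//     thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );  // mark thread as blocked
//     sched_yield( "blocked on IO" );                      // hand the core to another thread
///////////////////////////////////////////////////////////////////////////////////////////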
////////////////////////////////
void sched_yield( char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    scheduler_t * sched   = &current->core->scheduler;

#if( CONFIG_SCHED_DEBUG & 0x1 )
if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread context
    hal_disable_irq( &current->save_sr );

    // select the next thread
    next = sched_select( sched );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ ,
    "next core != current core\n");

    // check next thread not blocked when type != IDLE
    assert( (next->blocked == 0) || (next->type == THREAD_IDLE) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
(uint32_t)hal_get_cycles() );

        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from calling thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
(uint32_t)hal_get_cycles() );

    }

    // exit critical section / restore SR from next thread context
    hal_restore_irq( next->save_sr );

}  // end sched_yield()


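///////////////////////////////////////////////////////////////////////////////////////////
// This debug function displays the state of the scheduler attached to a given core:
// threads counters, current thread, and one line per registered thread. It takes
// the TXT0 terminal lock in busy waiting mode, so that the lines of one display
// are not interleaved with messages from other cores.
///////////////////////////////////////////////////////////////////////////////////////////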
///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    if( lid >= LOCAL_CLUSTER->cores_nr )
    {
        printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
        __FUNCTION__ , lid , local_cxy );
        return;
    }

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
           "kernel_threads = %d / user_threads = %d / current = (%x,%x)\n",
            local_cxy , core->lid, hal_time_stamp(),
            sched->k_threads_nr, sched->u_threads_nr,
            sched->current->process->pid , sched->current->trdid );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if (thread->type == THREAD_DEV)
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked , thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()