source: trunk/kernel/kern/scheduler.c @ 407

Last change on this file since 407 was 407, checked in by alain, 6 years ago

First implementation of fork/exec.

/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialize threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

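///////////////////////////////////////////////////////////////////////////////////////////
// This function registers a thread in the user or kernel threads list of the scheduler
// attached to a given core, and updates the associated threads counter.
// It takes the scheduler lock to protect the lists.
///////////////////////////////////////////////////////////////////////////////////////////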
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialize u_last field if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialize k_last field if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

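///////////////////////////////////////////////////////////////////////////////////////////
// This function removes a thread from the user or kernel threads list of the scheduler
// attached to the thread's core, and updates the associated threads counter.
// It takes the scheduler lock to protect the lists.
///////////////////////////////////////////////////////////////////////////////////////////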
/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t       * core  = thread->core;
    scheduler_t  * sched = &core->scheduler;
    thread_type_t  type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;

        // reset the u_last field if list empty
        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    }
    else // kernel thread
    {
        // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;

        // reset the k_last field if list empty
        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

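///////////////////////////////////////////////////////////////////////////////////////////
// This function selects the next thread to run on a given core, giving kernel threads
// priority over user threads. It implements a round-robin policy: each list is scanned
// starting from the entry following the last scheduled one (k_last / u_last), and the
// first runnable thread found is returned. If no thread is runnable, it returns the
// idle thread.
///////////////////////////////////////////////////////////////////////////////////////////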
////////////////////////////////////////
thread_t * sched_select( core_t * core )
{
    thread_t    * thread;

    scheduler_t * sched = &core->scheduler;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    list_entry_t * current;
    list_entry_t * last;

    // first loop : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the list root, which does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // analyse kernel thread type
            switch( thread->type )
            {
                case THREAD_IDLE: // skip IDLE thread
                break;

                case THREAD_RPC:  // select RPC thread if not blocked and RPC FIFO not empty
                if( (thread->blocked == 0) &&
                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                default:          // select DEV thread if not blocked
                if( thread->blocked == 0 )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;
            }  // end switch type
        }
        while( current != last );
    }

    // second loop : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the list root, which does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // third : return the idle thread if no other thread is runnable
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

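///////////////////////////////////////////////////////////////////////////////////////////
// This function removes a signaled thread from its scheduler and resets the
// THREAD_SIG_KILL signal. If the thread requested its own destruction
// (THREAD_SIG_SUICIDE), it is also removed from its process and its descriptor
// is released.
///////////////////////////////////////////////////////////////////////////////////////////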
///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check that all locks have been released
    if( thread_can_yield() == false )
    {
        panic("locks not released for thread %x in process %x on core[%x][%d]",
        thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

    // only a detached thread can suicide
    if( thread->signals & THREAD_SIG_SUICIDE )
    {
        assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
        "thread must be detached in case of suicide\n" );

        // remove thread from process
        process_remove_thread( thread );

        // release memory for thread descriptor
        thread_destroy( thread );
    }
}  // end sched_kill_thread()

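///////////////////////////////////////////////////////////////////////////////////////////
// This function scans all user and kernel threads registered in the scheduler of a
// given core, and calls sched_kill_thread() for each thread with a pending signal.
///////////////////////////////////////////////////////////////////////////////////////////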
//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;
    scheduler_t  * sched = &core->scheduler;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // handle user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) sched_kill_thread( thread );
    }

    // handle kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) sched_kill_thread( thread );
    }

    // release lock
    spinlock_unlock( &sched->lock );

} // end sched_handle_signals()

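///////////////////////////////////////////////////////////////////////////////////////////
// This function updates the scheduler state after a scheduling decision: it records
// the descheduled thread as the last scheduled entry of its list (u_last or k_last),
// and registers the selected thread as the current thread.
///////////////////////////////////////////////////////////////////////////////////////////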
//////////////////////////////////////
void sched_update( thread_t * current,
                   thread_t * next )
{
    scheduler_t * sched = &current->core->scheduler;

    if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
    else                               sched->k_last = &current->sched_list;

    sched->current = next;
}

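///////////////////////////////////////////////////////////////////////////////////////////
// This function is called by the current thread to release the core: it selects the
// next thread with sched_select(), updates the scheduler state, handles FPU ownership,
// and performs the context switch with IRQs disabled. The yield is simply delayed
// (THREAD_FLAG_SCHED is set) if the calling thread still holds local or remote locks.
///////////////////////////////////////////////////////////////////////////////////////////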
//////////////////
void sched_yield()
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;

#if( CONFIG_SCHED_DEBUG & 0x1 )
if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // select next thread to run
    next = sched_select( current->core );

    // check that the next thread is attached to the same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ ,
    "next core != current core\n");

    // check that the next thread is not blocked when its type != IDLE
    assert( (next->blocked == 0) || (next->type == THREAD_IDLE) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {
        // current thread deactivates IRQs
        hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] );

sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
hal_time_stamp() );

        // update scheduler
        sched_update( current , next );

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from calling thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );

        // restore IRQs when the calling thread resumes
        hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );
    }
    else
    {

sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid, current->trdid,
thread_type_str(current->type) , hal_time_stamp() );

    }
}  // end sched_yield()

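///////////////////////////////////////////////////////////////////////////////////////////
// This debug function displays the state of the scheduler attached to a given core:
// the threads counters, and one line per registered kernel or user thread.
// It takes the TXT0 terminal lock in busy waiting mode to avoid interleaved output.
///////////////////////////////////////////////////////////////////////////////////////////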
///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    if( lid >= LOCAL_CLUSTER->cores_nr )
    {
        printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
        __FUNCTION__ , lid , local_cxy );
        return;
    }

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
           "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",
            local_cxy , core->lid, hal_time_stamp(),
            sched->k_threads_nr, sched->u_threads_nr,
            sched->current->trdid , sched->idle->trdid );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
        thread->entry_func, thread->blocked );
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
        thread->entry_func, thread->blocked );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()