source: trunk/kernel/kern/scheduler.c @ 437

Last change on this file since 437 was 437, checked in by alain, 4 years ago

Fix various bugs

File size: 13.8 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH. is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file

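///////////////////////////////////////////////////////////////////////////////////////////
// This function initialises the scheduler attached to a given core: the thread counters
// are reset, the user and kernel threads lists are made empty, and the idle thread
// pointer is left NULL (it is registered later by kernel_init()).
///////////////////////////////////////////////////////////////////////////////////////////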
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialise threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    sched->req_ack_pending = false;           // no pending request

}  // end sched_init()

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

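///////////////////////////////////////////////////////////////////////////////////////////
// This function implements the core scheduling policy. It scans the kernel threads list
// first, in round-robin order starting from the entry following the last executed kernel
// thread (k_last): a DEV thread is selected when its waiting queue is not empty, and an
// RPC thread is selected when the local RPC FIFO is not empty. It then scans the user
// threads list, starting from u_last, and selects the first non-blocked thread.
// The idle thread is returned when no other runnable thread is found.
///////////////////////////////////////////////////////////////////////////////////////////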
//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // analyse kernel thread type
            switch( thread->type )
            {
                case THREAD_RPC:  // if non blocked and RPC FIFO non-empty
                if( (thread->blocked == 0) &&
                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                case THREAD_DEV:  // if non blocked and waiting queue non empty
                if( (thread->blocked == 0) &&
                    (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root ) ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                default:
                break;
            }
        } // end loop on kernel threads
    } // end if kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on user threads
    } // end if user threads

    // third : return idle thread if no other runnable thread
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

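///////////////////////////////////////////////////////////////////////////////////////////
// This function is called by sched_yield() at each descheduling point. It scans all user
// threads attached to the scheduler and handles two types of pending requests:
// - THREAD_FLAG_REQ_ACK : acknowledge that the (blocked) thread has been descheduled,
//   by decrementing the ack_rsp_count counter and resetting the flag.
// - THREAD_FLAG_REQ_DELETE : unlink the thread from its parent if attached, remove it
//   from the scheduler, destroy it, and destroy the process descriptor when it was the
//   last thread of the process.
///////////////////////////////////////////////////////////////////////////////////////////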
///////////////////////////////////////////
void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    thread_t     * thread;
    process_t    * process;

    scheduler_t  * sched = &core->scheduler;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // scan all user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {
            // check thread blocked
            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
            __FUNCTION__ , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {
            // get thread process descriptor
            process = thread->process;

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x must be deleted / cycle %d\n",
__FUNCTION__ , thread , process->pid , cycle );
#endif
            // release FPU if required
            if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;

            // detach thread from parent if attached
            if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
            thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );

            // remove thread from scheduler (scheduler lock already taken)
            uint32_t threads_nr = sched->u_threads_nr;
            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );
            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
            if( threads_nr == 1 ) sched->u_last = NULL;

            // delete thread
            thread_destroy( thread );

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
__FUNCTION__ , thread , process->pid , cycle );
#endif
            // destroy process descriptor if no more threads
            if( process->th_nr == 0 )
            {
                // delete process
                process_destroy( process );

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
__FUNCTION__ , process->pid , cycle );
#endif

            }
        }
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

} // end sched_handle_signals()

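///////////////////////////////////////////////////////////////////////////////////////////
// This function is the descheduling point: it selects the next thread to run on the
// calling core, switches the CPU context when the selected thread differs from the
// current one, and finally handles the pending signals for this core. When the calling
// thread still holds one or more spinlocks, the yield is simply postponed by setting
// the THREAD_FLAG_SCHED flag, because descheduling a lock owner could force other cores
// to spin on the locks it holds.
///////////////////////////////////////////////////////////////////////////////////////////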
////////////////////////////////
void sched_yield( char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

#if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
sched_display( core->lid );
#endif

    // delay the yield if current thread holds locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread descriptor
    hal_disable_irq( &CURRENT_THREAD->save_sr );

    // select the next thread to run on this core
    next = sched_select( sched );

    // check kernel stack overflow for the next thread
    assert( (next->signature == THREAD_SIGNATURE),
    __FUNCTION__ , "kernel stack overflow for thread %x\n", next );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core),
    __FUNCTION__ , "next core %x != current core %x\n", next->core, current->core );

    // check next thread not blocked when type != IDLE
    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

#if CONFIG_DEBUG_SCHED_YIELD
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_YIELD < cycle )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next , thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
#endif

        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

#if (CONFIG_DEBUG_SCHED_YIELD & 1)
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_YIELD < cycle )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, cycle );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()
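///////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not extracted from this file): a kernel thread that has no
// more work typically marks itself blocked, then calls sched_yield() to release the core.
// The thread_block() helper and the THREAD_BLOCKED_IO cause below are assumptions used
// only for illustration; the cause string passed to sched_yield() is only used by the
// debug messages above.
//
//     thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );   // mark the thread blocked
//     sched_yield( "blocked on IO" );                       // give the core to another thread
///////////////////////////////////////////////////////////////////////////////////////////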

///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid);

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
            local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->type == THREAD_DEV )
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags, thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked, thread->flags );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()