/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr = 0;
    sched->k_threads_nr = 0;

    sched->current      = CURRENT_THREAD;
    sched->idle         = NULL;               // initialized in kernel_init()
    sched->u_last       = NULL;               // initialized in sched_register_thread()
    sched->k_last       = NULL;               // initialized in sched_register_thread()

    // initialize the thread lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

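// Illustrative usage (a sketch, not code from this file): kernel_init() is expected
// to call sched_init() once per local core before any thread is registered, and to
// set the idle field as soon as the idle thread exists:
//
//     core_t * core = &LOCAL_CLUSTER->core_tbl[lid];
//     sched_init( core );
//     core->scheduler.idle = idle;   // hypothetical pointer on the idle thread
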
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialize u_last field if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialize k_last field if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

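// Note: user and kernel threads are kept on two separate lists because sched_select()
// below always gives kernel (RPC / DEV) threads priority over user threads.
// Illustrative registration (a sketch, not code from this file):
//
//     thread_t * new = ...;                     // hypothetical new thread descriptor
//     sched_register_thread( new->core , new );
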
/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t      * core  = thread->core;
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;

        // reset the u_last field if list empty
        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    }
    else // kernel thread
    {
        // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;

        // reset the k_last field if list empty
        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

////////////////////////////////////////
thread_t * sched_select( core_t * core )
{
    thread_t * thread;

    scheduler_t * sched = &core->scheduler;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    list_entry_t * current;
    list_entry_t * last;

    // first loop : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the root that does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // analyse kernel thread type
            switch( thread->type )
            {
                case THREAD_IDLE:  // skip IDLE thread
                break;

                case THREAD_RPC:   // select RPC thread if not blocked and RPC FIFO not empty
                if( (thread->blocked == 0) &&
                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                default:           // select DEV thread if not blocked
                if( thread->blocked == 0 )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;
            }  // end switch type
        }
        while( current != last );
    }

    // second loop : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the root that does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // third : return idle thread if no other runnable thread
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

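// Selection policy implemented above: runnable kernel threads (RPC / DEV) always take
// priority over user threads, and each list is scanned round-robin from the entry
// following the last scheduled one. A minimal sketch, assuming three user threads
// A, B, C registered in that order, with B scheduled last:
//
//     sched->u_last == &B->sched_list;   // set by sched_update() when B ran
//     sched_select( core );              // examines C, then A, then B, and returns
//                                        // the first one with blocked == 0
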
///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check locks
    if( thread_can_yield() == false )
    {
        panic("locks not released for thread %x in process %x on core[%x][%d]",
              thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

    // only a detached thread can commit suicide
    if( thread->signals & THREAD_SIG_SUICIDE )
    {
        assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
                "thread must be detached in case of suicide\n" );

        // remove thread from process
        process_remove_thread( thread );

        // release memory for thread descriptor
        thread_destroy( thread );
    }
}  // end sched_kill_thread()

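// Note: for a THREAD_SIG_KILL request the thread is only detached from the scheduler
// here (destruction is presumably completed elsewhere, e.g. on thread join); for
// THREAD_SIG_SUICIDE the detached thread is also removed from its process and its
// descriptor destroyed immediately.
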
//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;
    scheduler_t  * sched = &core->scheduler;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // handle user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) sched_kill_thread( thread );
    }

    // handle kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals ) sched_kill_thread( thread );
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_handle_signals()

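// Caution: sched_kill_thread() calls sched_remove_thread(), which re-acquires
// &sched->lock while sched_handle_signals() still holds it; unless spinlock_lock()
// supports recursive acquisition (not shown in this file), this path can
// self-deadlock. Unlinking entries from inside LIST_FOREACH raises a similar concern.
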
//////////////////////////////////////
void sched_update( thread_t * current,
                   thread_t * next )
{
    scheduler_t * sched = &current->core->scheduler;

    if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
    else                               sched->k_last = &current->sched_list;

    sched->current = next;
}

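// Note: sched_update() records the descheduled thread as the new u_last / k_last
// entry, so the next sched_select() round-robin scan restarts just after it.
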
//////////////////
void sched_yield()
{
    thread_t * next;
    thread_t * current = CURRENT_THREAD;

#if( CONFIG_SCHED_DEBUG & 0x1 )
if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // loop on threads to select the next thread
    next = sched_select( current->core );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ ,
            "next core != current core\n");

    // check next thread not blocked when type != IDLE
    assert( (next->blocked == 0) || (next->type == THREAD_IDLE) , __FUNCTION__ ,
            "next thread %x (%s) is blocked on core[%x,%d]\n",
            next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {
        // current thread deactivates IRQs
        hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] );

sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
hal_time_stamp() );

        // update scheduler
        sched_update( current , next );

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from calling thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );

        // restore IRQs when the next thread resumes
        hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );
    }
    else
    {

sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n",
__FUNCTION__, local_cxy, current->core->lid, current->trdid,
thread_type_str(current->type) , hal_time_stamp() );

    }
}  // end sched_yield()
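
// Illustrative usage (a sketch, not code from this file): blocking primitives are
// expected to mark the calling thread blocked, then call sched_yield() to release
// the core:
//
//     thread_block( thread , THREAD_BLOCKED_IO );  // hypothetical blocking call
//     sched_yield();                               // descheduled until unblocked
//
// When the yield is delayed because locks are still held, THREAD_FLAG_SCHED is set
// above; the lock-release path is presumably responsible for retrying the yield.
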

///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    if( lid >= LOCAL_CLUSTER->cores_nr )
    {
        printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
               __FUNCTION__ , lid , local_cxy );
        return;
    }

    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t * sched = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
                  "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",
                  local_cxy , core->lid, hal_time_stamp(),
                  sched->k_threads_nr, sched->u_threads_nr,
                  sched->current->trdid , sched->idle->trdid );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
                      thread_type_str( thread->type ), thread->trdid, thread->process->pid,
                      thread->entry_func, thread->blocked );
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
                      thread_type_str( thread->type ), thread->trdid, thread->process->pid,
                      thread->entry_func, thread->blocked );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()
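
// Illustrative usage: besides the CONFIG_SCHED_DEBUG hook in sched_yield(), any
// kernel code path can dump a core's scheduler state for debug:
//
//     sched_display( CURRENT_THREAD->core->lid );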