/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file

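///////////////////////////////////////////////////////////////////////////////////////////
// This function initializes the scheduler attached to one core: it resets the thread
// counters, the user and kernel list roots, and the "last scheduled" pointers, and
// clears the req_ack_pending flag. The idle thread pointer is set later, in kernel_init().
///////////////////////////////////////////////////////////////////////////////////////////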
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr    = 0;
    sched->k_threads_nr    = 0;

    sched->current         = CURRENT_THREAD;
    sched->idle            = NULL;               // initialized in kernel_init()
    sched->u_last          = NULL;               // initialized in sched_register_thread()
    sched->k_last          = NULL;               // initialized in sched_register_thread()

    // initialize the lists of threads
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    sched->req_ack_pending = false;              // no pending request

}  // end sched_init()

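///////////////////////////////////////////////////////////////////////////////////////////
// This function registers a (user or kernel) thread in the scheduler of its core:
// it links the thread into the relevant list, updates the matching thread counter, and
// initializes the "last scheduled" pointer when the list was previously empty.
///////////////////////////////////////////////////////////////////////////////////////////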
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

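///////////////////////////////////////////////////////////////////////////////////////////
// This function implements the core scheduling policy: it scans the kernel threads list
// first, then the user threads list, in round-robin order, starting after the last
// scheduled thread in each list. A kernel RPC or DEV thread is selected only when it is
// not blocked and has pending work in its RPC FIFO or device waiting queue; a user
// thread is selected as soon as it is not blocked. The idle thread is returned when no
// other thread is runnable.
///////////////////////////////////////////////////////////////////////////////////////////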
//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // analyse kernel thread type
            switch( thread->type )
            {
                case THREAD_RPC:  // if not blocked and RPC FIFO non-empty
                if( (thread->blocked == 0) &&
                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                case THREAD_DEV:  // if not blocked and waiting queue non-empty
                if( (thread->blocked == 0) &&
                    (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
                {
                    spinlock_unlock( &sched->lock );
                    return thread;
                }
                break;

                default:
                break;
            }
        } // end loop on kernel threads
    } // end if kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on user threads
    } // end if user threads

    // third : return idle thread if no other runnable thread
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

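///////////////////////////////////////////////////////////////////////////////////////////
// This function handles the pending requests registered in the user threads attached
// to one core: for a THREAD_FLAG_REQ_ACK request it atomically decrements the response
// counter and resets the flag; for a THREAD_FLAG_REQ_DELETE request it releases the FPU
// if owned, detaches the thread from its parent if required, removes the thread from
// the scheduler, and destroys it (destroying the process descriptor as well when this
// was the last thread).
///////////////////////////////////////////////////////////////////////////////////////////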
///////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    list_entry_t * next;
    thread_t     * thread;
    process_t    * process;

    scheduler_t  * sched = &core->scheduler;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // scan all user threads
    // (LIST_FOREACH cannot be used here : the REQ_DELETE case below can unlink
    //  and destroy the current element, so its successor must be saved first)
    for( iter = sched->u_root.next ; iter != &sched->u_root ; iter = next )
    {
        next   = iter->next;
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {
            // check that the target thread is actually blocked
            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
            __FUNCTION__ , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {
            // get thread process descriptor
            process = thread->process;

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x must be deleted / cycle %d\n",
__FUNCTION__ , thread , process->pid , cycle );
#endif
            // release FPU if required
            if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;

            // detach thread from parent if attached
            if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
            thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );

            // remove thread from scheduler (scheduler lock already taken)
            uint32_t threads_nr = sched->u_threads_nr;
            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );
            sched->u_threads_nr = threads_nr - 1;

            // update u_last when it points to the removed thread,
            // because sched_select() restarts its scan from this entry
            if( sched->u_last == &thread->sched_list )
            {
                if( threads_nr == 1 ) sched->u_last = NULL;
                else                  sched->u_last = thread->sched_list.pred;
            }
            list_unlink( &thread->sched_list );

            // delete thread
            thread_destroy( thread );

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
__FUNCTION__ , thread , process->pid , cycle );
#endif
            // destroy process descriptor if no more threads
            if( process->th_nr == 0 )
            {

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
uint32_t pid = process->pid;   // saved before the descriptor is released
#endif
                // delete process
                process_destroy( process );

#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
__FUNCTION__ , pid , cycle );
#endif
            }
        }
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

}  // end sched_handle_signals()

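///////////////////////////////////////////////////////////////////////////////////////////
// This function performs a thread switch on the calling core: it selects the next
// thread to execute, and makes a context switch when the selected thread is not the
// calling thread. The yield is simply delayed (setting the SCHED flag) when the calling
// thread still holds local or remote locks. After the switch, the pending REQ_ACK and
// REQ_DELETE requests are handled for the threads attached to this core. The <cause>
// argument is only used by the debug messages.
///////////////////////////////////////////////////////////////////////////////////////////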
////////////////////////////////
void sched_yield( char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

#if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
sched_display( core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread descriptor
    hal_disable_irq( &CURRENT_THREAD->save_sr );

    // select the next thread to execute
    next = sched_select( sched );

    // check that the next thread kernel stack has not overflowed
    assert( (next->signature == THREAD_SIGNATURE),
    __FUNCTION__ , "kernel stack overflow for thread %x\n", next );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core),
    __FUNCTION__ , "next core %x != current core %x\n", next->core, current->core );

    // check next thread not blocked when type != IDLE
    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

#if CONFIG_DEBUG_SCHED_YIELD
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_YIELD < cycle )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next   , thread_type_str(next->type)   , next->process->pid   , next->trdid , cycle );
#endif

        // update scheduler state
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

#if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_SCHED_YIELD < cycle )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, cycle );
#endif

    }

    // handle pending requests for all threads executing on this core
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()
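
///////////////////////////////////////////////////////////////////////////////////////////
// Illustrative call sites for sched_yield() (a sketch, not code from this file: the
// <cause> strings and the thread_block() step are hypothetical, since <cause> is only
// used by the debug messages above):
//
//     thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );  // mark thread as blocked
//     sched_yield( "blocked on IO" );                      // give the core away
//
//     sched_yield( "end of time slice" );                  // e.g. from the clock handler
///////////////////////////////////////////////////////////////////////////////////////////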
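///////////////////////////////////////////////////////////////////////////////////////////
// This debug function displays on the kernel TXT0 terminal the state of all kernel and
// user threads attached to one core, identified by its local index <lid>.
///////////////////////////////////////////////////////////////////////////////////////////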
///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid);

    core_t       * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if (thread->type == THREAD_DEV)
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags, thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked, thread->flags );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()

---|