/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author  Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// the header names were missing in this copy of the file :
// the list below is inferred from the symbols actually used
#include <kernel_config.h>
#include <hal_types.h>
#include <hal_special.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <spinlock.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <scheduler.h>

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    // initialise the lock protecting the threads lists
    spinlock_init( &sched->lock );

    sched->u_threads_nr = 0;
    sched->k_threads_nr = 0;

    sched->current = NULL;
    sched->idle    = NULL;
    sched->u_last  = NULL;
    sched->k_last  = NULL;

    // initialise threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // register thread
    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialise the round-robin anchor if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialise the round-robin anchor if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t      * core  = thread->core;
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // remove thread
    if( type == THREAD_USER )
    {
        // repoint the round-robin anchor if it points on the removed thread
        if( sched->u_last == &thread->sched_list )
        {
            if( sched->u_threads_nr == 1 )                       sched->u_last = NULL;
            else if( thread->sched_list.pred == &sched->u_root ) sched->u_last = thread->sched_list.next;
            else                                                 sched->u_last = thread->sched_list.pred;
        }

        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;
    }
    else // kernel thread
    {
        // repoint the round-robin anchor if it points on the removed thread
        if( sched->k_last == &thread->sched_list )
        {
            if( sched->k_threads_nr == 1 )                       sched->k_last = NULL;
            else if( thread->sched_list.pred == &sched->k_root ) sched->k_last = thread->sched_list.next;
            else                                                 sched->k_last = thread->sched_list.pred;
        }

        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check thread locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]"
               " has not released all locks\n",
               __FUNCTION__ , thread->trdid , thread->process->pid,
               local_cxy , thread->core->lid );
        hal_core_sleep();
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

}  // end sched_kill_thread()
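/*
 * Illustration only (hypothetical helper, not part of ALMOS-MKH) : the two
 * election loops in sched_select() below both follow the round-robin scan
 * sketched here. It assumes the list.h semantics used in this file :
 * list_next( root , entry ) returns NULL when the root is reached, and
 * root->next is the first real entry of a non-empty circular list.
 */
__attribute__((unused))
static thread_t * sched_scan_sketch( list_entry_t * root,
                                     list_entry_t * last )
{
    list_entry_t * current = last;
    thread_t     * thread;

    // an empty list contains no candidate
    if( list_is_empty( root ) ) return NULL;

    do
    {
        // get next entry, wrapping through the root
        current = list_next( root , current );
        if( current == NULL ) current = root->next;

        // return the first runnable thread
        thread = LIST_ELEMENT( current , thread_t , sched_list );
        if( thread->blocked == 0 ) return thread;
    }
    while( current != last );

    return NULL;  // all threads blocked
}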
////////////////////////////////////////
thread_t * sched_select( core_t * core )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;

    scheduler_t * sched = &core->scheduler;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first : scan the kernel threads list, if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the list root that does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if not blocked
            if( thread->blocked == 0 )
            {
                // advance the round-robin anchor and release lock
                sched->k_last = current;
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // second : scan the user threads list, if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the list root that does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if not blocked
            if( thread->blocked == 0 )
            {
                // advance the round-robin anchor and release lock
                sched->u_last = current;
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // release lock
    spinlock_unlock( &sched->lock );

    // third : return idle thread if no runnable thread
    return sched->idle;

}  // end sched_select()

//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;
    scheduler_t  * sched = &core->scheduler;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // handle user threads : the scan restarts after each removal, because
    // sched_kill_thread() unlinks the victim from the list being scanned ;
    // the lock is released around the call, because sched_kill_thread()
    // takes the scheduler lock itself, and checks that the calling thread
    // holds no lock
restart_user:
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals & THREAD_SIG_KILL )
        {
            spinlock_unlock( &sched->lock );
            sched_kill_thread( thread );
            spinlock_lock( &sched->lock );
            goto restart_user;
        }
    }

    // handle kernel threads, with the same protocol
restart_kernel:
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals & THREAD_SIG_KILL )
        {
            spinlock_unlock( &sched->lock );
            sched_kill_thread( thread );
            spinlock_lock( &sched->lock );
            goto restart_kernel;
        }
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_handle_signals()

//////////////////
void sched_yield()
{
    reg_t      sr_save;
    thread_t * next;

    thread_t * current = CURRENT_THREAD;
    core_t   * core    = current->core;

    // check calling thread released all locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]"
               " has not released all locks at cycle %d\n",
               __FUNCTION__, current->trdid, current->process->pid,
               local_cxy , core->lid , hal_time_stamp() );
        hal_core_sleep();
    }

    // deactivate IRQs
    hal_disable_irq( &sr_save );

    // first : handle pending signals for all threads
    sched_handle_signals( core );

    // second : select next thread to run
    next = sched_select( core );

    // check stack overflow for selected thread
    if( next->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : detected stack overflow for thread %x in process %x"
               " on core[%x][%d]\n",
               __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid );
        hal_core_sleep();
    }

    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
               __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );

    // switch contexts if new thread
    if( next != current )
    {
        hal_cpu_context_save( current );
        hal_cpu_context_restore( next );
    }

    // restore IRQs
    hal_restore_irq( sr_save );

    if( current->type != THREAD_USER ) return;

    // lazy FPU management : enable the FPU only if the resuming thread owns it
    if( current == core->fpu_owner )  hal_fpu_enable();
    else                              hal_fpu_disable();

}  // end sched_yield()
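/*
 * Illustration only (hypothetical kernel-thread body, not part of ALMOS-MKH) :
 * a typical cooperative use of sched_yield(). The caller must hold no lock,
 * since sched_yield() panics through the thread_can_yield() check otherwise.
 */
__attribute__((unused))
static void kernel_thread_sketch( void )
{
    while( 1 )
    {
        // do one unit of work, releasing every lock taken

        // then give the core to another runnable thread ;
        // execution resumes here when this thread is selected again
        sched_yield();
    }
}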
//////////////////////////////////////
void sched_switch_to( thread_t * new )
{
    reg_t       sr_save;

    thread_t  * current = CURRENT_THREAD;
    core_t    * core    = current->core;
    process_t * process = current->process;

    // check calling thread released all locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x in process %x on core %d in cluster %x"
               " has not released all locks\n",
               __FUNCTION__, current->trdid, process->pid, core->lid, local_cxy );
        hal_core_sleep();
    }

    // check new thread attached to same core as the calling thread
    if( new->core != current->core )
    {
        printk("\n[PANIC] in %s : new thread %x is attached to core %d"
               " different from core %d of calling thread %x\n",
               __FUNCTION__, new->trdid, new->core->lid, core->lid , current->trdid );
        hal_core_sleep();
    }

    // check new thread not blocked
    if( new->blocked != 0 )
    {
        printk("\n[PANIC] in %s for thread %x in process %x on core %d in cluster %x"
               " : new thread %x is blocked\n",
               __FUNCTION__, current->trdid, process->pid , core->lid, local_cxy , new->trdid );
        hal_core_sleep();
    }

    // check stack overflow for new thread
    if( new->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : stack overflow for new thread %x in process %x"
               " on core %d in cluster %x\n",
               __FUNCTION__, new->trdid, process->pid , core->lid , local_cxy );
        hal_core_sleep();
    }

    // deactivate IRQs
    hal_disable_irq( &sr_save );

    // handle pending signals for all threads
    sched_handle_signals( core );

    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
               __FUNCTION__, core->lid, local_cxy, current->trdid, new->trdid );

    // switch contexts
    hal_cpu_context_save( current );
    hal_cpu_context_restore( new );

    // restore IRQs
    hal_restore_irq( sr_save );

    if( current->type != THREAD_USER ) return;

    // lazy FPU management : enable the FPU only if the resuming thread owns it
    if( current == core->fpu_owner )  hal_fpu_enable();
    else                              hal_fpu_disable();

}  // end sched_switch_to()
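/*
 * Illustration only (hypothetical caller, not part of ALMOS-MKH) :
 * sched_switch_to() is a directed yield. The target must be attached to
 * the same core and must be runnable, and the caller must hold no lock,
 * otherwise the checks above panic. A real caller would make the target
 * runnable through a thread_unblock() style primitive ; the direct write
 * to the blocked field below is only for the sketch.
 */
__attribute__((unused))
static void directed_yield_sketch( thread_t * target )
{
    // make the target runnable (illustrative direct write)
    target->blocked = 0;

    // give the core directly to the target thread
    sched_switch_to( target );
}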