Changeset 407 for trunk/kernel/kern/scheduler.c
Timestamp: Nov 7, 2017, 3:08:12 PM
File: 1 edited
Legend:
  (no prefix)  Unmodified
  +            Added
  -            Removed
trunk/kernel/kern/scheduler.c
r406 → r407

  #include <kernel_config.h>
  #include <hal_types.h>
+ #include <hal_switch.h>
  #include <hal_irqmask.h>
  #include <hal_context.h>
… …

  extern chdev_directory_t chdev_dir;         // allocated in kernel_init.c file
+ extern uint32_t          switch_save_sr[];  // allocated in kernel_init.c file

  ////////////////////////////////
… …
  } // end sched_remove()

- ///////////////////////////////////////////
- void sched_kill_thread( thread_t * thread )
- {
-     // check thread locks
-     if( thread_can_yield() == false )
-     {
-         panic("thread %x in process %x on core[%x][%d]"
-               " did not released all locks",
-               thread->trdid , thread->process->pid,
-               local_cxy , thread->core->lid );
-     }
-
-     // remove thread from scheduler
-     sched_remove_thread( thread );
-
-     // reset the THREAD_SIG_KILL signal
-     thread_reset_signal( thread , THREAD_SIG_KILL );
-
- } // end sched_kill_thread()
-
  ////////////////////////////////////////
  thread_t * sched_select( core_t * core )
… …
      scheduler_t * sched = &core->scheduler;

-     sched_dmsg("\n[DMSG] %s : enter core[%x,%d] / cycle %d\n",
-                __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );
-
      // take lock protecting sheduler lists
      spinlock_lock( &sched->lock );
… …
      list_entry_t * last;

-     // first : scan the kernel threads list if not empty
+     // first loop : scan the kernel threads list if not empty
      if( list_is_empty( &sched->k_root ) == false )
      {
… …
          thread = LIST_ELEMENT( current , thread_t , sched_list );

-         // return thread if not idle_thread and runnable
-         if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) )
+         // analyse kernel thread type
+         switch( thread->type )
          {
-             // release lock
-             spinlock_unlock( &sched->lock );
-
-             sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
-                        __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
-
-             return thread;
-         }
+             case THREAD_IDLE:    // skip IDLE thread
+             break;
+
+             case THREAD_RPC:     // RPC thread if non blocked and FIFO non-empty
+             if( (thread->blocked == 0) &&
+                 (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
+             {
+                 spinlock_unlock( &sched->lock );
+                 return thread;
+             }
+             break;
+
+             default:             // DEV thread if non blocked
+             if( thread->blocked == 0 )
+             {
+                 spinlock_unlock( &sched->lock );
+                 return thread;
+             }
+             break;
+         }  // end switch type
          }
          while( current != last );
      }

-     // second : scan the user threads list if not empty
+     // second loop : scan the user threads list if not empty
      if( list_is_empty( &sched->u_root ) == false )
      {
… …
              if( thread->blocked == 0 )
              {
-                 // release lock
                  spinlock_unlock( &sched->lock );
-
-                 sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
-                            __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
                  return thread;
              }
… …
      }

-     // release lock
+     // third : return idle thread if no runnable thread
      spinlock_unlock( &sched->lock );
-
-     sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
-                __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );
-
-     // third : return idle thread if no runnable thread
      return sched->idle;

  } // end sched_select()
+
+ ///////////////////////////////////////////
+ void sched_kill_thread( thread_t * thread )
+ {
+     // check locks
+     if( thread_can_yield() == false )
+     {
+         panic("locks not released for thread %x in process %x on core[%x][%d]",
+               thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
+     }
+
+     // remove thread from scheduler
+     sched_remove_thread( thread );
+
+     // reset the THREAD_SIG_KILL signal
+     thread_reset_signal( thread , THREAD_SIG_KILL );
+
+     // detached thread can suicide
+     if( thread->signals & THREAD_SIG_SUICIDE )
+     {
+         assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
+                 "thread must be detached in case of suicide\n" );
+
+         // remove thread from process
+         process_remove_thread( thread );
+
+         // release memory for thread descriptor
+         thread_destroy( thread );
+     }
+ } // end sched_kill_thread()

  //////////////////////////////////////////
… …
      scheduler_t * sched = &core->scheduler;

-     sched_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d]\n",
-                __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
-
      // take lock protecting threads lists
      spinlock_lock( &sched->lock );
… …
      {
          thread = LIST_ELEMENT( iter , thread_t , sched_list );
-         if( thread->signals & THREAD_SIG_KILL ) sched_kill_thread( thread );
+         if( thread->signals ) sched_kill_thread( thread );

… …
      {
          thread = LIST_ELEMENT( iter , thread_t , sched_list );
-         if( thread->signals & THREAD_SIG_KILL ) sched_kill_thread( thread );
+         if( thread->signals ) sched_kill_thread( thread );

… …
      spinlock_unlock( &sched->lock );

-     sched_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d]\n",
-                __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
-
  } // end sched_handle_signals()

- ///////////////////////////////////
- void sched_yield( thread_t * next )
- {
-     reg_t sr_save;
-
+ //////////////////////////////////////
+ void sched_update( thread_t * current,
+                    thread_t * next )
+ {
+     scheduler_t * sched = &current->core->scheduler;
+
+     if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
+     else                               sched->k_last = &current->sched_list;
+
+     sched->current = next;
+ }
+
+ //////////////////
+ void sched_yield()
+ {
+     thread_t    * next;
      thread_t    * current = CURRENT_THREAD;
-     core_t      * core    = current->core;
-     scheduler_t * sched   = &core->scheduler;
-
-     sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] enter / cycle %d\n",
-                __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+
+ #if( CONFIG_SCHED_DEBUG & 0x1 )
+     if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
+ #endif

      // delay the yield if current thread has locks
-     if( thread_can_yield() == false )
+     if( (current->local_locks != 0) || (current->remote_locks != 0) )
      {
          current->flags |= THREAD_FLAG_SCHED;
… …
      }

-     // first loop on all threads to handle pending signals
-     sched_handle_signals( core );
-
-     // second loop on threads to select next thread if required
-     if( next == NULL ) next = sched_select( core );
+     // loop on threads to select next thread
+     next = sched_select( current->core );

      // check next thread attached to same core as the calling thread
-     assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");
-
-     // check next thread not blocked
-     assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");
+     assert( (next->core == current->core), __FUNCTION__ ,
+             "next core != current core\n");
+
+     // check next thread not blocked when type != IDLE
+     assert( (next->blocked == 0) || (next->type = THREAD_IDLE) , __FUNCTION__ ,
+             "next thread %x (%s) is blocked on core[%x,%d]\n",
+             next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );

      // switch contexts and update scheduler state if next != current
      if( next != current )
      {
-         sched_dmsg("\n[DMSG] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
-                    __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid,
-                    next->trdid, thread_type_str(next->type), hal_time_stamp() );
-
-         // calling thread desactivate IRQs
-         hal_disable_irq( &sr_save );
+         // current thread desactivate IRQs
+         hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] );
+
+         sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n",
+                    __FUNCTION__, local_cxy, current->core->lid,
+                    current, thread_type_str(current->type), current->process->pid, current->trdid,
+                    next , thread_type_str(next->type) , next->process->pid , next->trdid,
+                    hal_time_stamp() );

          // update scheduler
-         if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
-         else                               sched->k_last = &current->sched_list;
-         sched->current = next;
-
-         // handle FPU
+         sched_update( current , next );
+
+         // handle FPU ownership
          if( next->type == THREAD_USER )
          {
-             if( next == core->fpu_owner ) hal_fpu_enable();
-             else                          hal_fpu_disable();
+             if( next == current->core->fpu_owner ) hal_fpu_enable();
+             else                                   hal_fpu_disable();

          }

-         // switch contexts
-         hal_cpu_context_switch( current , next );
-
-         // restore IRQs when calling thread resume
-         hal_restore_irq( sr_save );
+         // switch CPU from calling thread context to new thread context
+         hal_do_cpu_switch( current->cpu_context, next->cpu_context );
+
+         // restore IRQs when next thread resume
+         hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );
      }
      else
      {
-         sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] continue / cycle %d\n",
-                    __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+
+         sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n",
+                    __FUNCTION__, local_cxy, current->core->lid, current->trdid,
+                    thread_type_str(current->type) , hal_time_stamp() );
+
      }
  } // end sched_yield()

- ////////////////////
- void sched_display()
+
+ ///////////////////////////////
+ void sched_display( lid_t lid )
  {
      list_entry_t * iter;
… …
      uint32_t       save_sr;

-     thread_t * current = CURRENT_THREAD;
-     core_t   * core    = current->core;
+     if( lid >= LOCAL_CLUSTER->cores_nr )
+     {
+         printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
+                __FUNCTION__ , lid , local_cxy );
+         return;
+     }
+
+     core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
      scheduler_t * sched = &core->scheduler;

      // get pointers on TXT0 chdev
-     xptr_t    txt0_xp  = chdev_dir.txt[0];
+     xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
      cxy_t     txt0_cxy = GET_CXY( txt0_xp );
      chdev_t * txt0_ptr = GET_PTR( txt0_xp );
… …
      remote_spinlock_lock_busy( lock_xp , &save_sr );

-     nolock_printk("\n***** scheduler state for core[%x,%d]\n"
-                   "kernel_threads = %d / user_threads = %d / current = %x\n",
-                   local_cxy , core->lid,
-                   sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
+     nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
+                   "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",
+                   local_cxy , core->lid, hal_time_stamp(),
+                   sched->k_threads_nr, sched->u_threads_nr,
+                   sched->current->trdid , sched->idle->trdid );

      // display kernel threads
… …
      {
          thread = LIST_ELEMENT( iter , thread_t , sched_list );
-         nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+         nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
                        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
                        thread->entry_func, thread->blocked );
… …
      {
          thread = LIST_ELEMENT( iter , thread_t , sched_list );
-         nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+         nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
                        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
                        thread->entry_func, thread->blocked );
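
For readers tracking the caller-visible API changes in this changeset: sched_yield() no longer takes a candidate thread (selection is done internally via sched_select()), and sched_display() now takes the local index of the core whose scheduler state should be dumped. The sketch below is purely illustrative and not part of r407; the helper name example_tick() and the exact header names are assumptions.

// Hypothetical caller sketch (not part of the changeset).
// Assumed headers: hal_types.h for lid_t, scheduler.h for the prototypes.
#include <hal_types.h>
#include <scheduler.h>

static void example_tick( lid_t lid )
{
    sched_display( lid );   // after r407: dump scheduler state of core[local_cxy, lid]
    sched_yield();          // after r407: no argument, the scheduler picks the next thread
}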