Changeset 564 for trunk/kernel/kern/thread.h
- Timestamp:
- Oct 4, 2018, 11:47:36 PM (6 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/kern/thread.h
r527 r564 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016 )5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 32 32 #include <list.h> 33 33 #include <hal_context.h> 34 #include < spinlock.h>34 #include <remote_busylock.h> 35 35 #include <core.h> 36 36 #include <chdev.h> … … 92 92 #define THREAD_BLOCKED_ISR 0x0400 /*! thread DEV wait ISR */ 93 93 #define THREAD_BLOCKED_WAIT 0x0800 /*! thread wait child process termination */ 94 #define THREAD_BLOCKED_LOCK 0x1000 /*! thread wait queuelock or rwlock */ 94 95 95 96 /*************************************************************************************** … … 119 120 * This TRDID is computed by the process_register_thread() function, when the user 120 121 * thread is registered in the local copy of the process descriptor. 121 * WARNING : Don't modify the first 4 fields order, as this order is used by the 122 * hal_kentry assembly code for the TSAR architecture. 122 * 123 * WARNING (1) Don't modify the first 4 fields order, as this order is used by the 124 * hal_kentry assembly code for some architectures (TSAR). 125 * 126 * WARNING (2) Most of the thread state is private and accessed only by this thread, 127 * but some fields are shared, and can be modified by other threads. 128 * - the "blocked" bit_vector can be modified by another thread 129 * running in another cluster (using atomic instructions), 130 * to change this thread scheduling status. 131 * - the "flags" bit_vector can be modified by another thread 132 * running in another cluster (using atomic instructions), 133 * to register requests such as ACK or DELETE. 134 * - the "join_xp" field can be modified by the joining thread, 135 * and this rendez-vous is protected by the dedicated "join_lock". 
136 * 137 * WARNING (3) When this thread is blocked on a shared resource (queuelock, condvar, 138 * or chdev), it registers in the associated waiting queue, using the 139 * "wait_list" (local list) or "wait_xlist" (trans-cluster list) fields. 123 140 **************************************************************************************/ 124 141 … 144 161 xptr_t parent; /*! extended pointer on parent thread */ 145 162 146 remote_ spinlock_t join_lock; /*! lock protecting the join/exit */163 remote_busylock_t join_lock; /*! lock protecting the join/exit */ 147 164 xptr_t join_xp; /*! joining/killer thread extended pointer */ 148 165 … 180 197 cxy_t rpc_client_cxy; /*! client cluster index (for a RPC thread) */ 181 198 182 xlist_entry_t wait_list; /*! member of threads blocked on same cond */ 183 184 list_entry_t locks_root; /*! root of list of locks taken */ 185 xlist_entry_t xlocks_root; /*! root of xlist of remote locks taken */ 186 uint32_t local_locks; /*! number of local locks owned by thread */ 187 uint32_t remote_locks; /*! number of remote locks owned by thread */ 199 list_entry_t wait_list; /*! member of a local waiting queue */ 200 xlist_entry_t wait_xlist; /*! member of a trans-cluster waiting queue */ 201 202 uint32_t busylocks; /*! number of taken busylocks */ 203 204 #if DEBUG_BUSYLOCK 205 xlist_entry_t busylocks_root; /*! root of xlist of taken busylocks */ 206 #endif 188 207 189 208 thread_info_t info; /*! embedded thread_info_t */ … 311 330 312 331 /*************************************************************************************** 313 * This function is called by the sched_handle_signals() function to release332 * This low-level function is called by the sched_handle_signals() function to release 314 333 * the physical memory allocated for a thread in a given cluster, when this thread 315 334 * is marked for delete. 
This includes the thread descriptor itself, the associated … 363 382 **************************************************************************************/ 364 383 void thread_reset_req_ack( thread_t * target ); 365 366 /***************************************************************************************367 * This function checks if the calling thread can deschedule.368 ***************************************************************************************369 * @ returns true if no locks taken.370 **************************************************************************************/371 inline bool_t thread_can_yield( void );372 373 /***************************************************************************************374 * This function implements the delayed descheduling mechanism : It is called by375 * all lock release functions, and calls the sched_yield() function when all locks376 * have been released and the calling thread THREAD_FLAG_SCHED flag is set.377 **************************************************************************************/378 void thread_check_sched( void );379 384 380 385 /*************************************************************************************** … 417 422 * thread descriptor identified by the <thread_xp> argument. 418 423 * We need an extended pointer, because the client thread of an I/O operation on a 419 * given device is not in the same cluster as the associated device descriptor.424 * given device is generally not in the same cluster as the associated server thread. 420 425 * WARNING : this function does not reschedule the remote thread. 421 426 * The scheduling can be forced by sending an IPI to the core running the remote thread. … 432 437 *************************************************************************************** 433 438 * @ thread : local pointer on target thread. 
434 * @ is_user : update user time if non zero / update kernel time if zero439 * @ is_user : update user time if true / update kernel time if false 435 440 **************************************************************************************/ 436 441 void thread_time_update( thread_t * thread, 437 uint32_tis_user );442 bool_t is_user ); 438 443 439 444 /*************************************************************************************** … 449 454 trdid_t trdid ); 450 455 456 /*************************************************************************************** 457 * This function checks that the thread identified by the <thread> argument does not hold 458 * any busylock (local or remote). 459 * If the xlist of taken busylocks is not empty, it displays the set of taken locks, 460 * and makes a kernel panic. 461 *************************************************************************************** 462 * @ thread : local pointer on target thread. 463 * @ func_str : faulty function name. 464 **************************************************************************************/ 465 void thread_assert_can_yield( thread_t * thread, 466 const char * func_str ); 467 468 /*************************************************************************************** 469 * This debug function displays the list of busylocks currently owned by a thread 470 * identified by the DEBUG_BUSYLOCK_THREAD_XP parameter. 471 * It is called each time the target thread acquires or releases a busylock 472 * (local or remote). It is never called when DEBUG_BUSYLOCK_THREAD_XP == 0. 473 *************************************************************************************** 474 * @ lock_type : type of acquired / released busylock. 475 * @ is_acquire : change is an acquire when true / change is a release when false. 
476 **************************************************************************************/ 477 void thread_display_busylocks( uint32_t lock_type, 478 bool_t is_acquire ); 479 480 451 481 452 482 #endif /* _THREAD_H_ */
Note: See TracChangeset
for help on using the changeset viewer.