Ignore:
Timestamp:
Oct 4, 2018, 11:47:36 PM (6 years ago)
Author:
alain
Message:

Complete restructuration of kernel locks.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/thread.h

    r527 r564  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016)
     5 *         Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    3232#include <list.h>
    3333#include <hal_context.h>
    34 #include <spinlock.h>
     34#include <remote_busylock.h>
    3535#include <core.h>
    3636#include <chdev.h>
     
    9292#define THREAD_BLOCKED_ISR       0x0400  /*! thread DEV wait ISR                      */
    9393#define THREAD_BLOCKED_WAIT      0x0800  /*! thread wait child process termination    */
     94#define THREAD_BLOCKED_LOCK      0x1000  /*! thread wait queuelock or rwlock          */
    9495
    9596/***************************************************************************************
     
    119120 * This TRDID is computed by the process_register_thread() function, when the user
    120121 * thread is registered in the local copy of the process descriptor.
    121  * WARNING : Don't modify the first 4 fields order, as this order is used by the
    122  * hal_kentry assembly code for the TSAR architecture.
     122 *
     123 * WARNING (1) Don't modify the first 4 fields order, as this order is used by the
     124 *             hal_kentry assembly code for some architectures (TSAR).
     125 *
     126 * WARNING (2) Most of the thread state is private and accessed only by this thread,
     127 *             but some fields are shared, and can be modified by other threads.
     128 *             - the "blocked" bit_vector can be modified by another thread
     129 *               running in another cluster (using atomic instructions),
     130 *               to change this thread scheduling status.
     131 *             - the "flags" bit_vector can be modified by another thread
     132 *               running in another cluster (using atomic instructions),
     133 *               to register requests such as ACK or DELETE.
     134 *             - the "join_xp" field can be modified by the joining thread,
     135 *               and this rendez-vous is protected by the dedicated "join_lock".
     136 *
     137 * WARNING (3) When this thread is blocked on a shared resource (queuelock, condvar,
     138 *             or chdev), it registers in the associated waiting queue, using the
     139 *             "wait_list" (local list) or "wait_xlist" (trans-cluster list) fields.
    123140 **************************************************************************************/
    124141
     
    144161    xptr_t              parent;          /*! extended pointer on parent thread        */
    145162
    146     remote_spinlock_t   join_lock;       /*! lock protecting the join/exit            */
     163    remote_busylock_t   join_lock;       /*! lock protecting the join/exit            */
    147164    xptr_t              join_xp;         /*! joining/killer thread extended pointer   */
    148165
     
    180197        cxy_t               rpc_client_cxy;  /*! client cluster index (for a RPC thread)  */
    181198
    182     xlist_entry_t       wait_list;       /*! member of threads blocked on same cond   */
    183 
    184     list_entry_t        locks_root;      /*! root of list of locks taken              */
    185     xlist_entry_t       xlocks_root;     /*! root of xlist of remote locks taken      */
    186         uint32_t            local_locks;         /*! number of local locks owned by thread    */
    187         uint32_t            remote_locks;        /*! number of remote locks owned by thread   */
     199    list_entry_t        wait_list;       /*! member of a local waiting queue          */
     200    xlist_entry_t       wait_xlist;      /*! member of a trans-cluster waiting queue  */
     201
     202        uint32_t            busylocks;       /*! number of taken busylocks                */
     203
     204#if DEBUG_BUSYLOCK
     205    xlist_entry_t       busylocks_root;  /*! root of xlist of taken busylocks         */
     206#endif
    188207
    189208        thread_info_t       info;            /*! embedded thread_info_t                   */
     
    311330
    312331/***************************************************************************************
    313  * This function is called by the sched_handle_signals() function to releases
     332 * This low-level function is called by the sched_handle_signals() function to release
    314333 * the physical memory allocated for a thread in a given cluster, when this thread
    315334 * is marked for delete. This include the thread descriptor itself, the associated
     
    363382 **************************************************************************************/
    364383void thread_reset_req_ack( thread_t * target );
    365 
    366 /***************************************************************************************
    367  * This function checks if the calling thread can deschedule.
    368  ***************************************************************************************
    369  * @ returns true if no locks taken.
    370  **************************************************************************************/
    371 inline bool_t thread_can_yield( void );
    372 
    373 /***************************************************************************************
    374  * This function implements the delayed descheduling mechanism : It is called  by
    375  * all lock release functions, and calls the sched_yield() function when all locks
    376  * have been released and the calling thread THREAD_FLAG_SCHED flag is set.
    377  **************************************************************************************/
    378 void thread_check_sched( void );
    379384
    380385/***************************************************************************************
     
    417422 * thread descriptor identified by the <thread_xp> argument.
    418423 * We need an extended pointer, because the client thread of an I/O operation on a
    419  * given device is not in the same cluster as the associated device descriptor.
     424 * given device is generally not in the same cluster as the associated server thread.
    420425 * WARNING : this function does not reschedule the remote thread.
    421426 * The scheduling can be forced by sending an IPI to the core running the remote thread.
     
    432437 ***************************************************************************************
    433438 * @ thread   : local pointer on target thread.
    434  * @ is_user  : update user time if non zero / update kernel time if zero
     439 * @ is_user  : update user time if true / update kernel time if false
    435440 **************************************************************************************/
    436441void thread_time_update( thread_t * thread,
    437                          uint32_t   is_user );
     442                         bool_t     is_user );
    438443
    439444/***************************************************************************************
     
    449454                        trdid_t  trdid );
    450455
     456/***************************************************************************************
     457 * This function checks that the thread identified by the <thread> argument does not
     458 * hold any busylock (local or remote).
     459 * If the xlist of taken busylocks is not empty, it displays the set of taken locks,
     460 * and triggers a kernel panic.
     461 ***************************************************************************************
     462 * @ thread    : local pointer on target thread.
     463 * @ func_str  : faulty function name.
     464 **************************************************************************************/
     465void thread_assert_can_yield( thread_t    * thread,
     466                              const char  * func_str );
     467
     468/***************************************************************************************
     469 * This debug function displays the list of busylocks currently owned by a thread
     470 * identified by the DEBUG_BUSYLOCK_THREAD_XP parameter.
     471 * It is called each time the target thread acquires or releases a busylock
     472 * (local or remote). It is never called when DEBUG_BUSYLOCK_THREAD_XP == 0.
     473 ***************************************************************************************
     474 * @ lock_type  : type of acquired / released busylock.
     475 * @ is_acquire : change is an acquire when true / change is a release when false.
     476 **************************************************************************************/
     477void thread_display_busylocks( uint32_t lock_type,
     478                               bool_t   is_acquire );
     479
     480
    451481
    452482#endif  /* _THREAD_H_ */
Note: See TracChangeset for help on using the changeset viewer.