source: trunk/kernel/kern/scheduler.h @ 469

Last change on this file since 469 was 457, checked in by alain, 6 years ago

This version modifies the exec syscall and fixes a large number of small bugs.
The version number has been updated (0.1).

File size: 7.3 KB
/*
 * scheduler.h - Core scheduler definition.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _SCHEDULER_H_
#define _SCHEDULER_H_

#include <hal_kernel_types.h>
#include <list.h>
#include <spinlock.h>

/****  Forward declarations  ****/

struct core_s;
struct thread_s;

/*********************************************************************************************
 * This structure defines the scheduler associated with a given core.
 ********************************************************************************************/

typedef struct scheduler_s
{
    spinlock_t        lock;            /*! lock protecting lists of threads                 */
    uint16_t          u_threads_nr;    /*! total number of attached user threads            */
    uint16_t          k_threads_nr;    /*! total number of attached kernel threads          */
    list_entry_t      u_root;          /*! root of list of user threads                     */
    list_entry_t      k_root;          /*! root of list of kernel threads                   */
    list_entry_t    * u_last;          /*! pointer on list_entry for last executed u_thread */
    list_entry_t    * k_last;          /*! pointer on list_entry for last executed k_thread */
    struct thread_s * idle;            /*! pointer on idle thread                           */
    struct thread_s * current;         /*! pointer on current running thread                */
    volatile bool_t   req_ack_pending; /*! serializes ack requests when true                */
    bool_t            trace;           /*! context switches trace activated if true         */
}
scheduler_t;
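
/*********************************************************************************************
 * Illustrative sketch (not part of the original header) : each thread descriptor is assumed
 * to embed a list_entry_t field (called "sched_list" below, the actual field name may
 * differ), linked into either u_root or k_root when the thread is registered. The u_last
 * and k_last pointers record the list_entry of the last executed thread in each list, so
 * that sched_select() can resume its round-robin scan just after it. Assuming <list.h>
 * provides a list_add_last() helper, attaching a user thread would look like:
 *
 *     list_add_last( &sched->u_root , &thread->sched_list );
 *     sched->u_threads_nr++;
 ********************************************************************************************/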

/*********************************************************************************************
 * This function initialises the scheduler for a given core.
 ********************************************************************************************/
void sched_init( struct core_s * core );

/*********************************************************************************************
 * This function atomically registers a new thread in a given core's scheduler.
 *********************************************************************************************
 * @ core    : local pointer on the core descriptor.
 * @ thread  : local pointer on the thread descriptor.
 ********************************************************************************************/
void sched_register_thread( struct core_s   * core,
                            struct thread_s * thread );
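
/*********************************************************************************************
 * Usage sketch (illustrative, not from this file) : presumably called from the thread
 * creation path, once the new thread descriptor has been initialised on its target core:
 *
 *     sched_register_thread( core , new_thread );   // thread becomes visible to sched_select()
 ********************************************************************************************/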

/*********************************************************************************************
 * This function is the only method to make a context switch. It is called in case of TICK,
 * or when a thread explicitly requests a scheduling.
 * It handles the pending signals for all threads attached to the core running the calling
 * thread, and calls the sched_select() function to select a new thread.
 * The cause argument is only used for debug by the sched_display() function, and
 * indicates the scheduling cause.
 *********************************************************************************************
 * @ cause    : character string defining the scheduling cause.
 ********************************************************************************************/
void sched_yield( char * cause );
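
/*********************************************************************************************
 * Usage sketch (illustrative, not from this file) : a thread that cannot make progress
 * blocks itself, then releases the core. The cause string is free-form, and only used by
 * sched_display() when the trace flag is set:
 *
 *     thread_block( ... );            // hypothetical blocking primitive, name assumed
 *     sched_yield( "WAIT_SEM" );      // descheduling cause, used for debug trace only
 ********************************************************************************************/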

/*********************************************************************************************
 * This function scans all threads attached to a given scheduler, and executes the relevant
 * actions for pending THREAD_FLAG_REQ_ACK or THREAD_FLAG_REQ_DELETE requests.
 * It is called by the sched_yield() function, with IRQs disabled.
 * - REQ_ACK : it checks that the target thread is blocked, decrements the response counter
 *   to acknowledge the client thread, and resets the pending request.
 * - REQ_DELETE : it detaches the target thread from its parent if attached, detaches it
 *   from the process, removes it from the scheduler, releases the memory allocated to the
 *   thread descriptor, and destroys the process descriptor if the target thread was the
 *   last thread.
 *********************************************************************************************
 * @ core    : local pointer on the core descriptor.
 ********************************************************************************************/
void sched_handle_signals( struct core_s * core );

/*********************************************************************************************
 * This function does NOT modify the scheduler state.
 * It just selects a thread in the list of attached threads, implementing the following
 * three-step policy:
 * 1) It scans the list of kernel threads, starting from the thread following the last
 *    executed one, and returns the first runnable thread found : not IDLE, not blocked,
 *    client queue not empty. It can be the current thread.
 * 2) If no kernel thread is found, it scans the list of user threads, starting from the
 *    thread following the last executed one, and returns the first runnable thread found :
 *    not blocked. It can be the current thread.
 * 3) If no runnable thread is found, it returns the idle thread.
 *********************************************************************************************
 * @ sched   : local pointer on the scheduler.
 * @ returns a pointer on the selected thread descriptor.
 ********************************************************************************************/
struct thread_s * sched_select( struct scheduler_s * sched );
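
/*********************************************************************************************
 * Simplified sketch of the selection policy described above (illustrative only, not the
 * actual implementation; scan_list() stands for a hypothetical helper that walks a circular
 * list starting just after the "last" entry and returns the first runnable thread, or NULL):
 *
 *     thread = scan_list( &sched->k_root , sched->k_last );      // 1) kernel threads first
 *     if( thread == NULL ) thread = scan_list( &sched->u_root , sched->u_last );  // 2) user
 *     if( thread == NULL ) thread = sched->idle;                 // 3) fall back to idle
 *     return thread;
 ********************************************************************************************/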

/*********************************************************************************************
 * This debug function displays on TXT0 the internal state of a local scheduler,
 * identified by the core local index <lid>.
 *********************************************************************************************
 * @ lid      : local index of target core.
 ********************************************************************************************/
void sched_display( lid_t lid );

/*********************************************************************************************
 * This debug function displays on TXT0 the internal state of a scheduler,
 * identified by the target cluster identifier <cxy> and the core local index <lid>.
 * It can be called by a thread running in any cluster, as it uses remote accesses
 * to scan the scheduler's local lists of threads.
 *********************************************************************************************
 * @ cxy      : target cluster identifier.
 * @ lid      : local index of target core.
 ********************************************************************************************/
void sched_remote_display( cxy_t  cxy,
                           lid_t  lid );

#endif  /* _SCHEDULER_H_ */