source: trunk/kernel/kern/thread.c @ 296

Last change on this file since 296 was 296, checked in by alain, 7 years ago

Several modifs in the generic scheduler and in the hal_context to
fix the context switch mechanism.

File size: 27.0 KB
RevLine 
[1]1/*
2 * thread.c -  implementation of thread operations (user & kernel)
[171]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
[23]5 *         Alain Greiner (2016,2017)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
[5]9 * This file is part of ALMOS-MKH.
[1]10 *
[5]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[5]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
[5]21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[1]26#include <hal_types.h>
27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
31#include <memcpy.h>
32#include <printk.h>
33#include <cluster.h>
34#include <process.h>
35#include <scheduler.h>
[188]36#include <dev_pic.h>
[1]37#include <core.h>
38#include <list.h>
39#include <xlist.h>
40#include <page.h>
41#include <kmem.h>
42#include <ppm.h>
43#include <thread.h>
44
45//////////////////////////////////////////////////////////////////////////////////////
46// Extern global variables
47//////////////////////////////////////////////////////////////////////////////////////
48
49extern process_t      process_zero;
50
51//////////////////////////////////////////////////////////////////////////////////////
[16]52// This function returns a printable string for the thread type.
[1]53//////////////////////////////////////////////////////////////////////////////////////
[5]54char * thread_type_str( uint32_t type )
55{
[296]56    if     ( type == THREAD_USER   ) return "USR";
[16]57    else if( type == THREAD_RPC    ) return "RPC";
58    else if( type == THREAD_DEV    ) return "DEV";
[296]59    else if( type == THREAD_KERNEL ) return "KER";
60    else if( type == THREAD_IDLE   ) return "IDL";
[5]61    else                             return "undefined";
62}
63
[1]64/////////////////////////////////////////////////////////////////////////////////////
[14]65// This static function allocates physical memory for a thread descriptor.
66// It can be called by the three functions:
[1]67// - thread_user_create()
[14]68// - thread_user_fork()
[1]69// - thread_kernel_create()
70/////////////////////////////////////////////////////////////////////////////////////
[14]71// @ return pointer on thread descriptor if success / return NULL if failure.
[1]72/////////////////////////////////////////////////////////////////////////////////////
[14]73static thread_t * thread_alloc()
[1]74{
[23]75        page_t       * page;   // pointer on page descriptor containing thread descriptor
[171]76        kmem_req_t     req;    // kmem request
[1]77
78        // allocates memory for thread descriptor + kernel stack
79        req.type  = KMEM_PAGE;
[14]80        req.size  = CONFIG_THREAD_DESC_ORDER;
[1]81        req.flags = AF_KERNEL | AF_ZERO;
82        page      = kmem_alloc( &req );
83
[14]84    // return pointer on new thread descriptor
[23]85        if( page == NULL ) return NULL;
[53]86    else               return (thread_t *)ppm_page2vaddr( page );
[171]87}
[1]88
[14]89/////////////////////////////////////////////////////////////////////////////////////
[23]90// This static function releases the physical memory for a thread descriptor.
[53]91// It is called by the three functions:
[23]92// - thread_user_create()
93// - thread_user_fork()
94// - thread_kernel_create()
95/////////////////////////////////////////////////////////////////////////////////////
96// @ thread  : pointer on thread descriptor.
97/////////////////////////////////////////////////////////////////////////////////////
98static void thread_release( thread_t * thread )
99{
100    kmem_req_t   req;
101
102    req.type  = KMEM_PAGE;
[53]103    req.ptr   = ppm_vaddr2page( thread );
[23]104    kmem_free( &req );
105}
106
107/////////////////////////////////////////////////////////////////////////////////////
[14]108// This static function initializes a thread descriptor (kernel or user).
109// It can be called by the four functions:
110// - thread_user_create()
111// - thread_user_fork()
112// - thread_kernel_create()
113// - thread_user_init()
114/////////////////////////////////////////////////////////////////////////////////////
115// @ thread       : pointer on thread descriptor
116// @ process      : pointer on process descriptor.
117// @ type         : thread type.
118// @ func         : pointer on thread entry function.
119// @ args         : pointer on thread entry function arguments.
120// @ core_lid     : target core local index.
121// @ u_stack_base : stack base (user thread only)
122// @ u_stack_size : stack base (user thread only)
123/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            intptr_t        u_stack_base,
                            uint32_t        u_stack_size )
{
    error_t        error;
    trdid_t        trdid;      // allocated thread identifier

        cluster_t    * local_cluster = LOCAL_CLUSTER;

    // register new thread in process descriptor, and get a TRDID
    // (th_lock serialises concurrent registrations in the same process)
    spinlock_lock( &process->th_lock );
    error = process_register_thread( process, thread , &trdid );
    spinlock_unlock( &process->th_lock );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
        return EINVAL;
    }

        // Initialize new thread descriptor
    thread->trdid           = trdid;
        thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;
        thread->core            = &local_cluster->core_tbl[core_lid];
        thread->process         = process;

    // no local lock held yet
    thread->local_locks     = 0;
    list_root_init( &thread->locks_root );

    // no remote lock held yet
    thread->remote_locks    = 0;
    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );

    // user stack (both zero for a kernel thread) ;
    // the kernel stack occupies the thread descriptor block itself
    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->signals         = 0;            // no pending signal
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no fork required
    thread->fork_cxy        = 0;

    // thread is created in the blocked state
    thread->blocked = THREAD_BLOCKED_GLOBAL;

    // reset children list
    xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
    thread->children_nr = 0;

    // reset sched list and brothers list
    list_entry_init( &thread->sched_list );
    xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialise signature
        thread->signature = THREAD_SIGNATURE;

    // update local DQDT
    dqdt_local_update_threads( 1 );

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

        return 0;

} // end thread_init()
203
[1]204/////////////////////////////////////////////////////////
// Creates a new user thread in the local cluster for the process identified
// by <pid> : allocates a stack vseg, a thread descriptor, and CPU / FPU
// contexts. On success, returns 0 and the new descriptor in <new_thread>.
// Returns ENOMEM or EINVAL on failure.
error_t thread_user_create( pid_t             pid,
                            void            * start_func,
                            void            * start_arg,
                            pthread_attr_t  * attr,
                            thread_t       ** new_thread )
{
    error_t        error;
        thread_t     * thread;       // pointer on created thread descriptor
    process_t    * process;      // pointer to local process descriptor
    lid_t          core_lid;     // selected core local index
    vseg_t       * vseg;         // stack vseg

    thread_dmsg("\n[INFO] %s : enters for process %x\n", __FUNCTION__ , pid );

    // get process descriptor local copy
    process = process_get_local_copy( pid );

    if( process == NULL )
    {
                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
               __FUNCTION__ , pid );
        return ENOMEM;
    }

    // select a target core in local cluster
    // (honor the caller-specified core when the CORE_DEFINED attribute is set)
    if( attr->attributes & PT_ATTR_CORE_DEFINED ) core_lid = attr->lid;
    else                                          core_lid = cluster_select_local_core();

    // check core local index
    if( core_lid >= LOCAL_CLUSTER->cores_nr )
    {
            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
               __FUNCTION__ , core_lid );

        return EINVAL;
    }

    // allocate a stack from local VMM
    vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );

    if( vseg == NULL )
    {
            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
                return ENOMEM;
    }

    // allocate memory for thread descriptor
    thread = thread_alloc();

    if( thread == NULL )
    {
            printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        return ENOMEM;
    }

    // initialize thread descriptor
    error = thread_init( thread,
                         process,
                         THREAD_USER,
                         start_func,
                         start_arg,
                         core_lid,
                         vseg->min,
                         vseg->max - vseg->min );

    if( error )
    {
            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return EINVAL;
    }

    // set LOADABLE flag (overwrites the flags reset done by thread_init)
    thread->flags = THREAD_FLAG_LOADABLE;

    // set DETACHED flag if required
    if( attr->attributes & PT_ATTR_DETACH ) thread->flags |= THREAD_FLAG_DETACHED;

    // allocate & initialize CPU context
        error = hal_cpu_context_create( thread );

    if( error )
    {
        // NOTE(review): thread_init registered the thread in the process
        // th_tbl[] and in the core scheduler; this path (and the one below)
        // releases the descriptor without undoing those registrations - TODO confirm
            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

    // allocate & initialize FPU context
    error = hal_fpu_context_create( thread );

    if( error )
    {
            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

    thread_dmsg("\n[INFO] %s : exit / trdid = %x / process %x / core = %d\n",
                __FUNCTION__ , thread->trdid , process->pid , core_lid );

    *new_thread = thread;
        return 0;

}  // end thread_user_create()
314
[23]315//////////////////////////////////////////////
316error_t thread_user_fork( process_t * process,
317                          thread_t ** new_thread )
[1]318{
319    error_t        error;
[14]320        thread_t     * thread;       // pointer on new thread descriptor
[1]321    lid_t          core_lid;     // selected core local index
[23]322        vseg_t       * vseg;         // stack vseg
[1]323
[14]324    thread_dmsg("\n[INFO] %s : enters\n", __FUNCTION__ );
[5]325
[171]326    // allocate a stack from local VMM
[23]327    vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
328
[286]329    if( vseg == NULL )
[23]330    {
331            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
332                return ENOMEM;
[171]333    }
[23]334
[1]335    // select a target core in local cluster
336    core_lid = cluster_select_local_core();
337
338    // get pointer on calling thread descriptor
339    thread_t * this = CURRENT_THREAD;
340
[171]341    // allocate memory for new thread descriptor
[14]342    thread = thread_alloc();
[1]343
[23]344    if( thread == NULL )
345    {
346        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
347        vmm_remove_vseg( vseg );
348        return ENOMEM;
349    }
[14]350
[171]351    // initialize thread descriptor
[14]352    error = thread_init( thread,
353                         process,
354                         THREAD_USER,
355                         this->entry_func,
356                         this->entry_args,
357                         core_lid,
[23]358                         vseg->min,
359                         vseg->max - vseg->min );
[14]360
[23]361    if( error )
[14]362    {
[23]363            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
364        vmm_remove_vseg( vseg );
365        thread_release( thread );
[14]366        return EINVAL;
367    }
368
[1]369    // set ATTACHED flag if set in this thread
[14]370    if( this->flags & THREAD_FLAG_DETACHED ) thread->flags = THREAD_FLAG_DETACHED;
[1]371
[171]372    // allocate & initialize CPU context from calling thread
373        error = hal_cpu_context_copy( thread , this );
[1]374
[23]375    if( error )
376    {
377            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
378        vmm_remove_vseg( vseg );
379        thread_release( thread );
380        return ENOMEM;
381    }
382
[171]383    // allocate & initialize FPU context from calling thread
384        error = hal_fpu_context_copy( thread , this );
[1]385
[23]386    if( error )
387    {
388            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
389        vmm_remove_vseg( vseg );
390        thread_release( thread );
391        return ENOMEM;
392    }
393
[171]394    thread_dmsg("\n[INFO] %s : exit / thread %x for process %x on core %d in cluster %x\n",
[14]395                 __FUNCTION__, thread->trdid, process->pid, core_lid, local_cxy );
[1]396
[14]397    *new_thread = thread;
[1]398        return 0;
[5]399
[296]400}  // end thread_user_fork()
401
[1]402/////////////////////////////////////////////////////////
403error_t thread_kernel_create( thread_t     ** new_thread,
404                              thread_type_t   type,
[171]405                              void          * func,
406                              void          * args,
[1]407                                              lid_t           core_lid )
408{
409    error_t        error;
[14]410        thread_t     * thread;       // pointer on new thread descriptor
[1]411
[296]412    thread_dmsg("\n[INFO] %s : enter / for type %s on core[%x,%d] / cycle %d\n",
413    __FUNCTION__ , thread_type_str( type ) , local_cxy , core_lid , hal_time_stamp() );
[1]414
[171]415    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
[5]416              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
417              __FUNCTION__ , "illegal thread type" );
[1]418
[171]419    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
[5]420            __FUNCTION__ , "illegal core_lid" );
[1]421
[171]422    // allocate memory for new thread descriptor
[14]423    thread = thread_alloc();
424
425    if( thread == NULL ) return ENOMEM;
426
[171]427    // initialize thread descriptor
[14]428    error = thread_init( thread,
429                         &process_zero,
430                         type,
431                         func,
432                         args,
433                         core_lid,
434                         0 , 0 );  // no user stack for a kernel thread
435
[171]436    if( error ) // release allocated memory for thread descriptor
[1]437    {
[185]438        thread_release( thread );
[14]439        return EINVAL;
[1]440    }
441
[171]442    // allocate & initialize CPU context
443        hal_cpu_context_create( thread );
[14]444
[296]445    thread_dmsg("\n[INFO] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n",
446    __FUNCTION__ , thread->trdid , thread_type_str(type) , 
447    local_cxy , core_lid , hal_time_stamp() );
[1]448
[171]449    *new_thread = thread;
[1]450        return 0;
[5]451
[296]452} // end thread_kernel_create()
453
[14]454///////////////////////////////////////////////////
455error_t thread_kernel_init( thread_t      * thread,
456                            thread_type_t   type,
[171]457                            void          * func,
458                            void          * args,
[14]459                                            lid_t           core_lid )
460{
[171]461    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
[14]462              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
463              __FUNCTION__ , "illegal thread type" );
[1]464
[171]465    if( core_lid >= LOCAL_CLUSTER->cores_nr )
[14]466    {
[171]467        printk("\n[PANIC] in %s : illegal core_lid / cores = %d / lid = %d / cxy = %x\n",
[14]468               __FUNCTION__ , LOCAL_CLUSTER->cores_nr , core_lid , local_cxy );
469        hal_core_sleep();
470    }
471
472    error_t  error = thread_init( thread,
473                                  &process_zero,
474                                  type,
475                                  func,
476                                  args,
477                                  core_lid,
478                                  0 , 0 );   // no user stack for a kernel thread
479
480    // allocate & initialize CPU context if success
481    if( error == 0 ) hal_cpu_context_create( thread );
[171]482
[14]483    return error;
[171]484}
[14]485
///////////////////////////////////////////////////////////////////////////////////////
// Destroys a thread descriptor : accumulates its instrumentation counters into
// the owning process, releases CPU / FPU contexts, releases FPU ownership on the
// core if held, unregisters the thread from the process th_tbl[], updates the
// local DQDT, and releases the descriptor memory. The thread must have no
// attached children and must hold no lock.
// TODO: check that all memory dynamically allocated during thread execution
// has been released, using a cache of mmap and malloc requests. [AG]
///////////////////////////////////////////////////////////////////////////////////////
void thread_destroy( thread_t * thread )
{
        uint32_t     tm_start;
        uint32_t     tm_end;
    reg_t        state;

    process_t  * process    = thread->process;
    core_t     * core       = thread->core;

    thread_dmsg("\n[INFO] %s : enters for thread %x in process %x / type = %s\n",
                __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) );

    assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );

    assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" );

    assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" );

        tm_start = hal_get_cycles();

    // update intrumentation values : fold the per-thread counters into
    // the process VMM counters
    uint32_t pgfaults = thread->info.pgfault_nr;
    uint32_t u_errors = thread->info.u_err_nr;
    uint32_t m_errors = thread->info.m_err_nr;

        process->vmm.pgfault_nr += pgfaults;
        process->vmm.u_err_nr   += u_errors;
        process->vmm.m_err_nr   += m_errors;

    // release memory allocated for CPU context and FPU context
        hal_cpu_context_destroy( thread );
        hal_fpu_context_destroy( thread );
       
    // release FPU if required
    // (IRQs masked so the ownership test and reset cannot be interrupted)
    // TODO This should be done before calling thread_destroy()
        hal_disable_irq( &state );
        if( core->fpu_owner == thread )
        {
                core->fpu_owner = NULL;
                hal_fpu_disable();
        }
        hal_restore_irq( state );

    // remove thread from process th_tbl[]
    // TODO This should be done before calling thread_destroy()
    ltid_t ltid = LTID_FROM_TRDID( thread->trdid );

        spinlock_lock( &process->th_lock );
        process->th_tbl[ltid] = XPTR_NULL;
        process->th_nr--;
        spinlock_unlock( &process->th_lock );
       
    // update local DQDT
    dqdt_local_update_threads( -1 );

    // invalidate thread descriptor
        thread->signature = 0;

    // release memory for thread descriptor
    thread_release( thread );

        tm_end = hal_get_cycles();

    // NOTE(review): thread->trdid is read here after thread_release() freed
    // the descriptor; presumably harmless if thread_dmsg compiles out,
    // but a use-after-free otherwise - TODO confirm
        thread_dmsg("\n[INFO] %s : exit for thread %x in process %x / duration = %d\n",
                       __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start );
}
[1]556
557/////////////////////////////////////////////////
558void thread_child_parent_link( xptr_t  xp_parent,
559                               xptr_t  xp_child )
560{
[171]561    // get extended pointers on children list root
562    cxy_t      parent_cxy = GET_CXY( xp_parent );
[1]563    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
564    xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );
565
[171]566    // get extended pointer on children list entry
567    cxy_t      child_cxy  = GET_CXY( xp_child );
[1]568    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
569    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
570
571    // set the link
572    xlist_add_first( root , entry );
573    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
[171]574}
[1]575
576///////////////////////////////////////////////////
577void thread_child_parent_unlink( xptr_t  xp_parent,
578                                 xptr_t  xp_child )
579{
580    // get extended pointer on children list lock
[171]581    cxy_t      parent_cxy = GET_CXY( xp_parent );
[1]582    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
583    xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );
584
[171]585    // get extended pointer on children list entry
586    cxy_t      child_cxy  = GET_CXY( xp_child );
[1]587    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
588    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
589
590    // get the lock
591    remote_spinlock_lock( lock );
592
593    // remove the link
594    xlist_unlink( entry );
595    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );
[171]596
[1]597    // release the lock
598    remote_spinlock_unlock( lock );
599}
600
601/////////////////////////////////////////////////
602inline void thread_set_signal( thread_t * thread,
603                               uint32_t   mask )
604{
605    hal_atomic_or( &thread->signals , mask );
606}
[171]607
[1]608///////////////////////////////////////////////////
609inline void thread_reset_signal( thread_t * thread,
610                                 uint32_t   mask )
611{
612    hal_atomic_and( &thread->signals , ~mask );
613}
[171]614
[1]615//////////////////////////////////
616inline bool_t thread_is_joinable()
617{
618    thread_t * this = CURRENT_THREAD;
619    return( (this->brothers_list.next != XPTR_NULL) &&
620            (this->brothers_list.pred != XPTR_NULL) );
621}
622
623//////////////////////////////////
624inline bool_t thread_is_runnable()
625{
626    thread_t * this = CURRENT_THREAD;
627    return( this->blocked == 0 );
628}
629
630////////////////////////////////
631inline bool_t thread_can_yield()
632{
633    thread_t * this = CURRENT_THREAD;
634    return ( (this->local_locks == 0) && (this->remote_locks == 0) );
635}
636
637///////////////////////////
638bool_t thread_check_sched()
639{
640        thread_t * this = CURRENT_THREAD;
641
642    // check locks count
643        if( (this->local_locks != 0) || (this->remote_locks != 0) ) return false;
644
645    // compute elapsed time, taking into account 32 bits register wrap
646    uint32_t elapsed;
[101]647    uint32_t time_now   = hal_get_cycles();
[1]648    uint32_t time_last  = this->time_last_check;
649    if( time_now < time_last ) elapsed = (0xFFFFFFFF - time_last) + time_now;
650        else                       elapsed = time_now - time_last;
651
652    // update thread time
653    this->time_last_check = time_now;
654
655        // check elapsed time
656        if( elapsed < CONFIG_CORE_CHECK_EVERY ) return false;
657    else                                    return true;
658}
659
660/////////////////////
// Terminates the calling thread : refuses (EINVAL) when locks are still held,
// otherwise blocks the thread on THREAD_BLOCKED_EXIT (additionally raising
// the KILL signal when the thread is detached) and deschedules.
error_t thread_exit()
{
    reg_t      sr_save;

        thread_t * this = CURRENT_THREAD;

    // test if this thread can be descheduled : it must hold no locks
        if( !thread_can_yield() )
        {
        printk("ERROR in %s : thread %x in process %x on core %d in cluster %x\n"
               " did not released all locks\n",
               __FUNCTION__ , this->trdid , this->process->pid ,
               CURRENT_CORE->lid , local_cxy );
        return EINVAL;
    }

    if( this->flags & THREAD_FLAG_DETACHED )
    {
        // if detached set signal and set blocking cause atomically
        // (IRQs masked so both updates appear as one step on this core)
        hal_disable_irq( &sr_save );
        thread_set_signal( this , THREAD_SIG_KILL );
        thread_block( this , THREAD_BLOCKED_EXIT );
        hal_restore_irq( sr_save );
    }
    else
    {
        // if attached, set blocking cause
        thread_block( this , THREAD_BLOCKED_EXIT );
    }

    // deschedule
    sched_yield( NULL );
    return 0;
}
[1]695
696/////////////////////////////////////
697void thread_block( thread_t * thread,
698                   uint32_t   cause )
699{
[171]700    // set blocking cause
[1]701    hal_atomic_or( &thread->blocked , cause );
[171]702}
[1]703
704////////////////////////////////////
705void thread_unblock( xptr_t   thread,
706                    uint32_t cause )
707{
708    // get thread cluster and local pointer
[171]709    cxy_t      cxy = GET_CXY( thread );
[1]710    thread_t * ptr = (thread_t *)GET_PTR( thread );
711
712    // reset blocking cause
713    hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
[171]714}
[1]715
716/////////////////////////////////////
// Requests asynchronous destruction of the <target> thread : raises the KILL
// signal, blocks the target globally, then sends an IPI to force a scheduling
// point on the core running the target.
void thread_kill( thread_t * target )
{
    // set SIG_KILL signal in target thread descriptor
    thread_set_signal( target , THREAD_SIG_KILL );

    // set the global blocked bit in target thread descriptor.
    thread_block( target , THREAD_BLOCKED_GLOBAL );

    // send an IPI to schedule the target thread core.
    dev_pic_send_ipi( local_cxy , target->core->lid );
}
[1]728
[14]729///////////////////////
// Body of the idle thread : endlessly puts the core to sleep and,
// on wake-up, forces a scheduling point.
void thread_idle_func()
{
#if CONFIG_IDLE_DEBUG
    // lid is only needed by the debug messages below.
    // NOTE(review): assumes idle_dmsg() expands to nothing when
    // CONFIG_IDLE_DEBUG is unset, otherwise <lid> would be undeclared - TODO confirm
    lid_t  lid = CURRENT_CORE->lid;
#endif

    while( 1 )
    {
        idle_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",
                    __FUNCTION__ , local_cxy , lid , hal_get_cycles() );

        // force core to sleeping state
        hal_core_sleep();

        idle_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n",
                    __FUNCTION__ , local_cxy , lid , hal_get_cycles() );

        // force scheduling
        sched_yield( NULL );
   }
}
[1]751
[16]752/////////////////////////////////////////////////
// Should update the user-mode execution time of <thread>.
// Not implemented yet : only prints a warning.
void thread_user_time_update( thread_t * thread )
{
    // TODO
    printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}
[1]758
[16]759///////////////////////////////////////////////////
// Should update the kernel-mode execution time of <thread>.
// Not implemented yet : only prints a warning.
void thread_kernel_time_update( thread_t * thread )
{
    // TODO
    printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}
765
766////////////////////////////////////////////////
// Should handle the pending signals of <thread>.
// Not implemented yet : only prints a warning.
void thread_signals_handle( thread_t * thread )
{
    // TODO
    printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}
772
[23]773/////////////////////////////////////
// Returns an extended pointer on the thread identified by (<pid>, <trdid>),
// or XPTR_NULL when the process or the thread cannot be found.
// The target cluster identifier is encoded in the TRDID.
xptr_t thread_get_xptr( pid_t    pid,
                        trdid_t  trdid )
{
    cxy_t         target_cxy;          // target thread cluster identifier
    ltid_t        target_thread_ltid;  // target thread local index
    thread_t    * target_thread_ptr;   // target thread local pointer
    xptr_t        target_process_xp;   // extended pointer on target process descriptor
    process_t   * target_process_ptr;  // local pointer on target process descriptor
    pid_t         target_process_pid;  // target process identifier
    xlist_entry_t root;                // root of list of process in target cluster
    xptr_t        lock_xp;             // extended pointer on lock protecting  this list

    // get target cluster identifier and local thread identifier
    target_cxy         = CXY_FROM_TRDID( trdid );
    target_thread_ltid = LTID_FROM_TRDID( trdid );

    // get root of list of process descriptors in target cluster
    // (copied locally; the loop below still follows the remote links)
    hal_remote_memcpy( XPTR( local_cxy  , &root ),
                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
                       sizeof(xlist_entry_t) );

    // get extended pointer on lock protecting the list of processes
    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // take the lock protecting the list of processes in target cluster
    remote_spinlock_lock( lock_xp );

    // loop on list of process in target cluster to find the PID process
    xptr_t  iter;
    bool_t  found = false;
    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
    {
        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
        target_process_ptr = (process_t *)GET_PTR( target_process_xp );
        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
        if( target_process_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes in target cluster
    remote_spinlock_unlock( lock_xp );

    // check target thread found
    if( found == false )
    {
        return XPTR_NULL;
    }

    // get target thread local pointer
    // NOTE(review): target_process_ptr is dereferenced after the list lock
    // has been released; assumes the process descriptor cannot vanish
    // meanwhile - TODO confirm
    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );

    if( target_thread_ptr == NULL )
    {
        return XPTR_NULL;
    }

    return XPTR( target_cxy , target_thread_ptr );
}
[23]836
Note: See TracBrowser for help on using the repository browser.