source: trunk/kernel/kern/thread.c @ 438

Last change on this file since 438 was 438, checked in by alain, 6 years ago

Fix a bug in scheduler related to RPC blocking.

File size: 38.4 KB
/*
 * thread.c -  implementation of thread operations (user & kernel)
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_context.h>
#include <hal_irqmask.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <memcpy.h>
#include <printk.h>
#include <cluster.h>
#include <process.h>
#include <scheduler.h>
#include <dev_pic.h>
#include <core.h>
#include <list.h>
#include <xlist.h>
#include <page.h>
#include <kmem.h>
#include <ppm.h>
#include <thread.h>

//////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////////

extern process_t      process_zero;

//////////////////////////////////////////////////////////////////////////////////////
// This function returns a printable string for the thread type.
//////////////////////////////////////////////////////////////////////////////////////
char * thread_type_str( uint32_t type )
{
    if     ( type == THREAD_USER   ) return "USR";
    else if( type == THREAD_RPC    ) return "RPC";
    else if( type == THREAD_DEV    ) return "DEV";
    else if( type == THREAD_IDLE   ) return "IDL";
    else                             return "undefined";
}

/////////////////////////////////////////////////////////////////////////////////////
// This static function allocates physical memory for a thread descriptor.
// It can be called by the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
/////////////////////////////////////////////////////////////////////////////////////
// @ return pointer on thread descriptor if success / return NULL if failure.
/////////////////////////////////////////////////////////////////////////////////////
static thread_t * thread_alloc()
{
    page_t       * page;   // pointer on page descriptor containing thread descriptor
    kmem_req_t     req;    // kmem request

    // allocate memory for thread descriptor + kernel stack
    req.type  = KMEM_PAGE;
    req.size  = CONFIG_THREAD_DESC_ORDER;
    req.flags = AF_KERNEL | AF_ZERO;
    page      = kmem_alloc( &req );

    if( page == NULL ) return NULL;

    // return pointer on new thread descriptor
    xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
    return (thread_t *)GET_PTR( base_xp );

}  // end thread_alloc()


/////////////////////////////////////////////////////////////////////////////////////
// This static function releases the physical memory for a thread descriptor.
// It is called by the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
/////////////////////////////////////////////////////////////////////////////////////
// @ thread  : pointer on thread descriptor.
/////////////////////////////////////////////////////////////////////////////////////
static void thread_release( thread_t * thread )
{
    kmem_req_t   req;

    xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );

    req.type  = KMEM_PAGE;
    req.ptr   = GET_PTR( base_xp );
    kmem_free( &req );
}

/////////////////////////////////////////////////////////////////////////////////////
// This static function initializes a thread descriptor (kernel or user).
// It can be called by the four functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
// - thread_idle_init()
// It updates the local DQDT.
/////////////////////////////////////////////////////////////////////////////////////
// @ thread       : pointer on thread descriptor
// @ process      : pointer on process descriptor.
// @ type         : thread type.
// @ func         : pointer on thread entry function.
// @ args         : pointer on thread entry function arguments.
// @ core_lid     : target core local index.
// @ u_stack_base : stack base (user thread only)
// @ u_stack_size : stack size (user thread only)
/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            intptr_t        u_stack_base,
                            uint32_t        u_stack_size )
{
    error_t        error;
    trdid_t        trdid;      // allocated thread identifier

    cluster_t    * local_cluster = LOCAL_CLUSTER;

    // register new thread in process descriptor, and get a TRDID
    error = process_register_thread( process, thread , &trdid );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
        return EINVAL;
    }

    // compute thread descriptor size without kernel stack
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;

    // initialize new thread descriptor
    thread->trdid           = trdid;
    thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;
    thread->core            = &local_cluster->core_tbl[core_lid];
    thread->process         = process;

    thread->local_locks     = 0;
    thread->remote_locks    = 0;

#if CONFIG_LOCKS_DEBUG
    list_root_init( &thread->locks_root );
    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
#endif

    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread + desc_size;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no user defined placement for fork
    thread->fork_cxy        = 0;            // user defined target cluster for fork
    thread->blocked         = THREAD_BLOCKED_GLOBAL;

    // reset children list
    xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
    thread->children_nr = 0;

    // reset sched list and brothers list
    list_entry_init( &thread->sched_list );
    xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialize join_lock
    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );

    // initialize signature
    thread->signature = THREAD_SIGNATURE;

    // FIXME call hal_thread_init() function to initialise the save_sr field
    thread->save_sr = 0xFF13;

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    // update DQDT
    dqdt_update_threads( 1 );

    return 0;

} // end thread_init()
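
//////////////////////////////////////////////////////////////////////////////////////
// Editor note (inferred from thread_alloc() and thread_init() above, to be
// checked against the thread_t definition): the thread descriptor and the
// kernel stack share one block of CONFIG_THREAD_DESC_ORDER pages. The kernel
// stack starts right after the <signature> field, so a kernel stack overflow
// is likely to corrupt the signature, which can then serve as an overflow canary.
//
//    thread               thread + desc_size       thread + CONFIG_THREAD_DESC_SIZE
//      |<---- desc_size ---->|<-------------- k_stack_size -------------->|
//      | thread_t  signature |                kernel stack                |
//////////////////////////////////////////////////////////////////////////////////////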

/////////////////////////////////////////////////////////
error_t thread_user_create( pid_t             pid,
                            void            * start_func,
                            void            * start_arg,
                            pthread_attr_t  * attr,
                            thread_t       ** new_thread )
{
    error_t        error;
    thread_t     * thread;       // pointer on created thread descriptor
    process_t    * process;      // pointer to local process descriptor
    lid_t          core_lid;     // selected core local index
    vseg_t       * vseg;         // stack vseg

    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );

#if DEBUG_THREAD_USER_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, pid , cycle );
#endif

    // get process descriptor local copy
    process = process_get_local_copy( pid );
    if( process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
               __FUNCTION__ , pid );
        return ENOMEM;
    }

    // select a target core in local cluster
    if( attr->attributes & PT_ATTR_CORE_DEFINED )
    {
        core_lid = attr->lid;
        if( core_lid >= LOCAL_CLUSTER->cores_nr )
        {
            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
            __FUNCTION__ , core_lid );
            return EINVAL;
        }
    }
    else
    {
        core_lid = cluster_select_local_core();
    }

    // allocate a stack from local VMM
    vseg = vmm_create_vseg( process,
                            VSEG_TYPE_STACK,
                            0,                 // size unused
                            0,                 // length unused
                            0,                 // file_offset unused
                            0,                 // file_size unused
                            XPTR_NULL,         // mapper_xp unused
                            local_cxy );

    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
        return ENOMEM;
    }

    // allocate memory for thread descriptor
    thread = thread_alloc();

    if( thread == NULL )
    {
        printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        return ENOMEM;
    }

    // initialize thread descriptor
    error = thread_init( thread,
                         process,
                         THREAD_USER,
                         start_func,
                         start_arg,
                         core_lid,
                         vseg->min,
                         vseg->max - vseg->min );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return EINVAL;
    }

    // set DETACHED flag if required
    if( attr->attributes & PT_ATTR_DETACH )
    {
        thread->flags |= THREAD_FLAG_DETACHED;
    }

    // allocate & initialize CPU context
    if( hal_cpu_context_create( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

    // allocate FPU context
    if( hal_fpu_context_alloc( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

#if DEBUG_THREAD_USER_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
#endif

    *new_thread = thread;
    return 0;

}  // end thread_user_create()
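
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (editor addition, not in the original file): a plausible call
// site in a pthread_create() syscall handler. Since thread_init() creates the
// thread in the THREAD_BLOCKED_GLOBAL state, the caller presumably unblocks it
// once it is ready to run:
//
//      thread_t * new;
//      error = thread_user_create( pid , entry , arg , attr , &new );
//      if( error == 0 )
//          thread_unblock( XPTR( local_cxy , new ) , THREAD_BLOCKED_GLOBAL );
//////////////////////////////////////////////////////////////////////////////////////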

///////////////////////////////////////////////////////
error_t thread_user_fork( xptr_t      parent_thread_xp,
                          process_t * child_process,
                          thread_t ** child_thread )
{
    error_t        error;
    thread_t     * child_ptr;        // local pointer on local child thread
    lid_t          core_lid;         // selected core local index

    thread_t     * parent_ptr;       // local pointer on remote parent thread
    cxy_t          parent_cxy;       // parent thread cluster
    process_t    * parent_process;   // local pointer on parent process
    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT

    void         * func;             // parent thread entry_func
    void         * args;             // parent thread entry_args
    intptr_t       base;             // parent thread u_stack_base
    uint32_t       size;             // parent thread u_stack_size
    uint32_t       flags;            // parent thread flags
    vpn_t          vpn_base;         // parent thread stack vpn_base
    vpn_t          vpn_size;         // parent thread stack vpn_size
    reg_t        * uzone;            // parent thread pointer on uzone

    vseg_t       * vseg;             // child thread STACK vseg

#if DEBUG_THREAD_USER_FORK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
#endif

    // select a target core in local cluster
    core_lid = cluster_select_local_core();

    // get cluster and local pointer on parent thread descriptor
    parent_cxy = GET_CXY( parent_thread_xp );
    parent_ptr = (thread_t *)GET_PTR( parent_thread_xp );

    // get relevant fields from parent thread
    func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
    size  = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
    flags =           hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags         ));
    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));

    vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
    vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;

    // get pointer on parent process in parent thread cluster
    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
                                                        &parent_ptr->process ) );

    // get extended pointer on parent GPT in parent thread cluster
    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );

    // allocate memory for child thread descriptor
    child_ptr = thread_alloc();
    if( child_ptr == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
        return -1;
    }

    // initialize thread descriptor
    error = thread_init( child_ptr,
                         child_process,
                         THREAD_USER,
                         func,
                         args,
                         core_lid,
                         base,
                         size );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
        thread_release( child_ptr );
        return EINVAL;
    }

    // return child pointer
    *child_thread = child_ptr;

    // set detached flag if required
    if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags |= THREAD_FLAG_DETACHED;

    // update uzone pointer in child thread descriptor
    child_ptr->uzone_current = (char *)((intptr_t)uzone +
                                        (intptr_t)child_ptr -
                                        (intptr_t)parent_ptr );

    // allocate CPU context for child thread
    if( hal_cpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }

    // allocate FPU context for child thread
    if( hal_fpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }

    // create and initialize STACK vseg
    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate STACK vseg\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }
    vseg_init( vseg,
               VSEG_TYPE_STACK,
               base,
               size,
               vpn_base,
               vpn_size,
               0, 0, XPTR_NULL,                         // not a file vseg
               local_cxy );

    // register STACK vseg in local child VSL
    vseg_attach( &child_process->vmm , vseg );

    // copy all valid STACK GPT entries
    vpn_t          vpn;
    bool_t         mapped;
    ppn_t          ppn;
    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
    {
        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
                                  parent_gpt_xp,
                                  vpn,
                                  true,                 // set cow
                                  &ppn,
                                  &mapped );
        if( error )
        {
            vseg_detach( &child_process->vmm , vseg );
            vseg_free( vseg );
            thread_release( child_ptr );
            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
            return -1;
        }

        // increment pending forks counter for the page if mapped
        if( mapped )
        {
            xptr_t   page_xp  = ppm_ppn2page( ppn );
            cxy_t    page_cxy = GET_CXY( page_xp );
            page_t * page_ptr = (page_t *)GET_PTR( page_xp );
            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );

#if (DEBUG_THREAD_USER_FORK & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
__FUNCTION__, CURRENT_THREAD, vpn );
#endif

        }
    }
    // set COW flag for all mapped entries of STACK vseg in parent thread GPT
    hal_gpt_set_cow( parent_gpt_xp,
                     vpn_base,
                     vpn_size );

#if DEBUG_THREAD_USER_FORK
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
#endif

    return 0;

}  // end thread_user_fork()
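
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (editor addition): expected call site in the fork() path, after
// the child process descriptor has been built in the local cluster. As for
// thread_user_create(), the child thread starts blocked on THREAD_BLOCKED_GLOBAL
// and must be unblocked by the caller:
//
//      thread_t * child;
//      error = thread_user_fork( parent_thread_xp , child_process , &child );
//      if( error == 0 )
//          thread_unblock( XPTR( local_cxy , child ) , THREAD_BLOCKED_GLOBAL );
//////////////////////////////////////////////////////////////////////////////////////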

/////////////////////////////////////////////////////////
error_t thread_kernel_create( thread_t     ** new_thread,
                              thread_type_t   type,
                              void          * func,
                              void          * args,
                              lid_t           core_lid )
{
    error_t        error;
    thread_t     * thread;       // pointer on new thread descriptor

    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
    __FUNCTION__ , "illegal thread type" );

    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
            __FUNCTION__ , "illegal core_lid" );

#if DEBUG_THREAD_KERNEL_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KERNEL_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread_type_str(type), cycle );
#endif

    // allocate memory for new thread descriptor
    thread = thread_alloc();

    if( thread == NULL ) return ENOMEM;

    // initialize thread descriptor
    error = thread_init( thread,
                         &process_zero,
                         type,
                         func,
                         args,
                         core_lid,
                         0 , 0 );  // no user stack for a kernel thread

    if( error ) // release allocated memory for thread descriptor
    {
        thread_release( thread );
        return EINVAL;
    }

    // allocate & initialize CPU context
    hal_cpu_context_create( thread );

#if DEBUG_THREAD_KERNEL_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KERNEL_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
#endif

    *new_thread = thread;
    return 0;

} // end thread_kernel_create()
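
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (editor addition): creating an RPC server thread on core 0 of
// the local cluster. rpc_thread_func is a hypothetical entry point; only the
// calling convention comes from this file:
//
//      thread_t * rpc_thread;
//      error = thread_kernel_create( &rpc_thread , THREAD_RPC ,
//                                    &rpc_thread_func , NULL , 0 );
//      if( error == 0 )
//          thread_unblock( XPTR( local_cxy , rpc_thread ) , THREAD_BLOCKED_GLOBAL );
//////////////////////////////////////////////////////////////////////////////////////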

/////////////////////////////////////////////////
error_t thread_idle_init( thread_t      * thread,
                          thread_type_t   type,
                          void          * func,
                          void          * args,
                          lid_t           core_lid )
{
    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );

    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , __FUNCTION__ , "illegal core index" );

    error_t  error = thread_init( thread,
                                  &process_zero,
                                  type,
                                  func,
                                  args,
                                  core_lid,
                                  0 , 0 );   // no user stack for a kernel thread

    // allocate & initialize CPU context if success
    if( error == 0 ) hal_cpu_context_create( thread );

    return error;

}  // end thread_idle_init()

///////////////////////////////////////////////////////////////////////////////////////
// TODO: check that all memory dynamically allocated during thread execution
// has been released, using a cache of mmap and malloc requests. [AG]
///////////////////////////////////////////////////////////////////////////////////////
void thread_destroy( thread_t * thread )
{
    reg_t        save_sr;

    process_t  * process    = thread->process;
    core_t     * core       = thread->core;

#if DEBUG_THREAD_DESTROY
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
#endif

    assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
    assert( (thread->local_locks == 0) , __FUNCTION__ , "some local locks not released" );

    assert( (thread->remote_locks == 0) , __FUNCTION__ , "some remote locks not released" );

    // update instrumentation values
    process->vmm.pgfault_nr += thread->info.pgfault_nr;

    // release memory allocated for CPU context and FPU context
    hal_cpu_context_destroy( thread );
    if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );

    // release FPU ownership if required
    hal_disable_irq( &save_sr );
    if( core->fpu_owner == thread )
    {
        core->fpu_owner = NULL;
        hal_fpu_disable();
    }
    hal_restore_irq( save_sr );

    // remove thread from process th_tbl[]
    process_remove_thread( thread );

    // update DQDT
    dqdt_update_threads( -1 );

    // invalidate thread descriptor
    thread->signature = 0;

    // release memory for thread descriptor
    thread_release( thread );

#if DEBUG_THREAD_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
#endif

}   // end thread_destroy()
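
//////////////////////////////////////////////////////////////////////////////////////
// Editor note (inferred, to be confirmed against the scheduler code):
// thread_destroy() is not called by thread_kill() directly. thread_kill() only
// sets the THREAD_FLAG_REQ_DELETE flag, and the scheduler of the core running
// the target thread is assumed to call thread_destroy() when it observes this
// flag, once the thread is no longer running.
//////////////////////////////////////////////////////////////////////////////////////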

/////////////////////////////////////////////////
void thread_child_parent_link( xptr_t  xp_parent,
                               xptr_t  xp_child )
{
    // get extended pointers on children list root
    cxy_t      parent_cxy = GET_CXY( xp_parent );
    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );

    // get extended pointer on children list entry
    cxy_t      child_cxy  = GET_CXY( xp_child );
    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );

    // set the link
    xlist_add_first( root , entry );
    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );

}  // end thread_child_parent_link()

///////////////////////////////////////////////////
void thread_child_parent_unlink( xptr_t  xp_parent,
                                 xptr_t  xp_child )
{
    // get extended pointer on children list lock
    cxy_t      parent_cxy = GET_CXY( xp_parent );
    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );

    // get extended pointer on children list entry
    cxy_t      child_cxy  = GET_CXY( xp_child );
    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );

    // get the lock
    remote_spinlock_lock( lock );

    // remove the link
    xlist_unlink( entry );
    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );

    // release the lock
    remote_spinlock_unlock( lock );

}  // thread_child_parent_unlink()

//////////////////////////////////////////////////
inline void thread_set_req_ack( thread_t * target,
                                uint32_t * rsp_count )
{
    reg_t    save_sr;   // for critical section

    // get pointer on target thread scheduler
    scheduler_t * sched = &target->core->scheduler;

    // wait scheduler ready to handle a new request
    while( sched->req_ack_pending ) asm volatile( "nop" );

    // enter critical section
    hal_disable_irq( &save_sr );

    // set request in target thread scheduler
    sched->req_ack_pending = true;

    // set ack request in target thread "flags"
    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );

    // set pointer on responses counter in target thread
    target->ack_rsp_count = rsp_count;

    // exit critical section
    hal_restore_irq( save_sr );

    hal_fence();

}  // thread_set_req_ack()
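
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (editor addition): a requester typically shares one response
// counter between several target threads, then polls it until every scheduler
// has acknowledged. The decrement itself is assumed to be done by the target
// schedulers when they handle the request:
//
//      uint32_t count = nb_targets;               // one response per target
//      thread_set_req_ack( target , &count );     // repeated for each target
//      while( count ) asm volatile( "nop" );      // same polling idiom as above
//////////////////////////////////////////////////////////////////////////////////////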

/////////////////////////////////////////////////////
inline void thread_reset_req_ack( thread_t * target )
{
    reg_t    save_sr;   // for critical section

    // get pointer on target thread scheduler
    scheduler_t * sched = &target->core->scheduler;

    // check signal pending in scheduler
    assert( sched->req_ack_pending , __FUNCTION__ , "no pending signal" );

    // enter critical section
    hal_disable_irq( &save_sr );

    // reset signal in scheduler
    sched->req_ack_pending = false;

    // reset signal in thread "flags"
    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );

    // reset pointer on responses counter
    target->ack_rsp_count = NULL;

    // exit critical section
    hal_restore_irq( save_sr );

    hal_fence();

}  // thread_reset_req_ack()

////////////////////////////////
inline bool_t thread_can_yield()
{
    thread_t * this = CURRENT_THREAD;
    return (this->local_locks == 0) && (this->remote_locks == 0);
}

/////////////////////////
void thread_check_sched()
{
    thread_t * this = CURRENT_THREAD;

    if( (this->local_locks == 0) &&
        (this->remote_locks == 0) &&
        (this->flags & THREAD_FLAG_SCHED) )
    {
        this->flags &= ~THREAD_FLAG_SCHED;
        sched_yield( "delayed scheduling" );
    }

}  // end thread_check_sched()

//////////////////////////////////////
void thread_block( xptr_t   thread_xp,
                   uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread_xp );
    thread_t * ptr = GET_PTR( thread_xp );

    // set blocking cause
    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
    hal_fence();

#if DEBUG_THREAD_BLOCK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_BLOCK < cycle )
printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
#endif

#if (DEBUG_THREAD_BLOCK & 1)
if( DEBUG_THREAD_BLOCK < cycle )
sched_display( ptr->core->lid );
#endif

} // end thread_block()

////////////////////////////////////////////
uint32_t thread_unblock( xptr_t   thread_xp,
                         uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread_xp );
    thread_t * ptr = GET_PTR( thread_xp );

    // reset blocking cause
    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
    hal_fence();

#if DEBUG_THREAD_BLOCK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_BLOCK < cycle )
printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
#endif

#if (DEBUG_THREAD_BLOCK & 1)
if( DEBUG_THREAD_BLOCK < cycle )
sched_display( ptr->core->lid );
#endif

    // return a non zero value if the cause bit is modified
    return( previous & cause );

}  // end thread_unblock()
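
//////////////////////////////////////////////////////////////////////////////////////
// Usage pattern (taken from thread_kill() below): thread_block() only sets the
// blocking cause, so a thread that blocks itself must also deschedule:
//
//      thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_JOIN );
//      sched_yield( "waiting for the joining thread" );
//////////////////////////////////////////////////////////////////////////////////////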

////////////////////////////////////
void thread_kill( xptr_t  target_xp,
                  bool_t  is_exit,
                  bool_t  is_forced )
{
    reg_t       save_sr;                // for critical section
    bool_t      attached;               // target thread in attached mode
    bool_t      join_done;              // joining thread arrived first
    xptr_t      killer_xp;              // extended pointer on killer thread (this)
    thread_t  * killer_ptr;             // pointer on killer thread (this)
    cxy_t       target_cxy;             // target thread cluster
    thread_t  * target_ptr;             // pointer on target thread
    xptr_t      joining_xp;             // extended pointer on joining thread
    thread_t  * joining_ptr;            // pointer on joining thread
    cxy_t       joining_cxy;            // joining thread cluster
    pid_t       target_pid;             // target process PID
    cxy_t       owner_cxy;              // target process owner cluster
    trdid_t     target_trdid;           // target thread identifier
    ltid_t      target_ltid;            // target thread local index
    xptr_t      process_state_xp;       // extended pointer on <term_state> in process

    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
    xptr_t      target_process_xp;      // extended pointer on target thread <process>

    process_t * target_process;         // pointer on target thread process

    // get target thread cluster and pointer
    target_cxy = GET_CXY( target_xp );
    target_ptr = GET_PTR( target_xp );

    // get killer thread pointers
    killer_ptr = CURRENT_THREAD;
    killer_xp  = XPTR( local_cxy , killer_ptr );

#if DEBUG_THREAD_KILL
uint32_t cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KILL < cycle )
printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    // block the target thread
    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );

    // get target thread attached mode
    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0);

    // synchronize with the joining thread
    // if the target thread is attached && not forced

    if( attached  && (is_forced == false) )
    {
        // build extended pointers on target thread join fields
        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );

        // enter critical section
        hal_disable_irq( &save_sr );

        // take the join_lock in target thread descriptor
        remote_spinlock_lock( target_join_lock_xp );

        // get join_done from target thread descriptor
        join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);

        if( join_done )     // joining thread arrived first
        {
            // get extended pointer on joining thread
            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
            joining_ptr = GET_PTR( joining_xp );
            joining_cxy = GET_CXY( joining_xp );

            // reset the join_done flag in target thread
            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );

            // unblock the joining thread
            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );

            // release the join_lock in target thread descriptor
            remote_spinlock_unlock( target_join_lock_xp );

            // restore IRQs
            hal_restore_irq( save_sr );
        }
        else                // this thread arrived first
        {
            // set the kill_done flag in target thread
            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );

            // block this thread on BLOCKED_JOIN
            thread_block( killer_xp , THREAD_BLOCKED_JOIN );

            // set extended pointer on killer thread in target thread
            hal_remote_swd( target_join_xp_xp , killer_xp );

            // release the join_lock in target thread descriptor
            remote_spinlock_unlock( target_join_lock_xp );

            // deschedule
            sched_yield( "killer thread waits for joining thread" );

            // restore IRQs
            hal_restore_irq( save_sr );
        }
    }  // end if attached

    // - if the target thread is the main thread
    //   => synchronize with the parent process main thread
    // - if the target thread is not the main thread
    //   => simply mark the target thread for delete

    // get pointer on target thread process
    target_process_xp  = XPTR( target_cxy , &target_ptr->process );
    target_process     = (process_t *)hal_remote_lpt( target_process_xp );

    // get target process owner cluster
    target_pid = hal_remote_lw( XPTR( target_cxy , &target_process->pid ) );
    owner_cxy = CXY_FROM_PID( target_pid );

    // get target thread local index
    target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
    target_ltid  = LTID_FROM_TRDID( target_trdid );

    if( (owner_cxy == target_cxy) && (target_ltid == 0) )     // main thread
    {
        // get extended pointer on term_state in target process owner cluster
        process_state_xp = XPTR( owner_cxy , &target_process->term_state );

        // set termination info in target process owner
        if( is_exit ) hal_remote_atomic_or( process_state_xp , PROCESS_TERM_EXIT );
        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );

#if DEBUG_THREAD_KILL
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KILL < cycle )
printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    }
    else                                                      // not the main thread
    {
        // set the REQ_DELETE flag in target thread descriptor
        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

#if DEBUG_THREAD_KILL
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KILL < cycle )
printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    }

}  // end thread_kill()

///////////////////////
void thread_idle_func()
{
    while( 1 )
    {
        // unmask IRQs
        hal_enable_irq( NULL );

        if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode
        {

#if DEBUG_THREAD_IDLE
uint32_t cycle  = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_THREAD_IDLE < cycle )
printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
__FUNCTION__, this, local_cxy, this->core->lid, cycle );
#endif

            hal_core_sleep();

#if DEBUG_THREAD_IDLE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_IDLE < cycle )
printk("\n[DBG] %s : idle thread %x on core[%x,%d] wakes up / cycle %d\n",
__FUNCTION__, this, local_cxy, this->core->lid, cycle );
#endif

        }
        else                                // search a runnable thread
        {
            sched_yield( "IDLE" );
        }
    }
}  // end thread_idle_func()

/////////////////////////////////////////////////
void thread_user_time_update( thread_t * thread )
{
    // TODO
    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}

///////////////////////////////////////////////////
void thread_kernel_time_update( thread_t * thread )
{
    // TODO
    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}

/////////////////////////////////////
xptr_t thread_get_xptr( pid_t    pid,
                        trdid_t  trdid )
{
    cxy_t         target_cxy;          // target thread cluster identifier
    ltid_t        target_thread_ltid;  // target thread local index
    thread_t    * target_thread_ptr;   // target thread local pointer
    xptr_t        target_process_xp;   // extended pointer on target process descriptor
    process_t   * target_process_ptr;  // local pointer on target process descriptor
    pid_t         target_process_pid;  // target process identifier
    xlist_entry_t root;                // root of list of process in target cluster
    xptr_t        lock_xp;             // extended pointer on lock protecting this list

    // get target cluster identifier and local thread identifier
    target_cxy         = CXY_FROM_TRDID( trdid );
    target_thread_ltid = LTID_FROM_TRDID( trdid );

    // check trdid argument
    if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) ||
        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;

    // get root of list of process descriptors in target cluster
    hal_remote_memcpy( XPTR( local_cxy  , &root ),
                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
                       sizeof(xlist_entry_t) );

    // get extended pointer on lock protecting the list of processes
    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // take the lock protecting the list of processes in target cluster
    remote_spinlock_lock( lock_xp );

    // loop on list of process in target cluster to find the PID process
    xptr_t  iter;
    bool_t  found = false;
    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
    {
        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
        target_process_ptr = (process_t *)GET_PTR( target_process_xp );
        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
        if( target_process_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes in target cluster
    remote_spinlock_unlock( lock_xp );

    // check PID found
    if( found == false ) return XPTR_NULL;

    // get target thread local pointer
    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );

    if( target_thread_ptr == NULL )  return XPTR_NULL;

    return XPTR( target_cxy , target_thread_ptr );
}

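//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (editor addition): resolving a (pid, trdid) pair to an extended
// pointer before a remote operation, with the mandatory failure check:
//
//      xptr_t thread_xp = thread_get_xptr( pid , trdid );
//      if( thread_xp == XPTR_NULL ) return EINVAL;     // no such thread
//      thread_block( thread_xp , THREAD_BLOCKED_GLOBAL );
//////////////////////////////////////////////////////////////////////////////////////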