source: trunk/kernel/kern/thread.c @ 443

Last change on this file since 443 was 443, checked in by alain, 6 years ago

Fix a few bugs found while debugging the sort multi-thread application.

File size: 43.1 KB
Line 
1/*
2 * thread.c -  implementation of thread operations (user & kernel)
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Alain Greiner (2016,2017)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_types.h>
27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
31#include <memcpy.h>
32#include <printk.h>
33#include <cluster.h>
34#include <process.h>
35#include <scheduler.h>
36#include <dev_pic.h>
37#include <core.h>
38#include <list.h>
39#include <xlist.h>
40#include <page.h>
41#include <kmem.h>
42#include <ppm.h>
43#include <thread.h>
44
45//////////////////////////////////////////////////////////////////////////////////////
46// Extern global variables
47//////////////////////////////////////////////////////////////////////////////////////
48
49extern process_t      process_zero;
50
51//////////////////////////////////////////////////////////////////////////////////////
52// This function returns a printable string for the thread type.
53//////////////////////////////////////////////////////////////////////////////////////
54char * thread_type_str( uint32_t type )
55{
56    if     ( type == THREAD_USER   ) return "USR";
57    else if( type == THREAD_RPC    ) return "RPC";
58    else if( type == THREAD_DEV    ) return "DEV";
59    else if( type == THREAD_IDLE   ) return "IDL";
60    else                             return "undefined";
61}
62
63/////////////////////////////////////////////////////////////////////////////////////
64// This static function allocates physical memory for a thread descriptor.
65// It can be called by the three functions:
66// - thread_user_create()
67// - thread_user_fork()
68// - thread_kernel_create()
69/////////////////////////////////////////////////////////////////////////////////////
70// @ return pointer on thread descriptor if success / return NULL if failure.
71/////////////////////////////////////////////////////////////////////////////////////
72static thread_t * thread_alloc()
73{
74        page_t       * page;   // pointer on page descriptor containing thread descriptor
75        kmem_req_t     req;    // kmem request
76
77        // allocates memory for thread descriptor + kernel stack
78        req.type  = KMEM_PAGE;
79        req.size  = CONFIG_THREAD_DESC_ORDER;
80        req.flags = AF_KERNEL | AF_ZERO;
81        page      = kmem_alloc( &req );
82
83        if( page == NULL ) return NULL;
84
85    // return pointer on new thread descriptor
86    xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
87    return (thread_t *)GET_PTR( base_xp );
88
89}  // end thread_alloc()
90 
91
92/////////////////////////////////////////////////////////////////////////////////////
93// This static function releases the physical memory for a thread descriptor.
94// It is called by the three functions:
95// - thread_user_create()
96// - thread_user_fork()
97// - thread_kernel_create()
98/////////////////////////////////////////////////////////////////////////////////////
99// @ thread  : pointer on thread descriptor.
100/////////////////////////////////////////////////////////////////////////////////////
101static void thread_release( thread_t * thread )
102{
103    kmem_req_t   req;
104
105    xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
106
107    req.type  = KMEM_PAGE;
108    req.ptr   = GET_PTR( base_xp );
109    kmem_free( &req );
110}
111
/////////////////////////////////////////////////////////////////////////////////////
// This static function initializes a thread descriptor (kernel or user).
// It can be called by the four functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
// - thread_idle_init()
// It registers the thread in the process descriptor (allocating a TRDID),
// registers it in the selected core scheduler, and updates the local DQDT.
/////////////////////////////////////////////////////////////////////////////////////
// @ thread       : pointer on thread descriptor
// @ process      : pointer on process descriptor.
// @ type         : thread type.
// @ func         : pointer on thread entry function.
// @ args         : pointer on thread entry function arguments.
// @ core_lid     : target core local index.
// @ u_stack_base : stack base (user thread only)
// @ u_stack_size : stack size (user thread only)
// @ return 0 if success / return EINVAL if the thread cannot be registered
//   in the process descriptor.
/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            intptr_t        u_stack_base,
                            uint32_t        u_stack_size )
{
    error_t        error;
    trdid_t        trdid;      // allocated thread identifier

    cluster_t    * local_cluster = LOCAL_CLUSTER;

// NOTE(review): this trace guard is also reached for kernel threads,
// although the flag is named DEBUG_THREAD_USER_INIT — confirm intent.
#if DEBUG_THREAD_USER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_INIT < cycle )
printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
#endif

    // register new thread in process descriptor, and get a TRDID
    error = process_register_thread( process, thread , &trdid );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
        return EINVAL;
    }

    // compute thread descriptor size without kernel stack:
    // offset of the 4-byte "signature" field, presumably the last field
    // of thread_t — TODO confirm against the thread_t declaration
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4; 

    // Initialize new thread descriptor
    thread->trdid           = trdid;
    thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;
    thread->core            = &local_cluster->core_tbl[core_lid];
    thread->process         = process;

    // no lock is held by a freshly initialized thread
    thread->local_locks     = 0;
    thread->remote_locks    = 0;

#if CONFIG_LOCKS_DEBUG
    list_root_init( &thread->locks_root ); 
    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
#endif

    // the kernel stack occupies the remainder of the allocated
    // descriptor pages, just after the descriptor fields
    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread + desc_size;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no user defined placement for fork
    thread->fork_cxy        = 0;            // user defined target cluster for fork

    // the thread starts blocked: the creator must unblock it explicitly
    thread->blocked         = THREAD_BLOCKED_GLOBAL;

    // reset sched list
    list_entry_init( &thread->sched_list );

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initializes join_lock
    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );

    // initialise signature (used by assertions to detect descriptor corruption)
    thread->signature = THREAD_SIGNATURE;

    // FIXME define and call an architecture specific hal_thread_init()
    // function to initialise the save_sr field
    thread->save_sr = 0xFF13;

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    // update DQDT
    dqdt_update_threads( 1 );

#if DEBUG_THREAD_USER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_INIT < cycle )
printk("\n[DBG] %s : thread %x exit  after init of thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
#endif

    return 0;

} // end thread_init()
225
/////////////////////////////////////////////////////////////////////////////////////
// This function creates a new user thread in the local cluster for the process
// identified by <pid> : it allocates a user stack vseg, allocates and initializes
// the thread descriptor, and creates the CPU and FPU contexts.
// The created thread is left in the THREAD_BLOCKED_GLOBAL state (set by
// thread_init()); the caller is responsible for unblocking it.
/////////////////////////////////////////////////////////////////////////////////////
// @ pid        : process identifier (a local copy of the process must exist).
// @ start_func : thread entry function.
// @ start_arg  : thread entry function argument.
// @ attr       : pointer on pthread attributes (must be non-NULL).
// @ new_thread : [out] pointer on created thread descriptor.
// @ return 0 if success / ENOMEM or EINVAL if failure.
/////////////////////////////////////////////////////////////////////////////////////
error_t thread_user_create( pid_t             pid,
                            void            * start_func,
                            void            * start_arg,
                            pthread_attr_t  * attr,
                            thread_t       ** new_thread )
{
    error_t        error;
    thread_t     * thread;       // pointer on created thread descriptor
    process_t    * process;      // pointer to local process descriptor
    lid_t          core_lid;     // selected core local index
    vseg_t       * vseg;         // stack vseg

    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );

#if DEBUG_THREAD_USER_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, pid , local_cxy , cycle );
#endif

    // get process descriptor local copy
    process = process_get_local_copy( pid );

    if( process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
               __FUNCTION__ , pid );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : process descriptor = %x for process %x in cluster %x\n",
__FUNCTION__, process , pid , local_cxy );
#endif

    // select a target core in local cluster :
    // honour a user-requested core index, otherwise let the cluster choose
    if( attr->attributes & PT_ATTR_CORE_DEFINED )
    {
        core_lid = attr->lid;
        if( core_lid >= LOCAL_CLUSTER->cores_nr )
        {
            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
            __FUNCTION__ , core_lid );
            return EINVAL;
        }
    }
    else
    {
        core_lid = cluster_select_local_core();
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : core[%x,%d] selected\n",
__FUNCTION__, local_cxy , core_lid );
#endif

    // allocate a stack from local VMM
    vseg = vmm_create_vseg( process,
                            VSEG_TYPE_STACK,
                            0,                 // size unused
                            0,                 // length unused
                            0,                 // file_offset unused
                            0,                 // file_size unused
                            XPTR_NULL,         // mapper_xp unused
                            local_cxy );

    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
        return ENOMEM;
    }

    // allocate memory for thread descriptor
    thread = thread_alloc();

    if( thread == NULL )
    {
        printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread descriptor %x allocated\n",
__FUNCTION__, thread );
#endif

    // initialize thread descriptor
    error = thread_init( thread,
                         process,
                         THREAD_USER,
                         start_func,
                         start_arg,
                         core_lid,
                         vseg->min,
                         vseg->max - vseg->min );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return EINVAL;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread descriptor %x initialised / trdid = %x\n",
__FUNCTION__, thread , thread->trdid );
#endif

    // set DETACHED flag if required
    if( attr->attributes & PT_ATTR_DETACH ) 
    {
        thread->flags |= THREAD_FLAG_DETACHED;
    }

    // allocate & initialize CPU context
    // NOTE(review): from this point thread_init() has already registered the
    // thread in the process th_tbl[] and the core scheduler; the error paths
    // below call thread_release() without unregistering it — verify this
    // cannot leave stale references.
    if( hal_cpu_context_create( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

    // allocate  FPU context
    if( hal_fpu_context_alloc( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

#if DEBUG_THREAD_USER_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / new_thread %x in process %x / core %d / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread->trdid , pid , core_lid, cycle );
#endif

    *new_thread = thread;
    return 0;

}  // end thread_user_create()
376
377///////////////////////////////////////////////////////
378error_t thread_user_fork( xptr_t      parent_thread_xp,
379                          process_t * child_process,
380                          thread_t ** child_thread )
381{
382    error_t        error;
383        thread_t     * child_ptr;        // local pointer on local child thread
384    lid_t          core_lid;         // selected core local index
385
386    thread_t     * parent_ptr;       // local pointer on remote parent thread
387    cxy_t          parent_cxy;       // parent thread cluster
388    process_t    * parent_process;   // local pointer on parent process
389    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
390
391    void         * func;             // parent thread entry_func
392    void         * args;             // parent thread entry_args
393    intptr_t       base;             // parent thread u_stack_base
394    uint32_t       size;             // parent thread u_stack_size
395    uint32_t       flags;            // parent_thread flags
396    vpn_t          vpn_base;         // parent thread stack vpn_base
397    vpn_t          vpn_size;         // parent thread stack vpn_size
398    reg_t        * uzone;            // parent thread pointer on uzone 
399
400    vseg_t       * vseg;             // child thread STACK vseg
401
402#if DEBUG_THREAD_USER_FORK
403uint32_t cycle = (uint32_t)hal_get_cycles();
404if( DEBUG_THREAD_USER_FORK < cycle )
405printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
406__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
407#endif
408
409    // select a target core in local cluster
410    core_lid = cluster_select_local_core();
411
412    // get cluster and local pointer on parent thread descriptor
413    parent_cxy = GET_CXY( parent_thread_xp );
414    parent_ptr = (thread_t *)GET_PTR( parent_thread_xp );
415
416    // get relevant fields from parent thread
417    func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
418    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
419    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
420    size  = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
421    flags =           hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags         ));
422    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
423
424    vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
425    vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;
426
427    // get pointer on parent process in parent thread cluster
428    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
429                                                        &parent_ptr->process ) );
430 
431    // get extended pointer on parent GPT in parent thread cluster
432    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
433
434    // allocate memory for child thread descriptor
435    child_ptr = thread_alloc();
436    if( child_ptr == NULL )
437    {
438        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
439        return -1;
440    }
441
442    // initialize thread descriptor
443    error = thread_init( child_ptr,
444                         child_process,
445                         THREAD_USER,
446                         func,
447                         args,
448                         core_lid,
449                         base,
450                         size );
451    if( error )
452    {
453            printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
454        thread_release( child_ptr );
455        return EINVAL;
456    }
457
458    // return child pointer
459    *child_thread = child_ptr;
460
461    // set detached flag if required
462    if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
463
464    // update uzone pointer in child thread descriptor
465    child_ptr->uzone_current = (char *)((intptr_t)uzone +
466                                        (intptr_t)child_ptr - 
467                                        (intptr_t)parent_ptr );
468 
469
470    // allocate CPU context for child thread
471        if( hal_cpu_context_alloc( child_ptr ) )
472    {
473            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
474        thread_release( child_ptr );
475        return -1;
476    }
477
478    // allocate FPU context for child thread
479        if( hal_fpu_context_alloc( child_ptr ) )
480    {
481            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
482        thread_release( child_ptr );
483        return -1;
484    }
485
486    // create and initialize STACK vseg
487    vseg = vseg_alloc();
488    vseg_init( vseg,
489               VSEG_TYPE_STACK,
490               base,
491               size,
492               vpn_base,
493               vpn_size,
494               0, 0, XPTR_NULL,                         // not a file vseg
495               local_cxy );
496
497    // register STACK vseg in local child VSL
498    vseg_attach( &child_process->vmm , vseg );
499
500    // copy all valid STACK GPT entries   
501    vpn_t          vpn;
502    bool_t         mapped;
503    ppn_t          ppn;
504    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
505    {
506        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
507                                  parent_gpt_xp,
508                                  vpn,
509                                  true,                 // set cow
510                                  &ppn,
511                                  &mapped );
512        if( error )
513        {
514            vseg_detach( &child_process->vmm , vseg );
515            vseg_free( vseg );
516            thread_release( child_ptr );
517            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
518            return -1;
519        }
520
521        // increment pending forks counter for the page if mapped
522        if( mapped )
523        {
524            xptr_t   page_xp  = ppm_ppn2page( ppn );
525            cxy_t    page_cxy = GET_CXY( page_xp );
526            page_t * page_ptr = (page_t *)GET_PTR( page_xp );
527            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
528
529#if (DEBUG_THREAD_USER_FORK & 1)
530cycle = (uint32_t)hal_get_cycles();
531if( DEBUG_THREAD_USER_FORK < cycle )
532printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
533__FUNCTION__, CURRENT_THREAD, vpn );
534#endif
535
536        }
537    }
538
539    // set COW flag for all mapped entries of STAK vseg in parent thread GPT
540    hal_gpt_set_cow( parent_gpt_xp,
541                     vpn_base,
542                     vpn_size );
543 
544#if DEBUG_THREAD_USER_FORK
545cycle = (uint32_t)hal_get_cycles();
546if( DEBUG_THREAD_USER_FORK < cycle )
547printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
548__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
549#endif
550
551        return 0;
552
553}  // end thread_user_fork()
554
555/////////////////////////////////////////////////////////
556error_t thread_kernel_create( thread_t     ** new_thread,
557                              thread_type_t   type,
558                              void          * func,
559                              void          * args,
560                                              lid_t           core_lid )
561{
562    error_t        error;
563        thread_t     * thread;       // pointer on new thread descriptor
564
565    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
566    __FUNCTION__ , "illegal thread type" );
567
568    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
569            __FUNCTION__ , "illegal core_lid" );
570
571#if DEBUG_THREAD_KERNEL_CREATE
572uint32_t cycle = (uint32_t)hal_get_cycles();
573if( DEBUG_THREAD_KERNEL_CREATE < cycle )
574printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
575__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
576#endif
577
578    // allocate memory for new thread descriptor
579    thread = thread_alloc();
580
581    if( thread == NULL ) return ENOMEM;
582
583    // initialize thread descriptor
584    error = thread_init( thread,
585                         &process_zero,
586                         type,
587                         func,
588                         args,
589                         core_lid,
590                         0 , 0 );  // no user stack for a kernel thread
591
592    if( error ) // release allocated memory for thread descriptor
593    {
594        thread_release( thread );
595        return EINVAL;
596    }
597
598    // allocate & initialize CPU context
599        hal_cpu_context_create( thread );
600
601#if DEBUG_THREAD_KERNEL_CREATE
602cycle = (uint32_t)hal_get_cycles();
603if( DEBUG_THREAD_KERNEL_CREATE < cycle )
604printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
605__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
606#endif
607
608    *new_thread = thread;
609        return 0;
610
611} // end thread_kernel_create()
612
613/////////////////////////////////////////////////
614error_t thread_idle_init( thread_t      * thread,
615                          thread_type_t   type,
616                          void          * func,
617                          void          * args,
618                                          lid_t           core_lid )
619{
620    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
621
622    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , __FUNCTION__ , "illegal core index" );
623
624    error_t  error = thread_init( thread,
625                                  &process_zero,
626                                  type,
627                                  func,
628                                  args,
629                                  core_lid,
630                                  0 , 0 );   // no user stack for a kernel thread
631
632    // allocate & initialize CPU context if success
633    if( error == 0 ) hal_cpu_context_create( thread );
634
635    return error;
636
637}  // end thread_idle_init()
638
///////////////////////////////////////////////////////////////////////////////////////
// This function destroys a thread descriptor : it releases the CPU and FPU
// contexts, releases FPU ownership on the core if required, removes the thread
// from the process th_tbl[], updates the DQDT, and releases the descriptor
// memory. It returns true when the destroyed thread was the last thread
// registered in the process.
// TODO: check that all memory dynamically allocated during thread execution
// has been released, using a cache of mmap requests. [AG]
///////////////////////////////////////////////////////////////////////////////////////
bool_t thread_destroy( thread_t * thread )
{
    reg_t        save_sr;       // saved IRQ state for the critical section
    bool_t       last_thread;   // true if this was the last thread of the process

    process_t  * process    = thread->process;
    core_t     * core       = thread->core;

#if DEBUG_THREAD_DESTROY
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
#endif

    // a thread must not be destroyed while it still holds locks
    assert( (thread->local_locks == 0) , __FUNCTION__ , 
    "local lock not released for thread %x in process %x", thread->trdid, process->pid );

    assert( (thread->remote_locks == 0) , __FUNCTION__ , 
    "remote lock not released for thread %x in process %x", thread->trdid, process->pid );

    // update intrumentation values
    process->vmm.pgfault_nr += thread->info.pgfault_nr;

    // release memory allocated for CPU context and FPU context
    // (only user threads own an FPU context)
    hal_cpu_context_destroy( thread );
    if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );
       
    // release FPU ownership if required
    // (IRQs masked so the ownership check/update is not interrupted)
    hal_disable_irq( &save_sr );
    if( core->fpu_owner == thread )
    {
        core->fpu_owner = NULL;
        hal_fpu_disable();
    }
    hal_restore_irq( save_sr );

    // remove thread from process th_tbl[]
    last_thread = process_remove_thread( thread );
       
    // update DQDT
    dqdt_update_threads( -1 );

    // invalidate thread descriptor signature so stale pointers can be detected
    thread->signature = 0;

    // release memory for thread descriptor
    thread_release( thread );

#if DEBUG_THREAD_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
#endif

    return last_thread;

}   // end thread_destroy()
702
703//////////////////////////////////////////////////
704inline void thread_set_req_ack( thread_t * target,
705                                uint32_t * rsp_count )
706{
707    reg_t    save_sr;   // for critical section
708
709    // get pointer on target thread scheduler
710    scheduler_t * sched = &target->core->scheduler;
711
712    // wait scheduler ready to handle a new request
713    while( sched->req_ack_pending ) asm volatile( "nop" );
714   
715    // enter critical section
716    hal_disable_irq( &save_sr );
717     
718    // set request in target thread scheduler
719    sched->req_ack_pending = true;
720
721    // set ack request in target thread "flags"
722    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );
723
724    // set pointer on responses counter in target thread
725    target->ack_rsp_count = rsp_count;
726   
727    // exit critical section
728    hal_restore_irq( save_sr );
729
730    hal_fence();
731
732}  // thread_set_req_ack()
733
734/////////////////////////////////////////////////////
735inline void thread_reset_req_ack( thread_t * target )
736{
737    reg_t    save_sr;   // for critical section
738
739    // get pointer on target thread scheduler
740    scheduler_t * sched = &target->core->scheduler;
741
742    // check signal pending in scheduler
743    assert( sched->req_ack_pending , __FUNCTION__ , "no pending signal" );
744   
745    // enter critical section
746    hal_disable_irq( &save_sr );
747     
748    // reset signal in scheduler
749    sched->req_ack_pending = false;
750
751    // reset signal in thread "flags"
752    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );
753
754    // reset pointer on responses counter
755    target->ack_rsp_count = NULL;
756   
757    // exit critical section
758    hal_restore_irq( save_sr );
759
760    hal_fence();
761
762}  // thread_reset_req_ack()
763
764////////////////////////////////
765inline bool_t thread_can_yield()
766{
767    thread_t * this = CURRENT_THREAD;
768    return (this->local_locks == 0) && (this->remote_locks == 0);
769}
770
771/////////////////////////
772void thread_check_sched()
773{
774    thread_t * this = CURRENT_THREAD;
775
776        if( (this->local_locks == 0) && 
777        (this->remote_locks == 0) &&
778        (this->flags & THREAD_FLAG_SCHED) ) 
779    {
780        this->flags &= ~THREAD_FLAG_SCHED;
781        sched_yield( "delayed scheduling" );
782    }
783
784}  // end thread_check_sched()
785
786//////////////////////////////////////
787void thread_block( xptr_t   thread_xp,
788                   uint32_t cause )
789{
790    // get thread cluster and local pointer
791    cxy_t      cxy = GET_CXY( thread_xp );
792    thread_t * ptr = GET_PTR( thread_xp );
793
794    // set blocking cause
795    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
796    hal_fence();
797
798#if DEBUG_THREAD_BLOCK
799uint32_t cycle = (uint32_t)hal_get_cycles();
800if( DEBUG_THREAD_BLOCK < cycle )
801printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
802__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
803#endif
804
805#if (DEBUG_THREAD_BLOCK & 1)
806if( DEBUG_THREAD_BLOCK < cycle )
807sched_display( ptr->core->lid );
808#endif
809
810} // end thread_block()
811
812////////////////////////////////////////////
813uint32_t thread_unblock( xptr_t   thread_xp,
814                         uint32_t cause )
815{
816    // get thread cluster and local pointer
817    cxy_t      cxy = GET_CXY( thread_xp );
818    thread_t * ptr = GET_PTR( thread_xp );
819
820    // reset blocking cause
821    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
822    hal_fence();
823
824#if DEBUG_THREAD_BLOCK
825uint32_t cycle = (uint32_t)hal_get_cycles();
826if( DEBUG_THREAD_BLOCK < cycle )
827printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
828__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
829#endif
830
831#if (DEBUG_THREAD_BLOCK & 1)
832if( DEBUG_THREAD_BLOCK < cycle )
833sched_display( ptr->core->lid );
834#endif
835
836    // return a non zero value if the cause bit is modified
837    return( previous & cause );
838
839}  // end thread_unblock()
840
841/*
842
843////////////////////////////////////
844void thread_kill( xptr_t  target_xp,
845                  bool_t  is_exit,
846                  bool_t  is_forced )
847{
848    reg_t       save_sr;                // for critical section
849    bool_t      attached;               // target thread in attached mode
850    bool_t      join_done;              // joining thread arrived first
851    xptr_t      killer_xp;              // extended pointer on killer thread (this)
852    thread_t  * killer_ptr;             // pointer on killer thread (this)
853    cxy_t       target_cxy;             // target thread cluster     
854    thread_t  * target_ptr;             // pointer on target thread
855    xptr_t      joining_xp;             // extended pointer on joining thread
856    thread_t  * joining_ptr;            // pointer on joining thread
857    cxy_t       joining_cxy;            // joining thread cluster
858    pid_t       target_pid;             // target process PID
859    cxy_t       owner_cxy;              // target process owner cluster
860    trdid_t     target_trdid;           // target thread identifier
861    ltid_t      target_ltid;            // target thread local index
862    xptr_t      process_state_xp;       // extended pointer on <term_state> in process
863
864    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
865    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
866    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
867    xptr_t      target_process_xp;      // extended pointer on target thread <process>
868
869    process_t * target_process;         // pointer on target thread process
870
871    // get target thread pointer and cluster
872    target_cxy = GET_CXY( target_xp );
873    target_ptr = GET_PTR( target_xp );
874
875    // get killer thread pointers
876    killer_ptr = CURRENT_THREAD;
877    killer_xp  = XPTR( local_cxy , killer_ptr );
878
879#if DEBUG_THREAD_DELETE
880uint32_t cycle  = (uint32_t)hal_get_cycles;
881if( DEBUG_THREAD_DELETE < cycle )
882printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
883__FUNCTION__, killer_ptr, target_ptr, cycle );
884#endif
885
886    // block the target thread
887    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
888
889    // get target thread attached mode
890    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
891    attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0);
892
893    // synchronize with the joining thread
894    // if the target thread is attached && not forced
895
896    if( attached  && (is_forced == false) )
897    {
898        // build extended pointers on target thread join fields
899        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
900        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
901
902        // enter critical section
903        hal_disable_irq( &save_sr );
904
905        // take the join_lock in target thread descriptor
906        remote_spinlock_lock( target_join_lock_xp );
907
908        // get join_done from target thread descriptor
909        join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
910   
911        if( join_done )     // joining thread arrived first
912        {
913            // get extended pointer on joining thread
914            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
915            joining_ptr = GET_PTR( joining_xp );
916            joining_cxy = GET_CXY( joining_xp );
917           
918            // reset the join_done flag in target thread
919            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
920
921            // unblock the joining thread
922            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
923
924            // release the join_lock in target thread descriptor
925            remote_spinlock_unlock( target_join_lock_xp );
926
927            // restore IRQs
928            hal_restore_irq( save_sr );
929        }
930        else                // this thread arrived first
931        {
932            // set the kill_done flag in target thread
933            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
934
935            // block this thread on BLOCKED_JOIN
936            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
937
938            // set extended pointer on killer thread in target thread
939            hal_remote_swd( target_join_xp_xp , killer_xp );
940
941            // release the join_lock in target thread descriptor
942            remote_spinlock_unlock( target_join_lock_xp );
943
944            // deschedule
945            sched_yield( "killer thread wait joining thread" );
946
947            // restore IRQs
948            hal_restore_irq( save_sr );
949        }
950    }  // end if attached
951
952    // - if the target thread is the main thread
953    //   => synchronize with the parent process main thread
954    // - if the target thread is not the main thread
955    //   => simply mark the target thread for delete
956
957    // get pointer on target thread process
958    target_process_xp  = XPTR( target_cxy , &target_ptr->process );
959    target_process     = (process_t *)hal_remote_lpt( target_process_xp );
960
961        // get target process owner cluster
962        target_pid = hal_remote_lw( XPTR( target_cxy , &target_process->pid ) );
963    owner_cxy = CXY_FROM_PID( target_pid );
964
965    // get target thread local index
966    target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
967    target_ltid  = LTID_FROM_TRDID( target_trdid );
968
969    if( (owner_cxy == target_cxy) && (target_ltid == 0) )     // main thread
970    {
971        // get extended pointer on term_state in target process owner cluster
972        process_state_xp = XPTR( owner_cxy , &target_process->term_state );
973
974        // set termination info in target process owner 
975        if( is_exit ) hal_remote_atomic_or( process_state_xp , PROCESS_TERM_EXIT );
976        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
977
978#if DEBUG_THREAD_DELETE
979cycle  = (uint32_t)hal_get_cycles;
980if( DEBUG_THREAD_DELETE < cycle )
981printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
982__FUNCTION__, killer_ptr, target_ptr, cycle );
983#endif
984
985    }
    else                                                      // not the main thread
987    {
988        // set the REQ_DELETE flag in target thread descriptor
989        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
990
991#if DEBUG_THREAD_DELETE
992cycle  = (uint32_t)hal_get_cycles;
993if( DEBUG_THREAD_DELETE < cycle )
994printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
995__FUNCTION__, killer_ptr, target_ptr, cycle );
996#endif
997
998    }
999
1000}  // end thread_kill()
1001
1002*/
1003
1004//////////////////////////////////////
1005void thread_delete( xptr_t  target_xp,
1006                    pid_t   pid,
1007                    bool_t  is_forced )
1008{
1009    reg_t       save_sr;                // for critical section
1010    bool_t      target_join_done;       // joining thread arrived first
1011    bool_t      target_attached;        // target thread attached
1012    xptr_t      killer_xp;              // extended pointer on killer thread (this)
1013    thread_t  * killer_ptr;             // pointer on killer thread (this)
1014    cxy_t       target_cxy;             // target thread cluster     
1015    thread_t  * target_ptr;             // pointer on target thread
1016    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
1017    uint32_t    target_flags;           // target thread <flags> value
1018    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
1019    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
1020    trdid_t     target_trdid;           // target thread identifier
1021    ltid_t      target_ltid;            // target thread local index
1022    xptr_t      joining_xp;             // extended pointer on joining thread
1023    thread_t  * joining_ptr;            // pointer on joining thread
1024    cxy_t       joining_cxy;            // joining thread cluster
1025    cxy_t       owner_cxy;              // process owner cluster
1026
1027
1028    // get target thread pointers, identifiers, and flags
1029    target_cxy      = GET_CXY( target_xp );
1030    target_ptr      = GET_PTR( target_xp );
1031    target_trdid    = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
1032    target_ltid     = LTID_FROM_TRDID( target_trdid );
1033    target_flags_xp = XPTR( target_cxy , &target_ptr->flags ); 
1034    target_flags    = hal_remote_lw( target_flags_xp );
1035
1036    // get killer thread pointers
1037    killer_ptr = CURRENT_THREAD;
1038    killer_xp  = XPTR( local_cxy , killer_ptr );
1039
1040#if DEBUG_THREAD_DELETE
1041uint32_t cycle  = (uint32_t)hal_get_cycles;
1042if( DEBUG_THREAD_DELETE < cycle )
1043printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
1044__FUNCTION__, killer_ptr, target_ptr, cycle );
1045#endif
1046
1047    // target thread cannot be the main thread, because the main thread
1048    // must be deleted by the parent process sys_wait() function
1049    owner_cxy = CXY_FROM_PID( pid );
1050    assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), __FUNCTION__,
1051    "tharget thread cannot be the main thread\n" );
1052
1053    // block the target thread
1054    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1055
1056    // get attached from target flag descriptor
1057    target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
1058
1059    // synchronize with the joining thread if the target thread is attached
1060    if( target_attached && (is_forced == false) )
1061    {
1062        // build extended pointers on target thread join fields
1063        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
1064        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
1065
1066        // enter critical section
1067        hal_disable_irq( &save_sr );
1068
1069        // take the join_lock in target thread descriptor
1070        remote_spinlock_lock( target_join_lock_xp );
1071
1072        // get join_done from target thread descriptor
1073        target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
1074   
1075        if( target_join_done )  // joining thread arrived first => unblock the joining thread
1076        {
1077            // get extended pointer on joining thread
1078            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
1079            joining_ptr = GET_PTR( joining_xp );
1080            joining_cxy = GET_CXY( joining_xp );
1081           
1082            // reset the join_done flag in target thread
1083            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
1084
1085            // unblock the joining thread
1086            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
1087
1088            // release the join_lock in target thread descriptor
1089            remote_spinlock_unlock( target_join_lock_xp );
1090
1091            // restore IRQs
1092            hal_restore_irq( save_sr );
1093        }
1094        else                // this thread arrived first => register flags and deschedule
1095        {
1096            // set the kill_done flag in target thread
1097            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1098
1099            // block this thread on BLOCKED_JOIN
1100            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1101
1102            // set extended pointer on killer thread in target thread
1103            hal_remote_swd( target_join_xp_xp , killer_xp );
1104
1105            // release the join_lock in target thread descriptor
1106            remote_spinlock_unlock( target_join_lock_xp );
1107
1108            // deschedule
1109            sched_yield( "killer thread wait joining thread" );
1110
1111            // restore IRQs
1112            hal_restore_irq( save_sr );
1113        }
1114    }  // end if attached
1115
1116    // set the REQ_DELETE flag in target thread descriptor
1117    hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1118
1119#if DEBUG_THREAD_DELETE
1120cycle  = (uint32_t)hal_get_cycles;
1121if( DEBUG_THREAD_DELETE < cycle )
1122printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
1123__FUNCTION__, killer_ptr, target_ptr, cycle );
1124#endif
1125
1126}  // end thread_delete()
1127
1128
1129
1130///////////////////////
1131void thread_idle_func()
1132{
1133    while( 1 )
1134    {
1135        // unmask IRQs
1136        hal_enable_irq( NULL );
1137
1138        // force core to low-power mode (optional)
1139        if( CONFIG_THREAD_IDLE_MODE_SLEEP ) 
1140        {
1141
1142#if DEBUG_THREAD_IDLE
1143uint32_t cycle  = (uint32_t)hal_get_cycles;
1144thread_t * this = CURRENT_THREAD;
1145if( DEBUG_THREAD_IDLE < cycle )
1146printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
1147__FUNCTION__, this, local_cxy, this->core->lid, cycle );
1148#endif
1149
1150            hal_core_sleep();
1151
1152#if DEBUG_THREAD_IDLE
1153cycle  = (uint32_t)hal_get_cycles;
1154if( DEBUG_THREAD_IDLE < cycle )
1155printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
1156__FUNCTION__, this, local_cxy, this->core->lid, cycle );
1157#endif
1158
1159        }
1160
1161        // search a runable thread
1162        sched_yield( "IDLE" );
1163    }
1164}  // end thread_idle()
1165
1166
1167/////////////////////////////////////////////////
1168void thread_user_time_update( thread_t * thread )
1169{
1170    // TODO
1171    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
1172}
1173
1174///////////////////////////////////////////////////
1175void thread_kernel_time_update( thread_t * thread )
1176{
1177    // TODO
1178    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
1179}
1180
1181/////////////////////////////////////
1182xptr_t thread_get_xptr( pid_t    pid,
1183                        trdid_t  trdid )
1184{
1185    cxy_t         target_cxy;          // target thread cluster identifier
1186    ltid_t        target_thread_ltid;  // target thread local index
1187    thread_t    * target_thread_ptr;   // target thread local pointer
1188    xptr_t        target_process_xp;   // extended pointer on target process descriptor
1189    process_t   * target_process_ptr;  // local pointer on target process descriptor
1190    pid_t         target_process_pid;  // target process identifier
1191    xlist_entry_t root;                // root of list of process in target cluster
1192    xptr_t        lock_xp;             // extended pointer on lock protecting  this list
1193
1194    // get target cluster identifier and local thread identifier
1195    target_cxy         = CXY_FROM_TRDID( trdid );
1196    target_thread_ltid = LTID_FROM_TRDID( trdid );
1197
1198    // check trdid argument
1199        if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) || 
1200        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
1201
1202    // get root of list of process descriptors in target cluster
1203    hal_remote_memcpy( XPTR( local_cxy  , &root ),
1204                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
1205                       sizeof(xlist_entry_t) );
1206
1207    // get extended pointer on lock protecting the list of processes
1208    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
1209
1210    // take the lock protecting the list of processes in target cluster
1211    remote_spinlock_lock( lock_xp );
1212
1213    // loop on list of process in target cluster to find the PID process
1214    xptr_t  iter;
1215    bool_t  found = false;
1216    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
1217    {
1218        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1219        target_process_ptr = (process_t *)GET_PTR( target_process_xp );
1220        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
1221        if( target_process_pid == pid )
1222        {
1223            found = true;
1224            break;
1225        }
1226    }
1227
1228    // release the lock protecting the list of processes in target cluster
1229    remote_spinlock_unlock( lock_xp );
1230
1231    // check PID found
1232    if( found == false ) return XPTR_NULL;
1233
1234    // get target thread local pointer
1235    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
1236    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
1237
1238    if( target_thread_ptr == NULL )  return XPTR_NULL;
1239
1240    return XPTR( target_cxy , target_thread_ptr );
1241}
1242
Note: See TracBrowser for help on using the repository browser.