source: trunk/kernel/kern/thread.c @ 443

Last change on this file since 443 was 443, checked in by alain, 6 years ago

Fix a few bugs found while debugging the sort multi-threaded application.

File size: 43.1 KB
/*
 * thread.c - implementation of thread operations (user & kernel)
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_context.h>
#include <hal_irqmask.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <memcpy.h>
#include <printk.h>
#include <cluster.h>
#include <process.h>
#include <scheduler.h>
#include <dev_pic.h>
#include <core.h>
#include <list.h>
#include <xlist.h>
#include <page.h>
#include <kmem.h>
#include <ppm.h>
#include <thread.h>

//////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////////

extern process_t      process_zero;

//////////////////////////////////////////////////////////////////////////////////////
// This function returns a printable string for the thread type.
//////////////////////////////////////////////////////////////////////////////////////
char * thread_type_str( uint32_t type )
{
    if     ( type == THREAD_USER   ) return "USR";
    else if( type == THREAD_RPC    ) return "RPC";
    else if( type == THREAD_DEV    ) return "DEV";
    else if( type == THREAD_IDLE   ) return "IDL";
    else                             return "undefined";
}

/////////////////////////////////////////////////////////////////////////////////////
// This static function allocates physical memory for a thread descriptor.
// It can be called by the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
/////////////////////////////////////////////////////////////////////////////////////
// @ return pointer on thread descriptor if success / return NULL if failure.
/////////////////////////////////////////////////////////////////////////////////////
static thread_t * thread_alloc()
{
    page_t       * page;   // pointer on page descriptor containing thread descriptor
    kmem_req_t     req;    // kmem request

    // allocates memory for thread descriptor + kernel stack
    req.type  = KMEM_PAGE;
    req.size  = CONFIG_THREAD_DESC_ORDER;
    req.flags = AF_KERNEL | AF_ZERO;
    page      = kmem_alloc( &req );

    if( page == NULL ) return NULL;

    // return pointer on new thread descriptor
    xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
    return (thread_t *)GET_PTR( base_xp );

}  // end thread_alloc()


/////////////////////////////////////////////////////////////////////////////////////
// This static function releases the physical memory for a thread descriptor.
// It is called by the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
/////////////////////////////////////////////////////////////////////////////////////
// @ thread  : pointer on thread descriptor.
/////////////////////////////////////////////////////////////////////////////////////
static void thread_release( thread_t * thread )
{
    kmem_req_t   req;

    xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );

    req.type  = KMEM_PAGE;
    req.ptr   = GET_PTR( base_xp );
    kmem_free( &req );
}
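
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): the two helpers above must stay
// symmetric, because thread_release() recovers the page descriptor from the base
// address that thread_alloc() returned. A minimal round-trip check, assuming the
// ppm_page2base() / ppm_base2page() pair is inverse as used above:
/////////////////////////////////////////////////////////////////////////////////////
#if 0
static void thread_alloc_release_sketch( void )
{
    thread_t * t = thread_alloc();            // descriptor + kernel stack
    if( t == NULL ) return;                   // allocation can fail

    // base -> page -> base must return the original pointer
    xptr_t page_xp = ppm_base2page( XPTR( local_cxy , t ) );
    xptr_t base_xp = ppm_page2base( page_xp );
    assert( ((thread_t *)GET_PTR( base_xp ) == t) , __FUNCTION__ ,
            "page/base conversion must be symmetric" );

    thread_release( t );                      // give the pages back to kmem
}
#endif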

/////////////////////////////////////////////////////////////////////////////////////
// This static function initializes a thread descriptor (kernel or user).
// It can be called by the four functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
// - thread_idle_init()
// It updates the local DQDT.
/////////////////////////////////////////////////////////////////////////////////////
// @ thread       : pointer on thread descriptor
// @ process      : pointer on process descriptor.
// @ type         : thread type.
// @ func         : pointer on thread entry function.
// @ args         : pointer on thread entry function arguments.
// @ core_lid     : target core local index.
// @ u_stack_base : stack base (user thread only)
// @ u_stack_size : stack size (user thread only)
/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            intptr_t        u_stack_base,
                            uint32_t        u_stack_size )
{
    error_t        error;
    trdid_t        trdid;      // allocated thread identifier

    cluster_t    * local_cluster = LOCAL_CLUSTER;

#if DEBUG_THREAD_USER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_INIT < cycle )
printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
#endif

    // register new thread in process descriptor, and get a TRDID
    error = process_register_thread( process, thread , &trdid );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
        return EINVAL;
    }

    // compute thread descriptor size without kernel stack
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;

    // initialize new thread descriptor
    thread->trdid           = trdid;
    thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;
    thread->core            = &local_cluster->core_tbl[core_lid];
    thread->process         = process;

    thread->local_locks     = 0;
    thread->remote_locks    = 0;

#if CONFIG_LOCKS_DEBUG
    list_root_init( &thread->locks_root );
    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
#endif

    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread + desc_size;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no user defined placement for fork
    thread->fork_cxy        = 0;            // user defined target cluster for fork
    thread->blocked         = THREAD_BLOCKED_GLOBAL;

    // reset sched list
    list_entry_init( &thread->sched_list );

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialize join_lock
    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );

    // initialise signature
    thread->signature = THREAD_SIGNATURE;

    // FIXME define and call an architecture specific hal_thread_init()
    // function to initialise the save_sr field
    thread->save_sr = 0xFF13;

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    // update DQDT
    dqdt_update_threads( 1 );

#if DEBUG_THREAD_USER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_INIT < cycle )
printk("\n[DBG] %s : thread %x exit after init of thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
#endif

    return 0;

} // end thread_init()
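
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): thread_init() relies on the
// <signature> field being the last field of thread_t, so that <desc_size> covers
// the whole descriptor and the rest of the CONFIG_THREAD_DESC_SIZE block can serve
// as kernel stack:
//
//   (intptr_t)thread --> +------------------------+
//                        |  thread descriptor     |  desc_size bytes
//   k_stack_base     --> +------------------------+  (up to & including signature)
//                        |  kernel stack          |  k_stack_size =
//                        +------------------------+  CONFIG_THREAD_DESC_SIZE - desc_size
//
// A minimal sanity check of that layout, as a hypothetical helper:
#if 0
static inline void thread_layout_check( thread_t * thread )
{
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;

    assert( (thread->k_stack_base == (intptr_t)thread + desc_size) , __FUNCTION__ ,
            "kernel stack must start right after the signature field" );

    assert( (thread->k_stack_base + thread->k_stack_size ==
             (intptr_t)thread + CONFIG_THREAD_DESC_SIZE) , __FUNCTION__ ,
            "descriptor + kernel stack must fill the allocated block" );
}
#endif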

/////////////////////////////////////////////////////////
error_t thread_user_create( pid_t             pid,
                            void            * start_func,
                            void            * start_arg,
                            pthread_attr_t  * attr,
                            thread_t       ** new_thread )
{
    error_t        error;
    thread_t     * thread;       // pointer on created thread descriptor
    process_t    * process;      // pointer to local process descriptor
    lid_t          core_lid;     // selected core local index
    vseg_t       * vseg;         // stack vseg

    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );

#if DEBUG_THREAD_USER_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, pid , local_cxy , cycle );
#endif

    // get process descriptor local copy
    process = process_get_local_copy( pid );

    if( process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
               __FUNCTION__ , pid );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : process descriptor = %x for process %x in cluster %x\n",
__FUNCTION__, process , pid , local_cxy );
#endif

    // select a target core in local cluster
    if( attr->attributes & PT_ATTR_CORE_DEFINED )
    {
        core_lid = attr->lid;
        if( core_lid >= LOCAL_CLUSTER->cores_nr )
        {
            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
            __FUNCTION__ , core_lid );
            return EINVAL;
        }
    }
    else
    {
        core_lid = cluster_select_local_core();
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : core[%x,%d] selected\n",
__FUNCTION__, local_cxy , core_lid );
#endif

    // allocate a stack from local VMM
    vseg = vmm_create_vseg( process,
                            VSEG_TYPE_STACK,
                            0,                 // size unused
                            0,                 // length unused
                            0,                 // file_offset unused
                            0,                 // file_size unused
                            XPTR_NULL,         // mapper_xp unused
                            local_cxy );

    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
        return ENOMEM;
    }

    // allocate memory for thread descriptor
    thread = thread_alloc();

    if( thread == NULL )
    {
        printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread descriptor %x allocated\n",
__FUNCTION__, thread );
#endif

    // initialize thread descriptor
    error = thread_init( thread,
                         process,
                         THREAD_USER,
                         start_func,
                         start_arg,
                         core_lid,
                         vseg->min,
                         vseg->max - vseg->min );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return EINVAL;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread descriptor %x initialised / trdid = %x\n",
__FUNCTION__, thread , thread->trdid );
#endif

    // set DETACHED flag if required
    if( attr->attributes & PT_ATTR_DETACH )
    {
        thread->flags |= THREAD_FLAG_DETACHED;
    }

    // allocate & initialize CPU context
    if( hal_cpu_context_create( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

    // allocate FPU context
    if( hal_fpu_context_alloc( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
        vmm_remove_vseg( vseg );
        thread_release( thread );
        return ENOMEM;
    }

#if DEBUG_THREAD_USER_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / new_thread %x in process %x / core %d / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread->trdid , pid , core_lid, cycle );
#endif

    *new_thread = thread;
    return 0;

}  // end thread_user_create()
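
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): a typical caller of the function
// above is the pthread_create() syscall path. The sequence below is a minimal
// sketch, assuming a hypothetical sys_thread_create()-like context where <pid>,
// <start_func>, <start_arg> and <attr> have already been checked and copied into
// kernel space:
#if 0
static error_t thread_user_create_usage( pid_t            pid,
                                         void           * start_func,
                                         void           * start_arg,
                                         pthread_attr_t * attr )
{
    thread_t * new_thread;

    // create and initialize the thread in the local cluster
    error_t error = thread_user_create( pid, start_func, start_arg,
                                        attr, &new_thread );
    if( error ) return error;

    // the thread was created blocked (thread_init() sets THREAD_BLOCKED_GLOBAL);
    // it must be explicitly unblocked before the scheduler can select it
    thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}
#endif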

///////////////////////////////////////////////////////
error_t thread_user_fork( xptr_t      parent_thread_xp,
                          process_t * child_process,
                          thread_t ** child_thread )
{
    error_t        error;
    thread_t     * child_ptr;        // local pointer on local child thread
    lid_t          core_lid;         // selected core local index

    thread_t     * parent_ptr;       // local pointer on remote parent thread
    cxy_t          parent_cxy;       // parent thread cluster
    process_t    * parent_process;   // local pointer on parent process
    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT

    void         * func;             // parent thread entry_func
    void         * args;             // parent thread entry_args
    intptr_t       base;             // parent thread u_stack_base
    uint32_t       size;             // parent thread u_stack_size
    uint32_t       flags;            // parent_thread flags
    vpn_t          vpn_base;         // parent thread stack vpn_base
    vpn_t          vpn_size;         // parent thread stack vpn_size
    reg_t        * uzone;            // parent thread pointer on uzone

    vseg_t       * vseg;             // child thread STACK vseg

#if DEBUG_THREAD_USER_FORK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
#endif

    // select a target core in local cluster
    core_lid = cluster_select_local_core();

    // get cluster and local pointer on parent thread descriptor
    parent_cxy = GET_CXY( parent_thread_xp );
    parent_ptr = (thread_t *)GET_PTR( parent_thread_xp );

    // get relevant fields from parent thread
    func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
    size  = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
    flags =           hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags         ));
    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));

    vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
    vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;

    // get pointer on parent process in parent thread cluster
    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
                                                        &parent_ptr->process ) );

    // get extended pointer on parent GPT in parent thread cluster
    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );

    // allocate memory for child thread descriptor
    child_ptr = thread_alloc();
    if( child_ptr == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
        return -1;
    }

    // initialize thread descriptor
    error = thread_init( child_ptr,
                         child_process,
                         THREAD_USER,
                         func,
                         args,
                         core_lid,
                         base,
                         size );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
        thread_release( child_ptr );
        return EINVAL;
    }

    // return child pointer
    *child_thread = child_ptr;

    // set detached flag if required
    if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;

    // update uzone pointer in child thread descriptor
    child_ptr->uzone_current = (char *)((intptr_t)uzone +
                                        (intptr_t)child_ptr -
                                        (intptr_t)parent_ptr );

    // allocate CPU context for child thread
    if( hal_cpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }

    // allocate FPU context for child thread
    if( hal_fpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }

    // create and initialize STACK vseg
    vseg = vseg_alloc();
    vseg_init( vseg,
               VSEG_TYPE_STACK,
               base,
               size,
               vpn_base,
               vpn_size,
               0, 0, XPTR_NULL,                         // not a file vseg
               local_cxy );

    // register STACK vseg in local child VSL
    vseg_attach( &child_process->vmm , vseg );

    // copy all valid STACK GPT entries
    vpn_t          vpn;
    bool_t         mapped;
    ppn_t          ppn;
    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
    {
        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
                                  parent_gpt_xp,
                                  vpn,
                                  true,                 // set cow
                                  &ppn,
                                  &mapped );
        if( error )
        {
            vseg_detach( &child_process->vmm , vseg );
            vseg_free( vseg );
            thread_release( child_ptr );
            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
            return -1;
        }

        // increment pending forks counter for the page if mapped
        if( mapped )
        {
            xptr_t   page_xp  = ppm_ppn2page( ppn );
            cxy_t    page_cxy = GET_CXY( page_xp );
            page_t * page_ptr = (page_t *)GET_PTR( page_xp );
            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );

#if (DEBUG_THREAD_USER_FORK & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
__FUNCTION__, CURRENT_THREAD, vpn );
#endif

        }
    }

    // set COW flag for all mapped entries of STACK vseg in parent thread GPT
    hal_gpt_set_cow( parent_gpt_xp,
                     vpn_base,
                     vpn_size );

#if DEBUG_THREAD_USER_FORK
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
#endif

    return 0;

}  // end thread_user_fork()
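
/////////////////////////////////////////////////////////////////////////////////////
// Editor's note (not part of the original file): the uzone update above is a
// pointer relocation. <uzone_current> points INSIDE the parent descriptor, so the
// same offset must be rebased onto the child descriptor:
//
//     child_uzone = parent_uzone + (child_base - parent_base)
//
// A minimal sketch of the same idiom as a hypothetical helper:
#if 0
static inline void * thread_rebase_ptr( void     * parent_field,   // ptr in parent
                                        thread_t * parent_ptr,     // parent base
                                        thread_t * child_ptr )     // child base
{
    // keep the field's offset, change the descriptor it is relative to
    return (void *)( (intptr_t)parent_field +
                     (intptr_t)child_ptr -
                     (intptr_t)parent_ptr );
}
#endif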

/////////////////////////////////////////////////////////
error_t thread_kernel_create( thread_t     ** new_thread,
                              thread_type_t   type,
                              void          * func,
                              void          * args,
                              lid_t           core_lid )
{
    error_t        error;
    thread_t     * thread;       // pointer on new thread descriptor

    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
    __FUNCTION__ , "illegal thread type" );

    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
            __FUNCTION__ , "illegal core_lid" );

#if DEBUG_THREAD_KERNEL_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KERNEL_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread_type_str(type), cycle );
#endif

    // allocate memory for new thread descriptor
    thread = thread_alloc();

    if( thread == NULL ) return ENOMEM;

    // initialize thread descriptor
    error = thread_init( thread,
                         &process_zero,
                         type,
                         func,
                         args,
                         core_lid,
                         0 , 0 );  // no user stack for a kernel thread

    if( error ) // release allocated memory for thread descriptor
    {
        thread_release( thread );
        return EINVAL;
    }

    // allocate & initialize CPU context
    hal_cpu_context_create( thread );

#if DEBUG_THREAD_KERNEL_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KERNEL_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
#endif

    *new_thread = thread;
    return 0;

} // end thread_kernel_create()
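
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): kernel threads are all attached
// to process_zero and run entirely on their kernel stack. A minimal sketch of
// spawning an RPC server thread on a given core, assuming a hypothetical
// rpc_thread_func() entry point defined elsewhere in the kernel:
#if 0
extern void rpc_thread_func( void * args );    // hypothetical entry point

static error_t spawn_rpc_thread( lid_t core_lid )
{
    thread_t * thread;

    error_t error = thread_kernel_create( &thread,
                                          THREAD_RPC,
                                          &rpc_thread_func,
                                          NULL,            // no arguments
                                          core_lid );
    if( error ) return error;

    // like user threads, the new thread starts blocked and must be unblocked
    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
    return 0;
}
#endif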

/////////////////////////////////////////////////
error_t thread_idle_init( thread_t      * thread,
                          thread_type_t   type,
                          void          * func,
                          void          * args,
                          lid_t           core_lid )
{
    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );

    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , __FUNCTION__ , "illegal core index" );

    error_t  error = thread_init( thread,
                                  &process_zero,
                                  type,
                                  func,
                                  args,
                                  core_lid,
                                  0 , 0 );   // no user stack for a kernel thread

    // allocate & initialize CPU context if success
    if( error == 0 ) hal_cpu_context_create( thread );

    return error;

}  // end thread_idle_init()

///////////////////////////////////////////////////////////////////////////////////////
// TODO: check that all memory dynamically allocated during thread execution
// has been released, using a cache of mmap requests. [AG]
///////////////////////////////////////////////////////////////////////////////////////
bool_t thread_destroy( thread_t * thread )
{
    reg_t        save_sr;
    bool_t       last_thread;

    process_t  * process    = thread->process;
    core_t     * core       = thread->core;

#if DEBUG_THREAD_DESTROY
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
#endif

    assert( (thread->local_locks == 0) , __FUNCTION__ ,
    "local lock not released for thread %x in process %x", thread->trdid, process->pid );

    assert( (thread->remote_locks == 0) , __FUNCTION__ ,
    "remote lock not released for thread %x in process %x", thread->trdid, process->pid );

    // update instrumentation values
    process->vmm.pgfault_nr += thread->info.pgfault_nr;

    // release memory allocated for CPU context and FPU context
    hal_cpu_context_destroy( thread );
    if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );

    // release FPU ownership if required
    hal_disable_irq( &save_sr );
    if( core->fpu_owner == thread )
    {
        core->fpu_owner = NULL;
        hal_fpu_disable();
    }
    hal_restore_irq( save_sr );

    // remove thread from process th_tbl[]
    last_thread = process_remove_thread( thread );

    // update DQDT
    dqdt_update_threads( -1 );

    // invalidate thread descriptor
    thread->signature = 0;

    // release memory for thread descriptor
    thread_release( thread );

#if DEBUG_THREAD_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
#endif

    return last_thread;

}   // end thread_destroy()

//////////////////////////////////////////////////
inline void thread_set_req_ack( thread_t * target,
                                uint32_t * rsp_count )
{
    reg_t    save_sr;   // for critical section

    // get pointer on target thread scheduler
    scheduler_t * sched = &target->core->scheduler;

    // wait until scheduler is ready to handle a new request
    while( sched->req_ack_pending ) asm volatile( "nop" );

    // enter critical section
    hal_disable_irq( &save_sr );

    // set request in target thread scheduler
    sched->req_ack_pending = true;

    // set ack request in target thread "flags"
    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );

    // set pointer on responses counter in target thread
    target->ack_rsp_count = rsp_count;

    // exit critical section
    hal_restore_irq( save_sr );

    hal_fence();

}  // thread_set_req_ack()

/////////////////////////////////////////////////////
inline void thread_reset_req_ack( thread_t * target )
{
    reg_t    save_sr;   // for critical section

    // get pointer on target thread scheduler
    scheduler_t * sched = &target->core->scheduler;

    // check signal pending in scheduler
    assert( sched->req_ack_pending , __FUNCTION__ , "no pending signal" );

    // enter critical section
    hal_disable_irq( &save_sr );

    // reset signal in scheduler
    sched->req_ack_pending = false;

    // reset signal in thread "flags"
    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );

    // reset pointer on responses counter
    target->ack_rsp_count = NULL;

    // exit critical section
    hal_restore_irq( save_sr );

    hal_fence();

}  // thread_reset_req_ack()
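
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): the two helpers above implement
// a request/acknowledge handshake with a target core's scheduler. A requester sets
// THREAD_FLAG_REQ_ACK together with a shared response counter, then polls that
// counter; the target side is expected to decrement it once the request has been
// observed. A minimal sketch for a single target, assuming the target scheduler
// decrements <rsp_count> when it acknowledges:
#if 0
static void wait_target_ack( thread_t * target )
{
    uint32_t rsp_count = 1;                    // one expected acknowledge

    // post the request to the target thread / scheduler
    thread_set_req_ack( target , &rsp_count );

    // poll until the target scheduler has acknowledged
    while( rsp_count != 0 ) asm volatile( "nop" );
}
#endif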

////////////////////////////////
inline bool_t thread_can_yield()
{
    thread_t * this = CURRENT_THREAD;
    return (this->local_locks == 0) && (this->remote_locks == 0);
}

/////////////////////////
void thread_check_sched()
{
    thread_t * this = CURRENT_THREAD;

    if( (this->local_locks == 0) &&
        (this->remote_locks == 0) &&
        (this->flags & THREAD_FLAG_SCHED) )
    {
        this->flags &= ~THREAD_FLAG_SCHED;
        sched_yield( "delayed scheduling" );
    }

}  // end thread_check_sched()
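
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): thread_check_sched() implements
// delayed descheduling. When a yield is requested while the thread still holds
// locks, the THREAD_FLAG_SCHED flag can be set instead, and the actual yield
// happens at the next point where the thread holds no lock. A hypothetical
// lock-release path illustrating the intended call site:
#if 0
static void spinlock_release_sketch( spinlock_t * lock )
{
    thread_t * this = CURRENT_THREAD;

    // ... actual lock release code ...
    this->local_locks--;                // one less lock held

    // honour a yield that was postponed while the lock was held
    thread_check_sched();
}
#endif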

//////////////////////////////////////
void thread_block( xptr_t   thread_xp,
                   uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread_xp );
    thread_t * ptr = GET_PTR( thread_xp );

    // set blocking cause
    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
    hal_fence();

#if DEBUG_THREAD_BLOCK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_BLOCK < cycle )
printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
#endif

#if (DEBUG_THREAD_BLOCK & 1)
if( DEBUG_THREAD_BLOCK < cycle )
sched_display( ptr->core->lid );
#endif

} // end thread_block()

////////////////////////////////////////////
uint32_t thread_unblock( xptr_t   thread_xp,
                         uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread_xp );
    thread_t * ptr = GET_PTR( thread_xp );

    // reset blocking cause
    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
    hal_fence();

#if DEBUG_THREAD_BLOCK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_BLOCK < cycle )
printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
#endif

#if (DEBUG_THREAD_BLOCK & 1)
if( DEBUG_THREAD_BLOCK < cycle )
sched_display( ptr->core->lid );
#endif

    // return a non zero value if the cause bit is modified
    return( previous & cause );

}  // end thread_unblock()
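
/////////////////////////////////////////////////////////////////////////////////////
// Editor's note (not part of the original file): <blocked> is a bit-vector of
// blocking causes, so a thread is runnable only when ALL causes are cleared. The
// non-zero return of thread_unblock() tells the caller whether it actually cleared
// the bit, which is useful to detect double-unblock races. A minimal sketch:
#if 0
static void block_unblock_sketch( xptr_t thread_xp )
{
    // block on two independent causes
    thread_block( thread_xp , THREAD_BLOCKED_GLOBAL );
    thread_block( thread_xp , THREAD_BLOCKED_JOIN );

    // clearing one cause is not enough : THREAD_BLOCKED_JOIN is still set
    uint32_t first  = thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL );

    // a second unblock of the same cause returns 0 : the bit was already cleared
    uint32_t second = thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL );

    assert( ((first != 0) && (second == 0)) , __FUNCTION__ ,
            "thread_unblock() must report whether it modified the cause bit" );
}
#endif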

/*

////////////////////////////////////
void thread_kill( xptr_t  target_xp,
                  bool_t  is_exit,
                  bool_t  is_forced )
{
    reg_t       save_sr;                // for critical section
    bool_t      attached;               // target thread in attached mode
    bool_t      join_done;              // joining thread arrived first
    xptr_t      killer_xp;              // extended pointer on killer thread (this)
    thread_t  * killer_ptr;             // pointer on killer thread (this)
    cxy_t       target_cxy;             // target thread cluster
    thread_t  * target_ptr;             // pointer on target thread
    xptr_t      joining_xp;             // extended pointer on joining thread
    thread_t  * joining_ptr;            // pointer on joining thread
    cxy_t       joining_cxy;            // joining thread cluster
    pid_t       target_pid;             // target process PID
    cxy_t       owner_cxy;              // target process owner cluster
    trdid_t     target_trdid;           // target thread identifier
    ltid_t      target_ltid;            // target thread local index
    xptr_t      process_state_xp;       // extended pointer on <term_state> in process

    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
    xptr_t      target_process_xp;      // extended pointer on target thread <process>

    process_t * target_process;         // pointer on target thread process

    // get target thread pointer and cluster
    target_cxy = GET_CXY( target_xp );
    target_ptr = GET_PTR( target_xp );

    // get killer thread pointers
    killer_ptr = CURRENT_THREAD;
    killer_xp  = XPTR( local_cxy , killer_ptr );

#if DEBUG_THREAD_DELETE
uint32_t cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    // block the target thread
    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );

    // get target thread attached mode
    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0);

    // synchronize with the joining thread
    // if the target thread is attached && not forced

    if( attached  && (is_forced == false) )
    {
        // build extended pointers on target thread join fields
        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );

        // enter critical section
        hal_disable_irq( &save_sr );

        // take the join_lock in target thread descriptor
        remote_spinlock_lock( target_join_lock_xp );

        // get join_done from target thread descriptor
        join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);

        if( join_done )     // joining thread arrived first
        {
            // get extended pointer on joining thread
            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
            joining_ptr = GET_PTR( joining_xp );
            joining_cxy = GET_CXY( joining_xp );

            // reset the join_done flag in target thread
            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );

            // unblock the joining thread
            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );

            // release the join_lock in target thread descriptor
            remote_spinlock_unlock( target_join_lock_xp );

            // restore IRQs
            hal_restore_irq( save_sr );
        }
        else                // this thread arrived first
        {
            // set the kill_done flag in target thread
            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );

            // block this thread on BLOCKED_JOIN
            thread_block( killer_xp , THREAD_BLOCKED_JOIN );

            // set extended pointer on killer thread in target thread
            hal_remote_swd( target_join_xp_xp , killer_xp );

            // release the join_lock in target thread descriptor
            remote_spinlock_unlock( target_join_lock_xp );

            // deschedule
            sched_yield( "killer thread wait joining thread" );

            // restore IRQs
            hal_restore_irq( save_sr );
        }
    }  // end if attached

    // - if the target thread is the main thread
    //   => synchronize with the parent process main thread
    // - if the target thread is not the main thread
    //   => simply mark the target thread for delete

    // get pointer on target thread process
    target_process_xp  = XPTR( target_cxy , &target_ptr->process );
    target_process     = (process_t *)hal_remote_lpt( target_process_xp );

    // get target process owner cluster
    target_pid = hal_remote_lw( XPTR( target_cxy , &target_process->pid ) );
    owner_cxy = CXY_FROM_PID( target_pid );

    // get target thread local index
    target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
    target_ltid  = LTID_FROM_TRDID( target_trdid );

    if( (owner_cxy == target_cxy) && (target_ltid == 0) )     // main thread
    {
        // get extended pointer on term_state in target process owner cluster
        process_state_xp = XPTR( owner_cxy , &target_process->term_state );

        // set termination info in target process owner
        if( is_exit ) hal_remote_atomic_or( process_state_xp , PROCESS_TERM_EXIT );
        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    }
    else                                                      // not the main thread
    {
        // set the REQ_DELETE flag in target thread descriptor
        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    }

}  // end thread_kill()

*/

//////////////////////////////////////
void thread_delete( xptr_t  target_xp,
                    pid_t   pid,
                    bool_t  is_forced )
{
    reg_t       save_sr;                // for critical section
    bool_t      target_join_done;       // joining thread arrived first
    bool_t      target_attached;        // target thread attached
    xptr_t      killer_xp;              // extended pointer on killer thread (this)
    thread_t  * killer_ptr;             // pointer on killer thread (this)
    cxy_t       target_cxy;             // target thread cluster
    thread_t  * target_ptr;             // pointer on target thread
    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    uint32_t    target_flags;           // target thread <flags> value
    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
    trdid_t     target_trdid;           // target thread identifier
    ltid_t      target_ltid;            // target thread local index
    xptr_t      joining_xp;             // extended pointer on joining thread
    thread_t  * joining_ptr;            // pointer on joining thread
    cxy_t       joining_cxy;            // joining thread cluster
    cxy_t       owner_cxy;              // process owner cluster

    // get target thread pointers, identifiers, and flags
    target_cxy      = GET_CXY( target_xp );
    target_ptr      = GET_PTR( target_xp );
    target_trdid    = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
    target_ltid     = LTID_FROM_TRDID( target_trdid );
    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    target_flags    = hal_remote_lw( target_flags_xp );

    // get killer thread pointers
    killer_ptr = CURRENT_THREAD;
    killer_xp  = XPTR( local_cxy , killer_ptr );

#if DEBUG_THREAD_DELETE
uint32_t cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

    // target thread cannot be the main thread, because the main thread
    // must be deleted by the parent process sys_wait() function
    owner_cxy = CXY_FROM_PID( pid );
    assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), __FUNCTION__,
    "target thread cannot be the main thread\n" );

    // block the target thread
    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );

    // get attached flag from target thread descriptor
    // (the thread is attached when the DETACHED flag is NOT set)
    target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0);

    // synchronize with the joining thread if the target thread is attached
    if( target_attached && (is_forced == false) )
    {
        // build extended pointers on target thread join fields
        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );

        // enter critical section
        hal_disable_irq( &save_sr );

        // take the join_lock in target thread descriptor
        remote_spinlock_lock( target_join_lock_xp );

        // get join_done from target thread descriptor
        target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);

        if( target_join_done )  // joining thread arrived first => unblock the joining thread
        {
            // get extended pointer on joining thread
            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
            joining_ptr = GET_PTR( joining_xp );
            joining_cxy = GET_CXY( joining_xp );

            // reset the join_done flag in target thread
            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );

            // unblock the joining thread
            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );

            // release the join_lock in target thread descriptor
            remote_spinlock_unlock( target_join_lock_xp );

            // restore IRQs
            hal_restore_irq( save_sr );
        }
        else                // this thread arrived first => register flags and deschedule
        {
            // set the kill_done flag in target thread
            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );

            // block this thread on BLOCKED_JOIN
            thread_block( killer_xp , THREAD_BLOCKED_JOIN );

            // set extended pointer on killer thread in target thread
            hal_remote_swd( target_join_xp_xp , killer_xp );

            // release the join_lock in target thread descriptor
            remote_spinlock_unlock( target_join_lock_xp );

            // deschedule
            sched_yield( "killer thread wait joining thread" );

            // restore IRQs
            hal_restore_irq( save_sr );
        }
    }  // end if attached

    // set the REQ_DELETE flag in target thread descriptor
    hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
__FUNCTION__, killer_ptr, target_ptr, cycle );
#endif

}  // end thread_delete()
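
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): thread_delete() is one half of
// a rendezvous; the other half runs in the joining thread (the pthread_join()
// path). Whichever side arrives first registers itself in <join_xp>, sets its
// *_DONE flag, and blocks; the second side finds the flag, unblocks the first, and
// both proceed. A minimal sketch of the joining side, under the same locking
// discipline as above:
#if 0
static void join_side_sketch( xptr_t target_xp )
{
    cxy_t      target_cxy = GET_CXY( target_xp );
    thread_t * target_ptr = GET_PTR( target_xp );

    xptr_t flags_xp = XPTR( target_cxy , &target_ptr->flags );
    xptr_t lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
    xptr_t join_xp  = XPTR( target_cxy , &target_ptr->join_xp );

    remote_spinlock_lock( lock_xp );

    if( hal_remote_lw( flags_xp ) & THREAD_FLAG_KILL_DONE )   // killer arrived first
    {
        // unblock the killer thread registered in <join_xp>
        hal_remote_atomic_and( flags_xp , ~THREAD_FLAG_KILL_DONE );
        thread_unblock( (xptr_t)hal_remote_lwd( join_xp ) , THREAD_BLOCKED_JOIN );
        remote_spinlock_unlock( lock_xp );
    }
    else                                                      // joiner arrived first
    {
        // register this thread, raise JOIN_DONE, block and deschedule
        hal_remote_swd( join_xp , XPTR( local_cxy , CURRENT_THREAD ) );
        hal_remote_atomic_or( flags_xp , THREAD_FLAG_JOIN_DONE );
        thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_JOIN );
        remote_spinlock_unlock( lock_xp );
        sched_yield( "joining thread waits target deletion" );
    }
}
#endif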

///////////////////////
void thread_idle_func()
{
    while( 1 )
    {
        // unmask IRQs
        hal_enable_irq( NULL );

        // force core to low-power mode (optional)
        if( CONFIG_THREAD_IDLE_MODE_SLEEP )
        {

#if DEBUG_THREAD_IDLE
uint32_t cycle  = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_THREAD_IDLE < cycle )
printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
__FUNCTION__, this, local_cxy, this->core->lid, cycle );
#endif

            hal_core_sleep();

#if DEBUG_THREAD_IDLE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_IDLE < cycle )
printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
__FUNCTION__, this, local_cxy, this->core->lid, cycle );
#endif

        }

        // search a runnable thread
        sched_yield( "IDLE" );
    }
}  // end thread_idle_func()


/////////////////////////////////////////////////
void thread_user_time_update( thread_t * thread )
{
    // TODO
    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}

///////////////////////////////////////////////////
void thread_kernel_time_update( thread_t * thread )
{
    // TODO
    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}

/////////////////////////////////////
xptr_t thread_get_xptr( pid_t    pid,
                        trdid_t  trdid )
{
    cxy_t         target_cxy;          // target thread cluster identifier
    ltid_t        target_thread_ltid;  // target thread local index
    thread_t    * target_thread_ptr;   // target thread local pointer
    xptr_t        target_process_xp;   // extended pointer on target process descriptor
    process_t   * target_process_ptr;  // local pointer on target process descriptor
    pid_t         target_process_pid;  // target process identifier
    xlist_entry_t root;                // root of list of process in target cluster
    xptr_t        lock_xp;             // extended pointer on lock protecting this list

    // get target cluster identifier and local thread identifier
    target_cxy         = CXY_FROM_TRDID( trdid );
    target_thread_ltid = LTID_FROM_TRDID( trdid );

    // check trdid argument
    if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) ||
        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;

    // get root of list of process descriptors in target cluster
    hal_remote_memcpy( XPTR( local_cxy  , &root ),
                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
                       sizeof(xlist_entry_t) );

    // get extended pointer on lock protecting the list of processes
    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // take the lock protecting the list of processes in target cluster
    remote_spinlock_lock( lock_xp );

    // loop on list of process in target cluster to find the PID process
    xptr_t  iter;
    bool_t  found = false;
    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
    {
        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
        target_process_ptr = (process_t *)GET_PTR( target_process_xp );
        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
        if( target_process_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes in target cluster
    remote_spinlock_unlock( lock_xp );

    // check PID found
    if( found == false ) return XPTR_NULL;

    // get target thread local pointer
    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );

    if( target_thread_ptr == NULL )  return XPTR_NULL;

    return XPTR( target_cxy , target_thread_ptr );

}  // end thread_get_xptr()
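
/////////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original file): thread_get_xptr() is the bridge
// from user-level identifiers to kernel descriptors, since the TRDID encodes the
// cluster where the thread descriptor lives. A minimal sketch of a syscall-style
// caller, e.g. a hypothetical sys_thread_block(pid, trdid):
#if 0
static error_t sys_thread_block_sketch( pid_t pid , trdid_t trdid )
{
    // resolve (pid, trdid) to an extended pointer, whatever the cluster
    xptr_t thread_xp = thread_get_xptr( pid , trdid );

    // the pair can name a non-existing thread : always check for XPTR_NULL
    if( thread_xp == XPTR_NULL ) return EINVAL;

    // all thread services then work transparently on the remote descriptor
    thread_block( thread_xp , THREAD_BLOCKED_GLOBAL );
    return 0;
}
#endif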