source: trunk/kernel/kern/thread.c @ 624

Last change on this file since 624 was 624, checked in by alain, 5 years ago

Fix several bugs to use the instruction MMU in kernel mode
in place of the instruction address extension register,
and remove the "kentry" segment.

This version runs on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase register defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor Interrupts.

File size: 49.2 KB
/*
 * thread.c -   thread operations implementation (user & kernel)
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_context.h>
#include <hal_irqmask.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <memcpy.h>
#include <printk.h>
#include <cluster.h>
#include <process.h>
#include <scheduler.h>
#include <dev_pic.h>
#include <core.h>
#include <list.h>
#include <xlist.h>
#include <page.h>
#include <kmem.h>
#include <ppm.h>
#include <thread.h>
#include <rpc.h>

//////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////////

extern process_t            process_zero;       // allocated in kernel_init.c
extern char               * lock_type_str[];    // allocated in kernel_init.c
extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c

//////////////////////////////////////////////////////////////////////////////////////
// This function returns a printable string for the thread type.
//////////////////////////////////////////////////////////////////////////////////////
const char * thread_type_str( thread_type_t type )
{
  switch ( type ) {
  case THREAD_USER:   return "USR";
  case THREAD_RPC:    return "RPC";
  case THREAD_DEV:    return "DEV";
  case THREAD_IDLE:   return "IDL";
  default:            return "undefined";
  }
}

/////////////////////////////////////////////////////////////////////////////////////
// This static function allocates physical memory for a thread descriptor.
// It can be called by the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
/////////////////////////////////////////////////////////////////////////////////////
// @ return pointer on thread descriptor if success / return NULL if failure.
/////////////////////////////////////////////////////////////////////////////////////
static thread_t * thread_alloc( void )
{
    page_t       * page;   // pointer on page descriptor containing thread descriptor
    kmem_req_t     req;    // kmem request

    // allocate memory for thread descriptor + kernel stack
    req.type  = KMEM_PAGE;
    req.size  = CONFIG_THREAD_DESC_ORDER;
    req.flags = AF_KERNEL | AF_ZERO;
    page      = kmem_alloc( &req );

    if( page == NULL ) return NULL;

    // return pointer on new thread descriptor
    xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
    return GET_PTR( base_xp );

}  // end thread_alloc()

/////////////////////////////////////////////////////////////////////////////////////
// This static function releases the physical memory allocated for a thread
// descriptor. It is called by the error paths of the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
// and by the thread_destroy() function.
/////////////////////////////////////////////////////////////////////////////////////
// @ thread  : pointer on thread descriptor.
/////////////////////////////////////////////////////////////////////////////////////
static void thread_release( thread_t * thread )
{
    kmem_req_t   req;

    xptr_t page_xp = ppm_base2page( XPTR( local_cxy , thread ) );

    req.type  = KMEM_PAGE;
    req.ptr   = GET_PTR( page_xp );
    kmem_free( &req );
}

/////////////////////////////////////////////////////////////////////////////////////
// This static function initializes a thread descriptor (kernel or user).
// It can be called by the four functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
// - thread_idle_init()
// It updates the local DQDT.
/////////////////////////////////////////////////////////////////////////////////////
// @ thread       : pointer on local thread descriptor
// @ process      : pointer on local process descriptor.
// @ type         : thread type.
// @ func         : pointer on thread entry function.
// @ args         : pointer on thread entry function arguments.
// @ core_lid     : target core local index.
// @ u_stack_base : stack base (user thread only)
// @ u_stack_size : stack size (user thread only)
/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            intptr_t        u_stack_base,
                            uint32_t        u_stack_size )
{
    error_t        error;
    trdid_t        trdid;      // allocated thread identifier

    cluster_t    * local_cluster = LOCAL_CLUSTER;

#if DEBUG_THREAD_INIT
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_THREAD_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, thread, process->pid , cycle );
#endif

    // compute thread descriptor size without kernel stack
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;

    // initialize new thread descriptor
    thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;            // TODO
    thread->core            = &local_cluster->core_tbl[core_lid];
    thread->process         = process;

    thread->busylocks       = 0;

#if DEBUG_BUSYLOCK
    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
#endif

    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread + desc_size;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no user defined placement for fork
    thread->fork_cxy        = 0;            // user defined target cluster for fork
    thread->blocked         = THREAD_BLOCKED_GLOBAL;

    // register new thread in process descriptor, and get a TRDID
    error = process_register_thread( process, thread , &trdid );

    if( error )
    {
        printk("\n[ERROR] in %s : thread %x in process %x cannot get TRDID in cluster %x\n"
        "    for thread %s in process %x / cycle %d\n",
        __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
        local_cxy, thread_type_str(type), process->pid, (uint32_t)hal_get_cycles() );
        return EINVAL;
    }

    // initialize trdid
    thread->trdid           = trdid;

    // initialize sched list
    list_entry_init( &thread->sched_list );

    // initialize waiting queue entries
    list_entry_init( &thread->wait_list );
    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );

    // initialize thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialize join_lock
    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );

    // initialise signature
    thread->signature = THREAD_SIGNATURE;

    // FIXME define and call an architecture specific hal_thread_init()
    // function to initialise the save_sr field
    thread->save_sr = 0xFF13;

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    // update DQDT
    dqdt_increment_threads();

#if DEBUG_THREAD_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for thread %x in process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, thread, process->pid, cycle );
#endif

    return 0;

} // end thread_init()
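
//////////////////////////////////////////////////////////////////////////////////////
// The following sanity-check sketch illustrates the descriptor/stack layout computed
// by thread_init() above : the thread descriptor occupies the bottom of the block
// returned by thread_alloc(), and the kernel stack fills the remaining space.
// The thread_check_layout() helper is hypothetical, not part of the kernel API.
//////////////////////////////////////////////////////////////////////////////////////
static void thread_check_layout( thread_t * thread )
{
    // size of the descriptor, up to and including the <signature> field
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;

    // the kernel stack starts right after the descriptor...
    assert( (thread->k_stack_base == (intptr_t)thread + desc_size) ,
    "kernel stack must start right after the descriptor" );

    // ...and descriptor + stack fill the allocated block exactly
    assert( (desc_size + thread->k_stack_size == CONFIG_THREAD_DESC_SIZE) ,
    "descriptor and stack must fill the allocated block" );
}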

/////////////////////////////////////////////////////////
error_t thread_user_create( pid_t             pid,
                            void            * start_func,
                            void            * start_arg,
                            pthread_attr_t  * attr,
                            thread_t       ** new_thread )
{
    error_t        error;
    thread_t     * thread;       // pointer on created thread descriptor
    process_t    * process;      // pointer to local process descriptor
    lid_t          core_lid;     // selected core local index
    vseg_t       * vseg;         // stack vseg

assert( (attr != NULL) , "pthread attributes must be defined" );

#if DEBUG_THREAD_USER_CREATE
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
__FUNCTION__, this->process->pid , this->trdid , local_cxy , pid , cycle );
#endif

    // get process descriptor local copy
    process = process_get_local_copy( pid );

    if( process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
        __FUNCTION__ , pid );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] process descriptor = %x for process %x in cluster %x\n",
__FUNCTION__, process , pid , local_cxy );
#endif

    // select a target core in local cluster
    if( attr->attributes & PT_ATTR_CORE_DEFINED )
    {
        core_lid = attr->lid;
        if( core_lid >= LOCAL_CLUSTER->cores_nr )
        {
            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
            __FUNCTION__ , core_lid );
            return EINVAL;
        }
    }
    else
    {
        core_lid = cluster_select_local_core();
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] core[%x,%d] selected\n",
__FUNCTION__, local_cxy , core_lid );
#endif

    // allocate a stack from local VMM
    vseg = vmm_create_vseg( process,
                            VSEG_TYPE_STACK,
                            0,                 // size unused
                            0,                 // length unused
                            0,                 // file_offset unused
                            0,                 // file_size unused
                            XPTR_NULL,         // mapper_xp unused
                            local_cxy );

    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
__FUNCTION__, vseg->vpn_base, vseg->vpn_size );
#endif

    // allocate memory for thread descriptor
    thread = thread_alloc();

    if( thread == NULL )
    {
        printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
        vmm_delete_vseg( process->pid , vseg->min );
        return ENOMEM;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] new thread descriptor %x allocated\n",
__FUNCTION__, thread );
#endif

    // initialize thread descriptor
    error = thread_init( thread,
                         process,
                         THREAD_USER,
                         start_func,
                         start_arg,
                         core_lid,
                         vseg->min,
                         vseg->max - vseg->min );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
        vmm_delete_vseg( process->pid , vseg->min );
        thread_release( thread );
        return EINVAL;
    }

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] new thread descriptor initialised / trdid %x\n",
__FUNCTION__, thread->trdid );
#endif

    // set DETACHED flag if required
    if( attr->attributes & PT_ATTR_DETACH )
    {
        thread->flags |= THREAD_FLAG_DETACHED;
    }

    // allocate & initialize CPU context
    if( hal_cpu_context_alloc( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
        vmm_delete_vseg( process->pid , vseg->min );
        thread_release( thread );
        return ENOMEM;
    }
    hal_cpu_context_init( thread );

    // allocate & initialize FPU context
    if( hal_fpu_context_alloc( thread ) )
    {
        printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
        vmm_delete_vseg( process->pid , vseg->min );
        thread_release( thread );
        return ENOMEM;
    }
    hal_fpu_context_init( thread );

#if( DEBUG_THREAD_USER_CREATE & 1)
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] CPU & FPU contexts created for thread %x\n",
__FUNCTION__, thread->trdid );
hal_vmm_display( process , true );
#endif

#if DEBUG_THREAD_USER_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_CREATE < cycle )
printk("\n[%s] thread[%x,%x] exit / new_thread %x / core %d / cycle %d\n",
__FUNCTION__, this->process->pid , this->trdid , thread->trdid, core_lid, cycle );
#endif

    *new_thread = thread;
    return 0;

}  // end thread_user_create()
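
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_user_create(), loosely modeled on what a pthread_create()
// system call handler could do. The sys_thread_create_sketch() name and error
// handling are hypothetical; only the two kernel calls below are real.
//////////////////////////////////////////////////////////////////////////////////////
static error_t sys_thread_create_sketch( pid_t            pid,
                                         void           * func,
                                         void           * args,
                                         pthread_attr_t * attr )
{
    thread_t * new_thread;

    // create the user thread in the local cluster
    error_t error = thread_user_create( pid , func , args , attr , &new_thread );
    if( error ) return error;

    // the new thread is created blocked (THREAD_BLOCKED_GLOBAL):
    // unblock it to make it runnable on its core scheduler
    thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}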

///////////////////////////////////////////////////////
error_t thread_user_fork( xptr_t      parent_thread_xp,
                          process_t * child_process,
                          thread_t ** child_thread )
{
    error_t        error;
    thread_t     * child_ptr;        // local pointer on local child thread
    lid_t          core_lid;         // selected core local index

    thread_t     * parent_ptr;       // local pointer on remote parent thread
    cxy_t          parent_cxy;       // parent thread cluster
    process_t    * parent_process;   // local pointer on parent process
    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT

    void         * func;             // parent thread entry_func
    void         * args;             // parent thread entry_args
    intptr_t       base;             // parent thread u_stack_base
    uint32_t       size;             // parent thread u_stack_size
    uint32_t       flags;            // parent_thread flags
    vpn_t          vpn_base;         // parent thread stack vpn_base
    vpn_t          vpn_size;         // parent thread stack vpn_size
    reg_t        * uzone;            // parent thread pointer on uzone

    vseg_t       * vseg;             // child thread STACK vseg

#if DEBUG_THREAD_USER_FORK
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] enter / child_process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
#endif

    // select a target core in local cluster
    core_lid = cluster_select_local_core();

    // get cluster and local pointer on parent thread descriptor
    parent_cxy = GET_CXY( parent_thread_xp );
    parent_ptr = GET_PTR( parent_thread_xp );

    // get relevant fields from parent thread
    func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
    size  = (uint32_t)hal_remote_l32( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
    flags =           hal_remote_l32( XPTR( parent_cxy , &parent_ptr->flags         ));
    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));

    vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
    vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;

    // get pointer on parent process in parent thread cluster
    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
                                                        &parent_ptr->process ) );

    // get extended pointer on parent GPT in parent thread cluster
    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );

    // allocate memory for child thread descriptor
    child_ptr = thread_alloc();
    if( child_ptr == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
        return -1;
    }

    // initialize thread descriptor
    error = thread_init( child_ptr,
                         child_process,
                         THREAD_USER,
                         func,
                         args,
                         core_lid,
                         base,
                         size );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
        thread_release( child_ptr );
        return EINVAL;
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] initialised thread %x in process %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
#endif

    // return child pointer
    *child_thread = child_ptr;

    // set detached flag if required
    if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;

    // update uzone pointer in child thread descriptor
    child_ptr->uzone_current = (char *)((intptr_t)uzone +
                                        (intptr_t)child_ptr -
                                        (intptr_t)parent_ptr );

    // allocate CPU context for child thread
    if( hal_cpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }

    // allocate FPU context for child thread
    if( hal_fpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] created CPU & FPU contexts for thread %x in process %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
#endif

    // create and initialize STACK vseg
    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate stack vseg\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }
    vseg_init( vseg,
               VSEG_TYPE_STACK,
               base,
               size,
               vpn_base,
               vpn_size,
               0, 0, XPTR_NULL,                         // not a file vseg
               local_cxy );

    // register STACK vseg in local child VSL
    vmm_attach_vseg_to_vsl( &child_process->vmm , vseg );

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] created stack vseg for thread %x in process %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
#endif

    // copy all valid STACK GPT entries
    vpn_t          vpn;
    bool_t         mapped;
    ppn_t          ppn;
    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
    {
        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
                                  parent_gpt_xp,
                                  vpn,
                                  true,                 // set cow
                                  &ppn,
                                  &mapped );
        if( error )
        {
            vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );
            thread_release( child_ptr );
            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
            return -1;
        }

        // increment pending forks counter for the page if mapped
        if( mapped )
        {
            // get pointers on the page descriptor
            xptr_t   page_xp  = ppm_ppn2page( ppn );
            cxy_t    page_cxy = GET_CXY( page_xp );
            page_t * page_ptr = GET_PTR( page_xp );

            // get extended pointers on forks and lock fields
            xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

            // get lock protecting page
            remote_busylock_acquire( lock_xp );

            // increment the forks counter in page descriptor
            hal_remote_atomic_add( forks_xp , 1 );

            // release lock protecting page
            remote_busylock_release( lock_xp );

#if (DEBUG_THREAD_USER_FORK & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] copied one PTE to child GPT : vpn %x / forks %d\n",
__FUNCTION__, this->process->pid, this->trdid,
vpn, hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
#endif

        }
    }

    // set COW flag for all mapped entries of STACK vseg in parent thread GPT
    hal_gpt_set_cow( parent_gpt_xp,
                     vpn_base,
                     vpn_size );

#if DEBUG_THREAD_USER_FORK
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] exit / child_thread %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr, cycle );
#endif

    return 0;

}  // end thread_user_fork()
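
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_user_fork() : a hypothetical caller on the fork path,
// assuming the child process descriptor has already been created in the local
// cluster. The process_fork_thread_sketch() name and error handling are illustrative.
//////////////////////////////////////////////////////////////////////////////////////
static error_t process_fork_thread_sketch( process_t * child_process )
{
    thread_t * child_ptr;

    // extended pointer on the calling (parent) thread
    xptr_t parent_xp = XPTR( local_cxy , CURRENT_THREAD );

    // duplicate the parent thread : the user stack is mapped copy-on-write,
    // and the <forks> counters of the mapped stack pages are incremented
    error_t error = thread_user_fork( parent_xp , child_process , &child_ptr );
    if( error ) return error;

    // the child thread is created blocked : make it runnable
    thread_unblock( XPTR( local_cxy , child_ptr ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}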

////////////////////////////////////////////////
error_t thread_user_exec( void     * entry_func,
                          uint32_t   argc,
                          char    ** argv )
{
    thread_t  * thread  = CURRENT_THREAD;
    process_t * process = thread->process;

#if DEBUG_THREAD_USER_EXEC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_EXEC < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__, process->pid, thread->trdid, cycle );
#endif

// check parent thread attributes
assert( (thread->type == THREAD_USER )          , "bad type" );
assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
assert( (thread->busylocks == 0)                , "bad busylocks" );

    // re-initialize various thread descriptor fields
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;            // TODO

    thread->entry_func      = entry_func;
    thread->main_argc       = argc;
    thread->main_argv       = argv;

    // the main thread is always detached
    thread->flags           = THREAD_FLAG_DETACHED;
    thread->blocked         = 0;
    thread->errno           = 0;
    thread->fork_user       = 0;    // not inherited
    thread->fork_cxy        = 0;    // not inherited

    // re-initialize busylocks counter
    thread->busylocks       = 0;

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // re-initialize join_lock
    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );

    // allocate a user stack vseg for main thread
    vseg_t * vseg = vmm_create_vseg( process,
                                     VSEG_TYPE_STACK,
                                     0,                 // size unused
                                     0,                 // length unused
                                     0,                 // file_offset unused
                                     0,                 // file_size unused
                                     XPTR_NULL,         // mapper_xp unused
                                     local_cxy );
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create stack vseg for main thread\n", __FUNCTION__ );
        return -1;
    }

    // update user stack in thread descriptor
    thread->u_stack_base = vseg->min;
    thread->u_stack_size = vseg->max - vseg->min;

    // release FPU ownership if required
    if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;

    // re-initialize FPU context
    hal_fpu_context_init( thread );

#if DEBUG_THREAD_USER_EXEC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_EXEC < cycle )
printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
__FUNCTION__, process->pid, thread->trdid, cycle );
hal_vmm_display( process , true );
#endif

    // re-initialize CPU context... and jump to user code
    hal_cpu_context_exec( thread );

    assert( false , "we should not execute this code" );

    return 0;

}  // end thread_user_exec()

/////////////////////////////////////////////////////////
error_t thread_kernel_create( thread_t     ** new_thread,
                              thread_type_t   type,
                              void          * func,
                              void          * args,
                              lid_t           core_lid )
{
    error_t        error;
    thread_t     * thread;       // pointer on new thread descriptor

    thread_t * this = CURRENT_THREAD;

assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
"illegal thread type" );

assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
"illegal core_lid" );

#if DEBUG_THREAD_KERNEL_CREATE
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KERNEL_CREATE < cycle )
printk("\n[%s] thread[%x,%x] enter / requested_type %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, thread_type_str(type), cycle );
#endif

    // allocate memory for new thread descriptor
    thread = thread_alloc();

    if( thread == NULL )
    {
        printk("\n[ERROR] in %s : thread %x in process %x\n"
        "   no memory for thread descriptor\n",
        __FUNCTION__, this->trdid, this->process->pid );
        return ENOMEM;
    }

    // initialize thread descriptor
    error = thread_init( thread,
                         &process_zero,
                         type,
                         func,
                         args,
                         core_lid,
                         0 , 0 );  // no user stack for a kernel thread

    if( error ) // release allocated memory for thread descriptor
    {
        printk("\n[ERROR] in %s : thread %x in process %x\n"
        "   cannot initialize thread descriptor\n",
        __FUNCTION__, this->trdid, this->process->pid );
        thread_release( thread );
        return ENOMEM;
    }

    // allocate & initialize CPU context
    error = hal_cpu_context_alloc( thread );

    if( error )
    {
        printk("\n[ERROR] in %s : thread %x in process %x\n"
        "   cannot create CPU context\n",
        __FUNCTION__, this->trdid, this->process->pid );
        thread_release( thread );
        return EINVAL;
    }

    hal_cpu_context_init( thread );

    // set THREAD_BLOCKED_IDLE for DEV threads
    if( type == THREAD_DEV ) thread->blocked |= THREAD_BLOCKED_IDLE;

#if DEBUG_THREAD_KERNEL_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_KERNEL_CREATE < cycle )
printk("\n[%s] thread[%x,%x] exit / new_thread %x / type %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, thread, thread_type_str(type), cycle );
#endif

    *new_thread = thread;
    return 0;

} // end thread_kernel_create()
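
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_kernel_create() : creating an RPC server thread on core 0
// of the local cluster, then unblocking it. The rpc_server_func() entry function and
// the create_rpc_thread_sketch() name are hypothetical placeholders.
//////////////////////////////////////////////////////////////////////////////////////
static void rpc_server_func( void * args );   // hypothetical kernel entry function

static error_t create_rpc_thread_sketch( void )
{
    thread_t * thread;

    error_t error = thread_kernel_create( &thread,
                                          THREAD_RPC,
                                          &rpc_server_func,
                                          NULL,           // no arguments
                                          0 );            // core_lid 0
    if( error ) return error;

    // kernel threads are created blocked (THREAD_BLOCKED_GLOBAL):
    // make the new thread runnable
    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}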

//////////////////////////////////////////////
void thread_idle_init( thread_t      * thread,
                       thread_type_t   type,
                       void          * func,
                       void          * args,
                       lid_t           core_lid )
{

// check arguments
assert( (type == THREAD_IDLE) , "illegal thread type" );
assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );

    // initialize thread descriptor
    error_t  error = thread_init( thread,
                                  &process_zero,
                                  type,
                                  func,
                                  args,
                                  core_lid,
                                  0 , 0 );   // no user stack for a kernel thread

    assert( (error == 0), "cannot create thread idle" );

    // allocate & initialize CPU context if success
    error = hal_cpu_context_alloc( thread );

    assert( (error == 0), "cannot allocate CPU context" );

    hal_cpu_context_init( thread );

}  // end thread_idle_init()

///////////////////////////////////////////////////////////////////////////////////////
// TODO: check that all memory dynamically allocated during thread execution
// has been released => check vmm destroy for MMAP vsegs  [AG]
///////////////////////////////////////////////////////////////////////////////////////
void thread_destroy( thread_t * thread )
{
    reg_t        save_sr;

    process_t  * process    = thread->process;
    core_t     * core       = thread->core;

#if DEBUG_THREAD_DESTROY
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
#endif

    // check busylocks counter
    thread_assert_can_yield( thread , __FUNCTION__ );

    // update instrumentation values
    process->vmm.pgfault_nr += thread->info.pgfault_nr;

    // release memory allocated for CPU context and FPU context
    hal_cpu_context_destroy( thread );
    if( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );

    // release FPU ownership if required
    hal_disable_irq( &save_sr );
    if( core->fpu_owner == thread )
    {
        core->fpu_owner = NULL;
        hal_fpu_disable();
    }
    hal_restore_irq( save_sr );

    // invalidate thread descriptor
    thread->signature = 0;

    // release memory for thread descriptor
    thread_release( thread );

#if DEBUG_THREAD_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
#endif

}   // end thread_destroy()

//////////////////////////////////////////////////
inline void thread_set_req_ack( thread_t * target,
                                uint32_t * rsp_count )
{
    reg_t    save_sr;   // for critical section

    // get pointer on target thread scheduler
    scheduler_t * sched = &target->core->scheduler;

    // wait until the scheduler is ready to handle a new request
    while( sched->req_ack_pending ) asm volatile( "nop" );

    // enter critical section
    hal_disable_irq( &save_sr );

    // set request in target thread scheduler
    sched->req_ack_pending = true;

    // set ack request in target thread "flags"
    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );

    // set pointer on responses counter in target thread
    target->ack_rsp_count = rsp_count;

    // exit critical section
    hal_restore_irq( save_sr );

    hal_fence();

}  // thread_set_req_ack()

/////////////////////////////////////////////////////
inline void thread_reset_req_ack( thread_t * target )
{
    reg_t    save_sr;   // for critical section

    // get pointer on target thread scheduler
    scheduler_t * sched = &target->core->scheduler;

    // check signal pending in scheduler
    assert( sched->req_ack_pending , "no pending signal" );

    // enter critical section
    hal_disable_irq( &save_sr );

    // reset signal in scheduler
    sched->req_ack_pending = false;

    // reset signal in thread "flags"
    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );

    // reset pointer on responses counter
    target->ack_rsp_count = NULL;

    // exit critical section
    hal_restore_irq( save_sr );

    hal_fence();

}  // thread_reset_req_ack()
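
//////////////////////////////////////////////////////////////////////////////////////
// Synchronization sketch for the two functions above : a hypothetical requester
// posts one ack request per target thread, then polls the shared response counter.
// This assumes each target scheduler decrements the counter referenced by
// <ack_rsp_count> when it handles the THREAD_FLAG_REQ_ACK request;
// wait_all_acks_sketch() is an illustrative name, not a kernel function.
//////////////////////////////////////////////////////////////////////////////////////
static void wait_all_acks_sketch( thread_t ** targets,
                                  uint32_t    nr_targets )
{
    uint32_t  i;
    uint32_t  rsp_count = nr_targets;   // one expected response per target

    // post one ack request per target thread
    for( i = 0 ; i < nr_targets ; i++ )
    {
        thread_set_req_ack( targets[i] , &rsp_count );
    }

    // wait until all target schedulers have acknowledged
    while( rsp_count != 0 ) asm volatile( "nop" );
}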

//////////////////////////////////////
void thread_block( xptr_t   thread_xp,
                   uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread_xp );
    thread_t * ptr = GET_PTR( thread_xp );

    // set blocking cause
    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
    hal_fence();

#if DEBUG_THREAD_BLOCK
uint32_t    cycle   = (uint32_t)hal_get_cycles();
process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
thread_t  * this    = CURRENT_THREAD;
if( DEBUG_THREAD_BLOCK < cycle )
printk("\n[%s] thread[%x,%x] blocked thread %x in process %x / cause %x\n",
__FUNCTION__, this->process->pid, this->trdid,
ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
#endif

} // end thread_block()

////////////////////////////////////////////
uint32_t thread_unblock( xptr_t   thread_xp,
                         uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread_xp );
    thread_t * ptr = GET_PTR( thread_xp );

    // reset blocking cause
    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
    hal_fence();

#if DEBUG_THREAD_BLOCK
uint32_t    cycle   = (uint32_t)hal_get_cycles();
process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
thread_t  * this    = CURRENT_THREAD;
if( DEBUG_THREAD_BLOCK < cycle )
printk("\n[%s] thread[%x,%x] unblocked thread %x in process %x / cause %x\n",
__FUNCTION__, this->process->pid, this->trdid,
ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
#endif

    // return a non-zero value if the cause bit was actually modified
    return( previous & cause );

}  // end thread_unblock()
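
//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_block() / thread_unblock() : a thread blocking itself
// until an I/O operation completes, and the server/ISR side unblocking it. This
// assumes the THREAD_BLOCKED_IO cause defined in thread.h; both function names
// below are illustrative.
//////////////////////////////////////////////////////////////////////////////////////
static void io_wait_sketch( void )
{
    thread_t * this = CURRENT_THREAD;

    // block this thread until the I/O operation completes
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO );

    // deschedule so that the blocking takes effect
    sched_yield( "waiting I/O completion" );
}

static void io_done_sketch( xptr_t client_xp )
{
    // reset the blocking bit : the return value is non-zero
    // only if the THREAD_BLOCKED_IO bit was actually set
    if( thread_unblock( client_xp , THREAD_BLOCKED_IO ) == 0 )
    {
        printk("\n[WARNING] in %s : client thread was not blocked on I/O\n",
        __FUNCTION__ );
    }
}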

//////////////////////////////////////
void thread_delete( xptr_t  target_xp,
                    pid_t   pid,
                    bool_t  is_forced )
{
    reg_t       save_sr;                // for critical section
    bool_t      target_join_done;       // joining thread arrived first
    bool_t      target_attached;        // target thread attached
    xptr_t      killer_xp;              // extended pointer on killer thread (this)
    thread_t  * killer_ptr;             // pointer on killer thread (this)
    cxy_t       target_cxy;             // target thread cluster
    thread_t  * target_ptr;             // pointer on target thread
    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
    trdid_t     target_trdid;           // target thread identifier
    ltid_t      target_ltid;            // target thread local index
    xptr_t      joining_xp;             // extended pointer on joining thread
    thread_t  * joining_ptr;            // pointer on joining thread
    cxy_t       joining_cxy;            // joining thread cluster

    // get target thread cluster and local pointer
    target_cxy      = GET_CXY( target_xp );
    target_ptr      = GET_PTR( target_xp );

    // get target thread identifiers, and attached flag
    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
    target_ltid     = LTID_FROM_TRDID( target_trdid );
    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );

    // get killer thread pointers
    killer_ptr = CURRENT_THREAD;
    killer_xp  = XPTR( local_cxy , killer_ptr );

#if DEBUG_THREAD_DELETE
uint32_t cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[%s] killer[%x,%x] enters / target[%x,%x] / cycle %d\n",
__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
target_ptr->process->pid, target_ptr->trdid, cycle );
#endif

// check that the target thread is not the main thread, because the main thread
// must be deleted by the parent process sys_wait() function
assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
"target thread cannot be the main thread\n" );

    // check killer thread can yield
    thread_assert_can_yield( killer_ptr , __FUNCTION__ );

    // if the target thread is attached, we must synchronize with the joining thread
    // before blocking and marking the target thread for delete.

    if( target_attached && (is_forced == false) ) // synchronize with joining thread
    {
        // build extended pointers on target thread join fields
        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );

        // enter critical section
        hal_disable_irq( &save_sr );

        // take the join_lock in target thread descriptor
        remote_busylock_acquire( target_join_lock_xp );

        // get join_done from target thread descriptor
        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);

        if( target_join_done )                     // joining thread arrived first
        {
            // get extended pointer on joining thread
            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
            joining_ptr = GET_PTR( joining_xp );
            joining_cxy = GET_CXY( joining_xp );

            // reset the join_done flag in target thread
            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );

            // unblock the joining thread
            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );

            // release the join_lock in target thread descriptor
            remote_busylock_release( target_join_lock_xp );

            // block the target thread
            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );

            // set the REQ_DELETE flag in target thread descriptor
            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

            // exit critical section
            hal_restore_irq( save_sr );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
target_ptr->process->pid, target_ptr->trdid, cycle );
#endif

        }
        else                                      // killer thread arrived first
        {
            // set the kill_done flag in target thread
            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );

            // block this thread on BLOCKED_JOIN
            thread_block( killer_xp , THREAD_BLOCKED_JOIN );

            // set extended pointer on killer thread in target thread
            hal_remote_s64( target_join_xp_xp , killer_xp );

            // release the join_lock in target thread descriptor
            remote_busylock_release( target_join_lock_xp );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[%s] killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
target_ptr->process->pid, target_ptr->trdid, cycle );
#endif
            // deschedule
            sched_yield( "killer thread wait joining thread" );

            // block the target thread
            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );

            // set the REQ_DELETE flag in target thread descriptor
            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

            // exit critical section
            hal_restore_irq( save_sr );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
target_ptr->process->pid, target_ptr->trdid, cycle );
#endif

        }
    }
    else                     // no synchronization with joining thread required
    {
        // block the target thread
        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );

        // set the REQ_DELETE flag in target thread descriptor
        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

#if DEBUG_THREAD_DELETE
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_DELETE < cycle )
printk("\n[%s] killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
target_ptr->process->pid, target_ptr->trdid, cycle );
#endif

    }
}  // end thread_delete()


/////////////////////////////
void thread_idle_func( void )
{
    while( 1 )
    {
        // unmask IRQs
        hal_enable_irq( NULL );

        // force core to low-power mode (optional)
        if( CONFIG_SCHED_IDLE_MODE_SLEEP )
        {

#if DEBUG_THREAD_IDLE
{
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_IDLE < cycle )
printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
}
#endif

            hal_core_sleep();

#if DEBUG_THREAD_IDLE
{
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_IDLE < cycle )
printk("\n[%s] idle thread on core[%x,%d] wakes up / cycle %d\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
}
#endif

        }

#if DEBUG_THREAD_IDLE
{
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_IDLE < cycle )
sched_display( CURRENT_THREAD->core->lid );
}
#endif
        // search a runnable thread
        sched_yield( "running idle thread" );

    } // end while

}  // end thread_idle_func()


///////////////////////////////////////////
void thread_time_update( thread_t * thread,
                         bool_t     is_user )
{
    cycle_t current_cycle;   // current cycle counter value
    cycle_t last_cycle;      // last cycle counter value

    // get pointer on thread_info structure
    thread_info_t * info = &thread->info;

    // get last cycle counter value
    last_cycle = info->last_cycle;

    // get current cycle counter value
    current_cycle = hal_get_cycles();

    // update thread_info structure
    info->last_cycle = current_cycle;

    // update time in thread_info
    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
    else          info->sys_cycles += (current_cycle - last_cycle);

}  // end thread_time_update()

/////////////////////////////////////
xptr_t thread_get_xptr( pid_t    pid,
                        trdid_t  trdid )
{
    cxy_t         target_cxy;          // target thread cluster identifier
    ltid_t        target_thread_ltid;  // target thread local index
    thread_t    * target_thread_ptr;   // target thread local pointer
    xptr_t        target_process_xp;   // extended pointer on target process descriptor
    process_t   * target_process_ptr;  // local pointer on target process descriptor
    pid_t         target_process_pid;  // target process identifier
    xlist_entry_t root;                // root of list of process in target cluster
    xptr_t        lock_xp;             // extended pointer on lock protecting this list

#if DEBUG_THREAD_GET_XPTR
uint32_t cycle  = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_THREAD_GET_XPTR < cycle )
printk("\n[%s] thread %x in process %x enters / pid %x / trdid %x / cycle %d\n",
__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
#endif

    // get target cluster identifier and local thread identifier
    target_cxy         = CXY_FROM_TRDID( trdid );
    target_thread_ltid = LTID_FROM_TRDID( trdid );

    // check trdid argument
    if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) ||
        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;

    // get root of list of process descriptors in target cluster
    hal_remote_memcpy( XPTR( local_cxy  , &root ),
                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
                       sizeof(xlist_entry_t) );

    // get extended pointer on lock protecting the list of local processes
    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // take the lock protecting the list of processes in target cluster
    remote_queuelock_acquire( lock_xp );

#if( DEBUG_THREAD_GET_XPTR & 1 )
if( DEBUG_THREAD_GET_XPTR < cycle )
printk("\n[%s] scan processes in cluster %x :\n", __FUNCTION__, target_cxy );
#endif

    // scan the list of local processes in target cluster
    xptr_t  iter;
    bool_t  found = false;
    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
    {
        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
        target_process_ptr = GET_PTR( target_process_xp );
        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );

#if( DEBUG_THREAD_GET_XPTR & 1 )
if( DEBUG_THREAD_GET_XPTR < cycle )
printk(" - process %x\n", target_process_pid );
#endif

        if( target_process_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes in target cluster
    remote_queuelock_release( lock_xp );

    // check PID found
    if( found == false )
    {

#if( DEBUG_THREAD_GET_XPTR & 1 )
if( DEBUG_THREAD_GET_XPTR < cycle )
printk("\n[%s] pid %x not found in cluster %x\n",
__FUNCTION__, pid, target_cxy );
#endif
        return XPTR_NULL;
    }

    // get target thread local pointer
    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );

    if( target_thread_ptr == NULL )
    {

#if( DEBUG_THREAD_GET_XPTR & 1 )
if( DEBUG_THREAD_GET_XPTR < cycle )
printk("\n[%s] thread %x not registered in process %x in cluster %x\n",
__FUNCTION__, trdid, pid, target_cxy );
#endif
        return XPTR_NULL;
    }

#if DEBUG_THREAD_GET_XPTR
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_GET_XPTR < cycle )
printk("\n[%s] thread %x in process %x exit / pid %x / trdid %x / cycle %d\n",
__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
#endif

    return XPTR( target_cxy , target_thread_ptr );

}  // end thread_get_xptr()

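//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_get_xptr() : resolving a (pid , trdid) pair to an
// extended pointer before marking the thread for deletion, as a sys_thread_cancel()
// like handler could do. The cancel_thread_sketch() name is illustrative.
//////////////////////////////////////////////////////////////////////////////////////
static error_t cancel_thread_sketch( pid_t   pid,
                                     trdid_t trdid )
{
    // get an extended pointer on the target thread
    xptr_t thread_xp = thread_get_xptr( pid , trdid );

    if( thread_xp == XPTR_NULL ) return EINVAL;

    // block the target thread and mark it for deletion (not forced)
    thread_delete( thread_xp , pid , false );

    return 0;
}
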
///////////////////////////////////////////////////
void thread_assert_can_yield( thread_t    * thread,
                              const char  * func_str )
{
    // does nothing if thread does not hold any busylock

    if( thread->busylocks )
    {
        // get pointers on TXT0 chdev
        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
        chdev_t * txt0_ptr = GET_PTR( txt0_xp );

        // get extended pointer on TXT0 lock
        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

        // get TXT0 lock
        remote_busylock_acquire( txt0_lock_xp );

        // display error message on TXT0
        nolock_printk("\n[PANIC] in %s / thread[%x,%x] cannot yield : "
        "holds %d busylock(s) / cycle %d\n",
        func_str, thread->process->pid, thread->trdid,
        thread->busylocks - 1, (uint32_t)hal_get_cycles() );

#if DEBUG_BUSYLOCK

// scan list of busylocks
xptr_t    iter_xp;
xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
XLIST_FOREACH( root_xp , iter_xp )
{
    xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
    cxy_t        lock_cxy  = GET_CXY( lock_xp );
    busylock_t * lock_ptr  = GET_PTR( lock_xp );
    uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
    nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
}

#endif

        // release TXT0 lock
        remote_busylock_release( txt0_lock_xp );

        // suicide
        hal_core_sleep();
    }
}  // end thread_assert_can_yield()

//////////////////////////////////////////////////////
void thread_display_busylocks( xptr_t       thread_xp,
                               const char * string )
{

    cxy_t      thread_cxy = GET_CXY( thread_xp );
    thread_t * thread_ptr = GET_PTR( thread_xp );

#if DEBUG_BUSYLOCK

    xptr_t     iter_xp;

    // get relevant info from target thread descriptor
    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
    process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
    pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );

    // get extended pointer on root of busylocks
    xptr_t root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 lock
    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( txt0_lock_xp );

    // display header
    nolock_printk("\n***** thread[%x,%x] in <%s> : %d busylocks *****\n",
    pid, trdid, string, locks );

    // scan the xlist of busylocks when required
    if( locks )
    {
        XLIST_FOREACH( root_xp , iter_xp )
        {
            xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
            cxy_t        lock_cxy  = GET_CXY( lock_xp );
            busylock_t * lock_ptr  = GET_PTR( lock_xp );
            uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
            nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
        }
    }

    // release TXT0 lock
    remote_busylock_release( txt0_lock_xp );

#else

printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
__FUNCTION__, string, thread_cxy, thread_ptr );

#endif

    return;

}  // end thread_display_busylocks()