source: trunk/kernel/kern/thread.c

Last change on this file was 683, checked in by alain, 16 months ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 53.4 KB
Line 
1/*
2 * thread.c -   thread operations implementation (user & kernel)
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Alain Greiner    (2016,2017,2018,2019,2020)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
31#include <hal_vmm.h>
32#include <hal_switch.h>
33#include <memcpy.h>
34#include <printk.h>
35#include <cluster.h>
36#include <process.h>
37#include <scheduler.h>
38#include <dev_pic.h>
39#include <core.h>
40#include <list.h>
41#include <xlist.h>
42#include <page.h>
43#include <kmem.h>
44#include <ppm.h>
45#include <thread.h>
46#include <rpc.h>
47
48//////////////////////////////////////////////////////////////////////////////////////
49// Extern global variables
50//////////////////////////////////////////////////////////////////////////////////////
51
52extern process_t            process_zero;       // allocated in kernel_init.c
53extern char               * lock_type_str[];    // allocated in kernel_init.c
54extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
55
56//////////////////////////////////////////////////////////////////////////////////////
57// This function returns a printable string for the thread type.
58//////////////////////////////////////////////////////////////////////////////////////
59const char * thread_type_str( thread_type_t type )
60{
61  switch ( type ) {
62  case THREAD_USER:   return "USR";
63  case THREAD_RPC:    return "RPC";
64  case THREAD_DEV:    return "DEV";
65  case THREAD_IDLE:   return "IDL";
66  default:            return "undefined";
67  }
68}
69
70/////////////////////////////////////////////////////////////////////////////////////
71// This static function initializes a thread descriptor (kernel or user).
72// It can be called by the four functions:
73// - thread_user_create()
74// - thread_user_fork()
75// - thread_kernel_create()
76// - thread_idle_init()
77// The "type" and "trdid" fields must have been previously set.
78// It updates the local DQDT.
79/////////////////////////////////////////////////////////////////////////////////////
80// @ thread          : pointer on local thread descriptor
81// @ process         : pointer on local process descriptor.
82// @ type            : thread type.
83// @ trdid           : thread identifier
84// @ func            : pointer on thread entry function.
85// @ args            : pointer on thread entry function arguments.
86// @ core_lid        : target core local index.
87// @ user_stack_vseg : local pointer on user stack vseg (user thread only)
88/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            trdid_t         trdid,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            vseg_t        * user_stack_vseg )
{

// check type and trdid fields are initialized
// (the caller must have set them before calling this function)
assert( __FUNCTION__, (thread->type == type)   , "bad type argument" );
assert( __FUNCTION__, (thread->trdid == trdid) , "bad trdid argument" );

#if DEBUG_THREAD_INIT
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_THREAD_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter for thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
#endif

    // compute thread descriptor size without kernel stack:
    // assumes "signature" (uint32_t) is the last field of thread_t,
    // so desc_size = byte offset of signature + 4  — TODO confirm layout
    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;

    // Initialize new thread descriptor
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;            // TODO
    thread->core            = &LOCAL_CLUSTER->core_tbl[core_lid];
    thread->process         = process;
    thread->busylocks       = 0;

#if DEBUG_BUSYLOCK
xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
#endif

    // the kernel stack occupies the rest of the CONFIG_THREAD_DESC_SIZE slab,
    // immediately after the descriptor fields
    thread->user_stack_vseg = user_stack_vseg;
    thread->k_stack_base    = (intptr_t)thread + desc_size;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no user defined placement for fork
    thread->fork_cxy        = 0;            // user defined target cluster for fork
    thread->blocked         = THREAD_BLOCKED_GLOBAL;   // created blocked: must be
                                                       // explicitly unblocked to run

    // initialize sched list
    list_entry_init( &thread->sched_list );

    // initialize the embedded alarm
    list_entry_init( &thread->alarm.list );

    // initialize waiting queue entries
    list_entry_init( &thread->wait_list );
    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );

    // initialize thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialize join_lock
    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );

    // initialise signature (marks the descriptor as a valid thread)
    thread->signature = THREAD_SIGNATURE;

    // FIXME define and call an architecture specific hal_thread_init()
    // function to initialise the save_sr field
    thread->save_sr = 0xFF13;

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    // update DQDT
    dqdt_increment_threads();

    // initialize timer alarm
    alarm_init( &thread->alarm );

#if CONFIG_INSTRUMENTATION_PGFAULTS
// explicit reset of page-fault instrumentation counters
// (redundant with the memset of thread->info above, but harmless)
thread->info.false_pgfault_nr    = 0;
thread->info.false_pgfault_cost  = 0;
thread->info.false_pgfault_max   = 0;
thread->info.local_pgfault_nr    = 0;
thread->info.local_pgfault_cost  = 0;
thread->info.local_pgfault_max   = 0;
thread->info.global_pgfault_nr   = 0;
thread->info.global_pgfault_cost = 0;
thread->info.global_pgfault_max  = 0;
#endif

#if DEBUG_THREAD_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
#endif

    // always succeeds in the current implementation
    return 0;

} // end thread_init()
191
192//////////////////////////////////////////////////
193error_t thread_user_create( pid_t             pid,
194                            void            * start_func,
195                            void            * start_arg,
196                            pthread_attr_t  * attr,
197                            thread_t       ** new_thread )
198{
199    error_t        error;
200        thread_t     * thread;       // pointer on created thread descriptor
201    trdid_t        trdid;        // created thred identifier
202    process_t    * process;      // pointer to local process descriptor
203    lid_t          core_lid;     // selected core local index
204    vseg_t       * us_vseg;      // user stack vseg
205
206assert( __FUNCTION__, (attr != NULL) , "pthread attributes must be defined" );
207
208#if DEBUG_THREAD_USER_CREATE
209thread_t * this  = CURRENT_THREAD;
210uint32_t   cycle = (uint32_t)hal_get_cycles();
211if( DEBUG_THREAD_USER_CREATE < cycle )
212printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
213__FUNCTION__, this->process->pid , this->trdid , local_cxy , pid , cycle );
214#endif
215
216    // get process descriptor local copy
217    process = process_get_local_copy( pid );
218
219    if( process == NULL )
220    {
221                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
222        __FUNCTION__ , pid );
223        return -1;
224    }
225
226#if( DEBUG_THREAD_USER_CREATE & 1)
227if( DEBUG_THREAD_USER_CREATE < cycle )
228printk("\n[%s] process descriptor = %x for process %x in cluster %x\n",
229__FUNCTION__, process , pid , local_cxy );
230#endif
231
232    // select a target core in local cluster
233    if( attr->attributes & PT_ATTR_CORE_DEFINED )
234    {
235        core_lid = attr->lid;
236        if( core_lid >= LOCAL_CLUSTER->cores_nr )
237        {
238                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
239            __FUNCTION__ , core_lid );
240            return -1;
241        }
242    }
243    else
244    {
245        core_lid = cluster_select_local_core( local_cxy );
246    }
247
248#if( DEBUG_THREAD_USER_CREATE & 1)
249if( DEBUG_THREAD_USER_CREATE < cycle )
250printk("\n[%s] core[%x,%d] selected\n",
251__FUNCTION__, local_cxy , core_lid );
252#endif
253
254    // allocate memory for thread descriptor
255    thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
256
257    if( thread == NULL )
258    {
259            printk("\n[ERROR] in %s : cannot create new thread in cluster %x\n",
260        __FUNCTION__, local_cxy );
261        return -1;
262    }
263
264#if( DEBUG_THREAD_USER_CREATE & 1)
265if( DEBUG_THREAD_USER_CREATE < cycle )
266printk("\n[%s] new thread descriptor %x allocated\n",
267__FUNCTION__, thread );
268#endif
269
270    // set type in thread descriptor
271    thread->type = THREAD_USER;
272
273    // register new thread in process descriptor, and get a TRDID
274    error = process_register_thread( process, thread , &trdid );
275
276    if( error )
277    {
278        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
279        __FUNCTION__, pid );
280        thread_destroy( thread );
281        return -1;
282    }
283
284    // set trdid in thread descriptor
285    thread->trdid = trdid;
286
287#if( DEBUG_THREAD_USER_CREATE & 1)
288if( DEBUG_THREAD_USER_CREATE < cycle )
289printk("\n[%s] new thread %x registered in process %x\n",
290__FUNCTION__, trdid, pid );
291#endif
292
293    // allocate a stack from local VMM
294    us_vseg = vmm_create_vseg( process,
295                               VSEG_TYPE_STACK,
296                               LTID_FROM_TRDID( trdid ),
297                               0,                         // size unused
298                               0,                         // file_offset unused
299                               0,                         // file_size unused
300                               XPTR_NULL,                 // mapper_xp unused
301                               local_cxy );
302
303    if( us_vseg == NULL )
304    {
305            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
306        process_remove_thread( thread );
307        thread_destroy( thread );
308                return -1;
309    }
310
311#if( DEBUG_THREAD_USER_CREATE & 1)
312if( DEBUG_THREAD_USER_CREATE < cycle )
313printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
314__FUNCTION__, us_vseg->vpn_base, us_vseg->vpn_size );
315#endif
316
317    // initialize thread descriptor
318    error = thread_init( thread,
319                         process,
320                         THREAD_USER,
321                         trdid,
322                         start_func,
323                         start_arg,
324                         core_lid,
325                         us_vseg );
326    if( error )
327    {
328            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
329        vmm_remove_vseg( process , us_vseg );
330        process_remove_thread( thread );
331        thread_destroy( thread );
332        return -1;
333    }
334
335#if( DEBUG_THREAD_USER_CREATE & 1)
336if( DEBUG_THREAD_USER_CREATE < cycle )
337printk("\n[%s] new thread %x in process %x initialised\n",
338__FUNCTION__, thread->trdid, process->pid );
339#endif
340
341    // set DETACHED flag if required
342    if( attr->attributes & PT_ATTR_DETACH ) 
343    {
344        thread->flags |= THREAD_FLAG_DETACHED;
345    }
346
347    // allocate & initialize CPU context
348        if( hal_cpu_context_alloc( thread ) )
349    {
350            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
351        vmm_remove_vseg( process , us_vseg );
352        process_remove_thread( thread );
353        thread_destroy( thread );
354        return -1;
355    }
356    hal_cpu_context_init( thread,
357                          false , 0 , 0 );   // not a main thread
358
359    // allocate & initialize FPU context
360    if( hal_fpu_context_alloc( thread ) )
361    {
362            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
363        vmm_remove_vseg( process , us_vseg );
364        process_remove_thread( thread );
365        thread_destroy( thread );
366        return -1;
367    }
368    hal_fpu_context_init( thread );
369
370#if( DEBUG_THREAD_USER_CREATE & 1)
371if( DEBUG_THREAD_USER_CREATE < cycle )
372printk("\n[%s] CPU & FPU contexts created\n",
373__FUNCTION__, thread->trdid );
374hal_vmm_display( XPTR( local_cxy , process ) , true );
375#endif
376
377#if DEBUG_THREAD_USER_CREATE
378cycle = (uint32_t)hal_get_cycles();
379if( DEBUG_THREAD_USER_CREATE < cycle )
380printk("\n[%s] thread[%x,%x] exit / new_thread %x / core %d / cycle %d\n",
381__FUNCTION__, this->process->pid , this->trdid , thread->trdid, core_lid, cycle );
382#endif
383
384    *new_thread = thread;
385        return 0;
386
387}  // end thread_user_create()
388
389///////////////////////////////////////////////////////
// This function creates, in the local cluster, a child user thread that is a
// copy of the parent thread identified by <parent_thread_xp> (possibly remote).
// It registers the child thread in <child_process>, allocates a local user
// stack vseg, copies the parent stack PTEs into the child GPT with the COW
// flag set, and sets COW on the parent stack entries as well.
// @ parent_thread_xp : extended pointer on the parent thread descriptor.
// @ child_process    : local pointer on the child process descriptor.
// @ child_thread     : [out] local pointer on the created child thread.
// @ returns 0 on success / returns -1 on failure.
error_t thread_user_fork( xptr_t      parent_thread_xp,
                          process_t * child_process,
                          thread_t ** child_thread )
{
    error_t        error;
    thread_t     * child_ptr;        // local pointer on child thread
    trdid_t        child_trdid;      // child thread identifier
    lid_t          core_lid;         // selected core local index
    thread_t     * parent_ptr;       // local pointer on remote parent thread
    cxy_t          parent_cxy;       // parent thread cluster
    process_t    * parent_process;   // local pointer on parent process
    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
    void         * parent_func;      // parent thread entry_func
    void         * parent_args;      // parent thread entry_args
    uint32_t       parent_flags;     // parent_thread flags
    vseg_t       * parent_us_vseg;   // parent thread user stack vseg
    vseg_t       * child_us_vseg;    // child thread user stack vseg

#if DEBUG_THREAD_USER_FORK
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] enter for child_process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
#endif

    // select a target core in local cluster
    core_lid = cluster_select_local_core( local_cxy );

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] selected core [%x,%d]\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, core_lid );
#endif

    // get cluster and local pointer on parent thread descriptor
    parent_cxy = GET_CXY( parent_thread_xp );
    parent_ptr = GET_PTR( parent_thread_xp );

    // get relevant infos from parent thread
    // (remote reads: the parent may run in another cluster)
    parent_func    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_func ));
    parent_args    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_args ));
    parent_flags   = (uint32_t)hal_remote_l32( XPTR(parent_cxy,&parent_ptr->flags ));
    parent_us_vseg = (vseg_t *)hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->user_stack_vseg ));

    // get pointer on parent process in parent thread cluster
    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
                                                        &parent_ptr->process ) );

    // build extended pointer on parent GPT in parent thread cluster
    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] get parent GPT\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // allocate memory for child thread descriptor
    child_ptr = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );

    if( child_ptr == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate new thread\n",
        __FUNCTION__ );
        return -1;
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] allocated new thread descriptor %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr );
#endif

    // set type in thread descriptor
    child_ptr->type = THREAD_USER;

    // register new thread in process descriptor, and get a TRDID
    error = process_register_thread( child_process, child_ptr , &child_trdid );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
        __FUNCTION__, child_process->pid );
        thread_destroy( child_ptr );
        return -1;
    }

    // set trdid in thread descriptor
    child_ptr->trdid = child_trdid;

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] registered child thread %x in child process %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_trdid, child_process->pid );
#endif

    // get an user stack vseg from local VMM allocator
    child_us_vseg = vmm_create_vseg( child_process,
                                     VSEG_TYPE_STACK,
                                     LTID_FROM_TRDID( child_trdid ),
                                     0,                               // size unused
                                     0,                               // file_offset unused
                                     0,                               // file_size unused
                                     XPTR_NULL,                       // mapper_xp unused
                                     local_cxy );
    if( child_us_vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
        process_remove_thread( child_ptr );
        thread_destroy( child_ptr );
        return -1;
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] created an user stack vseg / vpn_base %x / %d pages\n",
__FUNCTION__, this->process->pid, this->trdid,
child_us_vseg->vpn_base, child_us_vseg->vpn_size );
#endif

    // initialize thread descriptor : child inherits the parent entry point
    // and arguments, but gets its own stack vseg
    error = thread_init( child_ptr,
                         child_process,
                         THREAD_USER,
                         child_trdid,
                         parent_func,
                         parent_args,
                         core_lid,
                         child_us_vseg );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
        vmm_remove_vseg( child_process , child_us_vseg );
        process_remove_thread( child_ptr );
        thread_destroy( child_ptr );
        return -1;
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] initialised thread %x in process %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
#endif

    // set detached flag if required
    // NOTE(review): plain assignment (not |=) — correct here only because
    // thread_init() resets flags to 0; verify if other flags are ever set before
    if( parent_flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;

    // allocate a CPU context for child thread
    if( hal_cpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
        vmm_remove_vseg( child_process , child_us_vseg );
        process_remove_thread( child_ptr );
        thread_destroy( child_ptr );
        return -1;
    }

    // allocate a FPU context for child thread
    if( hal_fpu_context_alloc( child_ptr ) )
    {
        printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
        vmm_remove_vseg( child_process , child_us_vseg );
        process_remove_thread( child_ptr );
        thread_destroy( child_ptr );
        return -1;
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] created CPU & FPU contexts for thread %x in process %x\n",
__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
#endif

    // scan parent GPT, and copy all valid entries
    // associated to user stack vseg into child GPT
    vpn_t  parent_vpn;
    vpn_t  child_vpn;
    bool_t mapped;
    ppn_t  ppn;
    vpn_t  parent_vpn_base = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_base ) );
    vpn_t  parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) );
    vpn_t  child_vpn_base  = child_us_vseg->vpn_base;

    for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ;
         parent_vpn < (parent_vpn_base + parent_vpn_size) ;
         parent_vpn++ , child_vpn++ )
    {
        // copy one PTE, setting the COW flag in the child entry
        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
                                  child_vpn,
                                  parent_gpt_xp,
                                  parent_vpn,
                                  true,                 // set cow
                                  &ppn,
                                  &mapped );
        if( error )
        {
            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
            vmm_remove_vseg( child_process , child_us_vseg );
            process_remove_thread( child_ptr );
            thread_destroy( child_ptr );
            return -1;
        }

        // increment pending forks counter for a mapped page
        // (a physical page shared COW must not be released
        //  while forked children still reference it)
        if( mapped )
        {
            // get pointers on the page descriptor
            xptr_t   page_xp  = ppm_ppn2page( ppn );
            cxy_t    page_cxy = GET_CXY( page_xp );
            page_t * page_ptr = GET_PTR( page_xp );

            // build extended pointers on forks and lock fields
            xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

            // get lock protecting page
            remote_busylock_acquire( lock_xp );

            // increment the forks counter in page descriptor
            hal_remote_atomic_add( forks_xp , 1 );

            // release lock protecting page
            remote_busylock_release( lock_xp );
        }
    }

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] copied STACK vseg PTEs & set COW in child GPT\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // set COW flag for all mapped entries of user stack vseg in parent GPT,
    // so the parent also faults on its first write after the fork
    hal_gpt_set_cow( parent_gpt_xp,
                     parent_vpn_base,
                     parent_vpn_size );

#if (DEBUG_THREAD_USER_FORK & 1)
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] set COW for STACK vseg in parent GPT\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // return child pointer
    *child_thread = child_ptr;

#if DEBUG_THREAD_USER_FORK
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_FORK < cycle )
printk("\n[%s] thread[%x,%x] exit / created thread[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
child_ptr->process->pid, child_ptr->trdid, cycle );
#endif

    return 0;

}  // end thread_user_fork()
648
649/////////////////////////////////////
// This function is called by the calling thread itself to reset its descriptor
// after an exec: it re-initializes the descriptor fields, the FPU and CPU
// contexts, and jumps to the new user code entry point.
// It does not return to the caller: hal_do_cpu_restore() restores the freshly
// built CPU context and transfers control to user code.
// @ argc : number of arguments for the new main thread.
// @ argv : pointer (as an integer) on the arguments array.
void thread_user_exec( uint32_t argc,
                       intptr_t argv )
{
    thread_t  * thread  = CURRENT_THREAD;
    process_t * process = thread->process;

#if DEBUG_THREAD_USER_EXEC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_EXEC < cycle )
printk("\n[%s] thread[%x,%x] enter / argc %d / argv %x / cycle %d\n",
__FUNCTION__, process->pid, thread->trdid, argc, argv, cycle );
#endif

// check parent thread attributes : must be a valid user thread
// holding no busylocks (it cannot block while holding one)
assert( __FUNCTION__, (thread->type      == THREAD_USER )     , "bad type" );
assert( __FUNCTION__, (thread->signature == THREAD_SIGNATURE) , "bad signature" );
assert( __FUNCTION__, (thread->busylocks == 0)                , "bad busylocks" );

    // re-initialize various thread descriptor fields
    thread->quantum         = 0;                               // TODO
    thread->ticks_nr        = 0;                               // TODO
    thread->time_last_check = 0;                               // TODO
    thread->entry_func      = (void*)process->vmm.entry_point;
    thread->flags           = THREAD_FLAG_DETACHED;            // main always detached
    thread->blocked         = 0;
    thread->errno           = 0;
    thread->fork_user       = 0;
    thread->fork_cxy        = 0;

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // re-initialize join_lock
    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );

    // release FPU ownership if required
    if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;

    // initialize thread FPU context
    hal_fpu_context_init( thread );

    // initialize thread CPU context
    hal_cpu_context_init( thread,
                          true,          // main thread
                          argc,
                          argv );

#if DEBUG_THREAD_USER_EXEC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_THREAD_USER_EXEC < cycle )
{
    printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
    __FUNCTION__, process->pid, thread->trdid, cycle );

    hal_cpu_context_display( XPTR( local_cxy , thread ) );
    hal_vmm_display( XPTR( local_cxy , process ) , true );
}
#endif

    // restore CPU registers => jump to user code (never returns)
    hal_do_cpu_restore( thread->cpu_context );

}  // end thread_user_exec()
713
714/////////////////////////////////////////////////////////
715error_t thread_kernel_create( thread_t     ** new_thread,
716                              thread_type_t   type,
717                              void          * func,
718                              void          * args,
719                                              lid_t           core_lid )
720{
721    error_t        error;
722        thread_t     * thread;       // pointer on new thread descriptor
723    trdid_t        trdid;        // new thread identifier
724
725    thread_t * this = CURRENT_THREAD; 
726
727assert( __FUNCTION__, ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
728"illegal thread type" );
729
730assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr) ,
731"illegal core_lid" );
732
733#if DEBUG_THREAD_KERNEL_CREATE
734uint32_t   cycle = (uint32_t)hal_get_cycles();
735if( DEBUG_THREAD_KERNEL_CREATE < cycle )
736printk("\n[%s] thread[%x,%x] enter / requested_type %s / cycle %d\n",
737__FUNCTION__, this->process->pid, this->trdid, thread_type_str(type), cycle );
738#endif
739
740    // allocate memory for new thread descriptor
741    thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
742
743    if( thread == NULL )
744    {
745        printk("\n[ERROR] in %s : thread %x in process %x\n"
746        "   no memory for thread descriptor\n",
747        __FUNCTION__, this->trdid, this->process->pid );
748        return ENOMEM;
749    }
750
751    // set type in thread descriptor
752    thread->type = type;
753
754    // register new thread in local kernel process descriptor, and get a TRDID
755    error = process_register_thread( &process_zero , thread , &trdid );
756
757    if( error )
758    {
759        printk("\n[ERROR] in %s : cannot register thread in kernel process\n", __FUNCTION__ );
760        return -1;
761    }
762
763    // set trdid in thread descriptor
764    thread->trdid = trdid;
765
766    // initialize thread descriptor
767    error = thread_init( thread,
768                         &process_zero,
769                         type,
770                         trdid,
771                         func,
772                         args,
773                         core_lid,
774                         NULL );  // no user stack for a kernel thread
775
776    if( error ) // release allocated memory for thread descriptor
777    {
778        printk("\n[ERROR] in %s : cannot initialize thread descriptor\n", __FUNCTION__ );
779        thread_destroy( thread );
780        return ENOMEM;
781    }
782
783    // allocate & initialize CPU context
784        error = hal_cpu_context_alloc( thread );
785
786    if( error )
787    {
788        printk("\n[ERROR] in %s : thread %x in process %x\n"
789        "    cannot create CPU context\n",
790        __FUNCTION__, this->trdid, this->process->pid );
791        thread_destroy( thread );
792        return EINVAL;
793    }
794
795    hal_cpu_context_init( thread,
796                          false , 0 , 0 );  // not a main thread
797
798#if DEBUG_THREAD_KERNEL_CREATE
799cycle = (uint32_t)hal_get_cycles();
800if( DEBUG_THREAD_KERNEL_CREATE < cycle )
801printk("\n[%s] thread[%x,%x] exit / new_thread %x / type %s / cycle %d\n",
802__FUNCTION__, this->process->pid, this->trdid, thread, thread_type_str(type), cycle );
803#endif
804
805    *new_thread = thread;
806        return 0;
807
808} // end thread_kernel_create()
809
810//////////////////////////////////////////////
811void thread_idle_init( thread_t      * thread,
812                       thread_type_t   type,
813                       void          * func,
814                       void          * args,
815                           lid_t           core_lid )
816{
817    trdid_t trdid;   
818    error_t error;
819
820// check arguments
821assert( __FUNCTION__, (type == THREAD_IDLE),
822"illegal thread type" );
823
824assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr),
825"illegal core index" );
826
827    // set type in thread descriptor
828    thread->type = THREAD_IDLE;
829
830    // register idle thread in local kernel process descriptor, and get a TRDID
831    error = process_register_thread( &process_zero , thread , &trdid );
832
833assert( __FUNCTION__, (error == 0),
834"cannot register idle_thread in kernel process" );
835
836    // set trdid in thread descriptor
837    thread->trdid = trdid;
838
839    // initialize thread descriptor
840    error = thread_init( thread,
841                         &process_zero,
842                         THREAD_IDLE,
843                         trdid,
844                         func,
845                         args,
846                         core_lid,
847                         NULL );   // no user stack for a kernel thread
848
849assert( __FUNCTION__, (error == 0),
850"cannot initialize idle_thread" );
851
852    // allocate CPU context
853    error = hal_cpu_context_alloc( thread );
854
855assert( __FUNCTION__,(error == 0),
856"cannot allocate CPU context" );
857
858    // initialize CPU context
859    hal_cpu_context_init( thread,
860                          false , 0 , 0 );   // not a main thread
861
862}  // end thread_idle_init()
863
864////////////////////////////////////////////
865uint32_t thread_destroy( thread_t * thread )
866{
867    reg_t           save_sr;
868    uint32_t        count;
869
870    thread_type_t   type    = thread->type;
871    process_t     * process = thread->process;
872    core_t        * core    = thread->core;
873
874#if DEBUG_THREAD_DESTROY
875uint32_t   cycle;
876thread_t * this  = CURRENT_THREAD;
877#endif
878
879#if (DEBUG_THREAD_DESTROY & 1)
880cycle = (uint32_t)hal_get_cycles();
881if( DEBUG_THREAD_DESTROY < cycle )
882printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
883__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
884#endif
885
886    // check calling thread busylocks counter
887    thread_assert_can_yield( thread , __FUNCTION__ );
888
889#if CONFIG_INSTRUMENTATION_PGFAULTS
890process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
891process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
892process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
893process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
894process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
895process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
896#endif
897
898#if (CONFIG_INSTRUMENTATION_PGFAULTS & 1)
899uint32_t false_nr    = thread->info.false_pgfault_nr;
900uint32_t false_cost  = thread->info.false_pgfault_cost;
901uint32_t false_max   = thread->info.false_pgfault_max;
902uint32_t false_one   = false_nr  ? (false_cost  / false_nr ) : 0;
903
904uint32_t local_nr    = thread->info.local_pgfault_nr;
905uint32_t local_cost  = thread->info.local_pgfault_cost;
906uint32_t local_max   = thread->info.local_pgfault_max;
907uint32_t local_one   = local_nr  ? (local_cost  / local_nr ) : 0;
908
909uint32_t global_nr   = thread->info.global_pgfault_nr;
910uint32_t global_cost = thread->info.global_pgfault_cost;
911uint32_t global_max  = thread->info.global_pgfault_max;
912uint32_t global_one  = global_nr ? (global_cost / global_nr) : 0;
913
914printk("\n***** thread[%x,%x] page faults\n"
915       " - false  : %d events / cost %d cycles / max %d cycles\n"
916       " - local  : %d events / cost %d cycles / max %d cycles\n"
917       " - global : %d events / cost %d cycles / max %d cycles\n",
918       thread->process->pid, thread->trdid,
919       false_nr , false_one , false_max,
920       local_nr , local_one , local_max,
921       global_nr, global_one, global_max );
922#endif
923
924    // unlink embedded alarm from the list rooted in core when required
925    list_entry_t * entry = &thread->alarm.list;
926    if( (entry->next != NULL) || (entry->pred != NULL) )  list_unlink( entry );
927
928    // remove thread from process th_tbl[]
929    count = process_remove_thread( thread );
930
931    // release memory allocated for CPU context and FPU context
932        hal_cpu_context_destroy( thread );
933        hal_fpu_context_destroy( thread );
934       
935    // release user stack vseg (for an user thread only)
936    if( type == THREAD_USER )  vmm_remove_vseg( process , thread->user_stack_vseg );
937
938    // release FPU ownership if required
939        hal_disable_irq( &save_sr );
940        if( core->fpu_owner == thread )
941        {
942                core->fpu_owner = NULL;
943                hal_fpu_disable();
944        }
945        hal_restore_irq( save_sr );
946
947    // invalidate thread descriptor
948        thread->signature = 0;
949
950    // release memory for thread descriptor (including kernel stack)
951    kmem_free( thread , CONFIG_THREAD_DESC_ORDER );
952
953#if DEBUG_THREAD_DESTROY
954cycle = (uint32_t)hal_get_cycles();
955if( DEBUG_THREAD_DESTROY < cycle )
956printk("\n[%s] thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
957__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
958#endif
959
960    return count;
961
962}   // end thread_destroy()
963
964//////////////////////////////////////////////////
965inline void thread_set_req_ack( thread_t * target,
966                                uint32_t * rsp_count )
967{
968    reg_t    save_sr;   // for critical section
969
970    // get pointer on target thread scheduler
971    scheduler_t * sched = &target->core->scheduler;
972
973    // wait scheduler ready to handle a new request
974    while( sched->req_ack_pending ) asm volatile( "nop" );
975   
976    // enter critical section
977    hal_disable_irq( &save_sr );
978     
979    // set request in target thread scheduler
980    sched->req_ack_pending = true;
981
982    // set ack request in target thread "flags"
983    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );
984
985    // set pointer on responses counter in target thread
986    target->ack_rsp_count = rsp_count;
987   
988    // exit critical section
989    hal_restore_irq( save_sr );
990
991    hal_fence();
992
993}  // thread_set_req_ack()
994
995/////////////////////////////////////////////////////
996inline void thread_reset_req_ack( thread_t * target )
997{
998    reg_t    save_sr;   // for critical section
999
1000    // get pointer on target thread scheduler
1001    scheduler_t * sched = &target->core->scheduler;
1002
1003    // check signal pending in scheduler
1004    assert( __FUNCTION__, sched->req_ack_pending , "no pending signal" );
1005   
1006    // enter critical section
1007    hal_disable_irq( &save_sr );
1008     
1009    // reset signal in scheduler
1010    sched->req_ack_pending = false;
1011
1012    // reset signal in thread "flags"
1013    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );
1014
1015    // reset pointer on responses counter
1016    target->ack_rsp_count = NULL;
1017   
1018    // exit critical section
1019    hal_restore_irq( save_sr );
1020
1021    hal_fence();
1022
1023}  // thread_reset_req_ack()
1024
1025//////////////////////////////////////
1026void thread_block( xptr_t   thread_xp,
1027                   uint32_t cause )
1028{
1029    // get thread cluster and local pointer
1030    cxy_t      cxy = GET_CXY( thread_xp );
1031    thread_t * ptr = GET_PTR( thread_xp );
1032
1033    // set blocking cause
1034    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
1035    hal_fence();
1036
1037#if DEBUG_THREAD_BLOCK
1038uint32_t    cycle   = (uint32_t)hal_get_cycles();
1039process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
1040thread_t  * this    = CURRENT_THREAD;
1041if( DEBUG_THREAD_BLOCK < cycle )
1042printk("\n[%s] thread[%x,%x] blocked thread %x in process %x / cause %x\n",
1043__FUNCTION__, this->process->pid, this->trdid,
1044ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
1045#endif
1046
1047} // end thread_block()
1048
1049////////////////////////////////////////////
1050uint32_t thread_unblock( xptr_t   thread_xp,
1051                         uint32_t cause )
1052{
1053    // get thread cluster and local pointer
1054    cxy_t      cxy = GET_CXY( thread_xp );
1055    thread_t * ptr = GET_PTR( thread_xp );
1056
1057    // reset blocking cause
1058    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
1059    hal_fence();
1060
1061#if DEBUG_THREAD_BLOCK
1062uint32_t    cycle   = (uint32_t)hal_get_cycles();
1063process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
1064thread_t  * this    = CURRENT_THREAD;
1065if( DEBUG_THREAD_BLOCK < cycle )
1066printk("\n[%s] thread[%x,%x] unblocked thread %x in process %x / cause %x\n",
1067__FUNCTION__, this->process->pid, this->trdid,
1068ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
1069#endif
1070
1071    // return a non zero value if the cause bit is modified
1072    return( previous & cause );
1073
1074}  // end thread_unblock()
1075
1076//////////////////////////////////////////////
1077void thread_delete_request( xptr_t  target_xp,
1078                            bool_t  is_forced )
1079{
1080    reg_t       save_sr;                // for critical section
1081    bool_t      target_join_done;       // joining thread arrived first
1082    bool_t      target_attached;        // target thread attached
1083    xptr_t      killer_xp;              // extended pointer on killer thread (this)
1084    thread_t  * killer_ptr;             // pointer on killer thread (this)
1085    cxy_t       target_cxy;             // target thread cluster     
1086    thread_t  * target_ptr;             // pointer on target thread
1087    process_t * target_process;         // pointer on target process
1088    pid_t       target_pid;             // target process identifier
1089    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
1090    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
1091    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
1092    trdid_t     target_trdid;           // target thread identifier
1093    ltid_t      target_ltid;            // target thread local index
1094    uint32_t    target_flags;           // target thread flags
1095    xptr_t      joining_xp;             // extended pointer on joining thread
1096    thread_t  * joining_ptr;            // local pointer on joining thread
1097    cxy_t       joining_cxy;            // joining thread cluster
1098
1099    // get target thread cluster and local pointer
1100    target_cxy      = GET_CXY( target_xp );
1101    target_ptr      = GET_PTR( target_xp );
1102
1103    // get target thread trdid, ltid, flags, and process PID
1104    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
1105    target_ltid     = LTID_FROM_TRDID( target_trdid );
1106    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
1107    target_flags    = hal_remote_l32( target_flags_xp );
1108    target_process  = hal_remote_lpt( XPTR( target_cxy , &target_ptr->process ) );
1109    target_pid      = hal_remote_l32( XPTR( target_cxy , &target_process->pid ) );
1110    target_attached = ((target_flags & THREAD_FLAG_DETACHED) == 0); 
1111
1112    // get killer thread pointers
1113    killer_ptr = CURRENT_THREAD;
1114    killer_xp  = XPTR( local_cxy , killer_ptr );
1115
1116#if DEBUG_THREAD_DELETE
1117uint32_t cycle  = (uint32_t)hal_get_cycles();
1118if( DEBUG_THREAD_DELETE < cycle )
1119printk("\n[%s] killer[%x,%x] enters / target[%x,%x] / forced %d / flags %x / cycle %d\n",
1120__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid, 
1121target_pid, target_trdid, is_forced, target_flags, cycle );
1122#endif
1123
1124// check target thread is not the main thread, because the main thread
1125// must be deleted by the parent process sys_wait() function
1126assert( __FUNCTION__, ((CXY_FROM_PID( target_pid ) != target_cxy) || (target_ltid != 0)),
1127"target thread cannot be the main thread" );
1128
1129    // check killer thread can yield
1130    thread_assert_can_yield( killer_ptr , __FUNCTION__ ); 
1131
1132    // if the target thread is attached, we must synchonize with the joining thread
1133    // before blocking and marking the target thead for delete.
1134
1135    if( target_attached && (is_forced == false) ) // synchronize with joining thread
1136    {
1137        // build extended pointers on target thread join fields
1138        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
1139        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
1140
1141        // enter critical section
1142        hal_disable_irq( &save_sr );
1143
1144        // take the join_lock in target thread descriptor
1145        remote_busylock_acquire( target_join_lock_xp );
1146
1147        // get join_done from target thread descriptor
1148        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
1149   
1150        if( target_join_done )                     // joining thread arrived first
1151        {
1152            // get extended pointer on joining thread
1153            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
1154
1155            // get cluster and local pointer on joining thread
1156            joining_ptr = GET_PTR( joining_xp );
1157            joining_cxy = GET_CXY( joining_xp );
1158
1159            // copy exit_status from target thread to joining thread, because
1160            // target thread may be deleted before joining thread resume
1161            void * status = hal_remote_lpt( XPTR( target_cxy , &target_ptr->exit_status ) );
1162            hal_remote_spt( XPTR( joining_cxy , &joining_ptr->exit_status ) , status );
1163           
1164            // reset the join_done flag in target thread
1165            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
1166
1167            // unblock the joining thread
1168            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
1169
1170            // release the join_lock in target thread descriptor
1171            remote_busylock_release( target_join_lock_xp );
1172
1173            // block the target thread
1174            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1175
1176            // set the REQ_DELETE flag in target thread descriptor
1177            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1178
1179            // exit critical section
1180            hal_restore_irq( save_sr );
1181
1182#if DEBUG_THREAD_DELETE
1183cycle  = (uint32_t)hal_get_cycles();
1184if( DEBUG_THREAD_DELETE < cycle )
1185printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
1186__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1187target_pid, target_trdid, cycle );
1188#endif
1189
1190        }
1191        else                                      // killer thread arrived first
1192        {
1193            // set the kill_done flag in target thread
1194            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1195
1196            // block target thread on BLOCKED_JOIN
1197            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1198
1199            // set extended pointer on killer thread in target thread
1200            hal_remote_s64( target_join_xp_xp , killer_xp );
1201
1202            // release the join_lock in target thread descriptor
1203            remote_busylock_release( target_join_lock_xp );
1204
1205#if DEBUG_THREAD_DELETE
1206cycle  = (uint32_t)hal_get_cycles();
1207if( DEBUG_THREAD_DELETE < cycle )
1208printk("\n[%s] killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
1209__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1210target_pid, target_trdid, cycle );
1211#endif
1212            // deschedule
1213            sched_yield( "killer thread wait joining thread" );
1214
1215            // block the target thread
1216            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1217
1218            // set the REQ_DELETE flag in target thread descriptor
1219            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1220
1221            // exit critical section
1222            hal_restore_irq( save_sr );
1223
1224#if DEBUG_THREAD_DELETE
1225cycle  = (uint32_t)hal_get_cycles();
1226if( DEBUG_THREAD_DELETE < cycle )
1227printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
1228__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1229target_pid, target_trdid, cycle );
1230#endif
1231
1232        }
1233    }
1234    else                     // no synchronization with joining thread required
1235    {
1236        // block the target thread
1237        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1238
1239        // set the REQ_DELETE flag in target thread descriptor
1240        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1241
1242#if DEBUG_THREAD_DELETE
1243cycle  = (uint32_t)hal_get_cycles();
1244if( DEBUG_THREAD_DELETE < cycle )
1245printk("\n[%s] killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
1246__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1247target_pid, target_trdid, cycle );
1248#endif
1249
1250    }
1251}  // end thread_delete_request()
1252
1253
1254
1255/////////////////////////////
1256void thread_idle_func( void )
1257{
1258
1259#if DEBUG_THREAD_IDLE
1260uint32_t cycle;
1261#endif
1262
1263    while( 1 )
1264    {
1265        // unmask IRQs
1266        hal_enable_irq( NULL );
1267
1268        // force core to low-power mode (optional)
1269        if( CONFIG_SCHED_IDLE_MODE_SLEEP ) 
1270        {
1271
1272#if DEBUG_THREAD_IDLE
1273cycle = (uint32_t)hal_get_cycles();
1274if( DEBUG_THREAD_IDLE < cycle )
1275printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
1276__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
1277#endif
1278
1279            hal_core_sleep();
1280
1281#if DEBUG_THREAD_IDLE
1282cycle = (uint32_t)hal_get_cycles();
1283if( DEBUG_THREAD_IDLE < cycle )
1284printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n",
1285__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
1286#endif
1287
1288        }
1289
1290#if DEBUG_THREAD_IDLE
1291cycle = (uint32_t)hal_get_cycles();
1292if( DEBUG_THREAD_IDLE < cycle )
1293sched_remote_display( local_cxy , CURRENT_THREAD->core->lid );
1294#endif     
1295        // search a runable thread
1296        sched_yield( "running idle thread" );
1297
1298    } // end while
1299
1300}  // end thread_idle()
1301
1302
1303///////////////////////////////////////////
1304void thread_time_update( thread_t * thread,
1305                         bool_t     is_user )
1306{
1307    cycle_t current_cycle;   // current cycle counter value
1308    cycle_t last_cycle;      // last cycle counter value
1309
1310    // get pointer on thread_info structure
1311    thread_info_t * info = &thread->info;
1312
1313    // get last cycle counter value
1314    last_cycle = info->last_cycle;
1315
1316    // get current cycle counter value
1317    current_cycle = hal_get_cycles();
1318
1319    // update thread_info structure
1320    info->last_cycle = current_cycle;
1321
1322    // update time in thread_info
1323    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
1324    else          info->sys_cycles += (current_cycle - last_cycle);
1325
1326}  // end thread_time_update()
1327
1328/////////////////////////////////////
1329xptr_t thread_get_xptr( pid_t    pid,
1330                        trdid_t  trdid )
1331{
1332    cxy_t         target_cxy;          // target thread cluster identifier
1333    ltid_t        target_thread_ltid;  // target thread local index
1334    thread_t    * target_thread_ptr;   // target thread local pointer
1335    xptr_t        target_process_xp;   // extended pointer on target process descriptor
1336    process_t   * target_process_ptr;  // local pointer on target process descriptor
1337    pid_t         target_process_pid;  // target process identifier
1338    xlist_entry_t root;                // root of list of process in target cluster
1339    xptr_t        lock_xp;             // extended pointer on lock protecting  this list
1340
1341#if DEBUG_THREAD_GET_XPTR
1342uint32_t cycle  = (uint32_t)hal_get_cycles();
1343thread_t * this = CURRENT_THREAD;
1344if( DEBUG_THREAD_GET_XPTR < cycle )
1345printk("\n[%s] thread %x in process %x enters / pid %x / trdid %x / cycle %d\n",
1346__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1347#endif
1348
1349    // get target cluster identifier and local thread identifier
1350    target_cxy         = CXY_FROM_TRDID( trdid );
1351    target_thread_ltid = LTID_FROM_TRDID( trdid );
1352
1353    // check trdid argument
1354        if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 
1355        cluster_is_active( target_cxy ) == false )                return XPTR_NULL;
1356
1357    // get root of list of process descriptors in target cluster
1358    hal_remote_memcpy( XPTR( local_cxy  , &root ),
1359                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
1360                       sizeof(xlist_entry_t) );
1361
1362    // get extended pointer on lock protecting the list of local processes
1363    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
1364
1365    // take the lock protecting the list of processes in target cluster
1366    remote_queuelock_acquire( lock_xp );
1367
1368#if( DEBUG_THREAD_GET_XPTR & 1 )
1369if( DEBUG_THREAD_GET_XPTR < cycle )
1370printk("\n[%s] scan processes in cluster %x :\n", __FUNCTION__, target_cxy );
1371#endif
1372
1373    // scan the list of local processes in target cluster
1374    xptr_t  iter;
1375    bool_t  found = false;
1376    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
1377    {
1378        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1379        target_process_ptr = GET_PTR( target_process_xp );
1380        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
1381
1382#if( DEBUG_THREAD_GET_XPTR & 1 )
1383if( DEBUG_THREAD_GET_XPTR < cycle )
1384printk(" - process %x\n", target_process_pid );
1385#endif
1386
1387        if( target_process_pid == pid )
1388        {
1389            found = true;
1390            break;
1391        }
1392    }
1393
1394    // release the lock protecting the list of processes in target cluster
1395    remote_queuelock_release( lock_xp );
1396
1397    // check PID found
1398    if( found == false ) 
1399    {
1400
1401#if( DEBUG_THREAD_GET_XPTR & 1 )
1402if( DEBUG_THREAD_GET_XPTR < cycle )
1403printk("\n[%s] pid %x not found in cluster %x\n",
1404__FUNCTION__, pid, target_cxy );
1405#endif
1406        return XPTR_NULL;
1407    }
1408
1409    // get target thread local pointer
1410    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
1411    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
1412
1413    if( target_thread_ptr == NULL )
1414    {
1415
1416#if( DEBUG_THREAD_GET_XPTR & 1 )
1417if( DEBUG_THREAD_GET_XPTR < cycle )
1418printk("\n[%s] thread %x not registered in process %x in cluster %x\n",
1419__FUNCTION__, trdid, pid, target_cxy );
1420#endif
1421        return XPTR_NULL;
1422    }
1423
1424#if DEBUG_THREAD_GET_XPTR
1425cycle  = (uint32_t)hal_get_cycles();
1426if( DEBUG_THREAD_GET_XPTR < cycle )
1427printk("\n[%s] thread %x in process %x exit / pid %x / trdid %x / cycle %d\n",
1428__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1429#endif
1430
1431    return XPTR( target_cxy , target_thread_ptr );
1432
1433}  // end thread_get_xptr()
1434
1435///////////////////////////////////////////////////
1436void thread_assert_can_yield( thread_t    * thread,
1437                              const char  * func_str )
1438{
1439    // does nothing if thread does not hold any busylock
1440
1441    if( thread->busylocks )
1442    {
1443        // get pointers on TXT0 chdev
1444        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1445        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1446        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1447
1448        // get extended pointer on TXT0 lock
1449        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1450
1451        // get TXT0 lock
1452        remote_busylock_acquire( txt0_lock_xp );
1453
1454        // display error message on TXT0
1455        nolock_printk("\n[PANIC] in %s / thread[%x,%x] cannot yield : "
1456        "hold %d busylock(s) / cycle %d\n",
1457        func_str, thread->process->pid, thread->trdid,
1458        thread->busylocks - 1, (uint32_t)hal_get_cycles() );
1459
1460#if DEBUG_BUSYLOCK_TYPE
1461
1462// scan list of busylocks
1463xptr_t    iter_xp;
1464xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
1465XLIST_FOREACH( root_xp , iter_xp )
1466{
1467    xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1468    cxy_t        lock_cxy  = GET_CXY( lock_xp );
1469    busylock_t * lock_ptr  = GET_PTR( lock_xp );
1470    uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
1471    nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
1472}
1473
1474#endif
1475
1476        // release TXT0 lock
1477        remote_busylock_release( txt0_lock_xp );
1478
1479        // suicide
1480        hal_core_sleep();
1481    }
1482}  // end thread_assert_can yield()
1483
1484//////////////////////////////////////////////////////
1485void thread_display_busylocks( xptr_t       thread_xp,
1486                               const char * string )
1487{
1488
1489    cxy_t      thread_cxy = GET_CXY( thread_xp );
1490    thread_t * thread_ptr = GET_PTR( thread_xp );
1491
1492#if DEBUG_BUSYLOCK
1493
1494    xptr_t     iter_xp;
1495
1496    // get relevant info from target thread descriptor
1497    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
1498    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
1499    process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
1500    pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
1501
1502    // get extended pointer on root of busylocks
1503    xptr_t root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
1504
1505    // get pointers on TXT0 chdev
1506    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1507    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1508    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1509
1510    // get extended pointer on remote TXT0 lock
1511    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1512
1513    // get TXT0 lock
1514    remote_busylock_acquire( txt0_lock_xp );
1515
1516    // display header
1517    nolock_printk("\n***** thread[%x,%x] in <%s> : %d busylocks *****\n",
1518    pid, trdid, string, locks );
1519
1520    // scan the xlist of busylocks when required
1521    if( locks )
1522    {
1523        XLIST_FOREACH( root_xp , iter_xp )
1524        {
1525            xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1526            cxy_t        lock_cxy  = GET_CXY( lock_xp );
1527            busylock_t * lock_ptr  = GET_PTR( lock_xp );
1528            uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
1529            nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
1530        }
1531    }
1532
1533    // release TXT0 lock
1534    remote_busylock_release( txt0_lock_xp );
1535
1536#else
1537
1538printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
1539__FUNCTION__, string, thread_cxy, thread_ptr );
1540
1541#endif
1542
1543    return;
1544
1545}  // end thread_display_busylock()
1546
Note: See TracBrowser for help on using the repository browser.