source: trunk/kernel/kern/thread.c

Last change on this file was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 53.4 KB
Rev  Line
[1]1/*
[564]2 * thread.c -   thread operations implementation (user & kernel)
[171]3 *
[1]4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012)
[683]5 *         Alain Greiner    (2016,2017,2018,2019,2020)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
[5]9 * This file is part of ALMOS-MKH.
[1]10 *
[5]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[5]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
[5]21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
[625]31#include <hal_vmm.h>
[669]32#include <hal_switch.h>
[1]33#include <memcpy.h>
34#include <printk.h>
35#include <cluster.h>
36#include <process.h>
37#include <scheduler.h>
[188]38#include <dev_pic.h>
[1]39#include <core.h>
40#include <list.h>
41#include <xlist.h>
42#include <page.h>
43#include <kmem.h>
44#include <ppm.h>
45#include <thread.h>
[446]46#include <rpc.h>
[1]47
48//////////////////////////////////////////////////////////////////////////////////////
49// Extern global variables
50//////////////////////////////////////////////////////////////////////////////////////
51
[564]52extern process_t            process_zero;       // allocated in kernel_init.c
53extern char               * lock_type_str[];    // allocated in kernel_init.c
54extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
[1]55
56//////////////////////////////////////////////////////////////////////////////////////
[16]57// This function returns a printable string for the thread type.
[1]58//////////////////////////////////////////////////////////////////////////////////////
[527]59const char * thread_type_str( thread_type_t type )
[5]60{
[527]61  switch ( type ) {
62  case THREAD_USER:   return "USR";
63  case THREAD_RPC:    return "RPC";
64  case THREAD_DEV:    return "DEV";
65  case THREAD_IDLE:   return "IDL";
66  default:            return "undefined";
67  }
[5]68}
69
[1]70/////////////////////////////////////////////////////////////////////////////////////
[14]71// This static function initializes a thread descriptor (kernel or user).
[438]72// It can be called by the four functions:
[14]73// - thread_user_create()
74// - thread_user_fork()
75// - thread_kernel_create()
[438]76// - thread_idle_init()
[625]77// The "type" and "trdid" fields must have been previously set.
[438]78// It updates the local DQDT.
[14]79/////////////////////////////////////////////////////////////////////////////////////
[625]80// @ thread          : pointer on local thread descriptor
81// @ process         : pointer on local process descriptor.
82// @ type            : thread type.
83// @ trdid           : thread identifier
84// @ func            : pointer on thread entry function.
85// @ args            : pointer on thread entry function arguments.
86// @ core_lid        : target core local index.
87// @ user_stack_vseg : local pointer on user stack vseg (user thread only)
[14]88/////////////////////////////////////////////////////////////////////////////////////
89static error_t thread_init( thread_t      * thread,
90                            process_t     * process,
91                            thread_type_t   type,
[625]92                            trdid_t         trdid,
[14]93                            void          * func,
94                            void          * args,
95                            lid_t           core_lid,
[625]96                            vseg_t        * user_stack_vseg )
[14]97{
98
[635]99// check type and trdid fields are initialized
[669]100assert( __FUNCTION__, (thread->type == type)   , "bad type argument" );
101assert( __FUNCTION__, (thread->trdid == trdid) , "bad trdid argument" );
[14]102
[564]103#if DEBUG_THREAD_INIT
[593]104uint32_t   cycle = (uint32_t)hal_get_cycles();
105thread_t * this  = CURRENT_THREAD;
[564]106if( DEBUG_THREAD_INIT < cycle )
[635]107printk("\n[%s] thread[%x,%x] enter for thread[%x,%x] / cycle %d\n",
108__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[443]109#endif
110
[407]111    // compute thread descriptor size without kernel stack
112    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4; 
113
[1]114        // Initialize new thread descriptor
115    thread->quantum         = 0;            // TODO
116    thread->ticks_nr        = 0;            // TODO
[457]117    thread->time_last_check = 0;            // TODO
[625]118        thread->core            = &LOCAL_CLUSTER->core_tbl[core_lid];
[1]119        thread->process         = process;
[564]120    thread->busylocks       = 0;
[1]121
[564]122#if DEBUG_BUSYLOCK
[683]123xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
[409]124#endif
[1]125
[625]126    thread->user_stack_vseg = user_stack_vseg;
[407]127    thread->k_stack_base    = (intptr_t)thread + desc_size;
128    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
[1]129    thread->entry_func      = func;         // thread entry point
130    thread->entry_args      = args;         // thread function arguments
[171]131    thread->flags           = 0;            // all flags reset
[1]132    thread->errno           = 0;            // no error detected
[407]133    thread->fork_user       = 0;            // no user defined placement for fork
134    thread->fork_cxy        = 0;            // user defined target cluster for fork
[409]135    thread->blocked         = THREAD_BLOCKED_GLOBAL;
[1]136
[564]137    // initialize sched list
[1]138    list_entry_init( &thread->sched_list );
139
[683]140    // initialize the embedded alarm
[669]141    list_entry_init( &thread->alarm.list );
142
[564]143    // initialize waiting queue entries
144    list_entry_init( &thread->wait_list );
145    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );
146
147    // initialize thread info
[1]148    memset( &thread->info , 0 , sizeof(thread_info_t) );
149
[564]150    // initialize join_lock
151    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
[409]152
[1]153    // initialise signature
154        thread->signature = THREAD_SIGNATURE;
155
[443]156    // FIXME define and call an architecture specific hal_thread_init()
157    // function to initialise the save_sr field
[408]158    thread->save_sr = 0xFF13;
159
[171]160    // register new thread in core scheduler
[1]161    sched_register_thread( thread->core , thread );
162
[438]163        // update DQDT
[583]164    dqdt_increment_threads();
[438]165
[683]166    // initialize the timer alarm
167    alarm_init( &thread->alarm );
168
[641]169#if CONFIG_INSTRUMENTATION_PGFAULTS
[683]170thread->info.false_pgfault_nr    = 0;
171thread->info.false_pgfault_cost  = 0;
172thread->info.false_pgfault_max   = 0;
173thread->info.local_pgfault_nr    = 0;
174thread->info.local_pgfault_cost  = 0;
175thread->info.local_pgfault_max   = 0;
176thread->info.global_pgfault_nr   = 0;
177thread->info.global_pgfault_cost = 0;
178thread->info.global_pgfault_max  = 0;
[641]179#endif
180
[564]181#if DEBUG_THREAD_INIT
[443]182cycle = (uint32_t)hal_get_cycles();
[564]183if( DEBUG_THREAD_INIT < cycle )
[635]184printk("\n[%s] thread[%x,%x] exit for thread[%x,%x] / cycle %d\n",
185__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[443]186#endif
187
[1]188        return 0;
189
[296]190} // end thread_init()
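/////////////////////////////////////////////////////////////////////////////////////
// The calling protocol expected by thread_init() is sketched below. This fragment
// is illustrative only (it condenses what the four callers listed above actually
// do): the caller must set the "type" field, obtain a TRDID from the
// process_register_thread() function, and set the "trdid" field before calling
// thread_init(). Error handling is omitted for brevity.
//
//     thread->type  = THREAD_USER;
//     error         = process_register_thread( process , thread , &trdid );
//     thread->trdid = trdid;
//     error         = thread_init( thread , process , THREAD_USER , trdid ,
//                                  func , args , core_lid , us_vseg );
/////////////////////////////////////////////////////////////////////////////////////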
191
[625]192//////////////////////////////////////////////////
[23]193error_t thread_user_create( pid_t             pid,
194                            void            * start_func,
195                            void            * start_arg,
[1]196                            pthread_attr_t  * attr,
[23]197                            thread_t       ** new_thread )
[1]198{
199    error_t        error;
200        thread_t     * thread;       // pointer on created thread descriptor
[625]201    trdid_t        trdid;        // created thread identifier
[1]202    process_t    * process;      // pointer to local process descriptor
203    lid_t          core_lid;     // selected core local index
[625]204    vseg_t       * us_vseg;      // user stack vseg
[1]205
[669]206assert( __FUNCTION__, (attr != NULL) , "pthread attributes must be defined" );
[5]207
[438]208#if DEBUG_THREAD_USER_CREATE
[593]209thread_t * this  = CURRENT_THREAD;
210uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]211if( DEBUG_THREAD_USER_CREATE < cycle )
[593]212printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
213__FUNCTION__, this->process->pid , this->trdid , local_cxy , pid , cycle );
[433]214#endif
[428]215
[23]216    // get process descriptor local copy
217    process = process_get_local_copy( pid );
[440]218
[23]219    if( process == NULL )
220    {
221                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
[625]222        __FUNCTION__ , pid );
223        return -1;
[23]224    }
225
[443]226#if( DEBUG_THREAD_USER_CREATE & 1)
227if( DEBUG_THREAD_USER_CREATE < cycle )
[593]228printk("\n[%s] process descriptor = %x for process %x in cluster %x\n",
[443]229__FUNCTION__, process , pid , local_cxy );
230#endif
231
[171]232    // select a target core in local cluster
[407]233    if( attr->attributes & PT_ATTR_CORE_DEFINED )
[23]234    {
[407]235        core_lid = attr->lid;
236        if( core_lid >= LOCAL_CLUSTER->cores_nr )
237        {
238                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
239            __FUNCTION__ , core_lid );
[625]240            return -1;
[407]241        }
[23]242    }
[407]243    else
244    {
[637]245        core_lid = cluster_select_local_core( local_cxy );
[407]246    }
[1]247
[443]248#if( DEBUG_THREAD_USER_CREATE & 1)
249if( DEBUG_THREAD_USER_CREATE < cycle )
[593]250printk("\n[%s] core[%x,%d] selected\n",
[443]251__FUNCTION__, local_cxy , core_lid );
252#endif
253
[625]254    // allocate memory for thread descriptor
[683]255    thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
[1]256
[625]257    if( thread == NULL )
[23]258    {
[625]259            printk("\n[ERROR] in %s : cannot create new thread in cluster %x\n",
260        __FUNCTION__, local_cxy );
261        return -1;
[171]262    }
[23]263
[457]264#if( DEBUG_THREAD_USER_CREATE & 1)
265if( DEBUG_THREAD_USER_CREATE < cycle )
[625]266printk("\n[%s] new thread descriptor %x allocated\n",
267__FUNCTION__, thread );
[457]268#endif
269
[625]270    // set type in thread descriptor
271    thread->type = THREAD_USER;
[1]272
[625]273    // register new thread in process descriptor, and get a TRDID
274    error = process_register_thread( process, thread , &trdid );
275
276    if( error )
[23]277    {
[625]278        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
279        __FUNCTION__, pid );
280        thread_destroy( thread );
281        return -1;
[23]282    }
[14]283
[625]284    // set trdid in thread descriptor
285    thread->trdid = trdid;
286
[443]287#if( DEBUG_THREAD_USER_CREATE & 1)
288if( DEBUG_THREAD_USER_CREATE < cycle )
[625]289printk("\n[%s] new thread %x registered in process %x\n",
290__FUNCTION__, trdid, pid );
[443]291#endif
292
[625]293    // allocate a stack from local VMM
294    us_vseg = vmm_create_vseg( process,
295                               VSEG_TYPE_STACK,
296                               LTID_FROM_TRDID( trdid ),
297                               0,                         // size unused
298                               0,                         // file_offset unused
299                               0,                         // file_size unused
300                               XPTR_NULL,                 // mapper_xp unused
301                               local_cxy );
302
303    if( us_vseg == NULL )
304    {
305            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
306        process_remove_thread( thread );
307        thread_destroy( thread );
308                return -1;
309    }
310
311#if( DEBUG_THREAD_USER_CREATE & 1)
312if( DEBUG_THREAD_USER_CREATE < cycle )
313printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
314__FUNCTION__, us_vseg->vpn_base, us_vseg->vpn_size );
315#endif
316
[171]317    // initialize thread descriptor
[14]318    error = thread_init( thread,
319                         process,
320                         THREAD_USER,
[625]321                         trdid,
[23]322                         start_func,
323                         start_arg,
[14]324                         core_lid,
[625]325                         us_vseg );
[171]326    if( error )
[14]327    {
[23]328            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
[625]329        vmm_remove_vseg( process , us_vseg );
330        process_remove_thread( thread );
331        thread_destroy( thread );
332        return -1;
[14]333    }
334
[443]335#if( DEBUG_THREAD_USER_CREATE & 1)
336if( DEBUG_THREAD_USER_CREATE < cycle )
[625]337printk("\n[%s] new thread %x in process %x initialised\n",
338__FUNCTION__, thread->trdid, process->pid );
[443]339#endif
340
[14]341    // set DETACHED flag if required
[407]342    if( attr->attributes & PT_ATTR_DETACH ) 
343    {
344        thread->flags |= THREAD_FLAG_DETACHED;
345    }
[1]346
[171]347    // allocate & initialize CPU context
[457]348        if( hal_cpu_context_alloc( thread ) )
[23]349    {
350            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
[625]351        vmm_remove_vseg( process , us_vseg );
352        process_remove_thread( thread );
353        thread_destroy( thread );
354        return -1;
[23]355    }
[669]356    hal_cpu_context_init( thread,
357                          false , 0 , 0 );   // not a main thread
[23]358
[457]359    // allocate & initialize FPU context
[407]360    if( hal_fpu_context_alloc( thread ) )
[23]361    {
362            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
[625]363        vmm_remove_vseg( process , us_vseg );
364        process_remove_thread( thread );
365        thread_destroy( thread );
366        return -1;
[23]367    }
[457]368    hal_fpu_context_init( thread );
[23]369
[457]370#if( DEBUG_THREAD_USER_CREATE & 1)
371if( DEBUG_THREAD_USER_CREATE < cycle )
[593]372printk("\n[%s] thread %x : CPU & FPU contexts created\n",
[457]373__FUNCTION__, thread->trdid );
[637]374hal_vmm_display( XPTR( local_cxy , process ) , true );
[457]375#endif
376
[438]377#if DEBUG_THREAD_USER_CREATE
[433]378cycle = (uint32_t)hal_get_cycles();
[438]379if( DEBUG_THREAD_USER_CREATE < cycle )
[593]380printk("\n[%s] thread[%x,%x] exit / new_thread %x / core %d / cycle %d\n",
381__FUNCTION__, this->process->pid , this->trdid , thread->trdid, core_lid, cycle );
[433]382#endif
[1]383
384    *new_thread = thread;
385        return 0;
[14]386
[296]387}  // end thread_user_create()
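/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_user_create(). This hypothetical caller (the actual
// caller is the pthread_create() syscall handler, in another file) illustrates
// one important point: thread_init() leaves the new thread blocked on
// THREAD_BLOCKED_GLOBAL, so the caller must explicitly unblock it to make it
// runnable. The example_ name is illustrative, not part of the API.
/////////////////////////////////////////////////////////////////////////////////////
// static error_t example_create_and_start( pid_t            pid,
//                                          void           * func,
//                                          void           * args,
//                                          pthread_attr_t * attr )
// {
//     thread_t * new;
//
//     if( thread_user_create( pid , func , args , attr , &new ) ) return -1;
//
//     // the new thread was created in the blocked state => make it runnable
//     thread_unblock( XPTR( local_cxy , new ) , THREAD_BLOCKED_GLOBAL );
//
//     return 0;
// }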
388
[408]389///////////////////////////////////////////////////////
390error_t thread_user_fork( xptr_t      parent_thread_xp,
391                          process_t * child_process,
392                          thread_t ** child_thread )
[1]393{
394    error_t        error;
[625]395        thread_t     * child_ptr;        // local pointer on child thread
396    trdid_t        child_trdid;      // child thread identifier
[408]397    lid_t          core_lid;         // selected core local index
398    thread_t     * parent_ptr;       // local pointer on remote parent thread
399    cxy_t          parent_cxy;       // parent thread cluster
400    process_t    * parent_process;   // local pointer on parent process
401    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
[625]402    void         * parent_func;      // parent thread entry_func
403    void         * parent_args;      // parent thread entry_args
404    uint32_t       parent_flags;     // parent_thread flags
405    vseg_t       * parent_us_vseg;   // parent thread user stack vseg
406    vseg_t       * child_us_vseg;    // child thread user stack vseg
[5]407
[438]408#if DEBUG_THREAD_USER_FORK
[593]409uint32_t   cycle = (uint32_t)hal_get_cycles();
410thread_t * this  = CURRENT_THREAD;
[438]411if( DEBUG_THREAD_USER_FORK < cycle )
[625]412printk("\n[%s] thread[%x,%x] enter for child_process %x / cycle %d\n",
[593]413__FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
[433]414#endif
[408]415
[1]416    // select a target core in local cluster
[637]417    core_lid = cluster_select_local_core( local_cxy );
[1]418
[625]419#if (DEBUG_THREAD_USER_FORK & 1)
420if( DEBUG_THREAD_USER_FORK < cycle )
421printk("\n[%s] thread[%x,%x] selected core [%x,%d]\n",
422__FUNCTION__, this->process->pid, this->trdid, local_cxy, core_lid );
423#endif
424
[408]425    // get cluster and local pointer on parent thread descriptor
426    parent_cxy = GET_CXY( parent_thread_xp );
[469]427    parent_ptr = GET_PTR( parent_thread_xp );
[1]428
[625]429    // get relevant infos from parent thread
430    parent_func    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_func ));
431    parent_args    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_args ));
432    parent_flags   = (uint32_t)hal_remote_l32( XPTR(parent_cxy,&parent_ptr->flags ));
433    parent_us_vseg = (vseg_t *)hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->user_stack_vseg ));
[1]434
[408]435    // get pointer on parent process in parent thread cluster
436    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
437                                                        &parent_ptr->process ) );
438 
[625]439    // build extended pointer on parent GPT in parent thread cluster
[408]440    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
441
[625]442#if (DEBUG_THREAD_USER_FORK & 1)
443if( DEBUG_THREAD_USER_FORK < cycle )
444printk("\n[%s] thread[%x,%x] get parent GPT\n",
445__FUNCTION__, this->process->pid, this->trdid );
446#endif
447
[408]448    // allocate memory for child thread descriptor
[683]449    child_ptr = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
[625]450
[408]451    if( child_ptr == NULL )
[23]452    {
[625]453        printk("\n[ERROR] in %s : cannot allocate new thread\n",
454        __FUNCTION__ );
[408]455        return -1;
[23]456    }
[14]457
[625]458#if (DEBUG_THREAD_USER_FORK & 1)
459if( DEBUG_THREAD_USER_FORK < cycle )
460printk("\n[%s] thread[%x,%x] allocated new thread descriptor %x\n",
461__FUNCTION__, this->process->pid, this->trdid, child_ptr );
462#endif
463
464    // set type in thread descriptor
465    child_ptr->type = THREAD_USER;
466
467    // register new thread in process descriptor, and get a TRDID
468    error = process_register_thread( child_process, child_ptr , &child_trdid );
469
470    if( error )
471    {
472        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
473        __FUNCTION__, child_process->pid );
474        thread_destroy( child_ptr );
475        return -1;
476    }
477
478    // set trdid in thread descriptor
479    child_ptr->trdid = child_trdid;
480
481#if (DEBUG_THREAD_USER_FORK & 1)
482if( DEBUG_THREAD_USER_FORK < cycle )
483printk("\n[%s] thread[%x,%x] registered child thread %x in child process %x\n",
484__FUNCTION__, this->process->pid, this->trdid, child_trdid, child_process->pid );
485#endif
486
487    // get a user stack vseg from local VMM allocator
488    child_us_vseg = vmm_create_vseg( child_process,
489                                     VSEG_TYPE_STACK,
490                                     LTID_FROM_TRDID( child_trdid ), 
491                                     0,                               // size unused
492                                     0,                               // file_offset unused
493                                     0,                               // file_size unused
494                                     XPTR_NULL,                       // mapper_xp unused
495                                     local_cxy );
496    if( child_us_vseg == NULL )
497    {
498            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
499        process_remove_thread( child_ptr );
500        thread_destroy( child_ptr );
501        return -1;
502    }
503
504#if (DEBUG_THREAD_USER_FORK & 1)
505if( DEBUG_THREAD_USER_FORK < cycle )
506printk("\n[%s] thread[%x,%x] created a user stack vseg / vpn_base %x / %d pages\n",
507__FUNCTION__, this->process->pid, this->trdid,
508child_us_vseg->vpn_base, child_us_vseg->vpn_size );
509#endif
510
[171]511    // initialize thread descriptor
[408]512    error = thread_init( child_ptr,
513                         child_process,
[14]514                         THREAD_USER,
[625]515                         child_trdid,
516                         parent_func,
517                         parent_args,
[14]518                         core_lid,
[625]519                         child_us_vseg );
[23]520    if( error )
[14]521    {
[408]522            printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
[625]523        vmm_remove_vseg( child_process , child_us_vseg ); 
524        process_remove_thread( child_ptr );
525        thread_destroy( child_ptr );
526        return -1;
[14]527    }
528
[564]529#if (DEBUG_THREAD_USER_FORK & 1)
530if( DEBUG_THREAD_USER_FORK < cycle )
[593]531printk("\n[%s] thread[%x,%x] initialised thread %x in process %x\n",
532__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]533#endif
534
[408]535    // set detached flag if required
[625]536    if( parent_flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
[1]537
[625]538    // allocate a CPU context for child thread
[408]539        if( hal_cpu_context_alloc( child_ptr ) )
[23]540    {
[407]541            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
[625]542        vmm_remove_vseg( child_process , child_us_vseg );
543        process_remove_thread( child_ptr );
544        thread_destroy( child_ptr );
[408]545        return -1;
[23]546    }
547
[625]548    // allocate a FPU context for child thread
[408]549        if( hal_fpu_context_alloc( child_ptr ) )
[23]550    {
[407]551            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
[625]552        vmm_remove_vseg( child_process , child_us_vseg );
553        process_remove_thread( child_ptr );
554        thread_destroy( child_ptr );
[408]555        return -1;
[23]556    }
557
[564]558#if (DEBUG_THREAD_USER_FORK & 1)
559if( DEBUG_THREAD_USER_FORK < cycle )
[593]560printk("\n[%s] thread[%x,%x] created CPU & FPU contexts for thread %x in process %x\n",
561__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]562#endif
563
[625]564    // scan parent GPT, and copy all valid entries
565    // associated to user stack vseg into child GPT
566    vpn_t  parent_vpn;
567    vpn_t  child_vpn;
568    bool_t mapped;
569    ppn_t  ppn;
570    vpn_t  parent_vpn_base = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_base ) );
571    vpn_t  parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) );
572    vpn_t  child_vpn_base  = child_us_vseg->vpn_base;
[635]573
[625]574    for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ; 
575         parent_vpn < (parent_vpn_base + parent_vpn_size) ;
576         parent_vpn++ , child_vpn++ )
[408]577    {
578        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
[625]579                                  child_vpn,
[408]580                                  parent_gpt_xp,
[625]581                                  parent_vpn,
[408]582                                  true,                 // set cow
583                                  &ppn,
584                                  &mapped );
585        if( error )
586        {
587            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
[625]588            vmm_remove_vseg( child_process , child_us_vseg );
589            process_remove_thread( child_ptr );
590            thread_destroy( child_ptr );
[408]591            return -1;
592        }
593
[625]594        // increment pending forks counter for a mapped page
[408]595        if( mapped )
596        {
[469]597            // get pointers on the page descriptor
[408]598            xptr_t   page_xp  = ppm_ppn2page( ppn );
599            cxy_t    page_cxy = GET_CXY( page_xp );
[469]600            page_t * page_ptr = GET_PTR( page_xp );
601
[625]602            // build extended pointers on forks and lock fields
[469]603            xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
604            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
605
[564]606            // get lock protecting page
607            remote_busylock_acquire( lock_xp ); 
608
609            // increment the forks counter in page descriptor
[473]610            hal_remote_atomic_add( forks_xp , 1 );
[408]611
[564]612            // release lock protecting page
613            remote_busylock_release( lock_xp ); 
[625]614        }
615    }
[564]616
[438]617#if (DEBUG_THREAD_USER_FORK & 1)
618if( DEBUG_THREAD_USER_FORK < cycle )
[635]619printk("\n[%s] thread[%x,%x] copied STACK vseg PTEs & set COW in child GPT\n",
[625]620__FUNCTION__, this->process->pid, this->trdid );
[433]621#endif
[408]622
[625]623    // set COW flag for all mapped entries of user stack vseg in parent GPT
624    hal_gpt_set_cow( parent_gpt_xp,
625                     parent_vpn_base,
626                     parent_vpn_size );
[408]627
[625]628#if (DEBUG_THREAD_USER_FORK & 1)
629if( DEBUG_THREAD_USER_FORK < cycle )
[635]630printk("\n[%s] thread[%x,%x] set COW for STACK vseg in parent GPT\n",
[625]631__FUNCTION__, this->process->pid, this->trdid );
632#endif
633
634    // return child pointer
635    *child_thread = child_ptr;
636
[438]637#if DEBUG_THREAD_USER_FORK
[433]638cycle = (uint32_t)hal_get_cycles();
[438]639if( DEBUG_THREAD_USER_FORK < cycle )
[625]640printk("\n[%s] thread[%x,%x] exit / created thread[%x,%x] / cycle %d\n",
641__FUNCTION__, this->process->pid, this->trdid,
642child_ptr->process->pid, child_ptr->trdid, cycle );
[433]643#endif
[407]644
[1]645        return 0;
[5]646
[296]647}  // end thread_user_fork()
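/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_user_fork(). The actual caller is the fork
// implementation (in another file); this condensed, hypothetical fragment only
// shows the expected sequence: the child thread is created in the
// THREAD_BLOCKED_GLOBAL state, and must be unblocked once the child process is
// fully initialized.
/////////////////////////////////////////////////////////////////////////////////////
// static error_t example_fork_child_thread( xptr_t      parent_thread_xp,
//                                           process_t * child_process )
// {
//     thread_t * child;
//
//     if( thread_user_fork( parent_thread_xp , child_process , &child ) ) return -1;
//
//     // ... complete the child process initialization ...
//
//     thread_unblock( XPTR( local_cxy , child ) , THREAD_BLOCKED_GLOBAL );
//
//     return 0;
// }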
648
[669]649/////////////////////////////////////
650void thread_user_exec( uint32_t argc,
651                       intptr_t argv )
[457]652{
653    thread_t  * thread  = CURRENT_THREAD;
654    process_t * process = thread->process;
655
656#if DEBUG_THREAD_USER_EXEC
657uint32_t cycle = (uint32_t)hal_get_cycles();
658if( DEBUG_THREAD_USER_EXEC < cycle )
[683]659printk("\n[%s] thread[%x,%x] enter / argc %d / argv %x / cycle %d\n",
660__FUNCTION__, process->pid, thread->trdid, argc, argv, cycle );
[457]661#endif
662
[564]663// check parent thread attributes
[669]664assert( __FUNCTION__, (thread->type      == THREAD_USER )     , "bad type" );
665assert( __FUNCTION__, (thread->signature == THREAD_SIGNATURE) , "bad signature" );
666assert( __FUNCTION__, (thread->busylocks == 0)                , "bad busylocks" );
[457]667
668        // re-initialize various thread descriptor fields
[669]669    thread->quantum         = 0;                               // TODO
670    thread->ticks_nr        = 0;                               // TODO
671    thread->time_last_check = 0;                               // TODO
672    thread->entry_func      = (void*)process->vmm.entry_point;
673    thread->flags           = THREAD_FLAG_DETACHED;            // main always detached
[457]674    thread->blocked         = 0;
675    thread->errno           = 0;
[669]676    thread->fork_user       = 0;
677    thread->fork_cxy        = 0;
[457]678
679    // reset thread info
680    memset( &thread->info , 0 , sizeof(thread_info_t) );
681
[564]682    // re-initialize join_lock
683    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
[457]684
685    // release FPU ownership if required
686    if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;
687
[669]688    // initialize thread FPU context
[457]689    hal_fpu_context_init( thread );
690
[669]691    // initialize thread CPU context
692    hal_cpu_context_init( thread,
693                          true,          // main thread
694                          argc,
695                          argv ); 
696
[457]697#if DEBUG_THREAD_USER_EXEC
698cycle = (uint32_t)hal_get_cycles();
699if( DEBUG_THREAD_USER_EXEC < cycle )
[669]700{
701    printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
702    __FUNCTION__, process->pid, thread->trdid, cycle );
703
704    hal_cpu_context_display( XPTR( local_cxy , thread ) );
705    hal_vmm_display( XPTR( local_cxy , process ) , true );
706}
[457]707#endif
708
[683]709    // restore CPU registers => jump to user code
[669]710    hal_do_cpu_restore( thread->cpu_context );
[457]711
712}  // end thread_user_exec()
713
[1]714/////////////////////////////////////////////////////////
715error_t thread_kernel_create( thread_t     ** new_thread,
716                              thread_type_t   type,
[171]717                              void          * func,
718                              void          * args,
[1]719                                              lid_t           core_lid )
720{
721    error_t        error;
[14]722        thread_t     * thread;       // pointer on new thread descriptor
[625]723    trdid_t        trdid;        // new thread identifier
[1]724
[593]725    thread_t * this = CURRENT_THREAD; 
[1]726
[669]727assert( __FUNCTION__, ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
[593]728"illegal thread type" );
[1]729
[669]730assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr) ,
[593]731"illegal core_lid" );
732
[438]733#if DEBUG_THREAD_KERNEL_CREATE
[593]734uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]735if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[593]736printk("\n[%s] thread[%x,%x] enter / requested_type %s / cycle %d\n",
737__FUNCTION__, this->process->pid, this->trdid, thread_type_str(type), cycle );
[433]738#endif
739
[171]740    // allocate memory for new thread descriptor
[683]741    thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
[14]742
[581]743    if( thread == NULL )
744    {
745        printk("\n[ERROR] in %s : thread %x in process %x\n"
746        "   no memory for thread descriptor\n",
[593]747        __FUNCTION__, this->trdid, this->process->pid );
[581]748        return ENOMEM;
749    }
[14]750
[625]751    // set type in thread descriptor
752    thread->type = type;
753
754    // register new thread in local kernel process descriptor, and get a TRDID
755    error = process_register_thread( &process_zero , thread , &trdid );
756
757    if( error )
758    {
759        printk("\n[ERROR] in %s : cannot register thread in kernel process\n", __FUNCTION__ );
        thread_destroy( thread );    // release thread descriptor, as in the other error paths
760        return -1;
761    }
762
763    // set trdid in thread descriptor
764    thread->trdid = trdid;
765
[171]766    // initialize thread descriptor
[14]767    error = thread_init( thread,
768                         &process_zero,
769                         type,
[625]770                         trdid,
[14]771                         func,
772                         args,
773                         core_lid,
[625]774                         NULL );  // no user stack for a kernel thread
[14]775
[171]776    if( error ) // release allocated memory for thread descriptor
[1]777    {
[625]778        printk("\n[ERROR] in %s : cannot initialize thread descriptor\n", __FUNCTION__ );
779        thread_destroy( thread );
[457]780        return ENOMEM;
[1]781    }
782
[171]783    // allocate & initialize CPU context
[457]784        error = hal_cpu_context_alloc( thread );
[581]785
[457]786    if( error )
787    {
[581]788        printk("\n[ERROR] in %s : thread %x in process %x\n"
[593]789        "    cannot create CPU context\n",
790        __FUNCTION__, this->trdid, this->process->pid );
[625]791        thread_destroy( thread );
[457]792        return EINVAL;
793    }
[581]794
[669]795    hal_cpu_context_init( thread,
796                          false , 0 , 0 );  // not a main thread
[14]797
[438]798#if DEBUG_THREAD_KERNEL_CREATE
[433]799cycle = (uint32_t)hal_get_cycles();
[438]800if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[593]801printk("\n[%s] thread[%x,%x] exit / new_thread %x / type %s / cycle %d\n",
802__FUNCTION__, this->process->pid, this->trdid, thread, thread_type_str(type), cycle );
[433]803#endif
[1]804
[171]805    *new_thread = thread;
[1]806        return 0;
[5]807
[296]808} // end thread_kernel_create()
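/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch for thread_kernel_create(): spawning an RPC server thread on a
// given core. A minimal sketch, assuming a server function with the standard
// kernel-thread prototype; the example_ names are hypothetical. As for user
// threads, the new kernel thread is created blocked and must be unblocked.
/////////////////////////////////////////////////////////////////////////////////////
// static void example_server_func( void * args );
//
// static error_t example_spawn_rpc_thread( lid_t core_lid )
// {
//     thread_t * new;
//     error_t    error = thread_kernel_create( &new,
//                                              THREAD_RPC,
//                                              &example_server_func,
//                                              NULL,
//                                              core_lid );
//     if( error ) return error;
//
//     thread_unblock( XPTR( local_cxy , new ) , THREAD_BLOCKED_GLOBAL );
//     return 0;
// }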
809
[457]810//////////////////////////////////////////////
811void thread_idle_init( thread_t      * thread,
812                       thread_type_t   type,
813                       void          * func,
814                       void          * args,
815                           lid_t           core_lid )
[14]816{
[625]817    trdid_t trdid;   
818    error_t error;
[14]819
[564]820// check arguments
[683]821assert( __FUNCTION__, (type == THREAD_IDLE),
822"illegal thread type" );
[564]823
[683]824assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr),
825"illegal core index" );
826
[625]827    // set type in thread descriptor
828    thread->type = THREAD_IDLE;
829
830    // register idle thread in local kernel process descriptor, and get a TRDID
831    error = process_register_thread( &process_zero , thread , &trdid );
832
[683]833assert( __FUNCTION__, (error == 0),
834"cannot register idle_thread in kernel process" );
[625]835
836    // set trdid in thread descriptor
837    thread->trdid = trdid;
838
[457]839    // initialize thread descriptor
[625]840    error = thread_init( thread,
841                         &process_zero,
842                         THREAD_IDLE,
843                         trdid,
844                         func,
845                         args,
846                         core_lid,
847                         NULL );   // no user stack for a kernel thread
[14]848
[683]849assert( __FUNCTION__, (error == 0),
850"cannot initialize idle_thread" );
[457]851
[669]852    // allocate CPU context
[457]853    error = hal_cpu_context_alloc( thread );
[171]854
[683]855assert( __FUNCTION__,(error == 0),
856"cannot allocate CPU context" );
[14]857
[669]858    // initialize CPU context
859    hal_cpu_context_init( thread,
860                          false , 0 , 0 );   // not a main thread
[457]861
[438]862}  // end thread_idle_init()
[407]863
[625]864////////////////////////////////////////////
865uint32_t thread_destroy( thread_t * thread )
[1]866{
[625]867    reg_t           save_sr;
868    uint32_t        count;
[1]869
[625]870    thread_type_t   type    = thread->type;
871    process_t     * process = thread->process;
872    core_t        * core    = thread->core;
[1]873
[641]874#if DEBUG_THREAD_DESTROY
[640]875uint32_t   cycle;
[583]876thread_t * this  = CURRENT_THREAD;
[640]877#endif
878
879#if (DEBUG_THREAD_DESTROY & 1)
880cycle = (uint32_t)hal_get_cycles();
[438]881if( DEBUG_THREAD_DESTROY < cycle )
[593]882printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
[583]883__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[433]884#endif
[1]885
[625]886    // check calling thread busylocks counter
[583]887    thread_assert_can_yield( thread , __FUNCTION__ );
[171]888
[635]889#if CONFIG_INSTRUMENTATION_PGFAULTS
[640]890process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
[641]891process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
[640]892process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
[641]893process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
[640]894process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
895process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
[635]896#endif
[1]897
[640]898#if (CONFIG_INSTRUMENTATION_PGFAULTS & 1)
899uint32_t false_nr    = thread->info.false_pgfault_nr;
[641]900uint32_t false_cost  = thread->info.false_pgfault_cost;
901uint32_t false_max   = thread->info.false_pgfault_max;
902uint32_t false_one   = false_nr  ? (false_cost  / false_nr ) : 0;
903
[640]904uint32_t local_nr    = thread->info.local_pgfault_nr;
[641]905uint32_t local_cost  = thread->info.local_pgfault_cost;
906uint32_t local_max   = thread->info.local_pgfault_max;
907uint32_t local_one   = local_nr  ? (local_cost  / local_nr ) : 0;
908
[640]909uint32_t global_nr   = thread->info.global_pgfault_nr;
910uint32_t global_cost = thread->info.global_pgfault_cost;
[641]911uint32_t global_max  = thread->info.global_pgfault_max;
912uint32_t global_one  = global_nr ? (global_cost / global_nr) : 0;
913
[647]914printk("\n***** thread[%x,%x] page faults\n"
[641]915       " - false  : %d events / cost %d cycles / max %d cycles\n"
916       " - local  : %d events / cost %d cycles / max %d cycles\n"
917       " - global : %d events / cost %d cycles / max %d cycles\n",
918       thread->process->pid, thread->trdid,
919       false_nr , false_one , false_max,
920       local_nr , local_one , local_max,
921       global_nr, global_one, global_max );
[640]922#endif
923
[669]924    // unlink embedded alarm from the list rooted in core when required
925    list_entry_t * entry = &thread->alarm.list;
926    if( (entry->next != NULL) || (entry->pred != NULL) )  list_unlink( entry );
927
[625]928    // remove thread from process th_tbl[]
929    count = process_remove_thread( thread );
930
[635]931    // release memory allocated for CPU context and FPU context
[1]932        hal_cpu_context_destroy( thread );
[625]933        hal_fpu_context_destroy( thread );
[1]934       
[625]935    // release user stack vseg (for a user thread only)
936    if( type == THREAD_USER )  vmm_remove_vseg( process , thread->user_stack_vseg );
937
[428]938    // release FPU ownership if required
[409]939        hal_disable_irq( &save_sr );
[1]940        if( core->fpu_owner == thread )
941        {
942                core->fpu_owner = NULL;
943                hal_fpu_disable();
944        }
[409]945        hal_restore_irq( save_sr );
[1]946
947    // invalidate thread descriptor
948        thread->signature = 0;
949
[625]950    // release memory for thread descriptor (including kernel stack)
[683]951    kmem_free( thread , CONFIG_THREAD_DESC_ORDER );
[625]952
[438]953#if DEBUG_THREAD_DESTROY
[433]954cycle = (uint32_t)hal_get_cycles();
[438]955if( DEBUG_THREAD_DESTROY < cycle )
[593]956printk("\n[%s] thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
[583]957__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[433]958#endif
[1]959
[625]960    return count;
961
[407]962}   // end thread_destroy()
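/////////////////////////////////////////////////////////////////////////////////////
// Sketch of the expected caller side (illustrative): the actual destruction is
// performed by the scheduler, when it observes the THREAD_FLAG_REQ_DELETE flag
// set by thread_delete_request(). The returned thread count lets the caller
// detect when the process has no more threads in this cluster.
//
//     if( thread->flags & THREAD_FLAG_REQ_DELETE )
//     {
//         // destroy thread descriptor and get number of threads
//         // still registered in the local process descriptor
//         uint32_t count = thread_destroy( thread );
//
//         // when "count" indicates the last thread, the local
//         // process descriptor can be released as well
//     }
/////////////////////////////////////////////////////////////////////////////////////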
963
[416]964//////////////////////////////////////////////////
965inline void thread_set_req_ack( thread_t * target,
966                                uint32_t * rsp_count )
[1]967{
[409]968    reg_t    save_sr;   // for critical section
969
[416]970    // get pointer on target thread scheduler
971    scheduler_t * sched = &target->core->scheduler;
[409]972
[416]973    // wait scheduler ready to handle a new request
974    while( sched->req_ack_pending ) asm volatile( "nop" );
[409]975   
976    // enter critical section
977    hal_disable_irq( &save_sr );
978     
[416]979    // set request in target thread scheduler
980    sched->req_ack_pending = true;
[409]981
[416]982    // set ack request in target thread "flags"
983    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );
[409]984
[416]985    // set pointer on responses counter in target thread
986    target->ack_rsp_count = rsp_count;
[409]987   
988    // exit critical section
989    hal_restore_irq( save_sr );
990
[407]991    hal_fence();
[171]992
[416]993}  // thread_set_req_ack()
[409]994
[416]995/////////////////////////////////////////////////////
996inline void thread_reset_req_ack( thread_t * target )
[1]997{
[409]998    reg_t    save_sr;   // for critical section
999
1000    // get pointer on target thread scheduler
[416]1001    scheduler_t * sched = &target->core->scheduler;
[409]1002
1003    // check signal pending in scheduler
[669]1004    assert( __FUNCTION__, sched->req_ack_pending , "no pending signal" );
[409]1005   
1006    // enter critical section
1007    hal_disable_irq( &save_sr );
1008     
1009    // reset signal in scheduler
[416]1010    sched->req_ack_pending = false;
[409]1011
1012    // reset signal in thread "flags"
[416]1013    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );
[409]1014
1015    // reset pointer on responses counter
[416]1016    target->ack_rsp_count = NULL;
[409]1017   
1018    // exit critical section
1019    hal_restore_irq( save_sr );
1020
[407]1021    hal_fence();
[171]1022
[416]1023}  // thread_reset_req_ack()
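/////////////////////////////////////////////////////////////////////////////////////
// Requester-side sketch for the req_ack mechanism. This hypothetical fragment
// (the real user is the process_sigaction() machinery, in another file) shows
// one possible protocol: post the request with a response counter, force the
// target core into its scheduler with an IPI, and poll the counter, which is
// decremented by the target scheduler when it acknowledges the request.
/////////////////////////////////////////////////////////////////////////////////////
// static void example_request_ack( thread_t * target )
// {
//     uint32_t rsp_count = 1;                     // one expected response
//
//     // post the acknowledge request in the target thread & scheduler
//     thread_set_req_ack( target , &rsp_count );
//
//     // force the target core to enter its scheduler
//     dev_pic_send_ipi( local_cxy , target->core->lid );
//
//     // poll the response counter
//     while( hal_remote_l32( XPTR( local_cxy , &rsp_count ) ) )
//         sched_yield( "wait ack from target scheduler" );
// }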
[409]1024
[436]1025//////////////////////////////////////
1026void thread_block( xptr_t   thread_xp,
1027                   uint32_t cause )
[407]1028{
[436]1029    // get thread cluster and local pointer
1030    cxy_t      cxy = GET_CXY( thread_xp );
1031    thread_t * ptr = GET_PTR( thread_xp );
1032
[407]1033    // set blocking cause
[436]1034    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
[407]1035    hal_fence();
1036
[438]1037#if DEBUG_THREAD_BLOCK
[457]1038uint32_t    cycle   = (uint32_t)hal_get_cycles();
1039process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
[593]1040thread_t  * this    = CURRENT_THREAD;
[438]1041if( DEBUG_THREAD_BLOCK < cycle )
[593]1042printk("\n[%s] thread[%x,%x] blocked thread %x in process %x / cause %x\n",
1043__FUNCTION__, this->process->pid, this->trdid,
[564]1044ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
[433]1045#endif
1046
[407]1047} // end thread_block()
1048
[433]1049////////////////////////////////////////////
1050uint32_t thread_unblock( xptr_t   thread_xp,
[407]1051                         uint32_t cause )
1052{
1053    // get thread cluster and local pointer
[433]1054    cxy_t      cxy = GET_CXY( thread_xp );
1055    thread_t * ptr = GET_PTR( thread_xp );
[407]1056
1057    // reset blocking cause
1058    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
1059    hal_fence();
1060
[438]1061#if DEBUG_THREAD_BLOCK
[457]1062uint32_t    cycle   = (uint32_t)hal_get_cycles();
1063process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
[593]1064thread_t  * this    = CURRENT_THREAD;
[438]1065if( DEBUG_THREAD_BLOCK < cycle )
[593]1066printk("\n[%s] thread[%x,%x] unblocked thread %x in process %x / cause %x\n",
1067__FUNCTION__, this->process->pid, this->trdid,
[564]1068ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
[433]1069#endif
1070
[446]1071    // return a non zero value if the cause bit is modified
1072    return( previous & cause );
[436]1073
[446]1074}  // end thread_unblock()
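/////////////////////////////////////////////////////////////////////////////////////
// Typical sleep / wake-up pattern built on these two functions (a sketch,
// mirroring the join synchronization in thread_delete_request() below; the
// example_ names and the JOIN cause are illustrative):
/////////////////////////////////////////////////////////////////////////////////////
// static void example_wait_peer( void )
// {
//     // block itself, then deschedule
//     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_JOIN );
//     sched_yield( "wait peer" );
// }
//
// static void example_wake_peer( xptr_t waiting_xp )
// {
//     // reset the blocking bit ; a zero return value means
//     // the target thread was not actually blocked on this cause
//     if( thread_unblock( waiting_xp , THREAD_BLOCKED_JOIN ) == 0 )
//         printk("\n[WARNING] in %s : thread was not blocked\n", __FUNCTION__ );
// }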
[407]1075
[683]1076//////////////////////////////////////////////
[669]1077void thread_delete_request( xptr_t  target_xp,
[683]1078                            bool_t  is_forced )
[440]1079{
1080    reg_t       save_sr;                // for critical section
1081    bool_t      target_join_done;       // joining thread arrived first
1082    bool_t      target_attached;        // target thread attached
1083    xptr_t      killer_xp;              // extended pointer on killer thread (this)
1084    thread_t  * killer_ptr;             // pointer on killer thread (this)
1085    cxy_t       target_cxy;             // target thread cluster     
1086    thread_t  * target_ptr;             // pointer on target thread
[651]1087    process_t * target_process;         // pointer on target process
[625]1088    pid_t       target_pid;             // target process identifier
[440]1089    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
1090    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
1091    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
1092    trdid_t     target_trdid;           // target thread identifier
1093    ltid_t      target_ltid;            // target thread local index
[651]1094    uint32_t    target_flags;           // target thread flags
[440]1095    xptr_t      joining_xp;             // extended pointer on joining thread
[651]1096    thread_t  * joining_ptr;            // local pointer on joining thread
1097    cxy_t       joining_cxy;            // joining thread cluster
[440]1098
[564]1099    // get target thread cluster and local pointer
[440]1100    target_cxy      = GET_CXY( target_xp );
1101    target_ptr      = GET_PTR( target_xp );
[564]1102
[651]1103    // get target thread trdid, ltid, flags, and process PID
[564]1104    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
[440]1105    target_ltid     = LTID_FROM_TRDID( target_trdid );
[651]1106    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
1107    target_flags    = hal_remote_l32( target_flags_xp );
[625]1108    target_process  = hal_remote_lpt( XPTR( target_cxy , &target_ptr->process ) );
1109    target_pid      = hal_remote_l32( XPTR( target_cxy , &target_process->pid ) );
[651]1110    target_attached = ((target_flags & THREAD_FLAG_DETACHED) == 0); 
[440]1111
1112    // get killer thread pointers
1113    killer_ptr = CURRENT_THREAD;
1114    killer_xp  = XPTR( local_cxy , killer_ptr );
1115
1116#if DEBUG_THREAD_DELETE
[564]1117uint32_t cycle  = (uint32_t)hal_get_cycles();
[440]1118if( DEBUG_THREAD_DELETE < cycle )
[651]1119printk("\n[%s] killer[%x,%x] enters / target[%x,%x] / forced %d / flags %x / cycle %d\n",
[583]1120__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid, 
[651]1121target_pid, target_trdid, is_forced, target_flags, cycle );
[440]1122#endif
1123
[564]1124// check target thread is not the main thread, because the main thread
1125// must be deleted by the parent process sys_wait() function
[669]1126assert( __FUNCTION__, ((CXY_FROM_PID( target_pid ) != target_cxy) || (target_ltid != 0)),
[625]1127"target thread cannot be the main thread" );
[564]1128
[583]1129    // check killer thread can yield
1130    thread_assert_can_yield( killer_ptr , __FUNCTION__ ); 
[440]1131
[583]1132    // if the target thread is attached, we must synchronize with the joining thread
1133    // before blocking and marking the target thread for delete.
1134
1135    if( target_attached && (is_forced == false) ) // synchronize with joining thread
[564]1136    {
[440]1137        // build extended pointers on target thread join fields
1138        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
1139        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
1140
1141        // enter critical section
1142        hal_disable_irq( &save_sr );
1143
1144        // take the join_lock in target thread descriptor
[564]1145        remote_busylock_acquire( target_join_lock_xp );
[440]1146
1147        // get join_done from target thread descriptor
[564]1148        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
[440]1149   
[583]1150        if( target_join_done )                     // joining thread arrived first
[440]1151        {
1152            // get extended pointer on joining thread
[564]1153            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
[651]1154
1155            // get cluster and local pointer on joining thread
1156            joining_ptr = GET_PTR( joining_xp );
1157            joining_cxy = GET_CXY( joining_xp );
1158
1159            // copy exit_status from target thread to joining thread, because
1160            // target thread may be deleted before joining thread resume
1161            void * status = hal_remote_lpt( XPTR( target_cxy , &target_ptr->exit_status ) );
1162            hal_remote_spt( XPTR( joining_cxy , &joining_ptr->exit_status ) , status );
[440]1163           
1164            // reset the join_done flag in target thread
1165            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
1166
1167            // unblock the joining thread
1168            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
1169
1170            // release the join_lock in target thread descriptor
[564]1171            remote_busylock_release( target_join_lock_xp );
[440]1172
[583]1173            // block the target thread
1174            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1175
[564]1176            // set the REQ_DELETE flag in target thread descriptor
1177            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1178
[583]1179            // exit critical section
[440]1180            hal_restore_irq( save_sr );
[564]1181
[583]1182#if DEBUG_THREAD_DELETE
[651]1183cycle  = (uint32_t)hal_get_cycles();
[564]1184if( DEBUG_THREAD_DELETE < cycle )
[593]1185printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
[583]1186__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
[651]1187target_pid, target_trdid, cycle );
[564]1188#endif
[583]1189
1190        }
1191        else                                      // killer thread arrived first
1192        {
[440]1193            // set the kill_done flag in target thread
1194            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1195
[651]1196            // block the killer thread itself on BLOCKED_JOIN
[440]1197            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1198
1199            // set extended pointer on killer thread in target thread
[564]1200            hal_remote_s64( target_join_xp_xp , killer_xp );
[440]1201
1202            // release the join_lock in target thread descriptor
[564]1203            remote_busylock_release( target_join_lock_xp );
[440]1204
[583]1205#if DEBUG_THREAD_DELETE
[651]1206cycle  = (uint32_t)hal_get_cycles();
[564]1207if( DEBUG_THREAD_DELETE < cycle )
[593]1208printk("\n[%s] killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
[583]1209__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
[651]1210target_pid, target_trdid, cycle );
[564]1211#endif
[440]1212            // deschedule
1213            sched_yield( "killer thread wait joining thread" );
1214
[583]1215            // block the target thread
1216            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1217
[564]1218            // set the REQ_DELETE flag in target thread descriptor
1219            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1220
[583]1221            // exit critical section
[440]1222            hal_restore_irq( save_sr );
[583]1223
1224#if DEBUG_THREAD_DELETE
[651]1225cycle  = (uint32_t)hal_get_cycles();
[583]1226if( DEBUG_THREAD_DELETE < cycle )
[593]1227printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
[583]1228__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
[651]1229target_pid, target_trdid, cycle );
[583]1230#endif
1231
[440]1232        }
[564]1233    }
[583]1234    else                     // no synchronization with joining thread required
[564]1235    {
[583]1236        // block the target thread
1237        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1238
[564]1239        // set the REQ_DELETE flag in target thread descriptor
1240        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
[440]1241
1242#if DEBUG_THREAD_DELETE
[651]1243cycle  = (uint32_t)hal_get_cycles();
[440]1244if( DEBUG_THREAD_DELETE < cycle )
[593]1245printk("\n[%s] killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
[583]1246__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
[651]1247target_pid, target_trdid, cycle );
[440]1248#endif
1249
[583]1250    }
[669]1251}  // end thread_delete_request()
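/////////////////////////////////////////////////////////////////////////////////////
// The two deletion modes, side by side (illustrative fragment):
//
//     // cancel-like deletion : honor the join protocol for an attached thread
//     thread_delete_request( target_xp , false );
//
//     // kill-like deletion : mark the target immediately, even if attached
//     thread_delete_request( target_xp , true );
/////////////////////////////////////////////////////////////////////////////////////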
[440]1252
1253
1254
[564]1255/////////////////////////////
[485]1256void thread_idle_func( void )
[1]1257{
[625]1258
1259#if DEBUG_THREAD_IDLE
1260uint32_t cycle;
1261#endif
1262
[1]1263    while( 1 )
1264    {
[408]1265        // unmask IRQs
1266        hal_enable_irq( NULL );
1267
[443]1268        // force core to low-power mode (optional)
[583]1269        if( CONFIG_SCHED_IDLE_MODE_SLEEP ) 
[407]1270        {
[1]1271
[564]1272#if DEBUG_THREAD_IDLE
[625]1273cycle = (uint32_t)hal_get_cycles();
[438]1274if( DEBUG_THREAD_IDLE < cycle )
[593]1275printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
[446]1276__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
[433]1277#endif
[1]1278
[407]1279            hal_core_sleep();
[1]1280
[564]1281#if DEBUG_THREAD_IDLE
[625]1282cycle = (uint32_t)hal_get_cycles();
[438]1283if( DEBUG_THREAD_IDLE < cycle )
[593]1284printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n",
[531]1285__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
[433]1286#endif
[407]1287
1288        }
[443]1289
[446]1290#if DEBUG_THREAD_IDLE
[625]1291cycle = (uint32_t)hal_get_cycles();
[564]1292if( DEBUG_THREAD_IDLE < cycle )
[640]1293sched_remote_display( local_cxy , CURRENT_THREAD->core->lid );
[446]1294#endif     
[564]1295        // search a runnable thread
1296        sched_yield( "running idle thread" );
[446]1297
[564]1298    } // end while
1299
[407]1300}  // end thread_idle()
[1]1301
[407]1302
[473]1303///////////////////////////////////////////
1304void thread_time_update( thread_t * thread,
[564]1305                         bool_t     is_user )
[16]1306{
[473]1307    cycle_t current_cycle;   // current cycle counter value
1308    cycle_t last_cycle;      // last cycle counter value
[1]1309
[473]1310    // get pointer on thread_info structure
1311    thread_info_t * info = &thread->info;
1312
1313    // get last cycle counter value
1314    last_cycle = info->last_cycle;
1315
1316    // get current cycle counter value
1317    current_cycle = hal_get_cycles();
1318
1319    // update thread_info structure
1320    info->last_cycle = current_cycle;
1321
1322    // update time in thread_info
1323    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
1324    else          info->sys_cycles += (current_cycle - last_cycle);
[16]1325
[564]1326}  // end thread_time_update()
1327
[23]1328/////////////////////////////////////
1329xptr_t thread_get_xptr( pid_t    pid,
1330                        trdid_t  trdid )
1331{
1332    cxy_t         target_cxy;          // target thread cluster identifier
1333    ltid_t        target_thread_ltid;  // target thread local index
[171]1334    thread_t    * target_thread_ptr;   // target thread local pointer
[23]1335    xptr_t        target_process_xp;   // extended pointer on target process descriptor
[171]1336    process_t   * target_process_ptr;  // local pointer on target process descriptor
[23]1337    pid_t         target_process_pid;  // target process identifier
1338    xlist_entry_t root;                // root of list of process in target cluster
1339    xptr_t        lock_xp;             // extended pointer on lock protecting  this list
[16]1340
[580]1341#if DEBUG_THREAD_GET_XPTR
1342uint32_t cycle  = (uint32_t)hal_get_cycles();
1343thread_t * this = CURRENT_THREAD;
1344if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1345printk("\n[%s] thread %x in process %x enters / pid %x / trdid %x / cycle %d\n",
[580]1346__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1347#endif
1348
[23]1349    // get target cluster identifier and local thread identifier
1350    target_cxy         = CXY_FROM_TRDID( trdid );
1351    target_thread_ltid = LTID_FROM_TRDID( trdid );
1352
[436]1353    // check trdid argument
[564]1354        if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 
[637]1355        cluster_is_active( target_cxy ) == false )                return XPTR_NULL;
[436]1356
[23]1357    // get root of list of process descriptors in target cluster
1358    hal_remote_memcpy( XPTR( local_cxy  , &root ),
1359                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
1360                       sizeof(xlist_entry_t) );
1361
[564]1362    // get extended pointer on lock protecting the list of local processes
[23]1363    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
1364
1365    // take the lock protecting the list of processes in target cluster
[564]1366    remote_queuelock_acquire( lock_xp );
[23]1367
[580]1368#if( DEBUG_THREAD_GET_XPTR & 1 )
1369if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1370printk("\n[%s] scan processes in cluster %x :\n", __FUNCTION__, target_cxy );
[580]1371#endif
1372
[564]1373    // scan the list of local processes in target cluster
[23]1374    xptr_t  iter;
1375    bool_t  found = false;
1376    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
1377    {
1378        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[469]1379        target_process_ptr = GET_PTR( target_process_xp );
[564]1380        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
[580]1381
1382#if( DEBUG_THREAD_GET_XPTR & 1 )
1383if( DEBUG_THREAD_GET_XPTR < cycle )
1384printk(" - process %x\n", target_process_pid );
1385#endif
1386
[23]1387        if( target_process_pid == pid )
1388        {
1389            found = true;
1390            break;
1391        }
1392    }
1393
1394    // release the lock protecting the list of processes in target cluster
[564]1395    remote_queuelock_release( lock_xp );
[23]1396
[436]1397    // check PID found
[580]1398    if( found == false ) 
1399    {
[23]1400
[580]1401#if( DEBUG_THREAD_GET_XPTR & 1 )
1402if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1403printk("\n[%s] pid %x not found in cluster %x\n",
[580]1404__FUNCTION__, pid, target_cxy );
1405#endif
1406        return XPTR_NULL;
1407    }
1408
[23]1409    // get target thread local pointer
1410    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
[171]1411    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
[23]1412
[580]1413    if( target_thread_ptr == NULL )
1414    {
[23]1415
[580]1416#if( DEBUG_THREAD_GET_XPTR & 1 )
1417if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1418printk("\n[%s] thread %x not registered in process %x in cluster %x\n",
[580]1419__FUNCTION__, trdid, pid, target_cxy );
1420#endif
1421        return XPTR_NULL;
1422    }
1423
1424#if DEBUG_THREAD_GET_XPTR
1425cycle  = (uint32_t)hal_get_cycles();
1426if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1427printk("\n[%s] thread %x in process %x exit / pid %x / trdid %x / cycle %d\n",
[580]1428__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1429#endif
1430
[23]1431    return XPTR( target_cxy , target_thread_ptr );
[564]1432
1433}  // end thread_get_xptr()
1434
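// Usage sketch (hypothetical helper, not part of thread.c): resolve a
// (pid, trdid) pair to an extended pointer, then perform a remote read,
// following the hal_remote_l32() access pattern used in this file.
static inline uint32_t example_read_remote_trdid( pid_t   pid,
                                                  trdid_t trdid )
{
    xptr_t thread_xp = thread_get_xptr( pid , trdid );

    // thread_get_xptr() returns XPTR_NULL when the lookup fails
    if( thread_xp == XPTR_NULL ) return 0;

    cxy_t      thread_cxy = GET_CXY( thread_xp );
    thread_t * thread_ptr = GET_PTR( thread_xp );

    return hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
}
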
1435///////////////////////////////////////////////////
1436void thread_assert_can_yield( thread_t    * thread,
1437                              const char  * func_str )
1438{
1439    // does nothing if thread does not hold any busylock
1440
1441    if( thread->busylocks )
1442    {
1443        // get pointers on TXT0 chdev
1444        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1445        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1446        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1447
1448        // get extended pointer on TXT0 lock
1449        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1450
1451        // get TXT0 lock
1452        remote_busylock_acquire( txt0_lock_xp );
1453
1454        // display error message on TXT0 (count excludes the TXT0 busylock just taken above)
[593]1455        nolock_printk("\n[PANIC] in %s / thread[%x,%x] cannot yield : "
[580]1456        "hold %d busylock(s) / cycle %d\n",
[593]1457        func_str, thread->process->pid, thread->trdid,
[624]1458        thread->busylocks - 1, (uint32_t)hal_get_cycles() );
[564]1459
[683]1460#if DEBUG_BUSYLOCK_TYPE
[580]1461
[583]1462// scan list of busylocks
1463xptr_t    iter_xp;
[580]1464xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
1465XLIST_FOREACH( root_xp , iter_xp )
[564]1466{
[580]1467    xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1468    cxy_t        lock_cxy  = GET_CXY( lock_xp );
1469    busylock_t * lock_ptr  = GET_PTR( lock_xp );
1470    uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
1471    nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
1472}
[564]1473
1474#endif
[23]1475
[564]1476        // release TXT0 lock
1477        remote_busylock_release( txt0_lock_xp );
1478
1479        // suicide
1480        hal_core_sleep();
1481    }
1482}  // end thread_assert_can_yield()
1483
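// Usage sketch (hypothetical call site, not part of thread.c): a blocking
// point can verify that the calling thread holds no busylock before it
// deschedules, so a lock-holding caller panics loudly instead of deadlocking.
static inline void example_safe_yield( void )
{
    thread_assert_can_yield( CURRENT_THREAD , __FUNCTION__ );
    sched_yield( "example blocking point" );
}
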
[619]1484//////////////////////////////////////////////////////
1485void thread_display_busylocks( xptr_t       thread_xp,
1486                               const char * string )
[564]1487{
[623]1488
[581]1489    cxy_t      thread_cxy = GET_CXY( thread_xp );
1490    thread_t * thread_ptr = GET_PTR( thread_xp );
[564]1491
[623]1492#if DEBUG_BUSYLOCK
[564]1493
[623]1494    xptr_t     iter_xp;
[564]1495
[623]1496    // get relevant info from target thread descriptor
[619]1497    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
1498    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
1499    process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
1500    pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
[564]1501
[581]1502    // get extended pointer on root of busylocks
[619]1503    xptr_t root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
[564]1504
[581]1505    // get pointers on TXT0 chdev
1506    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1507    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1508    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[580]1509
[581]1510    // get extended pointer on remote TXT0 lock
1511    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[580]1512
[581]1513    // get TXT0 lock
1514    remote_busylock_acquire( txt0_lock_xp );
[580]1515
[581]1516    // display header
[619]1517    nolock_printk("\n***** thread[%x,%x] in <%s> : %d busylocks *****\n",
1518    pid, trdid, string, locks );
[581]1519
1520    // scan the xlist of busylocks when required
1521    if( locks )
1522    {
1523        XLIST_FOREACH( root_xp , iter_xp )
[580]1524        {
[581]1525            xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1526            cxy_t        lock_cxy  = GET_CXY( lock_xp );
1527            busylock_t * lock_ptr  = GET_PTR( lock_xp );
1528            uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
1529            nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
[580]1530        }
[581]1531    }
[580]1532
[581]1533    // release TXT0 lock
1534    remote_busylock_release( txt0_lock_xp );
1535
[623]1536#else
[581]1537
[623]1538printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
1539__FUNCTION__, string, thread_cxy, thread_ptr );
1540
[581]1541#endif
1542
[623]1543    return;
[581]1544
[580]1545}  // end thread_display_busylocks()
[581]1546
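// Usage sketch (hypothetical call site, not part of thread.c): dump the
// busylocks currently held by the calling thread, tagging the output with
// the caller's name; requires DEBUG_BUSYLOCK to be set in kernel_config.h.
static inline void example_dump_my_busylocks( void )
{
    thread_display_busylocks( XPTR( local_cxy , CURRENT_THREAD ) , __FUNCTION__ );
}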