source: trunk/kernel/kern/thread.c @ 625

Last change on this file since 625 was 625, checked in by alain, 5 years ago

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel whenever
the target process descriptor was in the reference cluster.
This release of physical pages should be done only when the page
forks counter value is zero.
All other modifications are cosmetic.
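
The guard can be sketched as follows (a minimal sketch only: it uses the
page_t "forks" counter and "lock" busylock that appear later in this file,
the "is_ref" flag name is illustrative, and the actual code lives in
vmm_remove_vseg() in vmm.c):

    // release the physical page to the kernel only when the process
    // descriptor is the reference copy AND no child process still
    // shares the page through a COW mapping
    remote_busylock_acquire( lock_xp );
    uint32_t forks = hal_remote_l32( forks_xp );
    remote_busylock_release( lock_xp );
    if( is_ref && (forks == 0) )
    {
        // return the page to the physical pages manager (PPM)
    }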

File size: 52.3 KB
[1]1/*
[564]2 * thread.c -   thread operations implementation (user & kernel)
[171]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
[625]5 *         Alain Greiner (2016,2017,2018,2019)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
[5]9 * This file is part of ALMOS-MKH.
[1]10 *
[5]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[5]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
[5]21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
[625]31#include <hal_vmm.h>
[1]32#include <memcpy.h>
33#include <printk.h>
34#include <cluster.h>
35#include <process.h>
36#include <scheduler.h>
[188]37#include <dev_pic.h>
[1]38#include <core.h>
39#include <list.h>
40#include <xlist.h>
41#include <page.h>
42#include <kmem.h>
43#include <ppm.h>
44#include <thread.h>
[446]45#include <rpc.h>
[1]46
47//////////////////////////////////////////////////////////////////////////////////////
48// Extern global variables
49//////////////////////////////////////////////////////////////////////////////////////
50
[564]51extern process_t            process_zero;       // allocated in kernel_init.c
52extern char               * lock_type_str[];    // allocated in kernel_init.c
53extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
[1]54
55//////////////////////////////////////////////////////////////////////////////////////
[16]56// This function returns a printable string for the thread type.
[1]57//////////////////////////////////////////////////////////////////////////////////////
[527]58const char * thread_type_str( thread_type_t type )
[5]59{
[527]60  switch ( type ) {
61  case THREAD_USER:   return "USR";
62  case THREAD_RPC:    return "RPC";
63  case THREAD_DEV:    return "DEV";
64  case THREAD_IDLE:   return "IDL";
65  default:            return "undefined";
66  }
[5]67}
68
[1]69/////////////////////////////////////////////////////////////////////////////////////
[14]70// This static function allocates physical memory for a thread descriptor.
71// It can be called by the three functions:
[1]72// - thread_user_create()
[14]73// - thread_user_fork()
[1]74// - thread_kernel_create()
75/////////////////////////////////////////////////////////////////////////////////////
[14]76// @ return pointer on thread descriptor if success / return NULL if failure.
[1]77/////////////////////////////////////////////////////////////////////////////////////
[485]78static thread_t * thread_alloc( void )
[1]79{
[23]80        page_t       * page;   // pointer on page descriptor containing thread descriptor
[171]81        kmem_req_t     req;    // kmem request
[1]82
83        // allocates memory for thread descriptor + kernel stack
84        req.type  = KMEM_PAGE;
[14]85        req.size  = CONFIG_THREAD_DESC_ORDER;
[1]86        req.flags = AF_KERNEL | AF_ZERO;
87        page      = kmem_alloc( &req );
88
[23]89        if( page == NULL ) return NULL;
[1]90
[315]91    // return pointer on new thread descriptor
92    xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
[469]93    return GET_PTR( base_xp );
[315]94
95}  // end thread_alloc()
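// [Illustrative sketch, not part of the source] Layout of the physically
// contiguous block returned by thread_alloc(), as completed by
// thread_init() below:
//
//     thread ------------------> +------------------------------+
//                                |  thread_t descriptor         |
//                                |  (ends at the signature)     |
//     k_stack_base ------------> +------------------------------+
//                                |  kernel stack                |
//     thread + DESC_SIZE ------> +------------------------------+
//
// with desc_size    = offset of the signature field + 4,
//      k_stack_base = (intptr_t)thread + desc_size,
//      k_stack_size = CONFIG_THREAD_DESC_SIZE - desc_size.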
96 
97
[14]98/////////////////////////////////////////////////////////////////////////////////////
99// This static function initializes a thread descriptor (kernel or user).
[438]100// It can be called by the four functions:
[14]101// - thread_user_create()
102// - thread_user_fork()
103// - thread_kernel_create()
[438]104// - thread_idle_init()
[625]105// The "type" and "trdid" fields must have been previously set.
[438]106// It updates the local DQDT.
[14]107/////////////////////////////////////////////////////////////////////////////////////
[625]108// @ thread          : pointer on local thread descriptor
109// @ process         : pointer on local process descriptor.
110// @ type            : thread type.
111// @ trdid           : thread identifier
112// @ func            : pointer on thread entry function.
113// @ args            : pointer on thread entry function arguments.
114// @ core_lid        : target core local index.
115// @ user_stack_vseg : local pointer on user stack vseg (user thread only)
[14]116/////////////////////////////////////////////////////////////////////////////////////
117static error_t thread_init( thread_t      * thread,
118                            process_t     * process,
119                            thread_type_t   type,
[625]120                            trdid_t         trdid,
[14]121                            void          * func,
122                            void          * args,
123                            lid_t           core_lid,
[625]124                            vseg_t        * user_stack_vseg )
[14]125{
126
[625]127// check type and trdid fields initialized
128assert( (thread->type == type)   , "bad type argument" );
129assert( (thread->trdid == trdid) , "bad trdid argument" );
[14]130
[564]131#if DEBUG_THREAD_INIT
[593]132uint32_t   cycle = (uint32_t)hal_get_cycles();
133thread_t * this  = CURRENT_THREAD;
[564]134if( DEBUG_THREAD_INIT < cycle )
[593]135printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n",
[625]136__FUNCTION__, this->process->pid, this->trdid, thread->trdid, process->pid , cycle );
[443]137#endif
138
[407]139    // compute thread descriptor size without kernel stack
140    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4; 
141
[1]142        // Initialize new thread descriptor
143    thread->quantum         = 0;            // TODO
144    thread->ticks_nr        = 0;            // TODO
[457]145    thread->time_last_check = 0;            // TODO
[625]146        thread->core            = &LOCAL_CLUSTER->core_tbl[core_lid];
[1]147        thread->process         = process;
[564]148    thread->busylocks       = 0;
[1]149
[564]150#if DEBUG_BUSYLOCK
151    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
[409]152#endif
[1]153
[625]154    thread->user_stack_vseg = user_stack_vseg;
[407]155    thread->k_stack_base    = (intptr_t)thread + desc_size;
156    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
[1]157    thread->entry_func      = func;         // thread entry point
158    thread->entry_args      = args;         // thread function arguments
[171]159    thread->flags           = 0;            // all flags reset
[1]160    thread->errno           = 0;            // no error detected
[407]161    thread->fork_user       = 0;            // no user defined placement for fork
162    thread->fork_cxy        = 0;            // user defined target cluster for fork
[409]163    thread->blocked         = THREAD_BLOCKED_GLOBAL;
[1]164
[564]165    // initialize sched list
[1]166    list_entry_init( &thread->sched_list );
167
[564]168    // initialize waiting queue entries
169    list_entry_init( &thread->wait_list );
170    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );
171
172    // initialize thread info
[1]173    memset( &thread->info , 0 , sizeof(thread_info_t) );
174
[564]175    // initialize join_lock
176    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
[409]177
[1]178    // initialise signature
179        thread->signature = THREAD_SIGNATURE;
180
[443]181    // FIXME define and call an architecture specific hal_thread_init()
182    // function to initialise the save_sr field
[408]183    thread->save_sr = 0xFF13;
184
[171]185    // register new thread in core scheduler
[1]186    sched_register_thread( thread->core , thread );
187
[438]188        // update DQDT
[583]189    dqdt_increment_threads();
[438]190
[564]191#if DEBUG_THREAD_INIT
[443]192cycle = (uint32_t)hal_get_cycles();
[564]193if( DEBUG_THREAD_INIT < cycle )
[593]194printk("\n[%s] thread[%x,%x] exit for thread %x in process %x / cycle %d\n",
195__FUNCTION__, this->process->pid, this->trdid, thread, process->pid, cycle );
[443]196#endif
197
[1]198        return 0;
199
[296]200} // end thread_init()
201
[625]202//////////////////////////////////////////////////
[23]203error_t thread_user_create( pid_t             pid,
204                            void            * start_func,
205                            void            * start_arg,
[1]206                            pthread_attr_t  * attr,
[23]207                            thread_t       ** new_thread )
[1]208{
209    error_t        error;
210        thread_t     * thread;       // pointer on created thread descriptor
[625]211    trdid_t        trdid;        // created thread identifier
[1]212    process_t    * process;      // pointer to local process descriptor
213    lid_t          core_lid;     // selected core local index
[625]214    vseg_t       * us_vseg;      // user stack vseg
[1]215
[593]216assert( (attr != NULL) , "pthread attributes must be defined" );
[5]217
[438]218#if DEBUG_THREAD_USER_CREATE
[593]219thread_t * this  = CURRENT_THREAD;
220uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]221if( DEBUG_THREAD_USER_CREATE < cycle )
[593]222printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
223__FUNCTION__, this->process->pid , this->trdid , local_cxy , pid , cycle );
[433]224#endif
[428]225
[23]226    // get process descriptor local copy
227    process = process_get_local_copy( pid );
[440]228
[23]229    if( process == NULL )
230    {
231                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
[625]232        __FUNCTION__ , pid );
233        return -1;
[23]234    }
235
[443]236#if( DEBUG_THREAD_USER_CREATE & 1)
237if( DEBUG_THREAD_USER_CREATE < cycle )
[593]238printk("\n[%s] process descriptor = %x for process %x in cluster %x\n",
[443]239__FUNCTION__, process , pid , local_cxy );
240#endif
241
[171]242    // select a target core in local cluster
[407]243    if( attr->attributes & PT_ATTR_CORE_DEFINED )
[23]244    {
[407]245        core_lid = attr->lid;
246        if( core_lid >= LOCAL_CLUSTER->cores_nr )
247        {
248                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
249            __FUNCTION__ , core_lid );
[625]250            return -1;
[407]251        }
[23]252    }
[407]253    else
254    {
255        core_lid = cluster_select_local_core();
256    }
[1]257
[443]258#if( DEBUG_THREAD_USER_CREATE & 1)
259if( DEBUG_THREAD_USER_CREATE < cycle )
[593]260printk("\n[%s] core[%x,%d] selected\n",
[443]261__FUNCTION__, local_cxy , core_lid );
262#endif
263
[625]264    // allocate memory for thread descriptor
265    thread = thread_alloc();
[1]266
[625]267    if( thread == NULL )
[23]268    {
[625]269            printk("\n[ERROR] in %s : cannot create new thread in cluster %x\n",
270        __FUNCTION__, local_cxy );
271        return -1;
[171]272    }
[23]273
[457]274#if( DEBUG_THREAD_USER_CREATE & 1)
275if( DEBUG_THREAD_USER_CREATE < cycle )
[625]276printk("\n[%s] new thread descriptor %x allocated\n",
277__FUNCTION__, thread );
[457]278#endif
279
[625]280    // set type in thread descriptor
281    thread->type = THREAD_USER;
[1]282
[625]283    // register new thread in process descriptor, and get a TRDID
284    error = process_register_thread( process, thread , &trdid );
285
286    if( error )
[23]287    {
[625]288        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
289        __FUNCTION__, pid );
290        thread_destroy( thread );
291        return -1;
[23]292    }
[14]293
[625]294    // set trdid in thread descriptor
295    thread->trdid = trdid;
296
[443]297#if( DEBUG_THREAD_USER_CREATE & 1)
298if( DEBUG_THREAD_USER_CREATE < cycle )
[625]299printk("\n[%s] new thread %x registered in process %x\n",
300__FUNCTION__, trdid, pid );
[443]301#endif
302
[625]303    // allocate a stack from local VMM
304    us_vseg = vmm_create_vseg( process,
305                               VSEG_TYPE_STACK,
306                               LTID_FROM_TRDID( trdid ),
307                               0,                         // size unused
308                               0,                         // file_offset unused
309                               0,                         // file_size unused
310                               XPTR_NULL,                 // mapper_xp unused
311                               local_cxy );
312
313    if( us_vseg == NULL )
314    {
315            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
316        process_remove_thread( thread );
317        thread_destroy( thread );
318                return -1;
319    }
320
321#if( DEBUG_THREAD_USER_CREATE & 1)
322if( DEBUG_THREAD_USER_CREATE < cycle )
323printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
324__FUNCTION__, us_vseg->vpn_base, us_vseg->vpn_size );
325#endif
326
[171]327    // initialize thread descriptor
[14]328    error = thread_init( thread,
329                         process,
330                         THREAD_USER,
[625]331                         trdid,
[23]332                         start_func,
333                         start_arg,
[14]334                         core_lid,
[625]335                         us_vseg );
[171]336    if( error )
[14]337    {
[23]338            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
[625]339        vmm_remove_vseg( process , us_vseg );
340        process_remove_thread( thread );
341        thread_destroy( thread );
342        return -1;
[14]343    }
344
[443]345#if( DEBUG_THREAD_USER_CREATE & 1)
346if( DEBUG_THREAD_USER_CREATE < cycle )
[625]347printk("\n[%s] new thread %x in process %x initialised\n",
348__FUNCTION__, thread->trdid, process->pid );
[443]349#endif
350
[14]351    // set DETACHED flag if required
[407]352    if( attr->attributes & PT_ATTR_DETACH ) 
353    {
354        thread->flags |= THREAD_FLAG_DETACHED;
355    }
[1]356
[171]357    // allocate & initialize CPU context
[457]358        if( hal_cpu_context_alloc( thread ) )
[23]359    {
360            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
[625]361        vmm_remove_vseg( process , us_vseg );
362        process_remove_thread( thread );
363        thread_destroy( thread );
364        return -1;
[23]365    }
[457]366    hal_cpu_context_init( thread );
[23]367
[457]368    // allocate & initialize FPU context
[407]369    if( hal_fpu_context_alloc( thread ) )
[23]370    {
371            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
[625]372        vmm_remove_vseg( process , us_vseg );
373        process_remove_thread( thread );
374        thread_destroy( thread );
375        return -1;
[23]376    }
[457]377    hal_fpu_context_init( thread );
[23]378
[457]379#if( DEBUG_THREAD_USER_CREATE & 1)
380if( DEBUG_THREAD_USER_CREATE < cycle )
[593]381printk("\n[%s] CPU & FPU contexts created for thread %x\n",
[457]382__FUNCTION__, thread->trdid );
[624]383hal_vmm_display( process , true );
[457]384#endif
385
[438]386#if DEBUG_THREAD_USER_CREATE
[433]387cycle = (uint32_t)hal_get_cycles();
[438]388if( DEBUG_THREAD_USER_CREATE < cycle )
[593]389printk("\n[%s] thread[%x,%x] exit / new_thread %x / core %d / cycle %d\n",
390__FUNCTION__, this->process->pid , this->trdid , thread->trdid, core_lid, cycle );
[433]391#endif
[1]392
393    *new_thread = thread;
394        return 0;
[14]395
[296]396}  // end thread_user_create()
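// [Illustrative sketch, not part of the source] A pthread_create()
// syscall handler might use the function above as follows; the variable
// names are assumptions, and the new thread must be explicitly unblocked,
// because thread_init() creates it with THREAD_BLOCKED_GLOBAL set:
//
//     pthread_attr_t  attr;          // already copied from user space
//     thread_t      * new_thread;
//
//     if( thread_user_create( process->pid,
//                             user_entry_func,
//                             user_entry_arg,
//                             &attr,             // must be non-NULL
//                             &new_thread ) )  return -1;
//
//     thread_unblock( XPTR( local_cxy , new_thread ),
//                     THREAD_BLOCKED_GLOBAL );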
397
[408]398///////////////////////////////////////////////////////
399error_t thread_user_fork( xptr_t      parent_thread_xp,
400                          process_t * child_process,
401                          thread_t ** child_thread )
[1]402{
403    error_t        error;
[625]404        thread_t     * child_ptr;        // local pointer on child thread
405    trdid_t        child_trdid;      // child thread identifier
[408]406    lid_t          core_lid;         // selected core local index
407    thread_t     * parent_ptr;       // local pointer on remote parent thread
408    cxy_t          parent_cxy;       // parent thread cluster
409    process_t    * parent_process;   // local pointer on parent process
410    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
[625]411    void         * parent_func;      // parent thread entry_func
412    void         * parent_args;      // parent thread entry_args
413    uint32_t       parent_flags;     // parent_thread flags
414    vseg_t       * parent_us_vseg;   // parent thread user stack vseg
415    vseg_t       * child_us_vseg;    // child thread user stack vseg
[5]416
[438]417#if DEBUG_THREAD_USER_FORK
[593]418uint32_t   cycle = (uint32_t)hal_get_cycles();
419thread_t * this  = CURRENT_THREAD;
[438]420if( DEBUG_THREAD_USER_FORK < cycle )
[625]421printk("\n[%s] thread[%x,%x] enter for child_process %x / cycle %d\n",
[593]422__FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
[433]423#endif
[408]424
[1]425    // select a target core in local cluster
426    core_lid = cluster_select_local_core();
427
[625]428#if (DEBUG_THREAD_USER_FORK & 1)
429if( DEBUG_THREAD_USER_FORK < cycle )
430printk("\n[%s] thread[%x,%x] selected core [%x,%d]\n",
431__FUNCTION__, this->process->pid, this->trdid, local_cxy, core_lid );
432#endif
433
[408]434    // get cluster and local pointer on parent thread descriptor
435    parent_cxy = GET_CXY( parent_thread_xp );
[469]436    parent_ptr = GET_PTR( parent_thread_xp );
[1]437
[625]438    // get relevant info from parent thread
439    parent_func    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_func ));
440    parent_args    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_args ));
441    parent_flags   = (uint32_t)hal_remote_l32( XPTR(parent_cxy,&parent_ptr->flags ));
442    parent_us_vseg = (vseg_t *)hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->user_stack_vseg ));
[1]443
[408]444    // get pointer on parent process in parent thread cluster
445    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
446                                                        &parent_ptr->process ) );
447 
[625]448    // build extended pointer on parent GPT in parent thread cluster
[408]449    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
450
[625]451#if (DEBUG_THREAD_USER_FORK & 1)
452if( DEBUG_THREAD_USER_FORK < cycle )
453printk("\n[%s] thread[%x,%x] get parent GPT\n",
454__FUNCTION__, this->process->pid, this->trdid );
455#endif
456
[408]457    // allocate memory for child thread descriptor
458    child_ptr = thread_alloc();
[625]459
[408]460    if( child_ptr == NULL )
[23]461    {
[625]462        printk("\n[ERROR] in %s : cannot allocate new thread\n",
463        __FUNCTION__ );
[408]464        return -1;
[23]465    }
[14]466
[625]467#if (DEBUG_THREAD_USER_FORK & 1)
468if( DEBUG_THREAD_USER_FORK < cycle )
469printk("\n[%s] thread[%x,%x] allocated new thread descriptor %x\n",
470__FUNCTION__, this->process->pid, this->trdid, child_ptr );
471#endif
472
473    // set type in thread descriptor
474    child_ptr->type = THREAD_USER;
475
476    // register new thread in process descriptor, and get a TRDID
477    error = process_register_thread( child_process, child_ptr , &child_trdid );
478
479    if( error )
480    {
481        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
482        __FUNCTION__, child_process->pid );
483        thread_destroy( child_ptr );
484        return -1;
485    }
486
487    // set trdid in thread descriptor
488    child_ptr->trdid = child_trdid;
489
490#if (DEBUG_THREAD_USER_FORK & 1)
491if( DEBUG_THREAD_USER_FORK < cycle )
492printk("\n[%s] thread[%x,%x] registered child thread %x in child process %x\n",
493__FUNCTION__, this->process->pid, this->trdid, child_trdid, child_process->pid );
494#endif
495
496    // get a user stack vseg from the local VMM allocator
497    child_us_vseg = vmm_create_vseg( child_process,
498                                     VSEG_TYPE_STACK,
499                                     LTID_FROM_TRDID( child_trdid ), 
500                                     0,                               // size unused
501                                     0,                               // file_offset unused
502                                     0,                               // file_size unused
503                                     XPTR_NULL,                       // mapper_xp unused
504                                     local_cxy );
505    if( child_us_vseg == NULL )
506    {
507            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
508        process_remove_thread( child_ptr );
509        thread_destroy( child_ptr );
510        return -1;
511    }
512
513#if (DEBUG_THREAD_USER_FORK & 1)
514if( DEBUG_THREAD_USER_FORK < cycle )
515printk("\n[%s] thread[%x,%x] created a user stack vseg / vpn_base %x / %d pages\n",
516__FUNCTION__, this->process->pid, this->trdid,
517child_us_vseg->vpn_base, child_us_vseg->vpn_size );
518#endif
519
[171]520    // initialize thread descriptor
[408]521    error = thread_init( child_ptr,
522                         child_process,
[14]523                         THREAD_USER,
[625]524                         child_trdid,
525                         parent_func,
526                         parent_args,
[14]527                         core_lid,
[625]528                         child_us_vseg );
[23]529    if( error )
[14]530    {
[408]531            printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
[625]532        vmm_remove_vseg( child_process , child_us_vseg ); 
533        process_remove_thread( child_ptr );
534        thread_destroy( child_ptr );
535        return -1;
[14]536    }
537
[564]538#if (DEBUG_THREAD_USER_FORK & 1)
539if( DEBUG_THREAD_USER_FORK < cycle )
[593]540printk("\n[%s] thread[%x,%x] initialised thread %x in process %x\n",
541__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]542#endif
543
[408]544    // set detached flag if required
[625]545    if( parent_flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
[1]546
[625]547    // allocate a CPU context for child thread
[408]548        if( hal_cpu_context_alloc( child_ptr ) )
[23]549    {
[407]550            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
[625]551        vmm_remove_vseg( child_process , child_us_vseg );
552        process_remove_thread( child_ptr );
553        thread_destroy( child_ptr );
[408]554        return -1;
[23]555    }
556
[625]557    // allocate a FPU context for child thread
[408]558        if( hal_fpu_context_alloc( child_ptr ) )
[23]559    {
[407]560            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
[625]561        vmm_remove_vseg( child_process , child_us_vseg );
562        process_remove_thread( child_ptr );
563        thread_destroy( child_ptr );
[408]564        return -1;
[23]565    }
566
[564]567#if (DEBUG_THREAD_USER_FORK & 1)
568if( DEBUG_THREAD_USER_FORK < cycle )
[593]569printk("\n[%s] thread[%x,%x] created CPU & FPU contexts for thread %x in process %x\n",
570__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]571#endif
572
[625]573    // scan parent GPT, and copy all valid entries
574    // associated with the user stack vseg into the child GPT
575    vpn_t  parent_vpn;
576    vpn_t  child_vpn;
577    bool_t mapped;
578    ppn_t  ppn;
579    vpn_t  parent_vpn_base = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_base ) );
580    vpn_t  parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) );
581    vpn_t  child_vpn_base  = child_us_vseg->vpn_base;
582    for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ; 
583         parent_vpn < (parent_vpn_base + parent_vpn_size) ;
584         parent_vpn++ , child_vpn++ )
[408]585    {
586        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
[625]587                                  child_vpn,
[408]588                                  parent_gpt_xp,
[625]589                                  parent_vpn,
[408]590                                  true,                 // set cow
591                                  &ppn,
592                                  &mapped );
593        if( error )
594        {
595            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
[625]596            vmm_remove_vseg( child_process , child_us_vseg );
597            process_remove_thread( child_ptr );
598            thread_destroy( child_ptr );
[408]599            return -1;
600        }
601
[625]602        // increment pending forks counter for a mapped page
[408]603        if( mapped )
604        {
[469]605            // get pointers on the page descriptor
[408]606            xptr_t   page_xp  = ppm_ppn2page( ppn );
607            cxy_t    page_cxy = GET_CXY( page_xp );
[469]608            page_t * page_ptr = GET_PTR( page_xp );
609
[625]610            // build extended pointers on forks and lock fields
[469]611            xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
612            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
613
[564]614            // get lock protecting page
615            remote_busylock_acquire( lock_xp ); 
616
617            // increment the forks counter in page descriptor
[473]618            hal_remote_atomic_add( forks_xp , 1 );
[408]619
[564]620            // release lock protecting page
621            remote_busylock_release( lock_xp ); 
[625]622        }
623    }
[564]624
[438]625#if (DEBUG_THREAD_USER_FORK & 1)
626if( DEBUG_THREAD_USER_FORK < cycle )
[625]627printk("\n[%s] thread[%x,%x] copied all stack vseg PTEs to child GPT\n",
628__FUNCTION__, this->process->pid, this->trdid );
[433]629#endif
[408]630
[625]631    // set COW flag for all mapped entries of user stack vseg in parent GPT
632    hal_gpt_set_cow( parent_gpt_xp,
633                     parent_vpn_base,
634                     parent_vpn_size );
[408]635
[625]636#if (DEBUG_THREAD_USER_FORK & 1)
637if( DEBUG_THREAD_USER_FORK < cycle )
638printk("\n[%s] thread[%x,%x] set the COW flag for stack vseg in parent GPT\n",
639__FUNCTION__, this->process->pid, this->trdid );
640#endif
641
642    // return child pointer
643    *child_thread = child_ptr;
644
[438]645#if DEBUG_THREAD_USER_FORK
[433]646cycle = (uint32_t)hal_get_cycles();
[438]647if( DEBUG_THREAD_USER_FORK < cycle )
[625]648printk("\n[%s] thread[%x,%x] exit / created thread[%x,%x] / cycle %d\n",
649__FUNCTION__, this->process->pid, this->trdid,
650child_ptr->process->pid, child_ptr->trdid, cycle );
[433]651#endif
[407]652
[1]653        return 0;
[5]654
[296]655}  // end thread_user_fork()
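// [Illustrative sketch, not part of the source] Invariant maintained by
// the GPT copy loop above: every mapped page of the parent user stack is
// COW in both GPTs, and its "forks" counter counts the child references.
// A later write fault on such a page is then expected to do, simplified:
//
//     if( forks == 0 )   // last reference : restore the write access
//         make the existing page writable in the local GPT;
//     else               // still shared : give the writer a private copy
//         allocate a new page, copy the data, decrement forks,
//         and map the new page with write access;
//
// This is also why vmm_remove_vseg() may release a physical page only
// when forks == 0 (see the change log of this revision).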
656
[457]657////////////////////////////////////////////////
658error_t thread_user_exec( void     * entry_func,
659                          uint32_t   argc,
660                          char    ** argv )
661{
662    thread_t  * thread  = CURRENT_THREAD;
663    process_t * process = thread->process;
664
665#if DEBUG_THREAD_USER_EXEC
666uint32_t cycle = (uint32_t)hal_get_cycles();
667if( DEBUG_THREAD_USER_EXEC < cycle )
[593]668printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
669__FUNCTION__, process->pid, thread->trdid, cycle );
[457]670#endif
671
[564]672// check parent thread attributes
673assert( (thread->type == THREAD_USER )          , "bad type" );
674assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
675assert( (thread->busylocks == 0)                , "bad busylocks" );
[457]676
677        // re-initialize various thread descriptor fields
678    thread->quantum         = 0;            // TODO
679    thread->ticks_nr        = 0;            // TODO
680    thread->time_last_check = 0;            // TODO
681
682    thread->entry_func      = entry_func;
683    thread->main_argc       = argc; 
684    thread->main_argv       = argv;
685
686    // the main thread is always detached
687    thread->flags           = THREAD_FLAG_DETACHED;
688    thread->blocked         = 0;
689    thread->errno           = 0;
690    thread->fork_user       = 0;    // not inherited
691    thread->fork_cxy        = 0;    // not inherited
692
[564]693    // re-initialize busylocks counters
694    thread->busylocks       = 0;
695
[457]696    // reset thread info
697    memset( &thread->info , 0 , sizeof(thread_info_t) );
698
[564]699    // re-initialize join_lock
700    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
[457]701
702    // allocate an user stack vseg for main thread
[625]703    vseg_t * us_vseg = vmm_create_vseg( process,
704                                        VSEG_TYPE_STACK,
705                                        LTID_FROM_TRDID( thread->trdid ),
706                                        0,                 // length unused
707                                        0,                 // file_offset unused
708                                        0,                 // file_size unused
709                                        XPTR_NULL,         // mapper_xp unused
710                                        local_cxy );
711    if( us_vseg == NULL )
[457]712    {
713            printk("\n[ERROR] in %s : cannot create stack vseg for main thread\n", __FUNCTION__ );
714                return -1;
715    }
716
[469]717    // update user stack in thread descriptor
[625]718    thread->user_stack_vseg = us_vseg;
[457]719   
720    // release FPU ownership if required
721    if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;
722
723    // re-initialize  FPU context
724    hal_fpu_context_init( thread );
725
726#if DEBUG_THREAD_USER_EXEC
727cycle = (uint32_t)hal_get_cycles();
728if( DEBUG_THREAD_USER_EXEC < cycle )
[593]729printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
730__FUNCTION__, process->pid, thread->trdid, cycle );
[624]731hal_vmm_display( process , true );
[457]732#endif
733
734    // re-initialize CPU context... and jump to user code
735        hal_cpu_context_exec( thread );
736
[564]737    assert( false, "we should not execute this code");
[457]738 
739    return 0;
740
741}  // end thread_user_exec()
742
[1]743/////////////////////////////////////////////////////////
744error_t thread_kernel_create( thread_t     ** new_thread,
745                              thread_type_t   type,
[171]746                              void          * func,
747                              void          * args,
[1]748                                              lid_t           core_lid )
749{
750    error_t        error;
[14]751        thread_t     * thread;       // pointer on new thread descriptor
[625]752    trdid_t        trdid;        // new thread identifier
[1]753
[593]754    thread_t * this = CURRENT_THREAD; 
[1]755
[593]756assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
757"illegal thread type" );
[1]758
[593]759assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
760"illegal core_lid" );
761
[438]762#if DEBUG_THREAD_KERNEL_CREATE
[593]763uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]764if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[593]765printk("\n[%s] thread[%x,%x] enter / requested_type %s / cycle %d\n",
766__FUNCTION__, this->process->pid, this->trdid, thread_type_str(type), cycle );
[433]767#endif
768
[171]769    // allocate memory for new thread descriptor
[14]770    thread = thread_alloc();
771
[581]772    if( thread == NULL )
773    {
774        printk("\n[ERROR] in %s : thread %x in process %x\n"
775        "   no memory for thread descriptor\n",
[593]776        __FUNCTION__, this->trdid, this->process->pid );
[581]777        return ENOMEM;
778    }
[14]779
[625]780    // set type in thread descriptor
781    thread->type = type;
782
783    // register new thread in local kernel process descriptor, and get a TRDID
784    error = process_register_thread( &process_zero , thread , &trdid );
785
786    if( error )
787    {
788        printk("\n[ERROR] in %s : cannot register thread in kernel process\n", __FUNCTION__ );
789        return -1;
790    }
791
792    // set trdid in thread descriptor
793    thread->trdid = trdid;
794
[171]795    // initialize thread descriptor
[14]796    error = thread_init( thread,
797                         &process_zero,
798                         type,
[625]799                         trdid,
[14]800                         func,
801                         args,
802                         core_lid,
[625]803                         NULL );  // no user stack for a kernel thread
[14]804
[171]805    if( error ) // release allocated memory for thread descriptor
[1]806    {
[625]807        printk("\n[ERROR] in %s : cannot initialize thread descriptor\n", __FUNCTION__ );
808        thread_destroy( thread );
[457]809        return ENOMEM;
[1]810    }
811
[171]812    // allocate & initialize CPU context
[457]813        error = hal_cpu_context_alloc( thread );
[581]814
[457]815    if( error )
816    {
[581]817        printk("\n[ERROR] in %s : thread %x in process %x\n"
[593]818        "    cannot create CPU context\n",
819        __FUNCTION__, this->trdid, this->process->pid );
[625]820        thread_destroy( thread );
[457]821        return EINVAL;
822    }
[581]823
[457]824    hal_cpu_context_init( thread );
[14]825
[583]826    // set THREAD_BLOCKED_IDLE for DEV threads
827    if( type == THREAD_DEV ) thread->blocked |= THREAD_BLOCKED_IDLE;
[457]828
[438]829#if DEBUG_THREAD_KERNEL_CREATE
[433]830cycle = (uint32_t)hal_get_cycles();
[438]831if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[593]832printk("\n[%s] thread[%x,%x] exit / new_thread %x / type %s / cycle %d\n",
833__FUNCTION__, this->process->pid, this->trdid, thread, thread_type_str(type), cycle );
[433]834#endif
[1]835
[171]836    *new_thread = thread;
[1]837        return 0;
[5]838
[296]839} // end thread_kernel_create()
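// [Illustrative sketch, not part of the source] Typical use of the
// function above, e.g. to create an RPC server thread on a given core;
// the rpc_server_func entry point name is an assumption:
//
//     thread_t * new_thread;
//     error_t    error = thread_kernel_create( &new_thread,
//                                              THREAD_RPC,
//                                              &rpc_server_func,
//                                              NULL,        // no args
//                                              core_lid );
//     if( error ) return -1;
//
// A THREAD_DEV thread is created with THREAD_BLOCKED_IDLE set (see
// above), so it must be unblocked when its chdev queue becomes non-empty.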
840
[457]841//////////////////////////////////////////////
842void thread_idle_init( thread_t      * thread,
843                       thread_type_t   type,
844                       void          * func,
845                       void          * args,
846                           lid_t           core_lid )
[14]847{
[625]848    trdid_t trdid;   
849    error_t error;
[14]850
[564]851// check arguments
852assert( (type == THREAD_IDLE) , "illegal thread type" );
853assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
854
[625]855    // set type in thread descriptor
856    thread->type = THREAD_IDLE;
857
858    // register idle thread in local kernel process descriptor, and get a TRDID
859    error = process_register_thread( &process_zero , thread , &trdid );
860
861assert( (error == 0), "cannot register idle_thread in kernel process" );
862
863    // set trdid in thread descriptor
864    thread->trdid = trdid;
865
[457]866    // initialize thread descriptor
[625]867    error = thread_init( thread,
868                         &process_zero,
869                         THREAD_IDLE,
870                         trdid,
871                         func,
872                         args,
873                         core_lid,
874                         NULL );   // no user stack for a kernel thread
[14]875
[625]876assert( (error == 0), "cannot initialize idle_thread" );
[457]877
[14]878    // allocate & initialize CPU context if success
[457]879    error = hal_cpu_context_alloc( thread );
[171]880
[625]881assert( (error == 0), "cannot allocate CPU context" );
[14]882
[457]883    hal_cpu_context_init( thread );
884
[438]885}  // end thread_idle_init()
[407]886
[625]887////////////////////////////////////////////
888uint32_t thread_destroy( thread_t * thread )
[1]889{
[625]890    reg_t           save_sr;
891    uint32_t        count;
[1]892
[625]893    thread_type_t   type    = thread->type;
894    process_t     * process = thread->process;
895    core_t        * core    = thread->core;
[1]896
[438]897#if DEBUG_THREAD_DESTROY
[583]898uint32_t   cycle = (uint32_t)hal_get_cycles();
899thread_t * this  = CURRENT_THREAD;
[438]900if( DEBUG_THREAD_DESTROY < cycle )
[593]901printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
[583]902__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[433]903#endif
[1]904
[625]905    // check calling thread busylocks counter
[583]906    thread_assert_can_yield( thread , __FUNCTION__ );
[171]907
[625]908    // update target process instrumentation counter
[408]909        process->vmm.pgfault_nr += thread->info.pgfault_nr;
[1]910
[625]911    // remove thread from process th_tbl[]
912    count = process_remove_thread( thread );
913
914    // release memory allocated for CPU context and FPU context if required
[1]915        hal_cpu_context_destroy( thread );
[625]916        hal_fpu_context_destroy( thread );
[1]917       
[625]918    // release user stack vseg (for a user thread only)
919    if( type == THREAD_USER )  vmm_remove_vseg( process , thread->user_stack_vseg );
920
[428]921    // release FPU ownership if required
[409]922        hal_disable_irq( &save_sr );
[1]923        if( core->fpu_owner == thread )
924        {
925                core->fpu_owner = NULL;
926                hal_fpu_disable();
927        }
[409]928        hal_restore_irq( save_sr );
[1]929
930    // invalidate thread descriptor
931        thread->signature = 0;
932
[625]933    // release memory for thread descriptor (including kernel stack)
934    kmem_req_t   req;
935    xptr_t       base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
[1]936
[625]937    req.type  = KMEM_PAGE;
938    req.ptr   = GET_PTR( base_xp );
939    kmem_free( &req );
940
[438]941#if DEBUG_THREAD_DESTROY
[433]942cycle = (uint32_t)hal_get_cycles();
[438]943if( DEBUG_THREAD_DESTROY < cycle )
[593]944printk("\n[%s] thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
[583]945__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[433]946#endif
[1]947
[625]948    return count;
949
[407]950}   // end thread_destroy()
951
[416]952//////////////////////////////////////////////////
953inline void thread_set_req_ack( thread_t * target,
954                                uint32_t * rsp_count )
[1]955{
[409]956    reg_t    save_sr;   // for critical section
957
[416]958    // get pointer on target thread scheduler
959    scheduler_t * sched = &target->core->scheduler;
[409]960
[416]961    // wait scheduler ready to handle a new request
962    while( sched->req_ack_pending ) asm volatile( "nop" );
[409]963   
964    // enter critical section
965    hal_disable_irq( &save_sr );
966     
[416]967    // set request in target thread scheduler
968    sched->req_ack_pending = true;
[409]969
[416]970    // set ack request in target thread "flags"
971    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );
[409]972
[416]973    // set pointer on responses counter in target thread
974    target->ack_rsp_count = rsp_count;
[409]975   
976    // exit critical section
977    hal_restore_irq( save_sr );
978
[407]979    hal_fence();
[171]980
[416]981}  // thread_set_req_ack()
[409]982
[416]983/////////////////////////////////////////////////////
984inline void thread_reset_req_ack( thread_t * target )
[1]985{
[409]986    reg_t    save_sr;   // for critical section
987
988    // get pointer on target thread scheduler
[416]989    scheduler_t * sched = &target->core->scheduler;
[409]990
991    // check signal pending in scheduler
[492]992    assert( sched->req_ack_pending , "no pending signal" );
[409]993   
994    // enter critical section
995    hal_disable_irq( &save_sr );
996     
997    // reset signal in scheduler
[416]998    sched->req_ack_pending = false;
[409]999
1000    // reset signal in thread "flags"
[416]1001    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );
[409]1002
1003    // reset pointer on responses counter
[416]1004    target->ack_rsp_count = NULL;
[409]1005   
1006    // exit critical section
1007    hal_restore_irq( save_sr );
1008
[407]1009    hal_fence();
[171]1010
[416]1011}  // thread_reset_req_ack()
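// [Illustrative sketch, not part of the source] Requester side of the
// acknowledge protocol implemented by the two functions above, as used
// for instance when all threads of a process must be blocked:
//
//     uint32_t rsp_count = nb_target_threads;      // shared counter
//
//     // post one request per target thread
//     for each target thread T :
//         thread_set_req_ack( T , &rsp_count );
//
//     // wait until every target scheduler has acknowledged
//     while( rsp_count != 0 ) sched_yield( "wait acks" );
//
// Each target scheduler handles THREAD_FLAG_REQ_ACK by decrementing
// *ack_rsp_count and calling thread_reset_req_ack().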
[409]1012
[436]1013//////////////////////////////////////
1014void thread_block( xptr_t   thread_xp,
1015                   uint32_t cause )
[407]1016{
[436]1017    // get thread cluster and local pointer
1018    cxy_t      cxy = GET_CXY( thread_xp );
1019    thread_t * ptr = GET_PTR( thread_xp );
1020
[407]1021    // set blocking cause
[436]1022    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
[407]1023    hal_fence();
1024
[438]1025#if DEBUG_THREAD_BLOCK
[457]1026uint32_t    cycle   = (uint32_t)hal_get_cycles();
1027process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
[593]1028thread_t  * this    = CURRENT_THREAD;
[438]1029if( DEBUG_THREAD_BLOCK < cycle )
[593]1030printk("\n[%s] thread[%x,%x] blocked thread %x in process %x / cause %x\n",
1031__FUNCTION__, this->process->pid, this->trdid,
[564]1032ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
[433]1033#endif
1034
[407]1035} // end thread_block()
1036
[433]1037////////////////////////////////////////////
1038uint32_t thread_unblock( xptr_t   thread_xp,
[407]1039                         uint32_t cause )
1040{
1041    // get thread cluster and local pointer
[433]1042    cxy_t      cxy = GET_CXY( thread_xp );
1043    thread_t * ptr = GET_PTR( thread_xp );
[407]1044
1045    // reset blocking cause
1046    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
1047    hal_fence();
1048
[438]1049#if DEBUG_THREAD_BLOCK
[457]1050uint32_t    cycle   = (uint32_t)hal_get_cycles();
1051process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
[593]1052thread_t  * this    = CURRENT_THREAD;
[438]1053if( DEBUG_THREAD_BLOCK < cycle )
[593]1054printk("\n[%s] thread[%x,%x] unblocked thread %x in process %x / cause %x\n",
1055__FUNCTION__, this->process->pid, this->trdid,
[564]1056ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
[433]1057#endif
1058
[446]1059    // return a non-zero value if the cause bit was actually modified
1060    return( previous & cause );
[436]1061
[446]1062}  // end thread_unblock()
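// [Illustrative sketch, not part of the source] Both functions take an
// extended pointer, so any core can block or unblock a thread located in
// a remote cluster:
//
//     xptr_t t_xp = thread_get_xptr( pid , trdid );
//     if( t_xp != XPTR_NULL )
//     {
//         // non-zero result means this call actually reset the bit
//         uint32_t changed = thread_unblock( t_xp , THREAD_BLOCKED_JOIN );
//     }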
[407]1063
[440]1064//////////////////////////////////////
1065void thread_delete( xptr_t  target_xp,
1066                    pid_t   pid,
1067                    bool_t  is_forced )
1068{
1069    reg_t       save_sr;                // for critical section
1070    bool_t      target_join_done;       // joining thread arrived first
1071    bool_t      target_attached;        // target thread attached
1072    xptr_t      killer_xp;              // extended pointer on killer thread (this)
1073    thread_t  * killer_ptr;             // pointer on killer thread (this)
1074    cxy_t       target_cxy;             // target thread cluster     
1075    thread_t  * target_ptr;             // pointer on target thread
[625]1076    process_t * target_process;         // pointer on target process
1077    pid_t       target_pid;             // target process identifier
[440]1078    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
1079    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
1080    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
1081    trdid_t     target_trdid;           // target thread identifier
1082    ltid_t      target_ltid;            // target thread local index
1083    xptr_t      joining_xp;             // extended pointer on joining thread
1084    thread_t  * joining_ptr;            // pointer on joining thread
1085    cxy_t       joining_cxy;            // joining thread cluster
1086
[564]1087    // get target thread cluster and local pointer
[440]1088    target_cxy      = GET_CXY( target_xp );
1089    target_ptr      = GET_PTR( target_xp );
[564]1090
[625]1091    // get target thread identifier, attached flag, and process PID
[564]1092    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
[440]1093    target_ltid     = LTID_FROM_TRDID( target_trdid );
1094    target_flags_xp = XPTR( target_cxy , &target_ptr->flags ); 
[564]1095    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
[625]1096    target_process  = hal_remote_lpt( XPTR( target_cxy , &target_ptr->process ) );
1097    target_pid      = hal_remote_l32( XPTR( target_cxy , &target_process->pid ) );
[440]1098
[625]1099// check target PID
1100assert( (pid == target_pid),
1101"unconsistent pid and target_xp arguments" );
1102
[440]1103    // get killer thread pointers
1104    killer_ptr = CURRENT_THREAD;
1105    killer_xp  = XPTR( local_cxy , killer_ptr );
1106
1107#if DEBUG_THREAD_DELETE
[564]1108uint32_t cycle  = (uint32_t)hal_get_cycles();
[440]1109if( DEBUG_THREAD_DELETE < cycle )
[593]1110printk("\n[%s] killer[%x,%x] enters / target[%x,%x] / cycle %d\n",
[583]1111__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid, 
1112target_ptr->process->pid, target_ptr->trdid, cycle );
[440]1113#endif
1114
[564]1115// check target thread is not the main thread, because the main thread
1116// must be deleted by the parent process sys_wait() function
1117assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
[625]1118"target thread cannot be the main thread" );
[564]1119
[583]1120    // check killer thread can yield
1121    thread_assert_can_yield( killer_ptr , __FUNCTION__ ); 
[440]1122
[583]1123    // if the target thread is attached, we must synchronize with the joining thread
1124    // before blocking and marking the target thread for delete.
1125
1126    if( target_attached && (is_forced == false) ) // synchronize with joining thread
[564]1127    {
[440]1128        // build extended pointers on target thread join fields
1129        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
1130        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
1131
1132        // enter critical section
1133        hal_disable_irq( &save_sr );
1134
1135        // take the join_lock in target thread descriptor
[564]1136        remote_busylock_acquire( target_join_lock_xp );
[440]1137
1138        // get join_done from target thread descriptor
[564]1139        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
[440]1140   
[583]1141        if( target_join_done )                     // joining thread arrived first
[440]1142        {
1143            // get extended pointer on joining thread
[564]1144            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
[440]1145            joining_ptr = GET_PTR( joining_xp );
1146            joining_cxy = GET_CXY( joining_xp );
1147           
1148            // reset the join_done flag in target thread
1149            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
1150
1151            // unblock the joining thread
1152            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
1153
1154            // release the join_lock in target thread descriptor
[564]1155            remote_busylock_release( target_join_lock_xp );
[440]1156
[583]1157            // block the target thread
1158            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1159
[564]1160            // set the REQ_DELETE flag in target thread descriptor
1161            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1162
[583]1163            // exit critical section
[440]1164            hal_restore_irq( save_sr );
[564]1165
[583]1166#if DEBUG_THREAD_DELETE
1167cycle  = (uint32_t)hal_get_cycles();
[564]1168if( DEBUG_THREAD_DELETE < cycle )
[593]1169printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
[583]1170__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1171target_ptr->process->pid, target_ptr->trdid, cycle );
[564]1172#endif
[583]1173
1174        }
1175        else                                      // killer thread arrived first
1176        {
[440]1177            // set the kill_done flag in target thread
1178            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1179
1180            // block this thread on BLOCKED_JOIN
1181            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1182
1183            // set extended pointer on killer thread in target thread
[564]1184            hal_remote_s64( target_join_xp_xp , killer_xp );
[440]1185
1186            // release the join_lock in target thread descriptor
[564]1187            remote_busylock_release( target_join_lock_xp );
[440]1188
[583]1189#if DEBUG_THREAD_DELETE
1190cycle  = (uint32_t)hal_get_cycles();
[564]1191if( DEBUG_THREAD_DELETE < cycle )
[593]1192printk("\n[%s] killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
[583]1193__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1194target_ptr->process->pid, target_ptr->trdid, cycle );
[564]1195#endif
[440]1196            // deschedule
1197            sched_yield( "killer thread wait joining thread" );
1198
[583]1199            // block the target thread
1200            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1201
[564]1202            // set the REQ_DELETE flag in target thread descriptor
1203            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1204
[583]1205            // exit critical section
[440]1206            hal_restore_irq( save_sr );
[583]1207
1208#if DEBUG_THREAD_DELETE
1209cycle  = (uint32_t)hal_get_cycles();
1210if( DEBUG_THREAD_DELETE < cycle )
[593]1211printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
[583]1212__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1213target_ptr->process->pid, target_ptr->trdid, cycle );
1214#endif
1215
[440]1216        }
[564]1217    }
[583]1218    else                     // no synchronization with joining thread required
[564]1219    {
[583]1220        // block the target thread
1221        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1222
[564]1223        // set the REQ_DELETE flag in target thread descriptor
1224        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
[440]1225
1226#if DEBUG_THREAD_DELETE
1227cycle  = (uint32_t)hal_get_cycles();
1228if( DEBUG_THREAD_DELETE < cycle )
[593]1229printk("\n[%s] killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
[583]1230__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1231target_ptr->process->pid, target_ptr->trdid, cycle );
[440]1232#endif
1233
[583]1234    }
[440]1235}  // end thread_delete()
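// [Illustrative summary, not part of the source] The join rendez-vous
// implemented above, where K is the killer thread running this function
// and J is the thread calling pthread_join() on the target:
//
//   J arrived first : K finds JOIN_DONE set, unblocks J, and immediately
//                     blocks the target and sets its REQ_DELETE flag.
//   K arrives first : K sets KILL_DONE, registers itself in join_xp,
//                     blocks itself on BLOCKED_JOIN and deschedules;
//                     when J arrives it unblocks K, which then blocks
//                     the target and sets its REQ_DELETE flag.
//
// In both cases the target thread is actually destroyed later, by its
// own scheduler, when it handles the THREAD_FLAG_REQ_DELETE request.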
1236
1237
1238
[564]1239/////////////////////////////
[485]1240void thread_idle_func( void )
[1]1241{
[625]1242
1243#if DEBUG_THREAD_IDLE
1244uint32_t cycle;
1245#endif
1246
[1]1247    while( 1 )
1248    {
[408]1249        // unmask IRQs
1250        hal_enable_irq( NULL );
1251
[443]1252        // force core to low-power mode (optional)
[583]1253        if( CONFIG_SCHED_IDLE_MODE_SLEEP ) 
[407]1254        {
[1]1255
[564]1256#if DEBUG_THREAD_IDLE
[625]1257cycle = (uint32_t)hal_get_cycles();
[438]1258if( DEBUG_THREAD_IDLE < cycle )
[593]1259printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
[446]1260__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
[433]1261#endif
[1]1262
[407]1263            hal_core_sleep();
[1]1264
[564]1265#if DEBUG_THREAD_IDLE
[625]1266cycle = (uint32_t)hal_get_cycles();
[438]1267if( DEBUG_THREAD_IDLE < cycle )
[593]1268printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n",
[531]1269__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
[433]1270#endif
[407]1271
1272        }
[443]1273
[446]1274#if DEBUG_THREAD_IDLE
[625]1275cycle = (uint32_t)hal_get_cycles();
[564]1276if( DEBUG_THREAD_IDLE < cycle )
[446]1277sched_display( CURRENT_THREAD->core->lid );
1278#endif     
[564]1279        // search a runnable thread
1280        sched_yield( "running idle thread" );
[446]1281
[564]1282    } // end while
1283
[407]1284}  // end thread_idle()
[1]1285
[407]1286
[473]1287///////////////////////////////////////////
1288void thread_time_update( thread_t * thread,
[564]1289                         bool_t     is_user )
[16]1290{
[473]1291    cycle_t current_cycle;   // current cycle counter value
1292    cycle_t last_cycle;      // last cycle counter value
[1]1293
[473]1294    // get pointer on thread_info structure
1295    thread_info_t * info = &thread->info;
1296
1297    // get last cycle counter value
1298    last_cycle = info->last_cycle;
1299
1300    // get current cycle counter value
1301    current_cycle = hal_get_cycles();
1302
1303    // update thread_info structure
1304    info->last_cycle = current_cycle;
1305
1306    // update time in thread_info
1307    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
1308    else          info->sys_cycles += (current_cycle - last_cycle);
[16]1309
[564]1310}  // end thread_time_update()
1311
[23]1312/////////////////////////////////////
1313xptr_t thread_get_xptr( pid_t    pid,
1314                        trdid_t  trdid )
1315{
1316    cxy_t         target_cxy;          // target thread cluster identifier
1317    ltid_t        target_thread_ltid;  // target thread local index
[171]1318    thread_t    * target_thread_ptr;   // target thread local pointer
[23]1319    xptr_t        target_process_xp;   // extended pointer on target process descriptor
[171]1320    process_t   * target_process_ptr;  // local pointer on target process descriptor
[23]1321    pid_t         target_process_pid;  // target process identifier
1322    xlist_entry_t root;                // root of list of process in target cluster
1323    xptr_t        lock_xp;             // extended pointer on lock protecting  this list
[16]1324
[580]1325#if DEBUG_THREAD_GET_XPTR
1326uint32_t cycle  = (uint32_t)hal_get_cycles();
1327thread_t * this = CURRENT_THREAD;
1328if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1329printk("\n[%s] thread %x in process %x enters / pid %x / trdid %x / cycle %d\n",
[580]1330__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1331#endif
1332
[23]1333    // get target cluster identifier and local thread identifier
1334    target_cxy         = CXY_FROM_TRDID( trdid );
1335    target_thread_ltid = LTID_FROM_TRDID( trdid );
1336
[436]1337    // check trdid argument
[564]1338        if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 
[436]1339        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
1340
[23]1341    // get root of list of process descriptors in target cluster
1342    hal_remote_memcpy( XPTR( local_cxy  , &root ),
1343                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
1344                       sizeof(xlist_entry_t) );
1345
[564]1346    // get extended pointer on lock protecting the list of local processes
[23]1347    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
1348
1349    // take the lock protecting the list of processes in target cluster
[564]1350    remote_queuelock_acquire( lock_xp );
[23]1351
[580]1352#if( DEBUG_THREAD_GET_XPTR & 1 )
1353if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1354printk("\n[%s] scan processes in cluster %x :\n", __FUNCTION__, target_cxy );
[580]1355#endif
1356
[564]1357    // scan the list of local processes in target cluster
[23]1358    xptr_t  iter;
1359    bool_t  found = false;
1360    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
1361    {
1362        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[469]1363        target_process_ptr = GET_PTR( target_process_xp );
[564]1364        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
[580]1365
1366#if( DEBUG_THREAD_GET_XPTR & 1 )
1367if( DEBUG_THREAD_GET_XPTR < cycle )
1368printk(" - process %x\n", target_process_pid );
1369#endif
1370
[23]1371        if( target_process_pid == pid )
1372        {
1373            found = true;
1374            break;
1375        }
1376    }
1377
1378    // release the lock protecting the list of processes in target cluster
[564]1379    remote_queuelock_release( lock_xp );
[23]1380
[436]1381    // check PID found
[580]1382    if( found == false ) 
1383    {
[23]1384
[580]1385#if( DEBUG_THREAD_GET_XPTR & 1 )
1386if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1387printk("\n[%s] pid %x not found in cluster %x\n",
[580]1388__FUNCTION__, pid, target_cxy );
1389#endif
1390        return XPTR_NULL;
1391    }
1392
[23]1393    // get target thread local pointer
1394    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
[171]1395    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
[23]1396
[580]1397    if( target_thread_ptr == NULL )
1398    {
[23]1399
[580]1400#if( DEBUG_THREAD_GET_XPTR & 1 )
1401if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1402printk("\n[%s] thread %x not registered in process %x in cluster %x\n",
[580]1403__FUNCTION__, trdid, pid, target_cxy );
1404#endif
1405        return XPTR_NULL;
1406    }
1407
1408#if DEBUG_THREAD_GET_XPTR
1409cycle  = (uint32_t)hal_get_cycles();
1410if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1411printk("\n[%s] thread %x in process %x exit / pid %x / trdid %x / cycle %d\n",
[580]1412__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1413#endif
1414
[23]1415    return XPTR( target_cxy , target_thread_ptr );
[564]1416
1417}  // end thread_get_xptr()
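// [Illustrative sketch, not part of the source] Typical use of the
// function above: resolve a (pid,trdid) pair to an extended pointer,
// then access a remote field with the hal_remote API:
//
//     xptr_t t_xp = thread_get_xptr( pid , trdid );
//     if( t_xp == XPTR_NULL ) return -1;          // thread not found
//
//     cxy_t      t_cxy = GET_CXY( t_xp );
//     thread_t * t_ptr = GET_PTR( t_xp );
//     uint32_t   flags = hal_remote_l32( XPTR( t_cxy , &t_ptr->flags ) );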
1418
1419///////////////////////////////////////////////////
1420void thread_assert_can_yield( thread_t    * thread,
1421                              const char  * func_str )
1422{
1423    // does nothing if thread does not hold any busylock
1424
1425    if( thread->busylocks )
1426    {
1427        // get pointers on TXT0 chdev
1428        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1429        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1430        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1431
1432        // get extended pointer on TXT0 lock
1433        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1434
1435        // get TXT0 lock
1436        remote_busylock_acquire( txt0_lock_xp );
1437
1438        // display error message on TXT0
[593]1439        nolock_printk("\n[PANIC] in %s / thread[%x,%x] cannot yield : "
[580]1440        "hold %d busylock(s) / cycle %d\n",
[593]1441        func_str, thread->process->pid, thread->trdid,
[624]1442        thread->busylocks - 1, (uint32_t)hal_get_cycles() );
[564]1443
1444#if DEBUG_BUSYLOCK
[580]1445
[583]1446// scan list of busylocks
1447xptr_t    iter_xp;
[580]1448xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
1449XLIST_FOREACH( root_xp , iter_xp )
[564]1450{
[580]1451    xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1452    cxy_t        lock_cxy  = GET_CXY( lock_xp );
1453    busylock_t * lock_ptr  = GET_PTR( lock_xp );
1454    uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
1455    nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
1456}
[564]1457
1458#endif
[23]1459
[564]1460        // release TXT0 lock
1461        remote_busylock_release( txt0_lock_xp );
1462
1463        // suicide
1464        hal_core_sleep();
1465    }
1466}  // end thread_assert_can yield()
1467
[619]1468//////////////////////////////////////////////////////
1469void thread_display_busylocks( xptr_t       thread_xp,
1470                               const char * string )
[564]1471{
[623]1472
[581]1473    cxy_t      thread_cxy = GET_CXY( thread_xp );
1474    thread_t * thread_ptr = GET_PTR( thread_xp );
[564]1475
[623]1476#if DEBUG_BUSYLOCK
[564]1477
[623]1478    xptr_t     iter_xp;
[564]1479
[623]1480    // get relevant info from target thread descriptor
[619]1481    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
1482    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
1483    process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
1484    pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
[564]1485
[581]1486    // get extended pointer on root of busylocks
[619]1487    xptr_t root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
[564]1488
[581]1489    // get pointers on TXT0 chdev
1490    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1491    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1492    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[580]1493
[581]1494    // get extended pointer on remote TXT0 lock
1495    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[580]1496
[581]1497    // get TXT0 lock
1498    remote_busylock_acquire( txt0_lock_xp );
[580]1499
[581]1500    // display header
[619]1501    nolock_printk("\n***** thread[%x,%x] in <%s> : %d busylocks *****\n",
1502    pid, trdid, string, locks );
[581]1503
1504    // scan the xlist of busylocks when required
1505    if( locks )
1506    {
1507        XLIST_FOREACH( root_xp , iter_xp )
[580]1508        {
[581]1509            xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1510            cxy_t        lock_cxy  = GET_CXY( lock_xp );
1511            busylock_t * lock_ptr  = GET_PTR( lock_xp );
1512            uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
1513            nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
[580]1514        }
[581]1515    }
[580]1516
[581]1517    // release TXT0 lock
1518    remote_busylock_release( txt0_lock_xp );
1519
[623]1520#else
[581]1521
[623]1522printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
1523__FUNCTION__, string, thread_cxy, thread_ptr );
1524
[581]1525#endif
1526
[623]1527    return;
[581]1528
[580]1529}  // end thread_display_busylock()
[581]1530