source: trunk/kernel/kern/thread.c @ 624

Last change on this file since 624 was 624, checked in by alain, 5 years ago

Fix several bugs to use the instruction MMU in kernel mode,
replacing the instruction address extension register,
and remove the "kentry" segment.

This version is running on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase register defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor Interrupts.

File size: 49.2 KB
[1]1/*
[564]2 * thread.c -   thread operations implementation (user & kernel)
[171]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
[564]5 *         Alain Greiner (2016,2017,2018)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
[5]9 * This file is part of ALMOS-MKH.
[1]10 *
[5]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[5]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
[5]21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
31#include <memcpy.h>
32#include <printk.h>
33#include <cluster.h>
34#include <process.h>
35#include <scheduler.h>
[188]36#include <dev_pic.h>
[1]37#include <core.h>
38#include <list.h>
39#include <xlist.h>
40#include <page.h>
41#include <kmem.h>
42#include <ppm.h>
43#include <thread.h>
[446]44#include <rpc.h>
[1]45
46//////////////////////////////////////////////////////////////////////////////////////
47// Extern global variables
48//////////////////////////////////////////////////////////////////////////////////////
49
[564]50extern process_t            process_zero;       // allocated in kernel_init.c
51extern char               * lock_type_str[];    // allocated in kernel_init.c
52extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
[1]53
54//////////////////////////////////////////////////////////////////////////////////////
[16]55// This function returns a printable string for the thread type.
[1]56//////////////////////////////////////////////////////////////////////////////////////
[527]57const char * thread_type_str( thread_type_t type )
[5]58{
[527]59  switch ( type ) {
60  case THREAD_USER:   return "USR";
61  case THREAD_RPC:    return "RPC";
62  case THREAD_DEV:    return "DEV";
63  case THREAD_IDLE:   return "IDL";
64  default:            return "undefined";
65  }
[5]66}
67
[1]68/////////////////////////////////////////////////////////////////////////////////////
[14]69// This static function allocates physical memory for a thread descriptor.
70// It can be called by the following three functions:
[1]71// - thread_user_create()
[14]72// - thread_user_fork()
[1]73// - thread_kernel_create()
74/////////////////////////////////////////////////////////////////////////////////////
[14]75// @ return pointer on thread descriptor if success / return NULL if failure.
[1]76/////////////////////////////////////////////////////////////////////////////////////
[485]77static thread_t * thread_alloc( void )
[1]78{
[23]79        page_t       * page;   // pointer on page descriptor containing thread descriptor
[171]80        kmem_req_t     req;    // kmem request
[1]81
82        // allocates memory for thread descriptor + kernel stack
83        req.type  = KMEM_PAGE;
[14]84        req.size  = CONFIG_THREAD_DESC_ORDER;
[1]85        req.flags = AF_KERNEL | AF_ZERO;
86        page      = kmem_alloc( &req );
87
[23]88        if( page == NULL ) return NULL;
[1]89
[315]90    // return pointer on new thread descriptor
91    xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
[469]92    return GET_PTR( base_xp );
[315]93
94}  // end thread_alloc()
95 
96
[14]97/////////////////////////////////////////////////////////////////////////////////////
[23]98// This static function releases the physical memory for a thread descriptor.
[53]99// It is called by the following three functions:
[23]100// - thread_user_create()
101// - thread_user_fork()
102// - thread_kernel_create()
103/////////////////////////////////////////////////////////////////////////////////////
104// @ thread  : pointer on thread descriptor.
105/////////////////////////////////////////////////////////////////////////////////////
106static void thread_release( thread_t * thread )
107{
108    kmem_req_t   req;
109
[315]110    xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
111
[23]112    req.type  = KMEM_PAGE;
[315]113    req.ptr   = GET_PTR( base_xp );
[23]114    kmem_free( &req );
115}
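/////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled in the kernel) : thread_alloc() and thread_release()
// handle the thread descriptor and the kernel stack as one single block of
// (1 << CONFIG_THREAD_DESC_ORDER) physical pages. The sketch below only makes
// explicit the layout invariant that thread_init() relies on, assuming that
// CONFIG_THREAD_DESC_SIZE is the byte size of this block, as in kernel_config.h.
/////////////////////////////////////////////////////////////////////////////////////
#if 0
static void thread_block_layout_sketch( thread_t * thread )
{
    // the descriptor occupies the base of the block, up to the <signature> field
    intptr_t desc_end  = (intptr_t)(&thread->signature) + 4;

    // the kernel stack occupies all remaining bytes of the block
    intptr_t block_end = (intptr_t)thread + CONFIG_THREAD_DESC_SIZE;

    assert( (desc_end < block_end) , "no room left for the kernel stack" );
}
#endif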
116
117/////////////////////////////////////////////////////////////////////////////////////
[14]118// This static function initializes a thread descriptor (kernel or user).
[438]119// It can be called by the following four functions:
[14]120// - thread_user_create()
121// - thread_user_fork()
122// - thread_kernel_create()
[438]123// - thread_idle_init()
124// It updates the local DQDT.
[14]125/////////////////////////////////////////////////////////////////////////////////////
[593]126// @ thread       : pointer on local thread descriptor
127// @ process      : pointer on local process descriptor.
[14]128// @ type         : thread type.
129// @ func         : pointer on thread entry function.
130// @ args         : pointer on thread entry function arguments.
131// @ core_lid     : target core local index.
132// @ u_stack_base : stack base (user thread only)
133 // @ u_stack_size : stack size (user thread only)
134/////////////////////////////////////////////////////////////////////////////////////
135static error_t thread_init( thread_t      * thread,
136                            process_t     * process,
137                            thread_type_t   type,
138                            void          * func,
139                            void          * args,
140                            lid_t           core_lid,
141                            intptr_t        u_stack_base,
142                            uint32_t        u_stack_size )
143{
144    error_t        error;
145    trdid_t        trdid;      // allocated thread identifier
146
147        cluster_t    * local_cluster = LOCAL_CLUSTER;
148
[564]149#if DEBUG_THREAD_INIT
[593]150uint32_t   cycle = (uint32_t)hal_get_cycles();
151thread_t * this  = CURRENT_THREAD;
[564]152if( DEBUG_THREAD_INIT < cycle )
[593]153printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n",
154__FUNCTION__, this->process->pid, this->trdid, thread, process->pid , cycle );
[443]155#endif
156
[407]157    // compute thread descriptor size without kernel stack
158    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4; 
159
[1]160        // Initialize new thread descriptor
[564]161        thread->type            = type;
[1]162    thread->quantum         = 0;            // TODO
163    thread->ticks_nr        = 0;            // TODO
[457]164    thread->time_last_check = 0;            // TODO
[1]165        thread->core            = &local_cluster->core_tbl[core_lid];
166        thread->process         = process;
167
[564]168    thread->busylocks       = 0;
[1]169
[564]170#if DEBUG_BUSYLOCK
171    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
[409]172#endif
[1]173
[171]174    thread->u_stack_base    = u_stack_base;
[1]175    thread->u_stack_size    = u_stack_size;
[407]176    thread->k_stack_base    = (intptr_t)thread + desc_size;
177    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
[1]178
179    thread->entry_func      = func;         // thread entry point
180    thread->entry_args      = args;         // thread function arguments
[171]181    thread->flags           = 0;            // all flags reset
[1]182    thread->errno           = 0;            // no error detected
[407]183    thread->fork_user       = 0;            // no user defined placement for fork
184    thread->fork_cxy        = 0;            // user defined target cluster for fork
[409]185    thread->blocked         = THREAD_BLOCKED_GLOBAL;
[1]186
[564]187    // register new thread in process descriptor, and get a TRDID
188    error = process_register_thread( process, thread , &trdid );
189
190    if( error )
191    {
[581]192        printk("\n[ERROR] in %s : thread %x in process %x cannot get TRDID in cluster %x\n"
193        "    for thread %s in process %x / cycle %d\n",
194        __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
195        local_cxy, thread_type_str(type), process->pid, (uint32_t)hal_get_cycles() );
[564]196        return EINVAL;
197    }
198
199    // initialize trdid
200    thread->trdid           = trdid;
201
202    // initialize sched list
[1]203    list_entry_init( &thread->sched_list );
204
[564]205    // initialize waiting queue entries
206    list_entry_init( &thread->wait_list );
207    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );
208
209    // initialize thread info
[1]210    memset( &thread->info , 0 , sizeof(thread_info_t) );
211
[564]212    // initialize join_lock
213    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
[409]214
[1]215    // initialise signature
216        thread->signature = THREAD_SIGNATURE;
217
[443]218    // FIXME define and call an architecture specific hal_thread_init()
219    // function to initialise the save_sr field
[408]220    thread->save_sr = 0xFF13;
221
[171]222    // register new thread in core scheduler
[1]223    sched_register_thread( thread->core , thread );
224
[438]225        // update DQDT
[583]226    dqdt_increment_threads();
[438]227
[564]228#if DEBUG_THREAD_INIT
[443]229cycle = (uint32_t)hal_get_cycles();
[564]230if( DEBUG_THREAD_INIT < cycle )
[593]231printk("\n[%s] thread[%x,%x] exit for thread %x in process %x / cycle %d\n",
232__FUNCTION__, this->process->pid, this->trdid, thread, process->pid, cycle );
[443]233#endif
234
[1]235        return 0;
236
[296]237} // end thread_init()
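/////////////////////////////////////////////////////////////////////////////////////
// Numerical example for the kernel stack computation above (illustrative values,
// assuming 4 Kbytes pages and CONFIG_THREAD_DESC_ORDER == 2, i.e. a 16 Kbytes
// block) : if the thread_t fields up to <signature> occupy 0x400 bytes, then
// desc_size == 0x400, k_stack_base == (intptr_t)thread + 0x400, and
// k_stack_size == 0x4000 - 0x400 == 0x3C00 bytes for the kernel stack.
/////////////////////////////////////////////////////////////////////////////////////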
238
[1]239/////////////////////////////////////////////////////////
[23]240error_t thread_user_create( pid_t             pid,
241                            void            * start_func,
242                            void            * start_arg,
[1]243                            pthread_attr_t  * attr,
[23]244                            thread_t       ** new_thread )
[1]245{
246    error_t        error;
247        thread_t     * thread;       // pointer on created thread descriptor
248    process_t    * process;      // pointer to local process descriptor
249    lid_t          core_lid;     // selected core local index
[23]250    vseg_t       * vseg;         // stack vseg
[1]251
[593]252assert( (attr != NULL) , "pthread attributes must be defined" );
[5]253
[438]254#if DEBUG_THREAD_USER_CREATE
[593]255thread_t * this  = CURRENT_THREAD;
256uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]257if( DEBUG_THREAD_USER_CREATE < cycle )
[593]258printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
259__FUNCTION__, this->process->pid , this->trdid , local_cxy , pid , cycle );
[433]260#endif
[428]261
[23]262    // get process descriptor local copy
263    process = process_get_local_copy( pid );
[440]264
[23]265    if( process == NULL )
266    {
267                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
268               __FUNCTION__ , pid );
269        return ENOMEM;
270    }
271
[443]272#if( DEBUG_THREAD_USER_CREATE & 1)
273if( DEBUG_THREAD_USER_CREATE < cycle )
[593]274printk("\n[%s] process descriptor = %x for process %x in cluster %x\n",
[443]275__FUNCTION__, process , pid , local_cxy );
276#endif
277
[171]278    // select a target core in local cluster
[407]279    if( attr->attributes & PT_ATTR_CORE_DEFINED )
[23]280    {
[407]281        core_lid = attr->lid;
282        if( core_lid >= LOCAL_CLUSTER->cores_nr )
283        {
284                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
285            __FUNCTION__ , core_lid );
286            return EINVAL;
287        }
[23]288    }
[407]289    else
290    {
291        core_lid = cluster_select_local_core();
292    }
[1]293
[443]294#if( DEBUG_THREAD_USER_CREATE & 1)
295if( DEBUG_THREAD_USER_CREATE < cycle )
[593]296printk("\n[%s] core[%x,%d] selected\n",
[443]297__FUNCTION__, local_cxy , core_lid );
298#endif
299
[171]300    // allocate a stack from local VMM
[407]301    vseg = vmm_create_vseg( process,
302                            VSEG_TYPE_STACK,
303                            0,                 // size unused
304                            0,                 // length unused
305                            0,                 // file_offset unused
306                            0,                 // file_size unused
307                            XPTR_NULL,         // mapper_xp unused
308                            local_cxy );
[1]309
[170]310    if( vseg == NULL )
[23]311    {
312            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
313                return ENOMEM;
[171]314    }
[23]315
[457]316#if( DEBUG_THREAD_USER_CREATE & 1)
317if( DEBUG_THREAD_USER_CREATE < cycle )
[593]318printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
[457]319__FUNCTION__, vseg->vpn_base, vseg->vpn_size );
320#endif
321
[171]322    // allocate memory for thread descriptor
[14]323    thread = thread_alloc();
[1]324
[23]325    if( thread == NULL )
326    {
327            printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
[611]328        vmm_delete_vseg( process->pid , vseg->min );
[23]329        return ENOMEM;
330    }
[14]331
[443]332#if( DEBUG_THREAD_USER_CREATE & 1)
333if( DEBUG_THREAD_USER_CREATE < cycle )
[593]334printk("\n[%s] new thread descriptor %x allocated\n",
[443]335__FUNCTION__, thread );
336#endif
337
[171]338    // initialize thread descriptor
[14]339    error = thread_init( thread,
340                         process,
341                         THREAD_USER,
[23]342                         start_func,
343                         start_arg,
[14]344                         core_lid,
[23]345                         vseg->min,
346                         vseg->max - vseg->min );
[171]347    if( error )
[14]348    {
[23]349            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
[611]350        vmm_delete_vseg( process->pid , vseg->min );
[23]351        thread_release( thread );
[14]352        return EINVAL;
353    }
354
[443]355#if( DEBUG_THREAD_USER_CREATE & 1)
356if( DEBUG_THREAD_USER_CREATE < cycle )
[593]357printk("\n[%s] new thread descriptor initialised / trdid %x\n",
[457]358__FUNCTION__, thread->trdid );
[443]359#endif
360
[14]361    // set DETACHED flag if required
[407]362    if( attr->attributes & PT_ATTR_DETACH ) 
363    {
364        thread->flags |= THREAD_FLAG_DETACHED;
365    }
[1]366
[171]367    // allocate & initialize CPU context
[457]368        if( hal_cpu_context_alloc( thread ) )
[23]369    {
370            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
[611]371        vmm_delete_vseg( process->pid , vseg->min );
[23]372        thread_release( thread );
373        return ENOMEM;
374    }
[457]375    hal_cpu_context_init( thread );
[23]376
[457]377    // allocate & initialize FPU context
[407]378    if( hal_fpu_context_alloc( thread ) )
[23]379    {
380            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
[611]381        vmm_delete_vseg( process->pid , vseg->min );
[23]382        thread_release( thread );
383        return ENOMEM;
384    }
[457]385    hal_fpu_context_init( thread );
[23]386
[457]387#if( DEBUG_THREAD_USER_CREATE & 1)
388if( DEBUG_THREAD_USER_CREATE < cycle )
[593]389printk("\n[%s] CPU & FPU contexts created / trdid %x\n",
[457]390__FUNCTION__, thread->trdid );
[624]391hal_vmm_display( process , true );
[457]392#endif
393
[438]394#if DEBUG_THREAD_USER_CREATE
[433]395cycle = (uint32_t)hal_get_cycles();
[438]396if( DEBUG_THREAD_USER_CREATE < cycle )
[593]397printk("\n[%s] thread[%x,%x] exit / new_thread %x / core %d / cycle %d\n",
398__FUNCTION__, this->process->pid , this->trdid , thread->trdid, core_lid, cycle );
[433]399#endif
[1]400
401    *new_thread = thread;
402        return 0;
[14]403
[296]404}  // end thread_user_create()
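/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (not compiled in the kernel) : how a caller such as the
// pthread_create() syscall handler could use thread_user_create(). The attribute
// values are illustrative; only PT_ATTR_DETACH and PT_ATTR_CORE_DEFINED are
// interpreted by this function.
/////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t thread_user_create_example( pid_t pid , void * func , void * arg )
{
    thread_t       * new;
    pthread_attr_t   attr;
    error_t          error;

    attr.attributes = PT_ATTR_DETACH;   // detached thread / core selected by kernel

    error = thread_user_create( pid , func , arg , &attr , &new );

    if( error ) return error;           // ENOMEM or EINVAL

    // the new thread was created with THREAD_BLOCKED_GLOBAL set by thread_init(),
    // and must be explicitly unblocked to become schedulable
    thread_unblock( XPTR( local_cxy , new ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}
#endif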
405
[408]406///////////////////////////////////////////////////////
407error_t thread_user_fork( xptr_t      parent_thread_xp,
408                          process_t * child_process,
409                          thread_t ** child_thread )
[1]410{
411    error_t        error;
[408]412        thread_t     * child_ptr;        // local pointer on local child thread
413    lid_t          core_lid;         // selected core local index
[1]414
[408]415    thread_t     * parent_ptr;       // local pointer on remote parent thread
416    cxy_t          parent_cxy;       // parent thread cluster
417    process_t    * parent_process;   // local pointer on parent process
418    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
[5]419
[408]420    void         * func;             // parent thread entry_func
421    void         * args;             // parent thread entry_args
422    intptr_t       base;             // parent thread u_stack_base
423    uint32_t       size;             // parent thread u_stack_size
424    uint32_t       flags;            // parent_thread flags
425    vpn_t          vpn_base;         // parent thread stack vpn_base
426    vpn_t          vpn_size;         // parent thread stack vpn_size
427    reg_t        * uzone;            // parent thread pointer on uzone 
428
429    vseg_t       * vseg;             // child thread STACK vseg
430
[438]431#if DEBUG_THREAD_USER_FORK
[593]432uint32_t   cycle = (uint32_t)hal_get_cycles();
433thread_t * this  = CURRENT_THREAD;
[438]434if( DEBUG_THREAD_USER_FORK < cycle )
[593]435printk("\n[%s] thread[%x,%x] enter / child_process %x / cycle %d\n",
436__FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
[433]437#endif
[408]438
[1]439    // select a target core in local cluster
440    core_lid = cluster_select_local_core();
441
[408]442    // get cluster and local pointer on parent thread descriptor
443    parent_cxy = GET_CXY( parent_thread_xp );
[469]444    parent_ptr = GET_PTR( parent_thread_xp );
[1]445
[408]446    // get relevant fields from parent thread
[428]447    func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
448    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
449    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
[564]450    size  = (uint32_t)hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
451    flags =           hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->flags         ));
[428]452    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
[1]453
[408]454    vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
455    vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;
456
457    // get pointer on parent process in parent thread cluster
458    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
459                                                        &parent_ptr->process ) );
460 
461    // get extended pointer on parent GPT in parent thread cluster
462    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
463
464    // allocate memory for child thread descriptor
465    child_ptr = thread_alloc();
466    if( child_ptr == NULL )
[23]467    {
468        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
[408]469        return -1;
[23]470    }
[14]471
[171]472    // initialize thread descriptor
[408]473    error = thread_init( child_ptr,
474                         child_process,
[14]475                         THREAD_USER,
[408]476                         func,
477                         args,
[14]478                         core_lid,
[408]479                         base,
480                         size );
[23]481    if( error )
[14]482    {
[408]483            printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
484        thread_release( child_ptr );
[14]485        return EINVAL;
486    }
487
[564]488#if (DEBUG_THREAD_USER_FORK & 1)
489if( DEBUG_THREAD_USER_FORK < cycle )
[593]490printk("\n[%s] thread[%x,%x] initialised thread %x in process %x\n",
491__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]492#endif
493
[407]494    // return child pointer
[408]495    *child_thread = child_ptr;
[1]496
[408]497    // set detached flag if required
498    if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
[1]499
[408]500    // update uzone pointer in child thread descriptor
[428]501    child_ptr->uzone_current = (char *)((intptr_t)uzone +
502                                        (intptr_t)child_ptr - 
503                                        (intptr_t)parent_ptr );
[408]504 
505
[407]506    // allocate CPU context for child thread
[408]507        if( hal_cpu_context_alloc( child_ptr ) )
[23]508    {
[407]509            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
[408]510        thread_release( child_ptr );
511        return -1;
[23]512    }
513
[407]514    // allocate FPU context for child thread
[408]515        if( hal_fpu_context_alloc( child_ptr ) )
[23]516    {
[407]517            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
[408]518        thread_release( child_ptr );
519        return -1;
[23]520    }
521
[564]522#if (DEBUG_THREAD_USER_FORK & 1)
523if( DEBUG_THREAD_USER_FORK < cycle )
[593]524printk("\n[%s] thread[%x,%x] created CPU & FPU contexts for thread %x in process %x\n",
525__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]526#endif
527
528   // create and initialize STACK vseg
[408]529    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate stack vseg\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }
530    vseg_init( vseg,
531               VSEG_TYPE_STACK,
532               base,
533               size,
534               vpn_base,
535               vpn_size,
536               0, 0, XPTR_NULL,                         // not a file vseg
537               local_cxy );
[1]538
[408]539    // register STACK vseg in local child VSL
[611]540    vmm_attach_vseg_to_vsl( &child_process->vmm , vseg );
[408]541
[564]542#if (DEBUG_THREAD_USER_FORK & 1)
543if( DEBUG_THREAD_USER_FORK < cycle )
[593]544printk("\n[%s] thread[%x,%x] created stack vseg for thread %x in process %x\n",
545__FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
[564]546#endif
547
[408]548    // copy all valid STACK GPT entries   
549    vpn_t          vpn;
550    bool_t         mapped;
551    ppn_t          ppn;
552    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
553    {
554        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
555                                  parent_gpt_xp,
556                                  vpn,
557                                  true,                 // set cow
558                                  &ppn,
559                                  &mapped );
560        if( error )
561        {
[611]562            vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );
[408]563            thread_release( child_ptr );
564            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
565            return -1;
566        }
567
[433]568        // increment pending forks counter for the page if mapped
[408]569        if( mapped )
570        {
[469]571            // get pointers on the page descriptor
[408]572            xptr_t   page_xp  = ppm_ppn2page( ppn );
573            cxy_t    page_cxy = GET_CXY( page_xp );
[469]574            page_t * page_ptr = GET_PTR( page_xp );
575
576            // get extended pointers on forks and lock fields
577            xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
578            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
579
[564]580            // get lock protecting page
581            remote_busylock_acquire( lock_xp ); 
582
583            // increment the forks counter in page descriptor
[473]584            hal_remote_atomic_add( forks_xp , 1 );
[408]585
[564]586            // release lock protecting page
587            remote_busylock_release( lock_xp ); 
588
[438]589#if (DEBUG_THREAD_USER_FORK & 1)
[433]590cycle = (uint32_t)hal_get_cycles();
[438]591if( DEBUG_THREAD_USER_FORK < cycle )
[593]592printk("\n[%s] thread[%x,%x] copied one PTE to child GPT : vpn %x / forks %d\n",
593__FUNCTION__, this->process->pid, this->trdid, 
594vpn, hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
[433]595#endif
[408]596
597        }
598    }
599
[433]600    // set COW flag for all mapped entries of STACK vseg in parent thread GPT
601    hal_gpt_set_cow( parent_gpt_xp,
602                     vpn_base,
603                     vpn_size );
[408]604 
[438]605#if DEBUG_THREAD_USER_FORK
[433]606cycle = (uint32_t)hal_get_cycles();
[438]607if( DEBUG_THREAD_USER_FORK < cycle )
[593]608printk("\n[%s] thread[%x,%x] exit / child_thread %x / cycle %d\n",
609__FUNCTION__, this->process->pid, this->trdid, child_ptr, cycle );
[433]610#endif
[407]611
[1]612        return 0;
[5]613
[296]614}  // end thread_user_fork()
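/////////////////////////////////////////////////////////////////////////////////////
// Note on the copy-on-write protocol implemented above : after thread_user_fork(),
// each mapped page of the parent stack is referenced by both GPTs with the COW
// flag set, and its <forks> counter records the number of child references.
// The first write access (by parent or child) raises a page-fault, and the
// page-fault handler is expected to allocate a private copy of the page and to
// decrement <forks> (this handler belongs to the VMM code, not to this file).
/////////////////////////////////////////////////////////////////////////////////////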
615
[457]616////////////////////////////////////////////////
617error_t thread_user_exec( void     * entry_func,
618                          uint32_t   argc,
619                          char    ** argv )
620{
621    thread_t  * thread  = CURRENT_THREAD;
622    process_t * process = thread->process;
623
624#if DEBUG_THREAD_USER_EXEC
625uint32_t cycle = (uint32_t)hal_get_cycles();
626if( DEBUG_THREAD_USER_EXEC < cycle )
[593]627printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
628__FUNCTION__, process->pid, thread->trdid, cycle );
[457]629#endif
630
[564]631// check parent thread attributes
632assert( (thread->type == THREAD_USER )          , "bad type" );
633assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
634assert( (thread->busylocks == 0)                , "bad busylocks" );
[457]635
636        // re-initialize various thread descriptor fields
637    thread->quantum         = 0;            // TODO
638    thread->ticks_nr        = 0;            // TODO
639    thread->time_last_check = 0;            // TODO
640
641    thread->entry_func      = entry_func;
642    thread->main_argc       = argc; 
643    thread->main_argv       = argv;
644
645    // the main thread is always detached
646    thread->flags           = THREAD_FLAG_DETACHED;
647    thread->blocked         = 0;
648    thread->errno           = 0;
649    thread->fork_user       = 0;    // not inherited
650    thread->fork_cxy        = 0;    // not inherited
651
[564]652    // re-initialize busylocks counters
653    thread->busylocks       = 0;
654
[457]655    // reset thread info
656    memset( &thread->info , 0 , sizeof(thread_info_t) );
657
[564]658    // re-initialize join_lock
659    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
[457]660
661    // allocate an user stack vseg for main thread
662    vseg_t * vseg = vmm_create_vseg( process,
663                                     VSEG_TYPE_STACK,
664                                     0,                 // size unused
665                                     0,                 // length unused
666                                     0,                 // file_offset unused
667                                     0,                 // file_size unused
668                                     XPTR_NULL,         // mapper_xp unused
669                                     local_cxy );
670    if( vseg == NULL )
671    {
672            printk("\n[ERROR] in %s : cannot create stack vseg for main thread\n", __FUNCTION__ );
673                return -1;
674    }
675
[469]676    // update user stack in thread descriptor
[457]677    thread->u_stack_base = vseg->min;
678    thread->u_stack_size = vseg->max - vseg->min;
679   
680    // release FPU ownership if required
681    if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;
682
683    // re-initialize  FPU context
684    hal_fpu_context_init( thread );
685
686#if DEBUG_THREAD_USER_EXEC
687cycle = (uint32_t)hal_get_cycles();
688if( DEBUG_THREAD_USER_EXEC < cycle )
[593]689printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
690__FUNCTION__, process->pid, thread->trdid, cycle );
[624]691hal_vmm_display( process , true );
[457]692#endif
693
694    // re-initialize CPU context... and jump to user code
695        hal_cpu_context_exec( thread );
696
[564]697    assert( false, "we should not execute this code");
[457]698 
699    return 0;
700
701}  // end thread_user_exec()
702
[1]703/////////////////////////////////////////////////////////
704error_t thread_kernel_create( thread_t     ** new_thread,
705                              thread_type_t   type,
[171]706                              void          * func,
707                              void          * args,
[1]708                                              lid_t           core_lid )
709{
710    error_t        error;
[14]711        thread_t     * thread;       // pointer on new thread descriptor
[1]712
[593]713    thread_t * this = CURRENT_THREAD; 
[1]714
[593]715assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
716"illegal thread type" );
[1]717
[593]718assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
719"illegal core_lid" );
720
[438]721#if DEBUG_THREAD_KERNEL_CREATE
[593]722uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]723if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[593]724printk("\n[%s] thread[%x,%x] enter / requested_type %s / cycle %d\n",
725__FUNCTION__, this->process->pid, this->trdid, thread_type_str(type), cycle );
[433]726#endif
727
[171]728    // allocate memory for new thread descriptor
[14]729    thread = thread_alloc();
730
[581]731    if( thread == NULL )
732    {
733        printk("\n[ERROR] in %s : thread %x in process %x\n"
734        "   no memory for thread descriptor\n",
[593]735        __FUNCTION__, this->trdid, this->process->pid );
[581]736        return ENOMEM;
737    }
[14]738
[171]739    // initialize thread descriptor
[14]740    error = thread_init( thread,
741                         &process_zero,
742                         type,
743                         func,
744                         args,
745                         core_lid,
746                         0 , 0 );  // no user stack for a kernel thread
747
[171]748    if( error ) // release allocated memory for thread descriptor
[1]749    {
[581]750        printk("\n[ERROR] in %s : thread %x in process %x\n"
751        "   cannot initialize thread descriptor\n",
[593]752        __FUNCTION__, this->trdid, this->process->pid );
[185]753        thread_release( thread );
[457]754        return ENOMEM;
[1]755    }
756
[171]757    // allocate & initialize CPU context
[457]758        error = hal_cpu_context_alloc( thread );
[581]759
[457]760    if( error )
761    {
[581]762        printk("\n[ERROR] in %s : thread %x in process %x\n"
[593]763        "    cannot create CPU context\n",
764        __FUNCTION__, this->trdid, this->process->pid );
[457]765        thread_release( thread );
766        return EINVAL;
767    }
[581]768
[457]769    hal_cpu_context_init( thread );
[14]770
[583]771    // set THREAD_BLOCKED_IDLE for DEV threads
772    if( type == THREAD_DEV ) thread->blocked |= THREAD_BLOCKED_IDLE;
[457]773
[438]774#if DEBUG_THREAD_KERNEL_CREATE
[433]775cycle = (uint32_t)hal_get_cycles();
[438]776if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[593]777printk("\n[%s] thread[%x,%x] exit / new_thread %x / type %s / cycle %d\n",
778__FUNCTION__, this->process->pid, this->trdid, thread, thread_type_str(type), cycle );
[433]779#endif
[1]780
[171]781    *new_thread = thread;
[1]782        return 0;
[5]783
[296]784} // end thread_kernel_create()
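/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (not compiled in the kernel) : creating and activating one RPC
// server thread on a given core. The <rpc_server_func> entry point is a
// placeholder name; the actual RPC thread function is defined in rpc.c.
/////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t rpc_thread_create_example( lid_t  core_lid,
                                          void * rpc_server_func )
{
    thread_t * new;
    error_t    error;

    error = thread_kernel_create( &new,
                                  THREAD_RPC,
                                  rpc_server_func,
                                  NULL,              // no argument
                                  core_lid );
    if( error ) return error;

    // as for user threads, a new kernel thread is created blocked
    // (THREAD_BLOCKED_GLOBAL), and must be unblocked to be scheduled
    thread_unblock( XPTR( local_cxy , new ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}
#endif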
785
[457]786//////////////////////////////////////////////
787void thread_idle_init( thread_t      * thread,
788                       thread_type_t   type,
789                       void          * func,
790                       void          * args,
791                           lid_t           core_lid )
[14]792{
793
[564]794// check arguments
795assert( (type == THREAD_IDLE) , "illegal thread type" );
796assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
797
[457]798    // initialize thread descriptor
[14]799    error_t  error = thread_init( thread,
800                                  &process_zero,
801                                  type,
802                                  func,
803                                  args,
804                                  core_lid,
805                                  0 , 0 );   // no user stack for a kernel thread
806
[492]807    assert( (error == 0), "cannot create thread idle" );
[457]808
[14]809    // allocate & initialize CPU context if success
[457]810    error = hal_cpu_context_alloc( thread );
[171]811
[492]812    assert( (error == 0), "cannot allocate CPU context" );
[14]813
[457]814    hal_cpu_context_init( thread );
815
[438]816}  // end thread_idle_init()
[407]817
[1]818///////////////////////////////////////////////////////////////////////////////////////
819// TODO: check that all memory dynamically allocated during thread execution
[583]820// has been released => check vmm destroy for MMAP vsegs  [AG]
[1]821///////////////////////////////////////////////////////////////////////////////////////
[583]822void thread_destroy( thread_t * thread )
[1]823{
[409]824    reg_t        save_sr;
[1]825
826    process_t  * process    = thread->process;
827    core_t     * core       = thread->core;
828
[438]829#if DEBUG_THREAD_DESTROY
[583]830uint32_t   cycle = (uint32_t)hal_get_cycles();
831thread_t * this  = CURRENT_THREAD;
[438]832if( DEBUG_THREAD_DESTROY < cycle )
[593]833printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
[583]834__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[433]835#endif
[1]836
[583]837    // check busylocks counter
838    thread_assert_can_yield( thread , __FUNCTION__ );
[171]839
[1]840    // update instrumentation values
[408]841        process->vmm.pgfault_nr += thread->info.pgfault_nr;
[1]842
843    // release memory allocated for CPU context and FPU context
844        hal_cpu_context_destroy( thread );
[409]845        if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );
[1]846       
[428]847    // release FPU ownership if required
[409]848        hal_disable_irq( &save_sr );
[1]849        if( core->fpu_owner == thread )
850        {
851                core->fpu_owner = NULL;
852                hal_fpu_disable();
853        }
[409]854        hal_restore_irq( save_sr );
[1]855
856    // invalidate thread descriptor
857        thread->signature = 0;
858
859    // release memory for thread descriptor
[23]860    thread_release( thread );
[1]861
[438]862#if DEBUG_THREAD_DESTROY
[433]863cycle = (uint32_t)hal_get_cycles();
[438]864if( DEBUG_THREAD_DESTROY < cycle )
[593]865printk("\n[%s] thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
[583]866__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
[433]867#endif
[1]868
[407]869}   // end thread_destroy()
870
[416]871//////////////////////////////////////////////////
872inline void thread_set_req_ack( thread_t * target,
873                                uint32_t * rsp_count )
[1]874{
[409]875    reg_t    save_sr;   // for critical section
876
[416]877    // get pointer on target thread scheduler
878    scheduler_t * sched = &target->core->scheduler;
[409]879
[416]880    // wait scheduler ready to handle a new request
881    while( sched->req_ack_pending ) asm volatile( "nop" );
[409]882   
883    // enter critical section
884    hal_disable_irq( &save_sr );
885     
[416]886    // set request in target thread scheduler
887    sched->req_ack_pending = true;
[409]888
[416]889    // set ack request in target thread "flags"
890    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );
[409]891
[416]892    // set pointer on responses counter in target thread
893    target->ack_rsp_count = rsp_count;
[409]894   
895    // exit critical section
896    hal_restore_irq( save_sr );
897
[407]898    hal_fence();
[171]899
[416]900}  // thread_set_req_ack()
[409]901
[416]902/////////////////////////////////////////////////////
903inline void thread_reset_req_ack( thread_t * target )
[1]904{
[409]905    reg_t    save_sr;   // for critical section
906
907    // get pointer on target thread scheduler
[416]908    scheduler_t * sched = &target->core->scheduler;
[409]909
910    // check signal pending in scheduler
[492]911    assert( sched->req_ack_pending , "no pending signal" );
[409]912   
913    // enter critical section
914    hal_disable_irq( &save_sr );
915     
916    // reset signal in scheduler
[416]917    sched->req_ack_pending = false;
[409]918
919    // reset signal in thread "flags"
[416]920    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );
[409]921
922    // reset pointer on responses counter
[416]923    target->ack_rsp_count = NULL;
[409]924   
925    // exit critical section
926    hal_restore_irq( save_sr );
927
[407]928    hal_fence();
[171]929
[416]930}  // thread_reset_req_ack()
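/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (not compiled in the kernel) : the req/ack protocol implemented by
// the two functions above. A requester registers one shared response counter in
// each target thread and polls it; each target scheduler is expected to decrement
// the counter and to call thread_reset_req_ack() when it handles the
// THREAD_FLAG_REQ_ACK request (this is done in the scheduler code, not shown here).
/////////////////////////////////////////////////////////////////////////////////////
#if 0
static void req_ack_example( thread_t ** targets,
                             uint32_t    nb_targets )
{
    uint32_t   i;
    uint32_t   rsp_count = nb_targets;    // one expected response per target

    // post one acknowledge request per target thread
    for( i = 0 ; i < nb_targets ; i++ )
    {
        thread_set_req_ack( targets[i] , &rsp_count );
    }

    // wait until all target schedulers have acknowledged
    while( rsp_count ) asm volatile( "nop" );
}
#endif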
[409]931
[436]932//////////////////////////////////////
933void thread_block( xptr_t   thread_xp,
934                   uint32_t cause )
[407]935{
[436]936    // get thread cluster and local pointer
937    cxy_t      cxy = GET_CXY( thread_xp );
938    thread_t * ptr = GET_PTR( thread_xp );
939
[407]940    // set blocking cause
[436]941    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
[407]942    hal_fence();
943
[438]944#if DEBUG_THREAD_BLOCK
[457]945uint32_t    cycle   = (uint32_t)hal_get_cycles();
946process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
[593]947thread_t  * this    = CURRENT_THREAD;
[438]948if( DEBUG_THREAD_BLOCK < cycle )
[593]949printk("\n[%s] thread[%x,%x] blocked thread %x in process %x / cause %x\n",
950__FUNCTION__, this->process->pid, this->trdid,
[564]951ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
[433]952#endif
953
[407]954} // end thread_block()
955
[433]956////////////////////////////////////////////
957uint32_t thread_unblock( xptr_t   thread_xp,
[407]958                         uint32_t cause )
959{
960    // get thread cluster and local pointer
[433]961    cxy_t      cxy = GET_CXY( thread_xp );
962    thread_t * ptr = GET_PTR( thread_xp );
[407]963
964    // reset blocking cause
965    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
966    hal_fence();
967
[438]968#if DEBUG_THREAD_BLOCK
[457]969uint32_t    cycle   = (uint32_t)hal_get_cycles();
970process_t * process = hal_remote_lpt( XPTR( cxy , &ptr->process ) );
[593]971thread_t  * this    = CURRENT_THREAD;
[438]972if( DEBUG_THREAD_BLOCK < cycle )
[593]973printk("\n[%s] thread[%x,%x] unblocked thread %x in process %x / cause %x\n",
974__FUNCTION__, this->process->pid, this->trdid,
[564]975ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
[433]976#endif
977
[446]978    // return a non zero value if the cause bit is modified
979    return( previous & cause );
[436]980
[446]981}  // end thread_unblock()
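/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (not compiled in the kernel) : thread_block() and thread_unblock()
// take an extended pointer, so they can be applied to a thread running in any
// cluster. A typical pattern blocks the current thread, deschedules, and lets
// another thread reset the blocking bit (the cause value below is illustrative).
/////////////////////////////////////////////////////////////////////////////////////
#if 0
// waiting side : a thread blocks itself, then deschedules
thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_GLOBAL );
sched_yield( "waiting to be unblocked" );

// waking side (possibly in another cluster), <waiter_xp> being the extended
// pointer on the waiting thread : a non-zero return value confirms that the
// THREAD_BLOCKED_GLOBAL bit was actually set, and has been reset
uint32_t previous = thread_unblock( waiter_xp , THREAD_BLOCKED_GLOBAL );
#endif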
[407]982
[440]983//////////////////////////////////////
984void thread_delete( xptr_t  target_xp,
985                    pid_t   pid,
986                    bool_t  is_forced )
987{
988    reg_t       save_sr;                // for critical section
989    bool_t      target_join_done;       // joining thread arrived first
990    bool_t      target_attached;        // target thread attached
991    xptr_t      killer_xp;              // extended pointer on killer thread (this)
992    thread_t  * killer_ptr;             // pointer on killer thread (this)
993    cxy_t       target_cxy;             // target thread cluster     
994    thread_t  * target_ptr;             // pointer on target thread
995    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
996    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
997    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
998    trdid_t     target_trdid;           // target thread identifier
999    ltid_t      target_ltid;            // target thread local index
1000    xptr_t      joining_xp;             // extended pointer on joining thread
1001    thread_t  * joining_ptr;            // pointer on joining thread
1002    cxy_t       joining_cxy;            // joining thread cluster
1003
[564]1004    // get target thread cluster and local pointer
[440]1005    target_cxy      = GET_CXY( target_xp );
1006    target_ptr      = GET_PTR( target_xp );
[564]1007
1008    // get target thread identifiers, and attached flag
1009    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
[440]1010    target_ltid     = LTID_FROM_TRDID( target_trdid );
1011    target_flags_xp = XPTR( target_cxy , &target_ptr->flags ); 
[564]1012    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
[440]1013
1014    // get killer thread pointers
1015    killer_ptr = CURRENT_THREAD;
1016    killer_xp  = XPTR( local_cxy , killer_ptr );
1017
1018#if DEBUG_THREAD_DELETE
[564]1019uint32_t cycle  = (uint32_t)hal_get_cycles();
[440]1020if( DEBUG_THREAD_DELETE < cycle )
[593]1021printk("\n[%s] killer[%x,%x] enters / target[%x,%x] / cycle %d\n",
[583]1022__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid, 
1023target_ptr->process->pid, target_ptr->trdid, cycle );
[440]1024#endif
1025
[564]1026// check target thread is not the main thread, because the main thread
1027// must be deleted by the parent process sys_wait() function
1028assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
1029"tharget thread cannot be the main thread\n" );
1030
[583]1031    // check killer thread can yield
1032    thread_assert_can_yield( killer_ptr , __FUNCTION__ ); 
[440]1033
[583]1034    // if the target thread is attached, we must synchronize with the joining thread
1035    // before blocking and marking the target thread for deletion.
1036
1037    if( target_attached && (is_forced == false) ) // synchronize with joining thread
[564]1038    {
[440]1039        // build extended pointers on target thread join fields
1040        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
1041        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
1042
1043        // enter critical section
1044        hal_disable_irq( &save_sr );
1045
1046        // take the join_lock in target thread descriptor
[564]1047        remote_busylock_acquire( target_join_lock_xp );
[440]1048
1049        // get join_done from target thread descriptor
[564]1050        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
[440]1051   
[583]1052        if( target_join_done )                     // joining thread arrived first
[440]1053        {
1054            // get extended pointer on joining thread
[564]1055            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
[440]1056            joining_ptr = GET_PTR( joining_xp );
1057            joining_cxy = GET_CXY( joining_xp );
1058           
1059            // reset the join_done flag in target thread
1060            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
1061
1062            // unblock the joining thread
1063            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
1064
1065            // release the join_lock in target thread descriptor
[564]1066            remote_busylock_release( target_join_lock_xp );
[440]1067
[583]1068            // block the target thread
1069            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1070
[564]1071            // set the REQ_DELETE flag in target thread descriptor
1072            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1073
[583]1074            // exit critical section
[440]1075            hal_restore_irq( save_sr );
[564]1076
[583]1077#if DEBUG_THREAD_DELETE
1078cycle  = (uint32_t)hal_get_cycles();
[564]1079if( DEBUG_THREAD_DELETE < cycle )
[593]1080printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
[583]1081__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1082target_ptr->process->pid, target_ptr->trdid, cycle );
[564]1083#endif
[583]1084
1085        }
1086        else                                      // killer thread arrived first
1087        {
[440]1088            // set the kill_done flag in target thread
1089            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1090
1091            // block this thread on BLOCKED_JOIN
1092            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1093
1094            // set extended pointer on killer thread in target thread
[564]1095            hal_remote_s64( target_join_xp_xp , killer_xp );
[440]1096
1097            // release the join_lock in target thread descriptor
[564]1098            remote_busylock_release( target_join_lock_xp );
[440]1099
[583]1100#if DEBUG_THREAD_DELETE
1101cycle  = (uint32_t)hal_get_cycles();
[564]1102if( DEBUG_THREAD_DELETE < cycle )
[593]1103printk("\n[%s] killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
[583]1104__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1105target_ptr->process->pid, target_ptr->trdid, cycle );
[564]1106#endif
[440]1107            // deschedule
1108            sched_yield( "killer thread wait joining thread" );
1109
[583]1110            // block the target thread
1111            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1112
[564]1113            // set the REQ_DELETE flag in target thread descriptor
1114            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1115
[583]1116            // exit critical section
[440]1117            hal_restore_irq( save_sr );
[583]1118
1119#if DEBUG_THREAD_DELETE
1120cycle  = (uint32_t)hal_get_cycles();
1121if( DEBUG_THREAD_DELETE < cycle )
[593]1122printk("\n[%s] killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
[583]1123__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1124target_ptr->process->pid, target_ptr->trdid, cycle );
1125#endif
1126
[440]1127        }
[564]1128    }
[583]1129    else                     // no synchronization with joining thread required
[564]1130    {
[583]1131        // block the target thread
1132        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1133
[564]1134        // set the REQ_DELETE flag in target thread descriptor
1135        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
[440]1136
1137#if DEBUG_THREAD_DELETE
1138cycle  = (uint32_t)hal_get_cycles();
1139if( DEBUG_THREAD_DELETE < cycle )
[593]1140printk("\n[%s] killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
[583]1141__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1142target_ptr->process->pid, target_ptr->trdid, cycle );
[440]1143#endif
1144
[583]1145    }
[440]1146}  // end thread_delete()
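/////////////////////////////////////////////////////////////////////////////////////
// Summary of the synchronisation implemented above, for an attached target thread
// and (is_forced == false) :
// - if the joining thread arrived first (JOIN_DONE set), the killer unblocks it,
//   then blocks the target and sets its REQ_DELETE flag;
// - if the killer arrives first, it sets KILL_DONE, registers itself in the
//   target <join_xp>, blocks itself on THREAD_BLOCKED_JOIN and deschedules;
//   once unblocked by the joining thread, it blocks and marks the target.
// In both cases, the actual release of the target descriptor is expected to be
// done later by the target scheduler, when it handles the REQ_DELETE request.
/////////////////////////////////////////////////////////////////////////////////////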
1147
1148
1149
[564]1150/////////////////////////////
[485]1151void thread_idle_func( void )
[1]1152{
1153    while( 1 )
1154    {
[408]1155        // unmask IRQs
1156        hal_enable_irq( NULL );
1157
[443]1158        // force core to low-power mode (optional)
[583]1159        if( CONFIG_SCHED_IDLE_MODE_SLEEP ) 
[407]1160        {
[1]1161
[564]1162#if DEBUG_THREAD_IDLE
1163{ 
1164uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1165if( DEBUG_THREAD_IDLE < cycle )
[593]1166printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
[446]1167__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
[564]1168}
[433]1169#endif
[1]1170
[407]1171            hal_core_sleep();
[1]1172
[564]1173#if DEBUG_THREAD_IDLE
1174{
1175uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1176if( DEBUG_THREAD_IDLE < cycle )
[593]1177printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n",
[531]1178__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
[564]1179}
[433]1180#endif
[407]1181
1182        }
[443]1183
[446]1184#if DEBUG_THREAD_IDLE
[564]1185{
1186uint32_t cycle = (uint32_t)hal_get_cycles();
1187if( DEBUG_THREAD_IDLE < cycle )
[446]1188sched_display( CURRENT_THREAD->core->lid );
[564]1189}
[446]1190#endif     
[564]1191        // search a runnable thread
1192        sched_yield( "running idle thread" );
[446]1193
[564]1194    } // end while
1195
[407]1196}  // end thread_idle_func()
[1]1197
[407]1198
[473]1199///////////////////////////////////////////
1200void thread_time_update( thread_t * thread,
[564]1201                         bool_t     is_user )
[16]1202{
[473]1203    cycle_t current_cycle;   // current cycle counter value
1204    cycle_t last_cycle;      // last cycle counter value
[1]1205
[473]1206    // get pointer on thread_info structure
1207    thread_info_t * info = &thread->info;
1208
1209    // get last cycle counter value
1210    last_cycle = info->last_cycle;
1211
1212    // get current cycle counter value
1213    current_cycle = hal_get_cycles();
1214
1215    // update thread_info structure
1216    info->last_cycle = current_cycle;
1217
1218    // update time in thread_info
1219    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
1220    else          info->sys_cycles += (current_cycle - last_cycle);
[16]1221
[564]1222}  // end thread_time_update()
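/////////////////////////////////////////////////////////////////////////////////////
// Note : with this accounting scheme, <usr_cycles> and <sys_cycles> partition the
// elapsed cycles of a thread, provided thread_time_update() is called at each
// user/kernel boundary crossing and at each context switch (the callers belong
// to the HAL and scheduler code, not to this file).
/////////////////////////////////////////////////////////////////////////////////////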
1223
[23]1224/////////////////////////////////////
1225xptr_t thread_get_xptr( pid_t    pid,
1226                        trdid_t  trdid )
1227{
1228    cxy_t         target_cxy;          // target thread cluster identifier
1229    ltid_t        target_thread_ltid;  // target thread local index
[171]1230    thread_t    * target_thread_ptr;   // target thread local pointer
[23]1231    xptr_t        target_process_xp;   // extended pointer on target process descriptor
[171]1232    process_t   * target_process_ptr;  // local pointer on target process descriptor
[23]1233    pid_t         target_process_pid;  // target process identifier
1234    xlist_entry_t root;                // root of list of process in target cluster
1235    xptr_t        lock_xp;             // extended pointer on lock protecting  this list
[16]1236
[580]1237#if DEBUG_THREAD_GET_XPTR
1238uint32_t cycle  = (uint32_t)hal_get_cycles();
1239thread_t * this = CURRENT_THREAD;
1240if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1241printk("\n[%s] thread %x in process %x enters / pid %x / trdid %x / cycle %d\n",
[580]1242__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1243#endif
1244
[23]1245    // get target cluster identifier and local thread identifier
1246    target_cxy         = CXY_FROM_TRDID( trdid );
1247    target_thread_ltid = LTID_FROM_TRDID( trdid );
1248
[436]1249    // check trdid argument
[564]1250        if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 
[436]1251        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
1252
[23]1253    // get root of list of process descriptors in target cluster
1254    hal_remote_memcpy( XPTR( local_cxy  , &root ),
1255                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
1256                       sizeof(xlist_entry_t) );
1257
[564]1258    // get extended pointer on lock protecting the list of local processes
[23]1259    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
1260
1261    // take the lock protecting the list of processes in target cluster
[564]1262    remote_queuelock_acquire( lock_xp );
[23]1263
[580]1264#if( DEBUG_THREAD_GET_XPTR & 1 )
1265if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1266printk("\n[%s] scan processes in cluster %x :\n", __FUNCTION__, target_cxy );
[580]1267#endif
1268
[564]1269    // scan the list of local processes in target cluster
[23]1270    xptr_t  iter;
1271    bool_t  found = false;
1272    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
1273    {
1274        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[469]1275        target_process_ptr = GET_PTR( target_process_xp );
[564]1276        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
[580]1277
1278#if( DEBUG_THREAD_GET_XPTR & 1 )
1279if( DEBUG_THREAD_GET_XPTR < cycle )
1280printk(" - process %x\n", target_process_pid );
1281#endif
1282
[23]1283        if( target_process_pid == pid )
1284        {
1285            found = true;
1286            break;
1287        }
1288    }
1289
1290    // release the lock protecting the list of processes in target cluster
[564]1291    remote_queuelock_release( lock_xp );
[23]1292
[436]1293    // check PID found
[580]1294    if( found == false ) 
1295    {
[23]1296
[580]1297#if( DEBUG_THREAD_GET_XPTR & 1 )
1298if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1299printk("\n[%s] pid %x not found in cluster %x\n",
[580]1300__FUNCTION__, pid, target_cxy );
1301#endif
1302        return XPTR_NULL;
1303    }
1304
[23]1305    // get target thread local pointer
1306    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
[171]1307    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
[23]1308
[580]1309    if( target_thread_ptr == NULL )
1310    {
[23]1311
[580]1312#if( DEBUG_THREAD_GET_XPTR & 1 )
1313if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1314printk("\n[%s] thread %x not registered in process %x in cluster %x\n",
[580]1315__FUNCTION__, trdid, pid, target_cxy );
1316#endif
1317        return XPTR_NULL;
1318    }
1319
1320#if DEBUG_THREAD_GET_XPTR
1321cycle  = (uint32_t)hal_get_cycles();
1322if( DEBUG_THREAD_GET_XPTR < cycle )
[593]1323printk("\n[%s] thread %x in process %x exit / pid %x / trdid %x / cycle %d\n",
[580]1324__FUNCTION__, this->trdid, this->process->pid, pid, trdid, cycle );
1325#endif
1326
[23]1327    return XPTR( target_cxy , target_thread_ptr );
[564]1328
1329}  // end thread_get_xptr()
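/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (not compiled in the kernel) : thread_get_xptr() turns the
// (pid , trdid) pair received from user space into an extended pointer usable by
// the remote accessors, as in this hypothetical kill helper.
/////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t thread_kill_example( pid_t   pid,
                                    trdid_t trdid )
{
    xptr_t thread_xp = thread_get_xptr( pid , trdid );

    if( thread_xp == XPTR_NULL ) return EINVAL;     // no such thread

    thread_block( thread_xp , THREAD_BLOCKED_GLOBAL );

    return 0;
}
#endif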
1330
1331///////////////////////////////////////////////////
1332void thread_assert_can_yield( thread_t    * thread,
1333                              const char  * func_str )
1334{
1335    // does nothing if thread does not hold any busylock
1336
1337    if( thread->busylocks )
1338    {
1339        // get pointers on TXT0 chdev
1340        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1341        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1342        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1343
1344        // get extended pointer on TXT0 lock
1345        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1346
1347        // get TXT0 lock
1348        remote_busylock_acquire( txt0_lock_xp );
1349
1350        // display error message on TXT0
[593]1351        nolock_printk("\n[PANIC] in %s / thread[%x,%x] cannot yield : "
[580]1352        "hold %d busylock(s) / cycle %d\n",
[593]1353        func_str, thread->process->pid, thread->trdid,
[624]1354        thread->busylocks - 1, (uint32_t)hal_get_cycles() );  // -1 : don't count the TXT0 busylock taken above
[564]1355
1356#if DEBUG_BUSYLOCK
[580]1357
[583]1358// scan list of busylocks
1359xptr_t    iter_xp;
[580]1360xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
1361XLIST_FOREACH( root_xp , iter_xp )
[564]1362{
[580]1363    xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1364    cxy_t        lock_cxy  = GET_CXY( lock_xp );
1365    busylock_t * lock_ptr  = GET_PTR( lock_xp );
1366    uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
1367    nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
1368}
[564]1369
1370#endif
[23]1371
[564]1372        // release TXT0 lock
1373        remote_busylock_release( txt0_lock_xp );
1374
1375        // suicide
1376        hal_core_sleep();
1377    }
1378}  // end thread_assert_can_yield()
1379
[619]1380//////////////////////////////////////////////////////
1381void thread_display_busylocks( xptr_t       thread_xp,
1382                               const char * string )
[564]1383{
[623]1384
[581]1385    cxy_t      thread_cxy = GET_CXY( thread_xp );
1386    thread_t * thread_ptr = GET_PTR( thread_xp );
[564]1387
[623]1388#if DEBUG_BUSYLOCK
[564]1389
[623]1390    xptr_t     iter_xp;
[564]1391
[623]1392    // get relevant info from target thread descriptor
[619]1393    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
1394    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
1395    process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
1396    pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
[564]1397
[581]1398    // get extended pointer on root of busylocks
[619]1399    xptr_t root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
[564]1400
[581]1401    // get pointers on TXT0 chdev
1402    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1403    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1404    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[580]1405
[581]1406    // get extended pointer on remote TXT0 lock
1407    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[580]1408
[581]1409    // get TXT0 lock
1410    remote_busylock_acquire( txt0_lock_xp );
[580]1411
[581]1412    // display header
[619]1413    nolock_printk("\n***** thread[%x,%x] in <%s> : %d busylocks *****\n",
1414    pid, trdid, string, locks );
[581]1415
1416    // scan the xlist of busylocks when required
1417    if( locks )
1418    {
1419        XLIST_FOREACH( root_xp , iter_xp )
[580]1420        {
[581]1421            xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
1422            cxy_t        lock_cxy  = GET_CXY( lock_xp );
1423            busylock_t * lock_ptr  = GET_PTR( lock_xp );
1424            uint32_t     lock_type = hal_remote_l32(XPTR( lock_cxy , &lock_ptr->type ));
1425            nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
[580]1426        }
[581]1427    }
[580]1428
[581]1429    // release TXT0 lock
1430    remote_busylock_release( txt0_lock_xp );
1431
[623]1432#else
[581]1433
[623]1434printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
1435__FUNCTION__, string, thread_cxy, thread_ptr );
1436
[581]1437#endif
1438
[623]1439    return;
[581]1440
[580]1441}  // end thread_display_busylocks()
[581]1442