source: trunk/kernel/kern/thread.c @ 439

Last change on this file since 439 was 438, checked in by alain, 6 years ago

Fix a bug in scheduler related to RPC blocking.

File size: 38.4 KB
RevLine 
[1]1/*
2 * thread.c -  implementation of thread operations (user & kernel)
[171]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
[23]5 *         Alain Greiner (2016,2017)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
[5]9 * This file is part of ALMOS-MKH.
[1]10 *
[5]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[5]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
[5]21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[1]26#include <hal_types.h>
27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
31#include <memcpy.h>
32#include <printk.h>
33#include <cluster.h>
34#include <process.h>
35#include <scheduler.h>
[188]36#include <dev_pic.h>
[1]37#include <core.h>
38#include <list.h>
39#include <xlist.h>
40#include <page.h>
41#include <kmem.h>
42#include <ppm.h>
43#include <thread.h>
44
45//////////////////////////////////////////////////////////////////////////////////////
46// Extern global variables
47//////////////////////////////////////////////////////////////////////////////////////
48
49extern process_t      process_zero;
50
51//////////////////////////////////////////////////////////////////////////////////////
[16]52// This function returns a printable string for the thread type.
[1]53//////////////////////////////////////////////////////////////////////////////////////
[5]54char * thread_type_str( uint32_t type )
55{
[296]56    if     ( type == THREAD_USER   ) return "USR";
[16]57    else if( type == THREAD_RPC    ) return "RPC";
58    else if( type == THREAD_DEV    ) return "DEV";
[296]59    else if( type == THREAD_IDLE   ) return "IDL";
[5]60    else                             return "undefined";
61}
62
[1]63/////////////////////////////////////////////////////////////////////////////////////
[14]64// This static function allocates physical memory for a thread descriptor.
65// It can be called by the three functions:
[1]66// - thread_user_create()
[14]67// - thread_user_fork()
[1]68// - thread_kernel_create()
69/////////////////////////////////////////////////////////////////////////////////////
[14]70// @ return pointer on thread descriptor if success / return NULL if failure.
[1]71/////////////////////////////////////////////////////////////////////////////////////
[14]72static thread_t * thread_alloc()
[1]73{
[23]74        page_t       * page;   // pointer on page descriptor containing thread descriptor
[171]75        kmem_req_t     req;    // kmem request
[1]76
77        // allocates memory for thread descriptor + kernel stack
78        req.type  = KMEM_PAGE;
[14]79        req.size  = CONFIG_THREAD_DESC_ORDER;
[1]80        req.flags = AF_KERNEL | AF_ZERO;
81        page      = kmem_alloc( &req );
82
[23]83        if( page == NULL ) return NULL;
[1]84
[315]85    // return pointer on new thread descriptor
86    xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
87    return (thread_t *)GET_PTR( base_xp );
88
89}  // end thread_alloc()
90 
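///////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only, not part of this file): every caller pairs
// thread_alloc() with thread_release() on its error paths, as thread_user_create()
// does below:
//
//     thread_t * thread = thread_alloc();
//     if( thread == NULL ) return ENOMEM;
//     ...
//     if( error )
//     {
//         thread_release( thread );
//         return EINVAL;
//     }
///////////////////////////////////////////////////////////////////////////////////////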
91
[14]92/////////////////////////////////////////////////////////////////////////////////////
[23]93// This static function releases the physical memory for a thread descriptor.
[53]94// It is called on the error paths of the three creation functions:
[23]95// - thread_user_create()
96// - thread_user_fork()
97// - thread_kernel_create()
// and by thread_destroy() when a thread terminates.
98/////////////////////////////////////////////////////////////////////////////////////
99// @ thread  : pointer on thread descriptor.
100/////////////////////////////////////////////////////////////////////////////////////
101static void thread_release( thread_t * thread )
102{
103    kmem_req_t   req;
104
[315]105    xptr_t page_xp = ppm_base2page( XPTR(local_cxy , thread ) );
106
[23]107    req.type  = KMEM_PAGE;
[315]108    req.ptr   = GET_PTR( page_xp );
[23]109    kmem_free( &req );
110}
111
112/////////////////////////////////////////////////////////////////////////////////////
[14]113// This static function initializes a thread descriptor (kernel or user).
[438]114// It can be called by the four functions:
[14]115// - thread_user_create()
116// - thread_user_fork()
117// - thread_kernel_create()
[438]118// - thread_idle_init()
119// It updates the local DQDT.
[14]120/////////////////////////////////////////////////////////////////////////////////////
121// @ thread       : pointer on thread descriptor
122// @ process      : pointer on process descriptor.
123// @ type         : thread type.
124// @ func         : pointer on thread entry function.
125// @ args         : pointer on thread entry function arguments.
126// @ core_lid     : target core local index.
127// @ u_stack_base : stack base (user thread only)
128// @ u_stack_size : stack size (user thread only)
129/////////////////////////////////////////////////////////////////////////////////////
130static error_t thread_init( thread_t      * thread,
131                            process_t     * process,
132                            thread_type_t   type,
133                            void          * func,
134                            void          * args,
135                            lid_t           core_lid,
136                            intptr_t        u_stack_base,
137                            uint32_t        u_stack_size )
138{
139    error_t        error;
140    trdid_t        trdid;      // allocated thread identifier
141
142        cluster_t    * local_cluster = LOCAL_CLUSTER;
143
144    // register new thread in process descriptor, and get a TRDID
[1]145    error = process_register_thread( process, thread , &trdid );
146
[171]147    if( error )
[1]148    {
[14]149        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
150        return EINVAL;
[1]151    }
[14]152
[407]153    // compute thread descriptor size without kernel stack
154    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4; 
155
[1]156        // Initialize new thread descriptor
157    thread->trdid           = trdid;
[171]158        thread->type            = type;
[1]159    thread->quantum         = 0;            // TODO
160    thread->ticks_nr        = 0;            // TODO
161    thread->time_last_check = 0;
162        thread->core            = &local_cluster->core_tbl[core_lid];
163        thread->process         = process;
164
165    thread->local_locks     = 0;
[409]166    thread->remote_locks    = 0;
[1]167
[409]168#if CONFIG_LOCKS_DEBUG
169    list_root_init( &thread->locks_root ); 
[1]170    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
[409]171#endif
[1]172
[171]173    thread->u_stack_base    = u_stack_base;
[1]174    thread->u_stack_size    = u_stack_size;
[407]175    thread->k_stack_base    = (intptr_t)thread + desc_size;
176    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
[1]177
178    thread->entry_func      = func;         // thread entry point
179    thread->entry_args      = args;         // thread function arguments
[171]180    thread->flags           = 0;            // all flags reset
[1]181    thread->errno           = 0;            // no error detected
[407]182    thread->fork_user       = 0;            // no user defined placement for fork
183    thread->fork_cxy        = 0;            // user defined target cluster for fork
[409]184    thread->blocked         = THREAD_BLOCKED_GLOBAL;
[1]185
186    // reset children list
187    xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
188    thread->children_nr = 0;
189
190    // reset sched list and brothers list
191    list_entry_init( &thread->sched_list );
192    xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );
193
194    // reset thread info
195    memset( &thread->info , 0 , sizeof(thread_info_t) );
196
[409]197    // initializes join_lock
198    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );
199
[1]200    // initialise signature
201        thread->signature = THREAD_SIGNATURE;
202
[408]203    // FIXME call hal_thread_init() function to initialise the save_sr field
204    thread->save_sr = 0xFF13;
205
[171]206    // register new thread in core scheduler
[1]207    sched_register_thread( thread->core , thread );
208
[438]209        // update DQDT
210    dqdt_update_threads( 1 );
211
[1]212        return 0;
213
[296]214} // end thread_init()
215
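///////////////////////////////////////////////////////////////////////////////////////
// Memory layout implied by the desc_size computation above (a sketch, assuming
// CONFIG_THREAD_DESC_SIZE is the total size of the block returned by thread_alloc()):
//
//   (intptr_t)thread              thread + desc_size     thread + CONFIG_THREAD_DESC_SIZE
//        | thread descriptor (ends with signature) |       kernel stack        |
//
// so k_stack_base starts right after the signature field, and k_stack_size is
// whatever remains of the allocated block.
///////////////////////////////////////////////////////////////////////////////////////
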
[1]216/////////////////////////////////////////////////////////
[23]217error_t thread_user_create( pid_t             pid,
218                            void            * start_func,
219                            void            * start_arg,
[1]220                            pthread_attr_t  * attr,
[23]221                            thread_t       ** new_thread )
[1]222{
223    error_t        error;
224        thread_t     * thread;       // pointer on created thread descriptor
225    process_t    * process;      // pointer to local process descriptor
226    lid_t          core_lid;     // selected core local index
[23]227    vseg_t       * vseg;         // stack vseg
[1]228
[407]229    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );
[5]230
[438]231#if DEBUG_THREAD_USER_CREATE
[433]232uint32_t cycle = (uint32_t)hal_get_cycles();
[438]233if( DEBUG_THREAD_USER_CREATE < cycle )
[433]234printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
235__FUNCTION__, CURRENT_THREAD, pid , cycle );
236#endif
[428]237
[23]238    // get process descriptor local copy
239    process = process_get_local_copy( pid );
240    if( process == NULL )
241    {
242                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
243               __FUNCTION__ , pid );
244        return ENOMEM;
245    }
246
[171]247    // select a target core in local cluster
[407]248    if( attr->attributes & PT_ATTR_CORE_DEFINED )
[23]249    {
[407]250        core_lid = attr->lid;
251        if( core_lid >= LOCAL_CLUSTER->cores_nr )
252        {
253                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
254            __FUNCTION__ , core_lid );
255            return EINVAL;
256        }
[23]257    }
[407]258    else
259    {
260        core_lid = cluster_select_local_core();
261    }
[1]262
[171]263    // allocate a stack from local VMM
[407]264    vseg = vmm_create_vseg( process,
265                            VSEG_TYPE_STACK,
266                            0,                 // size unused
267                            0,                 // length unused
268                            0,                 // file_offset unused
269                            0,                 // file_size unused
270                            XPTR_NULL,         // mapper_xp unused
271                            local_cxy );
[1]272
[170]273    if( vseg == NULL )
[23]274    {
275            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
276                return ENOMEM;
[171]277    }
[23]278
[171]279    // allocate memory for thread descriptor
[14]280    thread = thread_alloc();
[1]281
[23]282    if( thread == NULL )
283    {
284            printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
285        vmm_remove_vseg( vseg );
286        return ENOMEM;
287    }
[14]288
[171]289    // initialize thread descriptor
[14]290    error = thread_init( thread,
291                         process,
292                         THREAD_USER,
[23]293                         start_func,
294                         start_arg,
[14]295                         core_lid,
[23]296                         vseg->min,
297                         vseg->max - vseg->min );
[171]298    if( error )
[14]299    {
[23]300            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
301        vmm_remove_vseg( vseg );
302        thread_release( thread );
[14]303        return EINVAL;
304    }
305
306    // set DETACHED flag if required
[407]307    if( attr->attributes & PT_ATTR_DETACH ) 
308    {
309        thread->flags |= THREAD_FLAG_DETACHED;
310    }
[1]311
[171]312    // allocate & initialize CPU context
[407]313        if( hal_cpu_context_create( thread ) )
[23]314    {
315            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
316        vmm_remove_vseg( vseg );
317        thread_release( thread );
318        return ENOMEM;
319    }
320
[407]321    // allocate  FPU context
322    if( hal_fpu_context_alloc( thread ) )
[23]323    {
324            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
325        vmm_remove_vseg( vseg );
326        thread_release( thread );
327        return ENOMEM;
328    }
329
[438]330#if DEBUG_THREAD_USER_CREATE
[433]331cycle = (uint32_t)hal_get_cycles();
[438]332if( DEBUG_THREAD_USER_CREATE < cycle )
[433]333printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
334__FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
335#endif
[1]336
337    *new_thread = thread;
338        return 0;
[14]339
[296]340}  // end thread_user_create()
341
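///////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): a pthread_create() system-call path could use this
// function as follows; the <attr> copy from user space and the final unblock are
// assumptions, since thread_init() leaves the new thread in BLOCKED_GLOBAL state:
//
//     thread_t * new;
//     error = thread_user_create( process->pid , start_func , start_arg , &attr , &new );
//     if( error ) return error;
//     thread_unblock( XPTR( local_cxy , new ) , THREAD_BLOCKED_GLOBAL );
///////////////////////////////////////////////////////////////////////////////////////
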
[408]342///////////////////////////////////////////////////////
343error_t thread_user_fork( xptr_t      parent_thread_xp,
344                          process_t * child_process,
345                          thread_t ** child_thread )
[1]346{
347    error_t        error;
[408]348        thread_t     * child_ptr;        // local pointer on local child thread
349    lid_t          core_lid;         // selected core local index
[1]350
[408]351    thread_t     * parent_ptr;       // local pointer on remote parent thread
352    cxy_t          parent_cxy;       // parent thread cluster
353    process_t    * parent_process;   // local pointer on parent process
354    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
[5]355
[408]356    void         * func;             // parent thread entry_func
357    void         * args;             // parent thread entry_args
358    intptr_t       base;             // parent thread u_stack_base
359    uint32_t       size;             // parent thread u_stack_size
360    uint32_t       flags;            // parent_thread flags
361    vpn_t          vpn_base;         // parent thread stack vpn_base
362    vpn_t          vpn_size;         // parent thread stack vpn_size
363    reg_t        * uzone;            // parent thread pointer on uzone 
364
365    vseg_t       * vseg;             // child thread STACK vseg
366
[438]367#if DEBUG_THREAD_USER_FORK
[433]368uint32_t cycle = (uint32_t)hal_get_cycles();
[438]369if( DEBUG_THREAD_USER_FORK < cycle )
[433]370printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
371__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
372#endif
[408]373
[1]374    // select a target core in local cluster
375    core_lid = cluster_select_local_core();
376
[408]377    // get cluster and local pointer on parent thread descriptor
378    parent_cxy = GET_CXY( parent_thread_xp );
379    parent_ptr = (thread_t *)GET_PTR( parent_thread_xp );
[1]380
[408]381    // get relevant fields from parent thread
[428]382    func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
383    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
384    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
385    size  = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
386    flags =           hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags         ));
387    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
[1]388
[408]389    vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
390    vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;
391
392    // get pointer on parent process in parent thread cluster
393    parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy,
394                                                        &parent_ptr->process ) );
395 
396    // get extended pointer on parent GPT in parent thread cluster
397    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
398
399    // allocate memory for child thread descriptor
400    child_ptr = thread_alloc();
401    if( child_ptr == NULL )
[23]402    {
403        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
[408]404        return -1;
[23]405    }
[14]406
[171]407    // initialize thread descriptor
[408]408    error = thread_init( child_ptr,
409                         child_process,
[14]410                         THREAD_USER,
[408]411                         func,
412                         args,
[14]413                         core_lid,
[408]414                         base,
415                         size );
[23]416    if( error )
[14]417    {
[408]418            printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
419        thread_release( child_ptr );
[14]420        return EINVAL;
421    }
422
[407]423    // return child pointer
[408]424    *child_thread = child_ptr;
[1]425
[408]426    // set detached flag if required
427    if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags |= THREAD_FLAG_DETACHED;
[1]428
[408]429    // update uzone pointer in child thread descriptor
[428]430    child_ptr->uzone_current = (char *)((intptr_t)uzone +
431                                        (intptr_t)child_ptr - 
432                                        (intptr_t)parent_ptr );
[408]433 
434
[407]435    // allocate CPU context for child thread
[408]436        if( hal_cpu_context_alloc( child_ptr ) )
[23]437    {
[407]438            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
[408]439        thread_release( child_ptr );
440        return -1;
[23]441    }
442
[407]443    // allocate FPU context for child thread
[408]444        if( hal_fpu_context_alloc( child_ptr ) )
[23]445    {
[407]446            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
[408]447        thread_release( child_ptr );
448        return -1;
[23]449    }
450
[408]451    // create and initialize STACK vseg
452    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate STACK vseg\n", __FUNCTION__ );
        thread_release( child_ptr );
        return -1;
    }
453    vseg_init( vseg,
454               VSEG_TYPE_STACK,
455               base,
456               size,
457               vpn_base,
458               vpn_size,
459               0, 0, XPTR_NULL,                         // not a file vseg
460               local_cxy );
[1]461
[408]462    // register STACK vseg in local child VSL
463    vseg_attach( &child_process->vmm , vseg );
464
465    // copy all valid STACK GPT entries   
466    vpn_t          vpn;
467    bool_t         mapped;
468    ppn_t          ppn;
469    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
470    {
471        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
472                                  parent_gpt_xp,
473                                  vpn,
474                                  true,                 // set cow
475                                  &ppn,
476                                  &mapped );
477        if( error )
478        {
479            vseg_detach( &child_process->vmm , vseg );
480            vseg_free( vseg );
481            thread_release( child_ptr );
482            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
483            return -1;
484        }
485
[433]486        // increment pending forks counter for the page if mapped
[408]487        if( mapped )
488        {
489            xptr_t   page_xp  = ppm_ppn2page( ppn );
490            cxy_t    page_cxy = GET_CXY( page_xp );
491            page_t * page_ptr = (page_t *)GET_PTR( page_xp );
[433]492            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
[408]493
[438]494#if (DEBUG_THREAD_USER_FORK & 1)
[433]495cycle = (uint32_t)hal_get_cycles();
[438]496if( DEBUG_THREAD_USER_FORK < cycle )
[433]497printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
498__FUNCTION__, CURRENT_THREAD, vpn );
499#endif
[408]500
501        }
502    }
503
[433]504    // set COW flag for all mapped entries of STACK vseg in parent thread GPT
505    hal_gpt_set_cow( parent_gpt_xp,
506                     vpn_base,
507                     vpn_size );
[408]508 
[438]509#if DEBUG_THREAD_USER_FORK
[433]510cycle = (uint32_t)hal_get_cycles();
[438]511if( DEBUG_THREAD_USER_FORK < cycle )
[433]512printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
513__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
514#endif
[407]515
[1]516        return 0;
[5]517
[296]518}  // end thread_user_fork()
519
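///////////////////////////////////////////////////////////////////////////////////////
// Note on the copy-on-write protocol above (a sketch of the expected behaviour):
// after hal_gpt_pte_copy() and hal_gpt_set_cow(), parent and child stacks share the
// same physical pages, both mapped read-only. The first write by either thread is
// expected to trigger a page fault that allocates a private copy and decrements the
// page "forks" counter incremented here.
///////////////////////////////////////////////////////////////////////////////////////
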
[1]520/////////////////////////////////////////////////////////
521error_t thread_kernel_create( thread_t     ** new_thread,
522                              thread_type_t   type,
[171]523                              void          * func,
524                              void          * args,
[1]525                                              lid_t           core_lid )
526{
527    error_t        error;
[14]528        thread_t     * thread;       // pointer on new thread descriptor
[1]529
[407]530    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
531    __FUNCTION__ , "illegal thread type" );
[1]532
[171]533    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
[5]534            __FUNCTION__ , "illegal core_lid" );
[1]535
[438]536#if DEBUG_THREAD_KERNEL_CREATE
[433]537uint32_t cycle = (uint32_t)hal_get_cycles();
[438]538if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[433]539printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
540__FUNCTION__, CURRENT_THREAD, thread_type_str(type), cycle );
541#endif
542
[171]543    // allocate memory for new thread descriptor
[14]544    thread = thread_alloc();
545
546    if( thread == NULL ) return ENOMEM;
547
[171]548    // initialize thread descriptor
[14]549    error = thread_init( thread,
550                         &process_zero,
551                         type,
552                         func,
553                         args,
554                         core_lid,
555                         0 , 0 );  // no user stack for a kernel thread
556
[171]557    if( error ) // release allocated memory for thread descriptor
[1]558    {
[185]559        thread_release( thread );
[14]560        return EINVAL;
[1]561    }
562
[171]563    // allocate & initialize CPU context
564        if( hal_cpu_context_create( thread ) )
    {
        thread_release( thread );
        return ENOMEM;
    }
[14]565
[438]566#if DEBUG_THREAD_KERNEL_CREATE
[433]567cycle = (uint32_t)hal_get_cycles();
[438]568if( DEBUG_THREAD_KERNEL_CREATE < cycle )
[433]569printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
570__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
571#endif
[1]572
[171]573    *new_thread = thread;
[1]574        return 0;
[5]575
[296]576} // end thread_kernel_create()
577
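///////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): creating an RPC server thread on core <lid>;
// rpc_thread_func is a hypothetical entry point, and the explicit unblock is an
// assumption, since thread_init() creates the thread in BLOCKED_GLOBAL state:
//
//     thread_t * thread;
//     error = thread_kernel_create( &thread , THREAD_RPC , &rpc_thread_func , NULL , lid );
//     if( error ) return error;
//     thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
///////////////////////////////////////////////////////////////////////////////////////
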
[438]578/////////////////////////////////////////////////
579error_t thread_idle_init( thread_t      * thread,
580                          thread_type_t   type,
581                          void          * func,
582                          void          * args,
583                                          lid_t           core_lid )
[14]584{
[407]585    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
[1]586
[407]587    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , __FUNCTION__ , "illegal core index" );
[14]588
589    error_t  error = thread_init( thread,
590                                  &process_zero,
591                                  type,
592                                  func,
593                                  args,
594                                  core_lid,
595                                  0 , 0 );   // no user stack for a kernel thread
596
597    // allocate & initialize CPU context if success
598    if( error == 0 ) hal_cpu_context_create( thread );
[171]599
[14]600    return error;
601
[438]602}  // end thread_idle_init()
[407]603
[1]604///////////////////////////////////////////////////////////////////////////////////////
605// TODO: check that all memory dynamically allocated during thread execution
606// has been released, using a cache of mmap and malloc requests. [AG]
607///////////////////////////////////////////////////////////////////////////////////////
608void thread_destroy( thread_t * thread )
609{
[409]610    reg_t        save_sr;
[1]611
612    process_t  * process    = thread->process;
613    core_t     * core       = thread->core;
614
[438]615#if DEBUG_THREAD_DESTROY
[433]616uint32_t cycle = (uint32_t)hal_get_cycles();
[438]617if( DEBUG_THREAD_DESTROY < cycle )
[433]618printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
619__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
620#endif
[1]621
[5]622    assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
623
624    assert( (thread->local_locks == 0) , __FUNCTION__ , "some local locks not released" );
[171]625
[5]626    assert( (thread->remote_locks == 0) , __FUNCTION__ , "some remote locks not released" );
627
[1]628    // update instrumentation values
[408]629        process->vmm.pgfault_nr += thread->info.pgfault_nr;
[1]630
631    // release memory allocated for CPU context and FPU context
632        hal_cpu_context_destroy( thread );
[409]633        if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );
[1]634       
[428]635    // release FPU ownership if required
[409]636        hal_disable_irq( &save_sr );
[1]637        if( core->fpu_owner == thread )
638        {
639                core->fpu_owner = NULL;
640                hal_fpu_disable();
641        }
[409]642        hal_restore_irq( save_sr );
[1]643
[171]644    // remove thread from process th_tbl[]
[428]645    process_remove_thread( thread );
[1]646       
[438]647    // update DQDT
648    dqdt_update_threads( -1 );
[23]649
[1]650    // invalidate thread descriptor
651        thread->signature = 0;
652
653    // release memory for thread descriptor
[23]654    thread_release( thread );
[1]655
[438]656#if DEBUG_THREAD_DESTROY
[433]657cycle = (uint32_t)hal_get_cycles();
[438]658if( DEBUG_THREAD_DESTROY < cycle )
[433]659printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
660__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
661#endif
[1]662
[407]663}   // end thread_destroy()
664
[1]665/////////////////////////////////////////////////
666void thread_child_parent_link( xptr_t  xp_parent,
667                               xptr_t  xp_child )
668{
[171]669    // get extended pointers on children list root
670    cxy_t      parent_cxy = GET_CXY( xp_parent );
[1]671    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
672    xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );
673
[171]674    // get extended pointer on children list entry
675    cxy_t      child_cxy  = GET_CXY( xp_child );
[1]676    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
677    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
678
679    // set the link
680    xlist_add_first( root , entry );
681    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
682
[409]683}  // end thread_child_parent_link()
684
[1]685///////////////////////////////////////////////////
686void thread_child_parent_unlink( xptr_t  xp_parent,
687                                 xptr_t  xp_child )
688{
689    // get extended pointer on children list lock
[171]690    cxy_t      parent_cxy = GET_CXY( xp_parent );
[1]691    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
692    xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );
693
[171]694    // get extended pointer on children list entry
695    cxy_t      child_cxy  = GET_CXY( xp_child );
[1]696    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
697    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
698
699    // get the lock
700    remote_spinlock_lock( lock );
701
702    // remove the link
703    xlist_unlink( entry );
704    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );
[171]705
[1]706    // release the lock
707    remote_spinlock_unlock( lock );
708
[409]709}  // thread_child_parent_unlink()
710
[416]711//////////////////////////////////////////////////
712inline void thread_set_req_ack( thread_t * target,
713                                uint32_t * rsp_count )
[1]714{
[409]715    reg_t    save_sr;   // for critical section
716
[416]717    // get pointer on target thread scheduler
718    scheduler_t * sched = &target->core->scheduler;
[409]719
[416]720    // wait scheduler ready to handle a new request
721    while( sched->req_ack_pending ) asm volatile( "nop" );
[409]722   
723    // enter critical section
724    hal_disable_irq( &save_sr );
725     
[416]726    // set request in target thread scheduler
727    sched->req_ack_pending = true;
[409]728
[416]729    // set ack request in target thread "flags"
730    hal_atomic_or( &target->flags , THREAD_FLAG_REQ_ACK );
[409]731
[416]732    // set pointer on responses counter in target thread
733    target->ack_rsp_count = rsp_count;
[409]734   
735    // exit critical section
736    hal_restore_irq( save_sr );
737
[407]738    hal_fence();
[171]739
[416]740}  // thread_set_req_ack()
[409]741
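///////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): a thread requesting acknowledgement from N target
// threads would typically share one counter, assuming each target scheduler
// decrements it and calls thread_reset_req_ack() when it handles THREAD_FLAG_REQ_ACK:
//
//     uint32_t rsp_count = N;
//     for( i = 0 ; i < N ; i++ ) thread_set_req_ack( target[i] , &rsp_count );
//     while( rsp_count ) sched_yield( "wait acks" );
///////////////////////////////////////////////////////////////////////////////////////
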
[416]742/////////////////////////////////////////////////////
743inline void thread_reset_req_ack( thread_t * target )
[1]744{
[409]745    reg_t    save_sr;   // for critical section
746
747    // get pointer on target thread scheduler
[416]748    scheduler_t * sched = &target->core->scheduler;
[409]749
750    // check signal pending in scheduler
[416]751    assert( sched->req_ack_pending , __FUNCTION__ , "no pending signal" );
[409]752   
753    // enter critical section
754    hal_disable_irq( &save_sr );
755     
756    // reset signal in scheduler
[416]757    sched->req_ack_pending = false;
[409]758
759    // reset signal in thread "flags"
[416]760    hal_atomic_and( &target->flags , ~THREAD_FLAG_REQ_ACK );
[409]761
762    // reset pointer on responses counter
[416]763    target->ack_rsp_count = NULL;
[409]764   
765    // exit critical section
766    hal_restore_irq( save_sr );
767
[407]768    hal_fence();
[171]769
[416]770}  // thread_reset_req_ack()
[409]771
[1]772////////////////////////////////
773inline bool_t thread_can_yield()
774{
775    thread_t * this = CURRENT_THREAD;
[367]776    return (this->local_locks == 0) && (this->remote_locks == 0);
[1]777}
778
[367]779/////////////////////////
780void thread_check_sched()
[1]781{
[338]782    thread_t * this = CURRENT_THREAD;
[1]783
[367]784        if( (this->local_locks == 0) && 
785        (this->remote_locks == 0) &&
786        (this->flags & THREAD_FLAG_SCHED) ) 
787    {
788        this->flags &= ~THREAD_FLAG_SCHED;
[408]789        sched_yield( "delayed scheduling" );
[367]790    }
[1]791
[407]792}  // end thread_check_sched()
793
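///////////////////////////////////////////////////////////////////////////////////////
// Note (a sketch of the intended pattern): when a scheduling request arrives while
// the thread holds locks, only THREAD_FLAG_SCHED is set; lock-release code is then
// expected to call thread_check_sched() after decrementing the lock counters, so the
// postponed descheduling happens as soon as thread_can_yield() becomes true.
///////////////////////////////////////////////////////////////////////////////////////
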
[436]794//////////////////////////////////////
795void thread_block( xptr_t   thread_xp,
796                   uint32_t cause )
[407]797{
[436]798    // get thread cluster and local pointer
799    cxy_t      cxy = GET_CXY( thread_xp );
800    thread_t * ptr = GET_PTR( thread_xp );
801
[407]802    // set blocking cause
[436]803    hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause );
[407]804    hal_fence();
805
[438]806#if DEBUG_THREAD_BLOCK
[433]807uint32_t cycle = (uint32_t)hal_get_cycles();
[438]808if( DEBUG_THREAD_BLOCK < cycle )
[436]809printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
810__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
[433]811#endif
812
[438]813#if (DEBUG_THREAD_BLOCK & 1)
814if( DEBUG_THREAD_BLOCK < cycle )
[436]815sched_display( ptr->core->lid );
816#endif
817
[407]818} // end thread_block()
819
[433]820////////////////////////////////////////////
821uint32_t thread_unblock( xptr_t   thread_xp,
[407]822                         uint32_t cause )
823{
824    // get thread cluster and local pointer
[433]825    cxy_t      cxy = GET_CXY( thread_xp );
826    thread_t * ptr = GET_PTR( thread_xp );
[407]827
828    // reset blocking cause
829    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
830    hal_fence();
831
[438]832#if DEBUG_THREAD_BLOCK
[433]833uint32_t cycle = (uint32_t)hal_get_cycles();
[438]834if( DEBUG_THREAD_BLOCK < cycle )
[436]835printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
836__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
[433]837#endif
838
[438]839#if (DEBUG_THREAD_BLOCK & 1)
840if( DEBUG_THREAD_BLOCK < cycle )
[436]841sched_display( ptr->core->lid );
842#endif
843
[407]844    // return a non zero value if the cause bit is modified
845    return( previous & cause );
846
847}  // end thread_unblock()
848
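///////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): the typical blocking pattern, as used by
// thread_kill() below, blocks the current thread on a cause and deschedules;
// the thread resumes when another thread resets the same cause bit:
//
//     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_JOIN );
//     sched_yield( "wait join" );
//     ...
//     // on another thread:
//     thread_unblock( waiting_thread_xp , THREAD_BLOCKED_JOIN );
///////////////////////////////////////////////////////////////////////////////////////
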
[436]849////////////////////////////////////
850void thread_kill( xptr_t  target_xp,
851                  bool_t  is_exit,
852                  bool_t  is_forced )
[1]853{
[436]854    reg_t       save_sr;                // for critical section
855    bool_t      attached;               // target thread in attached mode
856    bool_t      join_done;              // joining thread arrived first
857    xptr_t      killer_xp;              // extended pointer on killer thread (this)
858    thread_t  * killer_ptr;             // pointer on killer thread (this)
859    cxy_t       target_cxy;             // target thread cluster     
860    thread_t  * target_ptr;             // pointer on target thread
861    xptr_t      joining_xp;             // extended pointer on joining thread
862    thread_t  * joining_ptr;            // pointer on joining thread
863    cxy_t       joining_cxy;            // joining thread cluster
864    pid_t       target_pid;             // target process PID
865    cxy_t       owner_cxy;              // target process owner cluster
866    trdid_t     target_trdid;           // target thread identifier
867    ltid_t      target_ltid;            // target thread local index
868    xptr_t      process_state_xp;       // extended pointer on <term_state> in process
[1]869
[436]870    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
871    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
872    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
873    xptr_t      target_process_xp;      // extended pointer on target thread <process>
[1]874
[436]875    process_t * target_process;         // pointer on target thread process
876
877    // get target thread cluster and pointer
878    target_cxy = GET_CXY( target_xp );
879    target_ptr = GET_PTR( target_xp );
880
881    // get killer thread pointers
882    killer_ptr = CURRENT_THREAD;
883    killer_xp  = XPTR( local_cxy , killer_ptr );
884
[438]885#if DEBUG_THREAD_KILL
[433]886uint32_t cycle  = (uint32_t)hal_get_cycles();
[438]887if( DEBUG_THREAD_KILL < cycle )
[433]888printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
[436]889__FUNCTION__, killer_ptr, target_ptr, cycle );
[433]890#endif
[1]891
[436]892    // block the target thread
893    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
[409]894
[436]895    // get target thread attached mode
896    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
897    attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0);
898
899    // synchronize with the joining thread
900    // if the target thread is attached && not forced
901
902    if( attached  && (is_forced == false) )
[1]903    {
[436]904        // build extended pointers on target thread join fields
905        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
906        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
[409]907
[436]908        // enter critical section
909        hal_disable_irq( &save_sr );
[409]910
[436]911        // take the join_lock in target thread descriptor
912        remote_spinlock_lock( target_join_lock_xp );
913
914        // get join_done from target thread descriptor
915        join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
916   
917        if( join_done )     // joining thread arrived first
[409]918        {
[436]919            // get extended pointer on joining thread
920            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
921            joining_ptr = GET_PTR( joining_xp );
922            joining_cxy = GET_CXY( joining_xp );
923           
924            // reset the join_done flag in target thread
925            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
[409]926
[436]927            // unblock the joining thread
928            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
929
930            // release the join_lock in target thread descriptor
931            remote_spinlock_unlock( target_join_lock_xp );
932
933            // restore IRQs
934            hal_restore_irq( save_sr );
[409]935        }
[436]936        else                // this thread arrived first
937        {
938            // set the kill_done flag in target thread
939            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
[1]940
[436]941            // block this thread on BLOCKED_JOIN
942            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
[1]943
[436]944            // set extended pointer on killer thread in target thread
945            hal_remote_swd( target_join_xp_xp , killer_xp );
946
947            // release the join_lock in target thread descriptor
948            remote_spinlock_unlock( target_join_lock_xp );
949
950            // deschedule
951            sched_yield( "killer thread wait joining thread" );
952
953            // restore IRQs
954            hal_restore_irq( save_sr );
955        }
956    }  // end if attached
957
958    // - if the target thread is the main thread
959    //   => synchronize with the parent process main thread
960    // - if the target thread is not the main thread
961    //   => simply mark the target thread for delete
962
963    // get pointer on target thread process
964    target_process_xp  = XPTR( target_cxy , &target_ptr->process );
965    target_process     = (process_t *)hal_remote_lpt( target_process_xp ); 
966
967        // get target process owner cluster
968        target_pid = hal_remote_lw( XPTR( target_cxy , &target_process->pid ) );
969    owner_cxy = CXY_FROM_PID( target_pid );
970
971    // get target thread local index
972    target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
973    target_ltid  = LTID_FROM_TRDID( target_trdid );
974
975    if( (owner_cxy == target_cxy) && (target_ltid == 0) )     // main thread
976    {
977        // get extended pointer on term_state in target process owner cluster
978        process_state_xp = XPTR( owner_cxy , &target_process->term_state );
979
980        // set termination info in target process owner 
981        if( is_exit ) hal_remote_atomic_or( process_state_xp , PROCESS_TERM_EXIT );
982        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
983
[438]984#if DEBUG_THREAD_KILL
[433]985cycle  = (uint32_t)hal_get_cycles();
[438]986if( DEBUG_THREAD_KILL < cycle )
[436]987printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
988__FUNCTION__, killer_ptr, target_ptr, cycle );
[433]989#endif
[409]990
[436]991    }
992    else                                                      // not the main thread
993    {
994        // set the REQ_DELETE flag in target thread descriptor
995        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
996
[438]997#if DEBUG_THREAD_KILL
[436]998cycle  = (uint32_t)hal_get_cycles();
[438]999if( DEBUG_THREAD_KILL < cycle )
[436]1000printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
1001__FUNCTION__, killer_ptr, target_ptr, cycle );
1002#endif
1003
1004    }
1005
[407]1006}  // end thread_kill()
1007
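///////////////////////////////////////////////////////////////////////////////////////
// Synchronization note (a sketch): for an attached target, exactly one of two
// orderings occurs under join_lock. If the joining thread arrived first
// (JOIN_DONE set), thread_kill() unblocks it and proceeds. Otherwise thread_kill()
// sets KILL_DONE, records itself in <join_xp>, blocks on THREAD_BLOCKED_JOIN and
// deschedules until the joining thread unblocks it.
///////////////////////////////////////////////////////////////////////////////////////
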
[14]1008///////////////////////
1009void thread_idle_func()
[1]1010{
1011    while( 1 )
1012    {
[408]1013        // unmask IRQs
1014        hal_enable_irq( NULL );
1015
[407]1016        if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode
1017        {
[1]1018
[438]1019#if DEBUG_THREAD_IDLE
[433]1020uint32_t cycle  = (uint32_t)hal_get_cycles();
1021thread_t * this = CURRENT_THREAD;
[438]1022if( DEBUG_THREAD_IDLE < cycle )
[433]1023printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
1024__FUNCTION__, this, local_cxy, this->core->lid, cycle );
1025#endif
[1]1026
[407]1027            hal_core_sleep();
[1]1028
[438]1029#if DEBUG_THREAD_IDLE
[433]1030cycle  = (uint32_t)hal_get_cycles();
[438]1031if( DEBUG_THREAD_IDLE < cycle )
[433]1032printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
1033__FUNCTION__, this, local_cxy, this->core->lid, cycle );
1034#endif
[407]1035
1036        }
[418]1037        else                                // search a runnable thread
[407]1038        {
[418]1039            sched_yield( "IDLE" );
[407]1040        }
[418]1041    }
[407]1042}  // end thread_idle()
[1]1043
[407]1044
[16]1045/////////////////////////////////////////////////
1046void thread_user_time_update( thread_t * thread )
1047{
1048    // TODO
[337]1049    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
[16]1050}
[1]1051
[16]1052///////////////////////////////////////////////////
1053void thread_kernel_time_update( thread_t * thread )
1054{
1055    // TODO
[337]1056    // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
[16]1057}
1058
[23]1059/////////////////////////////////////
1060xptr_t thread_get_xptr( pid_t    pid,
1061                        trdid_t  trdid )
1062{
1063    cxy_t         target_cxy;          // target thread cluster identifier
1064    ltid_t        target_thread_ltid;  // target thread local index
[171]1065    thread_t    * target_thread_ptr;   // target thread local pointer
[23]1066    xptr_t        target_process_xp;   // extended pointer on target process descriptor
[171]1067    process_t   * target_process_ptr;  // local pointer on target process descriptor
[23]1068    pid_t         target_process_pid;  // target process identifier
1069    xlist_entry_t root;                // root of list of process in target cluster
1070    xptr_t        lock_xp;             // extended pointer on lock protecting  this list
[16]1071
[23]1072    // get target cluster identifier and local thread identifier
1073    target_cxy         = CXY_FROM_TRDID( trdid );
1074    target_thread_ltid = LTID_FROM_TRDID( trdid );
1075
[436]1076    // check trdid argument
1077        if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) || 
1078        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
1079
[23]1080    // get root of list of process descriptors in target cluster
1081    hal_remote_memcpy( XPTR( local_cxy  , &root ),
1082                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
1083                       sizeof(xlist_entry_t) );
1084
[171]1085    // get extended pointer on lock protecting the list of processes
[23]1086    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
1087
1088    // take the lock protecting the list of processes in target cluster
1089    remote_spinlock_lock( lock_xp );
1090
1091    // loop on list of process in target cluster to find the PID process
1092    xptr_t  iter;
1093    bool_t  found = false;
1094    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
1095    {
1096        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1097        target_process_ptr = (process_t *)GET_PTR( target_process_xp );
1098        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
1099        if( target_process_pid == pid )
1100        {
1101            found = true;
1102            break;
1103        }
1104    }
1105
1106    // release the lock protecting the list of processes in target cluster
1107    remote_spinlock_unlock( lock_xp );
1108
[436]1109    // check PID found
1110    if( found == false ) return XPTR_NULL;
[23]1111
1112    // get target thread local pointer
1113    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
[171]1114    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );
[23]1115
[436]1116    if( target_thread_ptr == NULL )  return XPTR_NULL;
[23]1117
1118    return XPTR( target_cxy , target_thread_ptr );
[171]1119}
[23]1120
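///////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): resolving a (pid,trdid) pair before a remote
// operation; ESRCH is an assumed error code for the caller:
//
//     xptr_t thread_xp = thread_get_xptr( pid , trdid );
//     if( thread_xp == XPTR_NULL ) return ESRCH;
//     thread_block( thread_xp , THREAD_BLOCKED_GLOBAL );
///////////////////////////////////////////////////////////////////////////////////////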