Timestamp:
Nov 7, 2017, 3:08:12 PM (6 years ago)
Author:
alain
Message:

First implementation of fork/exec.

File:
1 edited

Legend:

  ' ' unmodified   '-' removed   '+' added
  • trunk/kernel/kern/thread.c

--- trunk/kernel/kern/thread.c (r406)
+++ trunk/kernel/kern/thread.c (r407)
@@ -57,5 +57,4 @@
     else if( type == THREAD_RPC    ) return "RPC";
     else if( type == THREAD_DEV    ) return "DEV";
-    else if( type == THREAD_KERNEL ) return "KER";
     else if( type == THREAD_IDLE   ) return "IDL";
     else                             return "undefined";
     
@@ -153,4 +152,7 @@
     }
 
+    // compute thread descriptor size without kernel stack
+    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;
+
     // Initialize new thread descriptor
     thread->trdid           = trdid;
     
@@ -170,6 +172,6 @@
     thread->u_stack_base    = u_stack_base;
     thread->u_stack_size    = u_stack_size;
-    thread->k_stack_base    = (intptr_t)thread;
-    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE;
+    thread->k_stack_base    = (intptr_t)thread + desc_size;
+    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
 
     thread->entry_func      = func;         // thread entry point
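The two hunks above change how a thread's kernel stack is laid out: instead of giving the whole CONFIG_THREAD_DESC_SIZE slot to the stack, r407 keeps the descriptor fields (up to and including the signature field) at the bottom of the slot and uses the remaining bytes as the kernel stack. The stand-alone user-space sketch below models that arithmetic; the struct layout, the SLOT_SIZE value and the main() harness are invented for illustration, and only the desc_size / k_stack_base / k_stack_size computation mirrors the diff.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOT_SIZE 0x4000   /* stands in for CONFIG_THREAD_DESC_SIZE (assumed value) */

/* invented stand-in for thread_t : 'signature' must be the last descriptor field */
typedef struct fake_thread_s
{
    uint32_t trdid;
    intptr_t k_stack_base;
    uint32_t k_stack_size;
    uint32_t signature;
}
fake_thread_t;

int main( void )
{
    /* the descriptor sits at the bottom of a fixed-size, aligned slot */
    fake_thread_t * thread = aligned_alloc( SLOT_SIZE , SLOT_SIZE );
    if( thread == NULL ) return 1;

    /* same computation as r407 : descriptor size = offset of 'signature' + 4 */
    uint32_t desc_size = (uint32_t)( (intptr_t)(&thread->signature) - (intptr_t)thread + 4 );

    /* the kernel stack gets whatever is left of the slot */
    thread->k_stack_base = (intptr_t)thread + desc_size;
    thread->k_stack_size = SLOT_SIZE - desc_size;

    printf( "desc_size = %u / k_stack_base = %p / k_stack_size = %u\n",
            (unsigned)desc_size , (void *)thread->k_stack_base , (unsigned)thread->k_stack_size );

    free( thread );
    return 0;
}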
     
@@ -178,6 +180,6 @@
     thread->signals         = 0;            // no pending signal
     thread->errno           = 0;            // no error detected
-    thread->fork_user       = 0;            // no fork required
-    thread->fork_cxy        = 0;
+    thread->fork_user       = 0;            // no user defined placement for fork
+    thread->fork_cxy        = 0;            // user defined target cluster for fork
 
     // thread blocked
     
@@ -221,5 +223,5 @@
     vseg_t       * vseg;         // stack vseg
 
-    thread_dmsg("\n[DMSG] %s : enters for process %x\n", __FUNCTION__ , pid );
+    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );
 
     // get process descriptor local copy
     
@@ -234,17 +236,27 @@
 
     // select a target core in local cluster
-    if( attr->attributes & PT_ATTR_CORE_DEFINED ) core_lid = attr->lid;
-    else                                          core_lid = cluster_select_local_core();
-
-    // check core local index
-    if( core_lid >= LOCAL_CLUSTER->cores_nr )
-    {
-        printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
-               __FUNCTION__ , core_lid );
-
-        return EINVAL;
+    if( attr->attributes & PT_ATTR_CORE_DEFINED )
+    {
+        core_lid = attr->lid;
+        if( core_lid >= LOCAL_CLUSTER->cores_nr )
+        {
+            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
+                   __FUNCTION__ , core_lid );
+            return EINVAL;
+        }
+    }
+    else
+    {
+        core_lid = cluster_select_local_core();
     }
 
     // allocate a stack from local VMM
-    vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
+    vseg = vmm_create_vseg( process,
+                            VSEG_TYPE_STACK,
+                            0,                 // size unused
+                            0,                 // length unused
+                            0,                 // file_offset unused
+                            0,                 // file_size unused
+                            XPTR_NULL,         // mapper_xp unused
+                            local_cxy );
 
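For reference, the restructured test above only validates attr->lid when the caller actually asked for a pinned core (PT_ATTR_CORE_DEFINED); otherwise placement falls back to cluster_select_local_core(). A small stand-alone model of that decision, where CORES_NR, ATTR_CORE_DEFINED and select_core are invented names standing in for the kernel ones:

#include <stdint.h>
#include <stdio.h>

#define CORES_NR          4     /* stands in for LOCAL_CLUSTER->cores_nr */
#define ATTR_CORE_DEFINED 0x1   /* stands in for PT_ATTR_CORE_DEFINED */

typedef struct { uint32_t attributes; uint32_t lid; } attr_t;

/* returns the selected core index, or -1 on an illegal user-defined index */
static int select_core( attr_t * attr )
{
    if( attr->attributes & ATTR_CORE_DEFINED )
    {
        if( attr->lid >= CORES_NR ) return -1;   /* reject, as thread_user_create() does */
        return (int)attr->lid;
    }
    return 0;                                    /* default placement (cluster_select_local_core) */
}

int main( void )
{
    attr_t pinned  = { .attributes = ATTR_CORE_DEFINED , .lid = 2 };
    attr_t illegal = { .attributes = ATTR_CORE_DEFINED , .lid = 9 };
    attr_t any     = { .attributes = 0 , .lid = 0 };

    printf( "pinned:%d illegal:%d any:%d\n",
            select_core( &pinned ), select_core( &illegal ), select_core( &any ) );
    return 0;
}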
    251263    if( vseg == NULL )
     
@@ -287,10 +299,11 @@
 
     // set DETACHED flag if required
-    if( attr->attributes & PT_ATTR_DETACH ) thread->flags |= THREAD_FLAG_DETACHED;
+    if( attr->attributes & PT_ATTR_DETACH )
+    {
+        thread->flags |= THREAD_FLAG_DETACHED;
+    }
 
     // allocate & initialize CPU context
-    error = hal_cpu_context_create( thread );
-
-    if( error )
+    if( hal_cpu_context_create( thread ) )
     {
         printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
     
@@ -300,8 +313,6 @@
     }
 
-    // allocate & initialize FPU context
-    error = hal_fpu_context_create( thread );
-
-    if( error )
+    // allocate  FPU context
+    if( hal_fpu_context_alloc( thread ) )
     {
         printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
     
@@ -311,6 +322,7 @@
     }
 
-    thread_dmsg("\n[DMSG] %s : exit / trdid = %x / process %x / core = %d\n",
-                __FUNCTION__ , thread->trdid , process->pid , core_lid );
+thread_dmsg("\n[DBG] %s : core[%x,%d] exit / trdid = %x / process %x / core = %d\n",
+__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid,
+thread->trdid , process->pid , core_lid );
 
     *new_thread = thread;
     
@@ -319,87 +331,79 @@
 }  // end thread_user_create()
 
-//////////////////////////////////////////////
+////////////////////////////////////////////////////
 error_t thread_user_fork( process_t * process,
+                          intptr_t    stack_base,
+                          uint32_t    stack_size,
                           thread_t ** new_thread )
 {
     error_t        error;
-    thread_t     * thread;       // pointer on new thread descriptor
+    thread_t     * child;       // pointer on new thread descriptor
     lid_t          core_lid;     // selected core local index
-    vseg_t       * vseg;         // stack vseg
-
-    thread_dmsg("\n[DMSG] %s : enters\n", __FUNCTION__ );
-
-    // allocate a stack from local VMM
-    vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
-
-    if( vseg == NULL )
-    {
-        printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
-        return ENOMEM;
-    }
+
+thread_dmsg("\n[DBG] %s : core[%x,%d] enters\n",
+__FUNCTION__ , local_cxy , core_lid );
 
     // select a target core in local cluster
     core_lid = cluster_select_local_core();
 
-    // get pointer on calling thread descriptor
-    thread_t * this = CURRENT_THREAD;
+    // get pointer on parent thread descriptor
+    thread_t * parent = CURRENT_THREAD;
 
     // allocate memory for new thread descriptor
-    thread = thread_alloc();
-
-    if( thread == NULL )
+    child = thread_alloc();
+
+    if( child == NULL )
     {
         printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
-        vmm_remove_vseg( vseg );
         return ENOMEM;
     }
 
     // initialize thread descriptor
-    error = thread_init( thread,
+    error = thread_init( child,
                          process,
                          THREAD_USER,
-                         this->entry_func,
-                         this->entry_args,
+                         parent->entry_func,
+                         parent->entry_args,
                          core_lid,
-                         vseg->min,
-                         vseg->max - vseg->min );
+                         stack_base,
+                         stack_size );
 
     if( error )
     {
         printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
-        vmm_remove_vseg( vseg );
-        thread_release( thread );
+        thread_release( child );
         return EINVAL;
     }
 
-    // set ATTACHED flag if set in this thread
-    if( this->flags & THREAD_FLAG_DETACHED ) thread->flags = THREAD_FLAG_DETACHED;
-
-    // allocate & initialize CPU context from calling thread
-    error = hal_cpu_context_copy( thread , this );
-
-    if( error )
-    {
-        printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
-        vmm_remove_vseg( vseg );
-        thread_release( thread );
+    // return child pointer
+    *new_thread = child;
+
+    // set DETACHED flag if required
+    if( parent->flags & THREAD_FLAG_DETACHED ) child->flags = THREAD_FLAG_DETACHED;
+
+    // allocate CPU context for child thread
+    if( hal_cpu_context_alloc( child ) )
+    {
+        printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
+        thread_release( child );
         return ENOMEM;
     }
 
-    // allocate & initialize FPU context from calling thread
-    error = hal_fpu_context_copy( thread , this );
-
-    if( error )
-    {
-        printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
-        vmm_remove_vseg( vseg );
-        thread_release( thread );
+    // allocate FPU context for child thread
+    if( hal_fpu_context_alloc( child ) )
+    {
+        printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
+        thread_release( child );
         return ENOMEM;
     }
 
-    thread_dmsg("\n[DMSG] %s : exit / thread %x for process %x on core %d in cluster %x\n",
-                __FUNCTION__, thread->trdid, process->pid, core_lid, local_cxy );
-
-    *new_thread = thread;
+    // copy kernel stack content from parent to child thread descriptor
+    void * dst = (void *)(&child->signature) + 4;
+    void * src = (void *)(&parent->signature) + 4;
+    memcpy( dst , src , parent->k_stack_size );
+
+thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n",
+__FUNCTION__, local_cxy , core_lid , child->trdid , process->pid );
+
     return 0;
 
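Taken together with the layout change in thread_init(), the new thread_user_fork() no longer allocates a stack vseg itself: the caller passes the user stack (stack_base / stack_size), and the parent's kernel stack, which now lives just above the signature field inside the descriptor slot, is copied byte for byte into the child's slot. A stand-alone sketch of that copy under the same assumed layout as above (fake_thread_t and SLOT_SIZE are invented; only the idea of copying everything after the signature field, bounded by k_stack_size, comes from the diff):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOT_SIZE 0x4000   /* assumed stand-in for CONFIG_THREAD_DESC_SIZE */

typedef struct fake_thread_s
{
    uint32_t trdid;
    intptr_t k_stack_base;
    uint32_t k_stack_size;
    uint32_t signature;    /* last descriptor field, as in the kernel */
}
fake_thread_t;

int main( void )
{
    fake_thread_t * parent = aligned_alloc( SLOT_SIZE , SLOT_SIZE );
    fake_thread_t * child  = aligned_alloc( SLOT_SIZE , SLOT_SIZE );
    if( (parent == NULL) || (child == NULL) ) return 1;

    uint32_t desc_size = (uint32_t)( (intptr_t)(&parent->signature) - (intptr_t)parent + 4 );

    parent->trdid        = 0x10;
    parent->k_stack_size = SLOT_SIZE - desc_size;

    /* fill the parent "kernel stack" with a recognizable pattern */
    memset( (char *)parent + desc_size , 0xAB , parent->k_stack_size );

    /* same idea as r407 : copy everything located after the signature field */
    char * dst = (char *)(&child->signature)  + 4;
    char * src = (char *)(&parent->signature) + 4;
    memcpy( dst , src , parent->k_stack_size );

    printf( "copied %u bytes / first byte of child stack = 0x%x\n",
            (unsigned)parent->k_stack_size , (unsigned)(unsigned char)dst[0] );

    free( parent );
    free( child );
    return 0;
}

The kernel version does the same arithmetic on void pointers (a GCC extension); the sketch uses char pointers instead. Copying the kernel stack presumably lets the child resume from the same kernel-side state as the parent when it is first scheduled; the stack-pointer fix-up itself belongs to the HAL CPU-context code, which is not shown in this changeset.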
     
@@ -416,10 +420,9 @@
     thread_t     * thread;       // pointer on new thread descriptor
 
-    thread_dmsg("\n[DMSG] %s : enter / for type %s on core[%x,%d] / cycle %d\n",
-    __FUNCTION__ , thread_type_str( type ) , local_cxy , core_lid , hal_time_stamp() );
-
-    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
-              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
-              __FUNCTION__ , "illegal thread type" );
+thread_dmsg("\n[DBG] %s : core[%x,%d] enters / type % / cycle %d\n",
+__FUNCTION__ , local_cxy , core_lid , thread_type_str( type ) , hal_time_stamp() );
+
+    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
+    __FUNCTION__ , "illegal thread type" );
 
     assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
     
@@ -449,7 +452,6 @@
     hal_cpu_context_create( thread );
 
-    thread_dmsg("\n[DMSG] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n",
-    __FUNCTION__ , thread->trdid , thread_type_str(type) ,
-    local_cxy , core_lid , hal_time_stamp() );
+thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n",
+__FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() );
 
     *new_thread = thread;
     
@@ -465,13 +467,7 @@
                                         lid_t           core_lid )
 {
-    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
-              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
-              __FUNCTION__ , "illegal thread type" );
-
-    if( core_lid >= LOCAL_CLUSTER->cores_nr )
-    {
-        panic("illegal core_lid / cores = %d / lid = %d / cxy = %x",
-              LOCAL_CLUSTER->cores_nr , core_lid , local_cxy );
-    }
+    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
+
+    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , __FUNCTION__ , "illegal core index" );
 
     error_t  error = thread_init( thread,
     
@@ -487,5 +483,6 @@
 
     return error;
-}
+
+}  // end thread_kernel_init()
 
 ///////////////////////////////////////////////////////////////////////////////////////
     
@@ -502,5 +499,5 @@
     core_t     * core       = thread->core;
 
-    thread_dmsg("\n[DMSG] %s : enters for thread %x in process %x / type = %s\n",
+    thread_dmsg("\n[DBG] %s : enters for thread %x in process %x / type = %s\n",
                 __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) );
 
     
@@ -556,7 +553,8 @@
     tm_end = hal_get_cycles();
 
-    thread_dmsg("\n[DMSG] %s : exit for thread %x in process %x / duration = %d\n",
+    thread_dmsg("\n[DBG] %s : exit for thread %x in process %x / duration = %d\n",
                 __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start );
-}
+
+}   // end thread_destroy()
 
 /////////////////////////////////////////////////
     
@@ -609,4 +607,5 @@
 {
     hal_atomic_or( &thread->signals , mask );
+    hal_fence();
 }
 
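The hal_fence() added after the atomic OR presumably makes the new signal bit visible to other cores before the caller takes any follow-up action (such as sending an IPI). In portable C11 terms the pattern looks like the sketch below, with atomic_fetch_or and atomic_thread_fence standing in for hal_atomic_or and hal_fence; the function name is invented.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t signals = 0;

/* model of "set a signal bit, then make it globally visible" */
static void toy_signal_set( uint32_t mask )
{
    atomic_fetch_or_explicit( &signals , mask , memory_order_relaxed );
    atomic_thread_fence( memory_order_seq_cst );    /* plays the role of hal_fence() */
}

int main( void )
{
    toy_signal_set( 0x4 );
    printf( "signals = 0x%x\n" , (unsigned)atomic_load( &signals ) );
    return 0;
}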
     
@@ -616,19 +615,5 @@
 {
     hal_atomic_and( &thread->signals , ~mask );
-}
-
-//////////////////////////////////
-inline bool_t thread_is_joinable()
-{
-    thread_t * this = CURRENT_THREAD;
-    return( (this->brothers_list.next != XPTR_NULL) &&
-            (this->brothers_list.pred != XPTR_NULL) );
-}
-
-//////////////////////////////////
-inline bool_t thread_is_runnable()
-{
-    thread_t * this = CURRENT_THREAD;
-    return( this->blocked == 0 );
+    hal_fence();
 }
 
     
@@ -650,7 +635,35 @@
     {
         this->flags &= ~THREAD_FLAG_SCHED;
-        sched_yield( NULL );
-    }
-}
+        sched_yield();
+    }
+
+}  // end thread_check_sched()
+
+/////////////////////////////////////
+void thread_block( thread_t * thread,
+                   uint32_t   cause )
+{
+    // set blocking cause
+    hal_atomic_or( &thread->blocked , cause );
+    hal_fence();
+
+} // end thread_block()
+
+/////////////////////////////////////////
+uint32_t thread_unblock( xptr_t   thread,
+                         uint32_t cause )
+{
+    // get thread cluster and local pointer
+    cxy_t      cxy = GET_CXY( thread );
+    thread_t * ptr = (thread_t *)GET_PTR( thread );
+
+    // reset blocking cause
+    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
+    hal_fence();
+
+    // return a non zero value if the cause bit is modified
+    return( previous & cause );
+
+}  // end thread_unblock()
 
 /////////////////////
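thread_block() / thread_unblock() are rewritten here (and removed from their old location further down in this changeset): both now end with hal_fence(), and thread_unblock() returns a non-zero value only when it actually cleared the requested cause bit, so a caller can tell whether its wake-up changed anything. The remote-access machinery (xptr_t, GET_CXY / GET_PTR, hal_remote_atomic_and) is ALMOS-MKH specific; the stand-alone sketch below models only the bit-mask protocol with C11 atomics on a local object, and every name in it is invented.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCKED_IO 0x1     /* invented blocking-cause bit for the example */

typedef struct { _Atomic uint32_t blocked; } toy_thread_t;

/* set one blocking cause (models thread_block) */
static void toy_block( toy_thread_t * t , uint32_t cause )
{
    atomic_fetch_or( &t->blocked , cause );
}

/* clear one blocking cause and report whether the bit was actually set (models thread_unblock) */
static uint32_t toy_unblock( toy_thread_t * t , uint32_t cause )
{
    uint32_t previous = atomic_fetch_and( &t->blocked , ~cause );
    return previous & cause;    /* non-zero only if this call really unblocked the thread */
}

int main( void )
{
    toy_thread_t t = { 0 };

    toy_block( &t , BLOCKED_IO );
    printf( "first  unblock -> %u\n" , (unsigned)toy_unblock( &t , BLOCKED_IO ) );  /* 1 : state changed */
    printf( "second unblock -> %u\n" , (unsigned)toy_unblock( &t , BLOCKED_IO ) );  /* 0 : already clear */
    return 0;
}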
     
@@ -664,8 +677,6 @@
     if( !thread_can_yield() )
     {
-        printk("ERROR in %s : thread %x in process %x on core %d in cluster %x\n"
-               " did not released all locks\n",
-               __FUNCTION__ , this->trdid , this->process->pid ,
-               CURRENT_CORE->lid , local_cxy );
+        printk("ERROR in %s : locks not released for thread %x in process %x on core[%x,%d]\n",
+        __FUNCTION__, this->trdid, this->process->pid, local_cxy, this->core->lid );
         return EINVAL;
     }
     
@@ -686,27 +697,8 @@
 
     // deschedule
-    sched_yield( NULL );
+    sched_yield();
     return 0;
-}
-
-/////////////////////////////////////
-void thread_block( thread_t * thread,
-                   uint32_t   cause )
-{
-    // set blocking cause
-    hal_atomic_or( &thread->blocked , cause );
-}
-
-////////////////////////////////////
-void thread_unblock( xptr_t   thread,
-                    uint32_t cause )
-{
-    // get thread cluster and local pointer
-    cxy_t      cxy = GET_CXY( thread );
-    thread_t * ptr = (thread_t *)GET_PTR( thread );
-
-    // reset blocking cause
-    hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
-}
+
+}  // end thread_exit()
 
 /////////////////////////////////////
    712704/////////////////////////////////////
     
@@ -721,28 +713,35 @@
     // send an IPI to schedule the target thread core.
     dev_pic_send_ipi( local_cxy , target->core->lid );
-}
+
+}  // end thread_kill()
 
 ///////////////////////
 void thread_idle_func()
 {
-#if CONFIG_IDLE_DEBUG
-    lid_t  lid = CURRENT_CORE->lid;
-#endif
-
     while( 1 )
     {
-        idle_dmsg("\n[DMSG] %s : core[%x][%d] goes to sleep at cycle %d\n",
-                    __FUNCTION__ , local_cxy , lid , hal_get_cycles() );
-
-        // force core to sleeping state
-        //hal_core_sleep();
-
-        idle_dmsg("\n[DMSG] %s : core[%x][%d] wake up at cycle %d\n",
-                    __FUNCTION__ , local_cxy , lid , hal_get_cycles() );
-
-        // force scheduling
-        sched_yield( NULL );
+        if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode
+        {
+
+idle_dmsg("\n[DBG] %s : core[%x][%d] goes to sleep at cycle %d\n",
+__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
+
+            hal_core_sleep();
+
+idle_dmsg("\n[DBG] %s : core[%x][%d] wake up at cycle %d\n",
+__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
+
+        }
+        else                                // yield each ~ 100000 cycles
+
+        {
+             hal_fixed_delay( 500000 );
+        }
+
+        // force scheduling at each iteration
+        sched_yield();
     }
-}
+}  // end thread_idle()
+
 
 /////////////////////////////////////////////////
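The rewritten idle loop distinguishes two modes: when CONFIG_THREAD_IDLE_MODE_SLEEP is set the core is actually put into a low-power state with hal_core_sleep(), otherwise it burns a bounded delay, and in both cases it calls sched_yield() on every iteration. A rough user-space model of that structure is sketched below; nanosleep stands in for both hal_core_sleep and hal_fixed_delay, all names are invented, and a real core would be woken by an IPI rather than by a timer.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define IDLE_MODE_SLEEP false            /* stands in for CONFIG_THREAD_IDLE_MODE_SLEEP */

static void toy_core_sleep( void )       /* models hal_core_sleep(): wait for a wake-up event */
{
    struct timespec t = { 0 , 100000000 };
    nanosleep( &t , NULL );
}

static void toy_fixed_delay( void )      /* models hal_fixed_delay( 500000 ) */
{
    struct timespec t = { 0 , 1000000 };
    nanosleep( &t , NULL );
}

int main( void )
{
    for( int iter = 0 ; iter < 5 ; iter++ )      /* thread_idle_func() loops forever */
    {
        if( IDLE_MODE_SLEEP ) toy_core_sleep();  /* low-power wait until woken */
        else                  toy_fixed_delay(); /* bounded busy delay */

        printf( "idle iteration %d : would call sched_yield()\n" , iter );
    }
    return 0;
}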