Changeset 443 for trunk/kernel/kern


Timestamp: May 16, 2018, 4:15:22 PM (6 years ago)
Author: alain
Message: Fix a few bugs found while debugging the multi-threaded sort application.

Location: trunk/kernel/kern
Files: 13 edited

Legend: lines prefixed with '+' were added in r443, lines prefixed with '-' were removed, unprefixed lines are unchanged context, and '…' marks elided lines.
  • trunk/kernel/kern/cluster.c  (r440 → r443)

 #include <spinlock.h>
 #include <core.h>
+#include <chdev.h>
 #include <scheduler.h>
 #include <list.h>
…
 /////////////////////////////////////////////////////////////////////////////////////

-extern process_t process_zero;     // allocated in kernel_init.c file
-
-
-/////////////////////////////////////////////////
+extern process_t           process_zero;     // allocated in kernel_init.c file
+extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
+
+///////////////////////////////////////////////n
 error_t cluster_init( struct boot_info_s * info )
 {
…

 //////////////////////////////////////////////////////
+xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
+                                            pid_t pid )
+{
+    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
+    xptr_t      lock_xp;       // xptr on lock protecting this list
+    xptr_t      iter_xp;       // iterator
+    xptr_t      current_xp;    // xptr on current process descriptor
+    bool_t      found;
+
+    cluster_t * cluster = LOCAL_CLUSTER;
+
+    // get owner cluster and lpid
+    cxy_t   owner_cxy = CXY_FROM_PID( pid );
+    lpid_t  lpid      = LPID_FROM_PID( pid );
+
+    // get lock & root of list of copies from owner cluster
+    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
+    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
+
+    // take the lock protecting the list of processes
+    remote_spinlock_lock( lock_xp );
+
+    // scan list of processes
+    found = false;
+    XLIST_FOREACH( root_xp , iter_xp )
+    {
+        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
+
+        if( GET_CXY( current_xp ) == cxy )
+        {
+            found = true;
+            break;
+        }
+    }
+
+    // release the lock protecting the list of processes
+    remote_spinlock_unlock( lock_xp );
+
+    // return extended pointer on process descriptor in owner cluster
+    if( found ) return current_xp;
+    else        return XPTR_NULL;
+
+}  // end cluster_get_process_from_pid_in_cxy()
+
+
+//////////////////////////////////////////////////////
 xptr_t cluster_get_owner_process_from_pid( pid_t pid )
 {
…
 }  // end cluster_get_owner_process_from_pid()

+
 //////////////////////////////////////////////////////////
 xptr_t cluster_get_reference_process_from_pid( pid_t pid )
…
 void cluster_process_local_link( process_t * process )
 {
-    uint32_t irq_state;
+    reg_t    save_sr;
+
     pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

+    // get extended pointers on local process list root & lock
+    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
+    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
+
     // get lock protecting the process manager local list
-    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , & irq_state );
-
-    xlist_add_last( XPTR( local_cxy , &pm->local_root ),
-                    XPTR( local_cxy , &process->local_list ) );
+    remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+    // register process in local list
+    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
     pm->local_nr++;

     // release lock protecting the process manager local list
-    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
+    remote_spinlock_unlock_busy( lock_xp , save_sr );
 }

…
 void cluster_process_local_unlink( process_t * process )
 {
-    uint32_t irq_state;
+    reg_t save_sr;
+
     pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

+    // get extended pointers on local process list lock
+    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
+
     // get lock protecting the process manager local list
-    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , &irq_state );
-
+    remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+    // remove process from local list
     xlist_unlink( XPTR( local_cxy , &process->local_list ) );
     pm->local_nr--;

     // release lock protecting the process manager local list
-    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
+    remote_spinlock_unlock_busy( lock_xp , save_sr );
 }

…
 {
     xptr_t        root_xp;
+    xptr_t        lock_xp;
     xptr_t        iter_xp;
-    xptr_t        process_xp;
-
-    // get extended pointer on root of process in cluster cxy
+    xptr_t        process_xp;
+    cxy_t         txt0_cxy;
+    chdev_t     * txt0_ptr;
+    xptr_t        txt0_xp;
+    xptr_t        txt0_lock_xp;
+    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode
+
+    assert( (cluster_is_undefined( cxy ) == false),
+    __FUNCTION__, "illegal cluster index" );
+
+    // get extended pointer on root and lock for local process list in cluster
     root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
-
-    // skip one line
-    printk("\n***** processes in cluster %x / cycle %d\n", cxy , (uint32_t)hal_get_cycles() );
-
-    // loop on all reference processes in cluster cxy
+    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );
+
+    // get pointers on TXT0 chdev
+    txt0_xp  = chdev_dir.txt_tx[0];
+    txt0_cxy = GET_CXY( txt0_xp );
+    txt0_ptr = GET_PTR( txt0_xp );
+
+    // get extended pointer on TXT0 lock
+    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+    // get lock on local process list
+    remote_spinlock_lock( lock_xp );
+
+    // get TXT0 lock in busy waiting mode
+    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
+
+    // display header
+    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
+    cxy , (uint32_t)hal_get_cycles() );
+
+    // loop on all processes in cluster cxy
     XLIST_FOREACH( root_xp , iter_xp )
     {
…
         process_display( process_xp );
     }
+
+    // release TXT0 lock in busy waiting mode
+    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
+
+    // release lock on local process list
+    remote_spinlock_unlock( lock_xp );
+
 }  // end cluster_processes_display()

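The new cluster_get_process_from_pid_in_cxy() function complements the existing owner and reference lookups: it scans the owner cluster's copies list and returns the copy located in one specific cluster. A minimal usage sketch, assuming a hypothetical caller that already holds a valid cxy and pid (only the lookup function and the remote-access macros come from this changeset):

    // illustrative only: find the copy of process <pid> located in cluster <cxy>
    xptr_t      process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );

    if( process_xp == XPTR_NULL )
    {
        // no copy of this process exists in cluster <cxy>
        return;
    }

    // access a field of the remote copy through the extended pointer
    cxy_t       process_cxy = GET_CXY( process_xp );
    process_t * process_ptr = GET_PTR( process_xp );
    uint32_t    th_nr       = hal_remote_lw( XPTR( process_cxy , &process_ptr->th_nr ) );
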
  • trunk/kernel/kern/cluster.h  (r440 → r443)

  * Warning : the "owner" cluster, and the "reference" cluster can be different clusters.
  *
- * The process manager of a cluster K maintains three structures:
- * 1) The pref_tbl[] is an array indexed by lpid. There is one entry per owned process.
+ * The process manager of a cluster K maintains three sets of process descriptors:
+ *
+ * 1) pref_tbl[] is an array indexed by lpid. There is one entry per owned process.
  *    Each entry contains an extended pointer on the reference process descriptor.
  *
- * 2) The local_root is the root of the local list of process descriptors in cluster K.
+ * 2) The local_root is the root of the local list of all process descriptors in cluster K.
  *    A process descriptor P is present in K, as soon as P has a thread in cluster K.
  *
…
 /******************************************************************************************
  * This function returns an extended pointer on the process descriptor in owner cluster
- * from the process PID. This PID can be be different from the calling process PID.
+ * from the process <pid>. This PID can be be different from the calling process PID.
  * It can be called by any thread running in any cluster,
  ******************************************************************************************
…
 /******************************************************************************************
  * This function returns an extended pointer on the reference process descriptor
- * from the process PID. This PID can be be different from the calling process PID.
+ * from the process <pid>. This PID can be be different from the calling process PID.
  * It can be called by any thread running in any cluster,
  ******************************************************************************************
…
  *****************************************************************************************/
 xptr_t cluster_get_reference_process_from_pid( pid_t pid );
+
+/******************************************************************************************
+ * This function returns an extended pointer on the process descriptor copy for the
+ * process identified by <pid> in cluster defined by <cxy> argument.
+ * This PID can be be different from the calling process PID.
+ * It can be called by any thread running in any cluster,
+ ******************************************************************************************
+ * @ cxy  : target cluster identifier.
+ * @ pid  : process identifier.
+ * @ return extended pointer on reference process if found / XPTR_NULL if not found.
+ *****************************************************************************************/
+xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
+                                            pid_t pid );

 /******************************************************************************************
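With this prototype added, cluster.h now exposes three lookup flavours for the same PID; the one-line summaries below paraphrase the header comments (the snippet itself is illustrative, not code from the changeset):

    xptr_t owner_xp = cluster_get_owner_process_from_pid( pid );         // copy in the owner cluster
    xptr_t ref_xp   = cluster_get_reference_process_from_pid( pid );     // reference copy
    xptr_t local_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );  // copy in cluster <cxy>, or XPTR_NULL
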
  • trunk/kernel/kern/core.c  (r438 → r443)

         core->usage             = 0;
         core->spurious_irqs     = 0;
-        core->thread_idle       = NULL;
         core->fpu_owner         = NULL;
         core->rand_last         = hal_time_stamp() & 0xFFF;
…
 void core_compute_stats( core_t * core )
 {
-        thread_t * idle  = core->thread_idle;
+        thread_t * idle  = core->scheduler.idle;
         uint32_t   ticks = core->ticks_nr;

…
 void core_reset_stats( core_t * core )
 {
-        core->ticks_nr              = 0;
-        core->usage                 = 0;
-        core->thread_idle->ticks_nr = 0;
+        thread_t * idle  = core->scheduler.idle;
+
+        core->ticks_nr = 0;
+        core->usage    = 0;
+        idle->ticks_nr = 0;
+
         hal_fence();
 }
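Since the idle thread pointer no longer lives in core_t, core_compute_stats() now reaches it through core->scheduler.idle. The usage field is documented in core.h as a cumulated busy percentage; a hedged sketch of how such a percentage can be derived from the two tick counters (the arithmetic below is an assumption, only the idle and ticks variables are taken from the changeset):

    // sketch only: busy percentage over the observation window (assumed formula)
    thread_t * idle  = core->scheduler.idle;     // idle thread, now reached via the scheduler
    uint32_t   ticks = core->ticks_nr;           // total ticks seen by this core

    if( ticks != 0 )
    {
        uint32_t busy_percent = ((ticks - idle->ticks_nr) * 100) / ticks;
        core->usage = busy_percent;              // assumption: how the cumulated value is fed
    }
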
  • trunk/kernel/kern/core.h  (r409 → r443)

         uint32_t            usage;          /*! cumulated busy_percent (idle / total)      */
         uint32_t            spurious_irqs;  /*! for instrumentation...                     */
-        struct thread_s   * thread_idle;    /*! pointer on idle thread descriptor          */
         struct thread_s   * fpu_owner;      /*! pointer on current FPU owner thread        */
     uint32_t            rand_last;      /*! last computed random value                 */
  • trunk/kernel/kern/do_syscall.c  (r438 → r443)

     sys_trace,              // 47
     sys_fg,                 // 48
+    sys_undefined,          // 49
 };

…
         else if( index == SYS_CLOSEDIR       ) return "CLOSEDIR";         // 25
         else if( index == SYS_GETCWD         ) return "GETCWD";           // 26
+        else if( index == SYS_ISATTY         ) return "ISATTY";           // 27
         else if( index == SYS_ALARM          ) return "ALARM";            // 28
         else if( index == SYS_RMDIR          ) return "RMDIR";            // 29
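The sys_undefined entry pads the dispatch vector so that every index up to the last declared syscall maps to a callable handler, and the ISATTY case keeps the name table in step with it. A hedged sketch of the dispatch pattern such a vector supports (the syscall_tbl name, the SYSCALLS_NR bound and the handler signature are assumptions, not taken from this file):

    // sketch only: table-driven syscall dispatch (all identifiers below are hypothetical)
    typedef int (*sys_handler_t)( reg_t a0, reg_t a1, reg_t a2, reg_t a3 );

    int dispatch( uint32_t index, reg_t a0, reg_t a1, reg_t a2, reg_t a3 )
    {
        // out-of-range indices are clamped to the padding entry, so the call is always safe
        if( index >= SYSCALLS_NR ) index = SYSCALLS_NR - 1;   // slot holding sys_undefined
        return syscall_tbl[index]( a0, a1, a2, a3 );
    }
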
  • trunk/kernel/kern/kernel_init.c  (r440 → r443)

 #include <mapper.h>

-#define KERNEL_INIT_SYNCHRO  0xA5A5B5B5
-
 ///////////////////////////////////////////////////////////////////////////////////////////
 // All the following global variables are replicated in all clusters.
…

 #if( DEBUG_KERNEL_INIT & 1 )
+if( (core_lid ==  0) & (local_cxy == 0) )
 chdev_dir_display();
 #endif
…
 printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
 __FUNCTION__ , (uint32_t)hal_get_cycles() );
+#endif
+
+#if (DEBUG_KERNEL_INIT & 1)
+if( (core_lid ==  0) & (local_cxy == 0) )
+sched_display( 0 );
 #endif

  • trunk/kernel/kern/printk.c  (r437 → r443)

 {
     va_list       args;
-    uint32_t      save_sr;
+    reg_t         save_sr;

     // get pointers on TXT0 chdev
  • trunk/kernel/kern/process.c  (r441 → r443)

         process->pid        = pid;
     process->ref_xp     = XPTR( local_cxy , process );
+    process->owner_xp   = XPTR( local_cxy , process );
     process->parent_xp  = parent_xp;
     process->term_state = 0;
…
     local_process->parent_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
     local_process->ref_xp     = reference_process_xp;
+    local_process->owner_xp   = reference_process_xp;
     local_process->term_state = 0;

…
     cluster_process_copies_unlink( process );

-    // remove process from children_list if process is in owner cluster
+    // remove process from children_list if process owner cluster
     if( CXY_FROM_PID( pid ) == local_cxy )
     {
…
     }

-    // release the process PID to cluster manager if owner cluster
+    // release the process PID to cluster manager if process owner cluster
     if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );

…
         // allocate memory for local process descriptor
         process_ptr = process_alloc();
+
         if( process_ptr == NULL )  return NULL;

         // initialize local process descriptor copy
         error = process_copy_init( process_ptr , ref_xp );
+
         if( error ) return NULL;
     }
…
     }

-
     // release lock protecting th_tbl
     hal_fence();
…
 }  // end process_register_thread()

-///////////////////////////////////////////////
-void process_remove_thread( thread_t * thread )
-{
+/////////////////////////////////////////////////
+bool_t process_remove_thread( thread_t * thread )
+{
+    uint32_t count;  // number of threads in local process descriptor
+
     assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );

…
     spinlock_lock( &process->th_lock );

-    assert( (process->th_nr) , __FUNCTION__ , "process th_nr cannot be 0\n" );
+    count = process->th_nr;
+
+    assert( (count > 0) , __FUNCTION__ , "process th_nr cannot be 0\n" );

     // remove thread from th_tbl[]
…
     process->th_nr--;

+    // release lock protecting th_tbl
     hal_fence();
-
-    // release lock protecting th_tbl
     spinlock_unlock( &process->th_lock );
+
+    return (count == 1);

 }  // process_remove_thread()
…
     process->pid        = 0;
     process->ref_xp     = XPTR( local_cxy , process );
+    process->owner_xp   = XPTR( local_cxy , process );
     process->parent_xp  = XPTR_NULL;
     process->term_state = 0;
…
     process_t   * process_ptr;
     cxy_t         process_cxy;
+
     xptr_t        parent_xp;       // extended pointer on parent process
     process_t   * parent_ptr;
     cxy_t         parent_cxy;

+    xptr_t        owner_xp;        // extended pointer on owner process
+    process_t   * owner_ptr;
+    cxy_t         owner_cxy;
+
     pid_t         pid;
     pid_t         ppid;
     uint32_t      state;
-    xptr_t        ref_xp;
     uint32_t      th_nr;

-    xptr_t        txt_file_xp;     // extended pointer on TXT_RX pseudo file
-    xptr_t        chdev_xp;        // extended pointer on TXT_RX chdev
-    chdev_t     * chdev_ptr;
-    cxy_t         chdev_cxy;
-    xptr_t        owner_xp;        // extended pointer on TXT owner process
+    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
+    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
+    chdev_t     * txt_chdev_ptr;
+    cxy_t         txt_chdev_cxy;
+    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process

     xptr_t        elf_file_xp;     // extended pointer on .elf file
…
     process_ptr = GET_PTR( process_xp );
     process_cxy = GET_CXY( process_xp );
-
-    // check reference process
-    ref_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->ref_xp ) );
-    assert( (process_xp == ref_xp) , __FUNCTION__ , "process is not the reference\n");

     // get PID and state
…
     th_nr      = hal_remote_lw( XPTR( process_cxy , &process_ptr->th_nr ) );

-    // get TXT name and process owner
-    txt_file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
+    // get pointers on owner process descriptor
+    owner_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->owner_xp ) );
+    owner_cxy = GET_CXY( owner_xp );
+    owner_ptr = GET_PTR( owner_xp );
+
+    // get extended pointer on TXT_RX file descriptor attached to process
+    txt_file_xp = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );

     assert( (txt_file_xp != XPTR_NULL) , __FUNCTION__ ,
     "process must be attached to one TXT terminal\n" );

-    chdev_xp  = chdev_from_file( txt_file_xp );
-    chdev_cxy = GET_CXY( chdev_xp );
-    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
+    // get TXT_RX chdev pointers
+    txt_chdev_xp  = chdev_from_file( txt_file_xp );
+    txt_chdev_cxy = GET_CXY( txt_chdev_xp );
+    txt_chdev_ptr = GET_PTR( txt_chdev_xp );
+
+    // get TXT_RX name and ownership
     hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
-                       XPTR( chdev_cxy , chdev_ptr->name ) );
-    owner_xp = (xptr_t)hal_remote_lwd( XPTR( chdev_cxy , &chdev_ptr->ext.txt.owner_xp ) );
+                       XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
+
+    txt_owner_xp = (xptr_t)hal_remote_lwd( XPTR( txt_chdev_cxy,
+                                                 &txt_chdev_ptr->ext.txt.owner_xp ) );

     // get process .elf name
     elf_file_xp   = hal_remote_lwd( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
-
     elf_file_cxy  = GET_CXY( elf_file_xp );
     elf_file_ptr  = (vfs_file_t *)GET_PTR( elf_file_xp );
…

     // display process info
-    if( owner_xp == process_xp )
-    {
-        printk("PID %X | PPID %X | STS %X | %s (FG) | %X | %d | %s\n",
+    if( txt_owner_xp == process_xp )
+    {
+        nolock_printk("PID %X | PPID %X | STS %X | %s (FG) | %X | %d | %s\n",
         pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
     }
     else
     {
-        printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n",
+        nolock_printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n",
         pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
     }
…
     xptr_t      current_xp;
     xptr_t      iter_xp;
-
-    // check terminal index
+    cxy_t       txt0_cxy;
+    chdev_t   * txt0_ptr;
+    xptr_t      txt0_xp;
+    xptr_t      txt0_lock_xp;
+    reg_t       txt0_save_sr;    // save SR to take TXT0 lock in busy mode
+
     assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
     __FUNCTION__ , "illegal TXT terminal index" );
+
+    // get pointers on TXT0 chdev
+    txt0_xp  = chdev_dir.txt_tx[0];
+    txt0_cxy = GET_CXY( txt0_xp );
+    txt0_ptr = GET_PTR( txt0_xp );
+
+    // get extended pointer on TXT0 lock
+    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

     // get pointers on TXT_RX[txt_id] chdev
…
     lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

+    // get lock on attached process list
+    remote_spinlock_lock( lock_xp );
+
+    // get TXT0 lock in busy waiting mode
+    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
+
     // display header
-    printk("\n***** processes attached to TXT_%d\n", txt_id );
-
-    // get lock
-    remote_spinlock_lock( lock_xp );
+    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
+    txt_id , (uint32_t)hal_get_cycles() );

     // scan attached process list
…
     }

-    // release lock
+    // release TXT0 lock in busy waiting mode
+    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
+
+    // release lock on attached process list
     remote_spinlock_unlock( lock_xp );

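All three display functions touched here (cluster_processes_display(), process_display(), process_txt_display()) now follow the same convention: take the lock protecting the scanned data structure, then take the TXT0 lock in busy-waiting mode, print every line with nolock_printk(), and release both locks in reverse order. A condensed sketch of that pattern (the display_with_txt0() wrapper is hypothetical; the primitives are the ones used by the changeset):

    // sketch only: generic display pattern used by the reworked display functions
    void display_with_txt0( xptr_t data_lock_xp )               // hypothetical wrapper
    {
        reg_t     save_sr;
        xptr_t    txt0_xp      = chdev_dir.txt_tx[0];
        cxy_t     txt0_cxy     = GET_CXY( txt0_xp );
        chdev_t * txt0_ptr     = GET_PTR( txt0_xp );
        xptr_t    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

        remote_spinlock_lock( data_lock_xp );                   // 1. protect the scanned list
        remote_spinlock_lock_busy( txt0_lock_xp , &save_sr );   // 2. own TXT0 with IRQs masked

        nolock_printk("header line\n");                         // 3. print without re-taking TXT0
        // ... one nolock_printk() per displayed item ...

        remote_spinlock_unlock_busy( txt0_lock_xp , save_sr );  // 4. release in reverse order
        remote_spinlock_unlock( data_lock_xp );
    }
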
  • trunk/kernel/kern/process.h  (r441 → r443)

  * The process descriptor is replicated in all clusters containing at least one thread
  * of the PID process, with the following rules :
- * 1) The <pid>, <ppid>, <ref_xp>, <vfs_root_xp>, <vfs_bin_xp>  fields are defined
- *    in all process descriptor copies.
+ * 1) The <pid>, <ppid>, <ref_xp>, <owner_xp>, <vfs_root_xp>, <vfs_bin_xp>  fields are
+ *    defined in all process descriptor copies.
  * 2) The <vfs_cwd_xp> and associated <cwd_lock>, that can be dynamically modified,
  *    are only defined in the reference process descriptor.
…
  *    are defined in all process descriptors copies.
  * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.
- *    The term state format is defined in the shared_syscalls.h file.
+ *    The term_state format is defined in the shared_syscalls.h file.
  ********************************************************************************************/

…
         pid_t             pid;              /*! process identifier                              */
     xptr_t            ref_xp;           /*! extended pointer on reference process           */
+    xptr_t            owner_xp;         /*! extended pointer on owner process               */
     xptr_t            parent_xp;        /*! extended pointer on parent process              */

…
 /*********************************************************************************************
  * This debug function diplays on the kernel terminal TXT0 detailed informations on a
- * reference process identified by the <process_xp> argument.
+ * process descriptor identified by the <process_xp> argument.
  * It can be called by a thread running in any cluster.
- *********************************************************************************************
- * @ process_xp : extended pointer on reference process.
+ * WARNING: this function uses the nolock_printk() function, and the  TXT0 lock MUST be
+ * taken by the caller function.
+ *********************************************************************************************
+ * @ process_xp : extended pointer on process descriptor.
  ********************************************************************************************/
 void process_display( xptr_t process_xp );
…

 /*********************************************************************************************
- * This function implements the "fork" system call, and is called by the sys_fork() function.
- * It allocates memory and initializes a new "child" process descriptor, and the
- * associated "child" thread descriptor in the local cluster. This function can involve
- * up to three different clusters :
- * - the child (local) cluster can be any cluster defined by the sys_fork function.
+ * This function implements the "fork" system call, and is called by the sys_fork() function,
+ * likely throuch the RPC_PROCESS_MAKE_FORK.
+ * It allocates memory and initializes a new "child" process descriptor, and the associated
+ * "child" thread descriptor in local cluster. It involves up to three different clusters :
+ * - the child (local) cluster can be any cluster selected by the sys_fork function.
  * - the parent cluster must be the reference cluster for the parent process.
  * - the client cluster containing the thread requesting the fork can be any cluster.
…
  * It checks that there is an available slot in the local th_tbl[] array,
  * allocates a new LTID, and registers the new thread in the th_tbl[].
- * WARNING : the lock protecting the th_tbl[] must be taken by the caller.
+ * It takes the lock protecting exclusive access to the th_tbl[].
  *********************************************************************************************
  * @ process  : pointer on the local process descriptor.
…
 /*********************************************************************************************
  * This function removes a thread registration from the local process descriptor.
- * WARNING : the lock protecting the th_tbl[] must be taken by the caller.
+ * It takes the lock protecting exclusive access to the th_tbl[].
  *********************************************************************************************
  * @ thread   : local pointer on thread to be removed.
- ********************************************************************************************/
-void process_remove_thread( struct thread_s * thread );
+ * @ return true if the removed thread was the last registered thread.
+ ********************************************************************************************/
+bool_t process_remove_thread( struct thread_s * thread );


…
  * When the process dentified by the <owner_xp> argument has the exclusive ownership of
  * the TXT_RX terminal, this function transfer this ownership to another attached process.
- * The process descriptor must be in the process owner cluster.
- * This function does nothing if the <pid> process is not the owner.
+ * The process descriptor must be the process owner.
+ * This function does nothing if the process identified by the <process_xp> is not
+ * the TXT owner.
  * - If the current owner is not the KSH process, the new owner is the KSH process.
- * - If the <pid> process is the the KSH process, the new owner is another attached process.
+ * - If the current owner is the KSH process, the new owner is another attached process.
  * - If there is no other attached process, the TXT has no more defined owner.
  *********************************************************************************************
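process_remove_thread() now reports whether the removed thread was the last one registered in the local th_tbl[], so the caller can decide whether the local process descriptor itself can be released. A hedged usage sketch mirroring the call sequence in sched_handle_signals() (the cleanup_last_thread() wrapper is hypothetical):

    // sketch only: using the new return value of process_remove_thread()
    void cleanup_last_thread( thread_t * thread )     // hypothetical wrapper
    {
        process_t * process = thread->process;

        // true when this was the last thread registered in the local th_tbl[]
        bool_t last_thread = process_remove_thread( thread );

        if( last_thread )
        {
            // no thread left in this cluster: the local process copy can be destroyed,
            // as sched_handle_signals() does after thread_destroy() returns true
        }
    }
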
  • trunk/kernel/kern/scheduler.c  (r440 → r443)

 #include <scheduler.h>

+
 ///////////////////////////////////////////////////////////////////////////////////////////
 // Extern global variables
 ///////////////////////////////////////////////////////////////////////////////////////////

-extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
-extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file
+uint32_t   idle_thread_count;
+uint32_t   idle_thread_count_active;
+
+extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
+extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file

 ////////////////////////////////
…

     sched->current        = CURRENT_THREAD;
-    sched->idle           = NULL;             // initialized in kernel_init()
-    sched->u_last         = NULL;             // initialized in sched_register_thread()
-    sched->k_last         = NULL;             // initialized in sched_register_thread()
+    sched->idle           = NULL;               // initialized in kernel_init()
+    sched->u_last         = NULL;               // initialized in sched_register_thread()
+    sched->k_last         = NULL;               // initialized in sched_register_thread()

     // initialise threads lists
…
     list_root_init( &sched->k_root );

-    sched->req_ack_pending = false;           // no pending request
+    sched->req_ack_pending = false;             // no pending request
+    sched->trace           = false;             // context switches trace desactivated

 }  // end sched_init()
…
     thread_t     * thread;
     process_t    * process;
+    bool_t         last_thread;

     // get pointer on scheduler
…

             // delete thread
-            thread_destroy( thread );
+            last_thread = thread_destroy( thread );

 #if DEBUG_SCHED_HANDLE_SIGNALS
 uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
-printk("\n[DBG] %s : thread %x in proces %x (%x) deleted / cycle %d\n",
-__FUNCTION__ , thread , process->pid , process , cycle );
+printk("\n[DBG] %s : thread %x in proces %x on core[%x,%d] deleted / cycle %d\n",
+__FUNCTION__ , thread->trdid , process->pid , local_cxy , thread->core->lid , cycle );
 #endif
             // destroy process descriptor if no more threads
-            if( process->th_nr == 0 )
+            if( last_thread )
             {
                 // delete process
…
 cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
-printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
-__FUNCTION__ , process->pid , cycle );
+printk("\n[DBG] %s : process %x in cluster %x deleted / cycle %d\n",
+__FUNCTION__ , process->pid , local_cxy , cycle );
 #endif

…

 #if (DEBUG_SCHED_YIELD & 0x1)
-if( DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
+if( sched->trace )
 sched_display( core->lid );
 #endif
…

     // check next thread kernel_stack overflow
-    assert( (next->signature == THREAD_SIGNATURE),
-    __FUNCTION__ , "kernel stack overflow for thread %x\n", next );
+    assert( (next->signature == THREAD_SIGNATURE), __FUNCTION__ ,
+    "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, core->lid );

     // check next thread attached to same core as the calling thread
-    assert( (next->core == current->core),
-    __FUNCTION__ , "next core %x != current core %x\n", next->core, current->core );
+    assert( (next->core == current->core), __FUNCTION__ ,
+    "next core %x != current core %x\n", next->core, current->core );

     // check next thread not blocked when type != IDLE
…
     {

-if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)current == 0xcc000) )
-printk("\n@@@@@ cc000 exit at cycle %d\n", (uint32_t)hal_get_cycles() );
-
-if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)next == 0xcc000) )
-printk("\n@@@@@ cc000 enter at cycle %d\n", (uint32_t)hal_get_cycles() );
-
 #if DEBUG_SCHED_YIELD
-uint32_t cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_SCHED_YIELD < cycle )
+if( sched->trace )
 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
 "      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
 __FUNCTION__, local_cxy, core->lid, cause,
-current, thread_type_str(current->type), current->process->pid, current->trdid,
-next , thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
+current, thread_type_str(current->type), current->process->pid, current->trdid,next ,
+thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
 #endif

…
     {

-#if (DEBUG_SCHED_YIELD & 1)
-uint32_t cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_SCHED_YIELD < cycle )
+#if DEBUG_SCHED_YIELD
+if( sched->trace )
 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
 "      thread %x (%s) (%x,%x) continue / cycle %d\n",
-__FUNCTION__, local_cxy, core->lid, cause,
-current, thread_type_str(current->type), current->process->pid, current->trdid, cycle );
+__FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),
+current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
 #endif

…

     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
-            local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );
+    local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );

     // display kernel threads
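The new per-scheduler trace flag replaces the DEBUG_SCHED_YIELD cycle-threshold test, so context-switch tracing can be switched on and off per core at run time; the sys_trace slot in the syscall table is presumably the user-level entry point. A hedged sketch of enabling the trace for one local core (the sched_set_trace() helper and the core_tbl[] field name are assumptions):

    // sketch only: turn context-switch tracing on or off for core <lid> of the local cluster
    void sched_set_trace( lid_t lid , bool_t value )            // hypothetical helper
    {
        core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];    // assumed name of the core array
        scheduler_t * sched = &core->scheduler;

        sched->trace = value;   // checked by sched_yield() before each debug printk
    }
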
  • trunk/kernel/kern/scheduler.h  (r437 → r443)

     struct thread_s * current;         /*! pointer on current running thread                */
     volatile bool_t   req_ack_pending; /*! sequencialize ack requests when true             */
+    bool_t            trace;           /*! context switches trace activated if true         */
 }
 scheduler_t;
  • trunk/kernel/kern/thread.c  (r440 → r443)

         cluster_t    * local_cluster = LOCAL_CLUSTER;

+#if DEBUG_THREAD_USER_INIT
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_THREAD_USER_INIT < cycle )
+printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
+#endif
+
     // register new thread in process descriptor, and get a TRDID
     error = process_register_thread( process, thread , &trdid );
…
         thread->signature = THREAD_SIGNATURE;

-    // FIXME call hal_thread_init() function to initialise the save_sr field
+    // FIXME define and call an architecture specific hal_thread_init()
+    // function to initialise the save_sr field
     thread->save_sr = 0xFF13;

…
         // update DQDT
     dqdt_update_threads( 1 );
+
+#if DEBUG_THREAD_USER_INIT
+cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_THREAD_USER_INIT < cycle )
+printk("\n[DBG] %s : thread %x exit  after init of thread %x in process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
+#endif

         return 0;
…
 uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_THREAD_USER_CREATE < cycle )
-printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, pid , cycle );
+printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, pid , local_cxy , cycle );
 #endif

…
         return ENOMEM;
     }
+
+#if( DEBUG_THREAD_USER_CREATE & 1)
+if( DEBUG_THREAD_USER_CREATE < cycle )
+printk("\n[DBG] %s : process descriptor = %x for process %x in cluster %x\n",
+__FUNCTION__, process , pid , local_cxy );
+#endif

     // select a target core in local cluster
…
         core_lid = cluster_select_local_core();
     }
+
+#if( DEBUG_THREAD_USER_CREATE & 1)
+if( DEBUG_THREAD_USER_CREATE < cycle )
+printk("\n[DBG] %s : core[%x,%d] selected\n",
+__FUNCTION__, local_cxy , core_lid );
+#endif

     // allocate a stack from local VMM
…
         return ENOMEM;
     }
+
+#if( DEBUG_THREAD_USER_CREATE & 1)
+if( DEBUG_THREAD_USER_CREATE < cycle )
+printk("\n[DBG] %s : thread descriptor %x allocated\n",
+__FUNCTION__, thread );
+#endif

     // initialize thread descriptor
…
     }

+#if( DEBUG_THREAD_USER_CREATE & 1)
+if( DEBUG_THREAD_USER_CREATE < cycle )
+printk("\n[DBG] %s : thread descriptor %x initialised / trdid = %x\n",
+__FUNCTION__, thread , thread->trdid );
+#endif
+
     // set DETACHED flag if required
     if( attr->attributes & PT_ATTR_DETACH )
…
 cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_THREAD_USER_CREATE < cycle )
-printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
+printk("\n[DBG] %s : thread %x exit / new_thread %x in process %x / core %d / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, thread->trdid , pid , core_lid, cycle );
 #endif

…
 // has been released, using a cache of mmap requests. [AG]
 ///////////////////////////////////////////////////////////////////////////////////////
-void thread_destroy( thread_t * thread )
+bool_t thread_destroy( thread_t * thread )
 {
     reg_t        save_sr;
+    bool_t       last_thread;

     process_t  * process    = thread->process;
…
 #endif

-    assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" );
-
-    assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" );
+    assert( (thread->local_locks == 0) , __FUNCTION__ ,
+    "local lock not released for thread %x in process %x", thread->trdid, process->pid );
+
+    assert( (thread->remote_locks == 0) , __FUNCTION__ ,
+    "remote lock not released for thread %x in process %x", thread->trdid, process->pid );

     // update intrumentation values
…

     // remove thread from process th_tbl[]
-    process_remove_thread( thread );
+    last_thread = process_remove_thread( thread );

     // update DQDT
…
 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
 #endif
+
+    return last_thread;

 }   // end thread_destroy()
…
         hal_enable_irq( NULL );

-        if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode
+        // force core to low-power mode (optional)
+        if( CONFIG_THREAD_IDLE_MODE_SLEEP )
         {

…

         }
-        else                                // search a runable thread
-        {
-            sched_yield( "IDLE" );
-        }
+
+        // search a runable thread
+        sched_yield( "IDLE" );
     }
 }  // end thread_idle()
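After this change the idle thread always falls through to sched_yield(), whether or not it used the optional low-power mode, instead of yielding only in the removed else branch. The resulting loop has roughly the following shape (condensed sketch; the low-power wait elided by the diff is not reproduced):

    // sketch only: overall structure of the idle thread loop after the change
    while( 1 )
    {
        // unmask IRQs so interrupts can be delivered to this core
        hal_enable_irq( NULL );

        // optionally put the core in low-power mode until an IRQ arrives
        if( CONFIG_THREAD_IDLE_MODE_SLEEP )
        {
            // ... low-power wait (elided in the diff) ...
        }

        // in all cases, look for another runnable thread
        sched_yield( "IDLE" );
    }
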
  • trunk/kernel/kern/thread.h  (r440 → r443)


 /***************************************************************************************
- * This defines the masks associated to the blocking causes.
+ * This defines the thread blocking causes bit-vector.
  **************************************************************************************/

…

 /***************************************************************************************
- * This function is called by the kernel_init() function to initialize the IDLE thread.
- * It initializes an existing thread descriptor from arguments values.
+ * This function is called by the kernel_init() function to initialize the IDLE thread
+ * descriptor from arguments values.
  * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start.
  ***************************************************************************************
…

 /***************************************************************************************
- * This function releases the physical memory allocated for a thread in a given cluster.
- * This include the thread descriptor itself, the associated CPU and FPU context, and
- * the physical memory allocated for an user thread local stack.
+ * This function is called by the sched_handle_signals() function to releases
+ * the physical memory allocated for a thread in a given cluster, when this thread
+ * is marked for delete. This include the thread descriptor itself, the associated
+ * CPU and FPU context, and the physical memory allocated for an user thread local stack.
+ * The destroyed thread is removed from the local process th_tbl[] array, and returns
+ * true when the destroyed thread was the last thread registered in process.
  ***************************************************************************************
  * @ thread  : pointer on the thread descriptor to release.
- **************************************************************************************/
-void thread_destroy( thread_t * thread );
+ * @ return true, if the thread was the last registerd thread in local process.
+ **************************************************************************************/
+bool_t thread_destroy( thread_t * thread );

 /***************************************************************************************
  * This function defines the code of the thread executed by all cores after kernel_init,
  * or when no other thread is runnable for a given core.
- *
- * TODO: In the TSAR architecture, it enters an infinite loop, in wich it forces
- * the core in sleep (low-power) mode. Any IRQ will force the core to exit this sleep
- * mode, but no ISR is executed.
- * TODO: We must analyse if we have the same behaviour for I86 architectures...
+ * It enter and infinite loop in wich:
+ * - it unmask the IRQs
+ * - it optionally calls the hal_core_sleep() function to reduce the power consumption
+ *   (this behavior is controlled by the CONFIG_THREAD_IDLE_MODE_SLEEP flag).
+ * - it call the sched_yield() function to find another runnable thread.
+ *
+ * TODO: In the TSAR architecture the hal_core_sleep() function forces the core to
+ * low-power mode. Any IRQ will force the core to exit this low-power mode, but no ISR
+ * is executed. We must analyse if we have the same behaviour for I86 architectures...
  **************************************************************************************/
 void thread_idle_func();
…
 /***************************************************************************************
  * This function is used by the four sys_thread_cancel(), sys_thread_exit(),
- * sys_kill() and sys_exit() system calls to delete a given thread.
+ * sys_kill() and sys_exit() system calls to mark for delete a given thread.
  * It set the THREAD_BLOCKED_GLOBAL bit and set the the THREAD_FLAG_REQ_DELETE bit
  * in the thread descriptor identified by the <thread_xp> argument, to ask the scheduler