Changeset 279 for trunk/kernel/kern


Timestamp:
Jul 27, 2017, 12:23:29 AM (7 years ago)
Author:
alain
Message:

1) Introduce independent command fields for the various devices in the thread descriptor.
2) Introduce a new dev_pic_enable_ipi() function in the generic PIC device (see the sketch below).
3) Fix two bugs identified by Maxime in the scheduler initialisation and in sched_select().
4) Fix several bugs in the TSAR hal_kentry.S.
5) Introduce a third kgiet segment (besides kdata and kcode) in the TSAR bootloader.
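
A plausible sketch of the new dev_pic_enable_ipi() accessor, assuming it follows the same dispatch pattern as the other generic dev_pic_*() accessors visible in this changeset (the pic_enable_ipi_t type and the ext.pic.enable_ipi field name are assumptions, not confirmed by the diff):

    // generic PIC device: enable the IPI input of the calling core,
    // by dispatching to the implementation-specific driver function
    void dev_pic_enable_ipi( void )
    {
        // get cluster and local pointer on the PIC chdev descriptor
        cxy_t     pic_cxy = GET_CXY( chdev_dir.pic );
        chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );

        // get the driver-specific function registered in the chdev extension
        pic_enable_ipi_t * f = hal_remote_lpt( XPTR( pic_cxy , &pic_ptr->ext.pic.enable_ipi ) );

        // call the relevant driver function
        f();
    }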

Location:
trunk/kernel/kern
Files:
14 edited

  • trunk/kernel/kern/chdev.h

    r249 r279  
    169169    xptr_t   nic_tx[CONFIG_MAX_NIC_CHANNELS];    // external / multi-channels / shared
    170170
    171     xptr_t   icu[CONFIG_MAX_CLUSTERS];           // internal / single channel / shared
    172171    xptr_t   mmc[CONFIG_MAX_CLUSTERS];           // internal / single channel / shared
    173172
  • trunk/kernel/kern/cluster.c

    r124 r279  
    7777    // initialize cluster local parameters
    7878        cluster->cores_nr        = info->cores_nr;
    79     cluster->cores_in_kernel = info->cores_nr; // all cpus start in kernel mode
     79    cluster->cores_in_kernel = 0;
    8080
    8181    // initialize the lock protecting the embedded kcm allocator
     
    130130    // initialises RPC fifo
    131131        rpc_fifo_init( &cluster->rpc_fifo );
     132    cluster->rpc_threads = 0;
    132133
    133134    cluster_dmsg("\n[INFO] %s : RPC fifo initialized in cluster %x at cycle %d\n",
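
The cores_in_kernel counter now starts at 0 instead of cores_nr, so it presumably has to be updated by each core when it crosses the user/kernel boundary. A minimal sketch of that accounting (the hook names below are assumptions for illustration only):

    // assumed per-core hooks updating the new counter atomically
    void cluster_core_kernel_enter( void )
    {
        hal_atomic_add( &LOCAL_CLUSTER->cores_in_kernel , 1 );
    }

    void cluster_core_kernel_exit( void )
    {
        hal_atomic_add( &LOCAL_CLUSTER->cores_in_kernel , -1 );
    }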
  • trunk/kernel/kern/cluster.h

    r188 r279  
    9191 * This structure defines a cluster manager.
    9292 * It contains both global platform information, and cluster specific resources
    93  * managed by the local kernel instance.
     93 * controlled by the local kernel instance.
    9494 ******************************************************************************************/
    9595
     
    9999
    100100    // global parameters
    101 
    102101        uint32_t          paddr_width;     /*! number of bits in physical address             */
    103102    uint32_t          x_width;         /*! number of bits to code x_size  (can be 0)      */
     
    109108
    110109    // local parameters
    111 
    112110        uint32_t          cores_nr;        /*! number of cores in cluster                     */
    113111    uint32_t          cores_in_kernel; /*! number of cores currently in kernel mode       */
    114112
     113    uint32_t          ram_size;        /*! physical memory size                           */
     114    uint32_t          ram_base;        /*! physical memory base (local address)           */
     115
    115116        core_t            core_tbl[CONFIG_MAX_LOCAL_CORES];         /*! embedded cores        */
    116117
     118        list_entry_t      dev_root;        /*! root of list of devices in cluster             */
     119
     120    // memory allocators
    117121        ppm_t             ppm;             /*! embedded kernel page manager                   */
    118122        khm_t             khm;             /*! embedded kernel heap manager                   */
    119123        kcm_t             kcm;             /*! embedded kernel cache manager (for KCMs)       */
    120 
    121124    kcm_t           * kcm_tbl[KMEM_TYPES_NR];         /*! pointers on allocated KCMs      */
    122125
    123     uint32_t          ram_size;        /*! physical memory size                           */
    124     uint32_t          ram_base;        /*! physical memory base (local address)           */
    125 
    126         rpc_fifo_t        rpc_fifo;        /*! cluster RPC fifo (shared)                      */
    127         list_entry_t      devlist;         /*! root of list of devices in cluster             */
    128 
     126    // RPC
     127        rpc_fifo_t        rpc_fifo;        /*! RPC fifo                                       */
     128    uint32_t          rpc_threads;     /*! current number of RPC threads                  */
     129
     130    // DQDT
    129131    int32_t           pages_var;       /*! pages number increment from last DQDT update   */
    130132    int32_t           threads_var;     /*! threads number increment from last DQDT update */
     
    132134        dqdt_node_t       dqdt_tbl[CONFIG_MAX_DQDT_DEPTH];     /*! embedded DQDT nodes        */
    133135
     136    // Local process manager
    134137    pmgr_t            pmgr;            /*! embedded process manager                       */
    135138
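
Pieced together from the hunks above, the reorganized cluster_t groups its fields as follows (a condensed sketch; unchanged fields elided):

    typedef struct cluster_s
    {
        // global parameters
        uint32_t       paddr_width;       /*! number of bits in physical address     */
        ...

        // local parameters
        uint32_t       cores_nr;          /*! number of cores in cluster             */
        uint32_t       cores_in_kernel;   /*! cores currently in kernel mode         */
        uint32_t       ram_size;          /*! physical memory size                   */
        uint32_t       ram_base;          /*! physical memory base (local address)   */
        core_t         core_tbl[CONFIG_MAX_LOCAL_CORES];
        list_entry_t   dev_root;          /*! root of list of devices in cluster     */

        // memory allocators
        ppm_t          ppm;
        khm_t          khm;
        kcm_t          kcm;
        kcm_t        * kcm_tbl[KMEM_TYPES_NR];

        // RPC
        rpc_fifo_t     rpc_fifo;          /*! cluster RPC fifo (shared)              */
        uint32_t       rpc_threads;       /*! current number of RPC threads          */

        // DQDT
        int32_t        pages_var;
        int32_t        threads_var;
        dqdt_node_t    dqdt_tbl[CONFIG_MAX_DQDT_DEPTH];

        // local process manager
        pmgr_t         pmgr;
    }
    cluster_t;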
  • trunk/kernel/kern/core.c

    r188 r279  
    5050        core->usage             = 0;
    5151        core->spurious_irqs     = 0;
    52     core->rpc_threads       = 0;
    5352        core->thread_idle       = NULL;
    5453        core->fpu_owner         = NULL;
  • trunk/kernel/kern/core.h

    r188 r279  
    5656        uint32_t            usage;          /*! cumulated busy_percent (idle / total)      */
    5757        uint32_t            spurious_irqs;  /*! for instrumentation...                     */
    58     uint32_t            rpc_threads;    /*! current RPC threads number for this core   */
    59         struct thread_s   * thread_rpc;     /*! pointer on current RPC thread descriptor   */
    6058        struct thread_s   * thread_idle;    /*! pointer on idle thread descriptor          */
    6159        struct thread_s   * fpu_owner;      /*! pointer on current FPU owner thread        */
  • trunk/kernel/kern/kernel_init.c

    r265 r279  
    2828#include <hal_special.h>
    2929#include <hal_context.h>
     30#include <hal_irqmask.h>
    3031#include <barrier.h>
    3132#include <remote_barrier.h>
     
    5960
    6061///////////////////////////////////////////////////////////////////////////////////////////
    61 // All these global variables are replicated in all clusters.
     62// All the following global variables are replicated in all clusters.
    6263// They are initialised by the kernel_init() function.
    6364//
     
    135136           "    /_/        \\_\\ |______| |_|    |_|   \\_____/  |______/        |_|    |_|  |_|  \\_\\ |_|   |_|  \n"
    136137           "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n"
    137            "\n\n\t\t\t Version 0.0   :   %d clusters   /   %d cores per cluster\n\n", nclusters , ncores );
     138           "\n\n\t\t\t Version 0.0 : %d cluster(s)   /   %d core(s) per cluster\n\n", nclusters , ncores );
    138139}
    139140
     
    274275            }
    275276
    276             if( local_cxy == 0 )
    277             kinit_dmsg("\n[INFO] %s created MMC chdev in cluster 0 at cycle %d\n",
    278                        __FUNCTION__ , local_cxy , (uint32_t)hal_time_stamp() );
     277            kinit_dmsg("\n[INFO] %s created MMC in cluster %x / chdev = %x\n",
     278                       __FUNCTION__ , local_cxy , chdev_ptr );
    279279        }
    280280        ///////////////////////////////
     
    301301                chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );
    302302
    303                 kinit_dmsg("\n[INFO] %s created DMA[%d] chdev in cluster 0 at cycle %d\n",
    304                            __FUNCTION__ , channel , (uint32_t)hal_time_stamp() );
     303                kinit_dmsg("\n[INFO] %s created DMA[%d] in cluster %x / chdev = %x\n",
     304                           __FUNCTION__ , channel , local_cxy , chdev_ptr );
    305305            }
    306306        }
     
    433433                    }
    434434
    435                             kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x at cycle %d\n",
    436                                __FUNCTION__ , chdev_func_str( func ), channel,
    437                                local_cxy , (uint32_t)hal_time_stamp() );
     435                            kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x / chdev = %x\n",
     436                    __FUNCTION__ , chdev_func_str( func ), channel , local_cxy , chdev );
    438437
    439438                }  // end if match
     
    658657}
    659658
     659////////////////////////////////////////////////////////////////////////////////////////////
     660// This function displays on TXT0 the content of the external chdev directory,
     661// in the local cluster.
     662////////////////////////////////////////////////////////////////////////////////////////////
     663static void chdev_dir_display( )
     664{
     665    cxy_t     iob_cxy  = GET_CXY( chdev_dir.iob );
     666    chdev_t * iob_ptr  = (chdev_t *)GET_PTR( chdev_dir.iob );
     667    xptr_t    iob_base = hal_remote_lwd( XPTR( iob_cxy , &iob_ptr->base ) );
     668
     669    cxy_t     pic_cxy  = GET_CXY( chdev_dir.pic );
     670    chdev_t * pic_ptr  = (chdev_t *)GET_PTR( chdev_dir.pic );
     671    xptr_t    pic_base = hal_remote_lwd( XPTR( pic_cxy , &pic_ptr->base ) );
     672
     673    cxy_t     txt0_cxy  = GET_CXY( chdev_dir.txt[0] );
     674    chdev_t * txt0_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[0] );
     675    xptr_t    txt0_base = hal_remote_lwd( XPTR( txt0_cxy , &txt0_ptr->base ) );
     676
     677    cxy_t     txt1_cxy  = GET_CXY( chdev_dir.txt[1] );
     678    chdev_t * txt1_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[1] );
     679    xptr_t    txt1_base = hal_remote_lwd( XPTR( txt1_cxy , &txt1_ptr->base ) );
     680
     681    cxy_t     txt2_cxy  = GET_CXY( chdev_dir.txt[2] );
     682    chdev_t * txt2_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[2] );
     683    xptr_t    txt2_base = hal_remote_lwd( XPTR( txt2_cxy , &txt2_ptr->base ) );
     684
     685    cxy_t     ioc_cxy  = GET_CXY( chdev_dir.ioc[0] );
     686    chdev_t * ioc_ptr  = (chdev_t *)GET_PTR( chdev_dir.ioc[0] );
     687    xptr_t    ioc_base = hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) );
     688
     689    cxy_t     fbf_cxy  = GET_CXY( chdev_dir.fbf[0] );
     690    chdev_t * fbf_ptr  = (chdev_t *)GET_PTR( chdev_dir.fbf[0] );
     691    xptr_t    fbf_base = hal_remote_lwd( XPTR( fbf_cxy , &fbf_ptr->base ) );
     692
     693    cxy_t     nic_rx_cxy  = GET_CXY( chdev_dir.nic_rx[0] );
     694    chdev_t * nic_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_rx[0] );
     695    xptr_t    nic_rx_base = hal_remote_lwd( XPTR( nic_rx_cxy , &nic_rx_ptr->base ) );
     696
     697    cxy_t     nic_tx_cxy  = GET_CXY( chdev_dir.nic_tx[0] );
     698    chdev_t * nic_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_tx[0] );
     699    xptr_t    nic_tx_base = hal_remote_lwd( XPTR( nic_tx_cxy , &nic_tx_ptr->base ) );
     700
     701    printk("\n*** external chdev directory in cluster %x\n"
     702           "  - iob       = %l / base = %l\n"
     703           "  - pic       = %l / base = %l\n"
     704           "  - txt[0]    = %l / base = %l\n"
     705           "  - txt[1]    = %l / base = %l\n"
     706           "  - txt[2]    = %l / base = %l\n"
     707           "  - ioc[0]    = %l / base = %l\n"
     708           "  - fbf[0]    = %l / base = %l\n"
     709           "  - nic_rx[0] = %l / base = %l\n"
     710           "  - nic_tx[0] = %l / base = %l\n",
     711           local_cxy,
     712           chdev_dir.iob, iob_base,
     713           chdev_dir.pic, pic_base,
     714           chdev_dir.txt[0], txt0_base,
     715           chdev_dir.txt[1], txt1_base,
     716           chdev_dir.txt[2], txt2_base,
     717           chdev_dir.ioc[0], ioc_base,
     718           chdev_dir.fbf[0], fbf_base,
     719           chdev_dir.nic_rx[0], nic_rx_base,
     720           chdev_dir.nic_tx[0], nic_tx_base );
     721}
     722
    660723///////////////////////////////////////////////////////////////////////////////////////////
    661724// This function is the entry point for the kernel initialisation.
     
    683746
    684747    error_t      error;
     748    uint32_t     status;                    // running core status register
    685749
    686750    cxy_t        io_cxy = info->io_cxy;
     
    732796    if( error )
    733797    {
    734         nolock_printk("\n[PANIC] in %s : illegal core identifiers"
     798        printk("\n[PANIC] in %s : illegal core identifiers"
    735799               " gid = %x / cxy = %x / lid = %d\n",
    736800               __FUNCTION__ , core_lid , core_cxy , core_lid );
     
    745809        if( error )
    746810        {
    747             nolock_printk("\n[PANIC] in %s : cannot initialise cluster %x",
     811            printk("\n[PANIC] in %s : cannot initialise cluster %x",
    748812                   __FUNCTION__ , local_cxy );
    749813            hal_core_sleep();
     
    764828    // STEP 2 : all CP0s initialize the process_zero descriptor.
    765829    //          CP0 in cluster 0 initialises the IOPIC device.
    766     //          all CP0s complete the distibuted LAPIC initialization.
    767830    /////////////////////////////////////////////////////////////////////////////////
    768831
     
    777840    if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info );
    778841   
    779     // all CP0s initialize their local LAPIC extension,
    780     if( core_lid == 0 ) lapic_init( info );
    781 
    782842    ////////////////////////////////////////////////////////////////////////////////
    783843    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
     
    791851
    792852    ////////////////////////////////////////////////////////////////////////////////
    793     // STEP 3 : all CP0s initialize their local chdev descriptors
    794     //          (both internal devices and external devices).
     853    // STEP 3 : all CP0s complete the distributed LAPIC initialization.
     854    //          all CP0s initialize their internal chdev descriptors
     855    //          all CP0s initialize their local external chdev descriptors
    795856    ////////////////////////////////////////////////////////////////////////////////
     857
     858    // all CP0s initialize their local LAPIC extension,
     859    if( core_lid == 0 ) lapic_init( info );
    796860
    797861    // CP0 scan the internal (private) peripherals,
     
    818882
    819883    /////////////////////////////////////////////////////////////////////////////////
    820     // STEP 4 : Alls cores initialize their private IDLE thread.
     884    // STEP 4 : All cores enable IPI (Inter Processor Interrupt),
     885    //          All cores initialize their IDLE thread.
    821886    //          Only CP0 in cluster 0 creates the VFS root inode.
    822887    //          It accesses the boot device to initialize the file system context.
    823888    /////////////////////////////////////////////////////////////////////////////////
    824889
    825     // all cores create idle thread descriptor
     890    if( CONFIG_KINIT_DEBUG ) chdev_dir_display();
     891   
     892    // All cores enable the shared IPI channel
     893
     894// @@@
     895    hal_set_ebase( 0x1000 );
     896// @@@
     897
     898    dev_pic_enable_ipi();
     899    hal_enable_irq( &status );
     900
     901    kinit_dmsg("\n[INFO] %s : IRQs enabled for core[%x,%d] / SR = %x\n",
     902               __FUNCTION__ , local_cxy , core_lid , hal_get_sr() );
     903
     904    // all cores create the idle thread descriptor
    826905    error = thread_kernel_init( thread,
    827906                                THREAD_IDLE,
     
    831910    if( error )
    832911    {
    833         nolock_printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",
     912        printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",
    834913                      __FUNCTION__ , local_cxy , core_lid );
    835914        hal_core_sleep();
     
    860939            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    861940
    862             nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
    863                            "cannot create FATFS context in cluster 0\n" );
     941            assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
     942                    "cannot create FATFS context in cluster 0\n" );
    864943
    865944            // 2. access boot device to initialize FATFS context
     
    883962                                      &vfs_root_inode_xp );                // return
    884963
    885             nolock_assert( (error == 0) , __FUNCTION__ ,
    886                            "cannot create VFS root inode\n" );
     964            assert( (error == 0) , __FUNCTION__ ,
     965                    "cannot create VFS root inode\n" );
    887966
    888967            // 5. initialize VFS context for FAT in cluster 0
     
    896975        else
    897976        {
    898             nolock_printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );
     977            printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );
    899978            hal_core_sleep();
    900979        }
     
    9311010            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    9321011
    933             nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
    934                            "cannot create FATFS context\n" );
     1012            assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
     1013                    "cannot create FATFS context\n" );
    9351014
    9361015            // get local pointer on VFS context for FATFS
     
    9651044    /////////////////////////////////////////////////////////////////////////////////
    9661045
    967     if( (core_lid ==  0) && (local_cxy == 0) )
     1046//    if( (core_lid ==  0) && (local_cxy == 0) )
    9681047    kinit_dmsg("\n[INFO] %s exit barrier 5 at cycle %d : VFS OK in all clusters\n",
    9691048               __FUNCTION__, (uint32_t)hal_time_stamp());
     
    9861065        devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();
    9871066
    988         nolock_assert( (devfs_ctx != NULL) , __FUNCTION__ ,
    989                        "cannot create DEVFS context in cluster IO\n");
     1067        assert( (devfs_ctx != NULL) , __FUNCTION__ ,
     1068                "cannot create DEVFS context in cluster IO\n");
    9901069
    9911070        // register DEVFS root and external directories
     
    9931072    }   
    9941073
     1074printk("\n@@@ %s : cluster %x reach barrier 6\n", __FUNCTION__ , local_cxy );
     1075
    9951076    /////////////////////////////////////////////////////////////////////////////////
    9961077    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
     
    9991080    /////////////////////////////////////////////////////////////////////////////////
    10001081
    1001     if( (core_lid ==  0) && (local_cxy == 0) )
     1082//    if( (core_lid ==  0) && (local_cxy == 0) )
    10021083    kinit_dmsg("\n[INFO] %s exit barrier 6 at cycle %d : DEVFS OK in cluster IO\n",
    10031084               __FUNCTION__, (uint32_t)hal_time_stamp());
     
    10711152        print_banner( (info->x_size * info->y_size) , info->cores_nr );
    10721153
    1073         kinit_dmsg("\n\n*** memory fooprint of main kernet objects ***\n"
     1154        kinit_dmsg("\n\n*** memory footprint for main kernel objects ***\n\n"
    10741155                   " - thread descriptor  : %d bytes\n"
    10751156                   " - process descriptor : %d bytes\n"
     
    11141195    }
    11151196
    1116     // each core activates its private PTI IRQ
     1197    // each core activates its private TICK IRQ
    11171198    dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD );
    11181199
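
Pulling the scattered kernel_init() hunks together, the per-core STEP 4 prologue introduced by this patch reads roughly as follows (condensed; the hal_set_ebase( 0x1000 ) call is the temporary @@@-marked hack committed with this change):

    // STEP 4 prologue : every core enables IPIs before creating its idle thread
    if( CONFIG_KINIT_DEBUG ) chdev_dir_display();   // dump external chdev directory

    hal_set_ebase( 0x1000 );                        // temporary debug hack (@@@)

    dev_pic_enable_ipi();                           // enable the shared IPI channel
    hal_enable_irq( &status );                      // unmask IRQs / return previous SR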
  • trunk/kernel/kern/printk.c

    r246 r279  
    401401}
    402402
    403 ////////////////////////////////////////
    404 void nolock_printk( char * format , ...)
    405 {
    406     va_list   args;
    407 
    408     // call kernel_printf on TXT0, in busy waiting mode
    409     va_start( args , format );
    410     kernel_printf( 0 , 1 , format , &args );
    411     va_end( args );
    412 }
    413 
    414403///////////////////////////////////////////
    415404inline void assert( bool_t       condition,
     
    424413}
    425414
    426 //////////////////////////////////////////////////
    427 inline void nolock_assert( bool_t       condition,
    428                            const char * function_name,
    429                            char       * string )
    430 {
    431     if( condition == false )
    432     {
    433         nolock_printk("\n[PANIC] in %s : %s\n" , function_name , string );
    434         hal_core_sleep();
    435     }
    436 }
    437 
    438 
    439415
    440416// Local Variables:
  • trunk/kernel/kern/printk.h

    r188 r279  
    7474
    7575/**********************************************************************************
    76  * This function displays a formated string on the kernel terminal TXT0,
    77  * using a busy waiting policy: It calls directly the relevant TXT driver,
    78  * without taking the the lock protecting exclusive access to TXT0 terminal.
    79  **********************************************************************************
    80  * @ format     : formated string.
    81  *********************************************************************************/
    82 void nolock_printk( char* format, ... );
    83 
    84 /**********************************************************************************
    8576 * This function displays a "PANIC" message and forces the calling core in
    8677 * sleeping mode if a Boolean condition is false.
     
    9586                    char       * string );
    9687
    97 /**********************************************************************************
    98  * This function displays a "PANIC" message and force the calling core in
    99  * sleeping mode if a Boolean condition is false,
    100  * without taking the the lock protecting exclusive access to TXT0 terminal.
    101  **********************************************************************************
    102  * @ condition     : condition that must be true.
    103  * @ function_name : name of the calling function.
    104  * @ string        : error message if condition is false.
    105  *********************************************************************************/
    106 inline void nolock_assert( bool_t       condition,
    107                            const char * function_name,
    108                            char       * string );
    109 
    11088///////////////////////////////////////////////////////////////////////////////////
    11189//       Conditional debug macros
     
    215193
    216194#if CONFIG_KINIT_DEBUG
    217 #define kinit_dmsg(...) nolock_printk(__VA_ARGS__)
     195#define kinit_dmsg(...) printk(__VA_ARGS__)
    218196#else
    219197#define kinit_dmsg(...)
  • trunk/kernel/kern/process.c

    r204 r279  
    9090    pid_t       parent_pid;
    9191
    92     process_dmsg("\n[INFO] %s : enters for process %x in cluster %x / parent_xp = %l\n",
    93                  __FUNCTION__ , pid , parent_xp );
     92    process_dmsg("\n[INFO] %s : enters for process %x in cluster %x\n",
     93                 __FUNCTION__ , pid , local_cxy );
    9494
    9595    // get parent process cluster, local pointer, and pid
     
    198198    local_process->ref_xp = reference_process_xp;
    199199
     200    process_dmsg("\n[INFO] %s : enters for process %x in cluster %x\n",
     201                 __FUNCTION__ , local_process->pid , local_cxy );
     202
    200203    // reset children list root (not used in a process descriptor copy)
    201204    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
     
    229232
    230233        hal_fence();
     234
     235    process_dmsg("\n[INFO] %s : exits for process %x in cluster %x\n",
     236                 __FUNCTION__ , local_process->pid , local_cxy );
    231237
    232238    return 0;
  • trunk/kernel/kern/rpc.c

    r265 r279  
    101101    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    102102
     103    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     104
    103105    // initialise RPC descriptor header
    104106    rpc_desc_t  rpc;
     
    115117    *error  = (error_t)rpc.args[0];     
    116118    *ppn    = (uint32_t)rpc.args[1];
     119
     120    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    117121}
    118122
     
    153157    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    154158
     159    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     160
    155161    // initialise RPC descriptor header
    156162    rpc_desc_t  rpc;
     
    167173    *pid    = (pid_t)rpc.args[1];
    168174    *error  = (error_t)rpc.args[2];     
     175
     176    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    169177}
    170178
     
    204212    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    205213
     214    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     215
    206216    // initialise RPC descriptor header
    207217    rpc_desc_t  rpc;
     
    217227    // get output arguments from RPC descriptor
    218228    *error  = (error_t)rpc.args[1];     
     229
     230    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    219231}
    220232
     
    256268    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ ,
    257269            "caller must be reference process cluster\n");
     270
     271    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
    258272
    259273    // get local process index in reference cluster
     
    282296        if( target_cxy != local_cxy ) rpc_send_sync( target_cxy , &rpc );
    283297    }
     298
     299    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    284300
    285301
     
    327343    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    328344
     345    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     346
    329347    // initialise RPC descriptor header
    330348    rpc_desc_t  rpc;
     
    344362    *thread_xp = (xptr_t)rpc.args[4];
    345363    *error     = (error_t)rpc.args[5];
     364
     365    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    346366}
    347367
     
    405425    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    406426
     427    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     428
    407429    // initialise RPC descriptor header
    408430    rpc_desc_t  rpc;
     
    421443    *thread_xp = (xptr_t)rpc.args[3];
    422444    *error     = (error_t)rpc.args[4];
     445
     446    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    423447}
    424448
     
    463487    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    464488
     489    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     490
    465491    // initialise RPC descriptor header
    466492    rpc_desc_t  rpc;
     
    474500    // register RPC request in remote RPC fifo
    475501    rpc_send_sync( cxy , &rpc );
     502
     503    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    476504}
    477505
     
    513541    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    514542
     543    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     544
    515545    // initialise RPC descriptor header
    516546    rpc_desc_t  rpc;
     
    534564    *inode_xp = (xptr_t)rpc.args[8];
    535565    *error    = (error_t)rpc.args[9];
     566
     567    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    536568}
    537569
     
    590622    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    591623
     624    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     625
    592626    // initialise RPC descriptor header
    593627    rpc_desc_t  rpc;
     
    600634    // register RPC request in remote RPC fifo (blocking function)
    601635    rpc_send_sync( cxy , &rpc );
     636
     637    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    602638}
    603639
     
    632668    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    633669
     670    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     671
    634672    // initialise RPC descriptor header
    635673    rpc_desc_t  rpc;
     
    648686    *dentry_xp = (xptr_t)rpc.args[3];
    649687    *error     = (error_t)rpc.args[4];
     688
     689    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    650690}
    651691
     
    695735    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    696736
     737    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     738
    697739    // initialise RPC descriptor header
    698740    rpc_desc_t  rpc;
     
    705747    // register RPC request in remote RPC fifo (blocking function)
    706748    rpc_send_sync( cxy , &rpc );
     749
     750    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    707751}
    708752
     
    737781    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    738782
     783    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     784
    739785    // initialise RPC descriptor header
    740786    rpc_desc_t  rpc;
     
    752798    *file_xp = (xptr_t)rpc.args[2];
    753799    *error   = (error_t)rpc.args[3];
     800
     801    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    754802}
    755803
     
    790838    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    791839
     840    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     841
    792842    // initialise RPC descriptor header
    793843    rpc_desc_t  rpc;
     
    800850    // register RPC request in remote RPC fifo (blocking function)
    801851    rpc_send_sync( cxy , &rpc );
     852
     853    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    802854}
    803855
     
    831883    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    832884
     885    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     886
    833887    // initialise RPC descriptor header
    834888    rpc_desc_t  rpc;
     
    846900    // get output values from RPC descriptor
    847901    *error   = (error_t)rpc.args[3];
     902
     903    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    848904}
    849905
     
    889945    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    890946
     947    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     948
    891949    // initialise RPC descriptor header
    892950    rpc_desc_t  rpc;
     
    902960    // get output values from RPC descriptor
    903961    *error   = (error_t)rpc.args[1];
     962
     963    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    904964}
    905965
     
    938998    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    939999
     1000    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1001
    9401002    // initialise RPC descriptor header
    9411003    rpc_desc_t  rpc;
     
    9541016    *cluster = (uint32_t)rpc.args[3];
    9551017    *error   = (error_t)rpc.args[4];
     1018
     1019    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    9561020}
    9571021
     
    9941058    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    9951059
     1060    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1061
    9961062    // initialise RPC descriptor header
    9971063    rpc_desc_t  rpc;
     
    10081074    // get output argument from rpc descriptor
    10091075    *vseg_xp = rpc.args[2];
     1076
     1077    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    10101078}
    10111079
     
    10501118    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    10511119
     1120    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1121
    10521122    // initialise RPC descriptor header
    10531123    rpc_desc_t  rpc;
     
    10661136    *ppn   = (ppn_t)rpc.args[3];
    10671137    *error = (error_t)rpc.args[4];
     1138
     1139    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    10681140}
    10691141
     
    11051177    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11061178
     1179    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1180
    11071181    // initialise RPC descriptor header
    11081182    rpc_desc_t  rpc;
     
    11181192    // get output arguments from RPC descriptor
    11191193    *buf_xp = (xptr_t)rpc.args[1];
     1194
     1195    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    11201196}
    11211197
     
    11521228    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11531229
     1230    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1231
    11541232    // initialise RPC descriptor header
    11551233    rpc_desc_t  rpc;
     
    11631241    // register RPC request in remote RPC fifo
    11641242    rpc_send_sync( cxy , &rpc );
     1243
     1244    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    11651245}
    11661246
     
    11991279    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    12001280
     1281    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1282
    12011283    // initialise RPC descriptor header
    12021284    rpc_desc_t  rpc;
     
    12171299    // get output values from RPC descriptor
    12181300    *error     = (error_t)rpc.args[6];
     1301
     1302    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    12191303}
    12201304
     
    12621346                    rpc_desc_t * rpc )
    12631347{
    1264         thread_t * this = CURRENT_THREAD;
    12651348    uint32_t   cores;
    12661349    error_t    error;
     
    12681351    reg_t      sr_save;
    12691352
    1270     // get client CPU and cluster coordinates
    1271     cxy_t      client_cxy = local_cxy;   
    1272     lid_t      client_lid = CURRENT_CORE->lid;
     1353    thread_t * this = CURRENT_THREAD;
     1354
     1355    rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x\n",
     1356             __FUNCTION__ , local_cxy , server_cxy );
    12731357
    12741358    // allocate and initialise an extended pointer on the RPC descriptor
    1275         xptr_t   xp = XPTR( client_cxy , rpc );
    1276 
    1277     // get local pointer on rpc_fifo in remote cluster with the
    1278     // assumption that addresses are identical in all clusters
     1359        xptr_t   desc_xp = XPTR( local_cxy , rpc );
     1360
     1361    // get local pointer on rpc_fifo in remote cluster, with the
     1362    // assumption that rpc_fifo addresses are identical in all clusters
    12791363    rpc_fifo_t * rf = &LOCAL_CLUSTER->rpc_fifo;
    12801364
     
    12841368    {
    12851369        error = remote_fifo_put_item( XPTR( server_cxy , &rf->fifo ),
    1286                                       (uint64_t *)&xp,
     1370                                      (uint64_t )desc_xp,
    12871371                                      &first );
    12881372
    12891373            if ( error )
    12901374        {
    1291             printk("\n[WARNING] %s : core %d in cluster %x cannot post RPC to cluster %x\n",
    1292                    __FUNCTION__ , client_lid , client_cxy , server_cxy );
     1375            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
     1376                   __FUNCTION__ , local_cxy , server_cxy );
     1377
    12931378            if( thread_can_yield() ) sched_yield();
     1379        }
     1380        else
     1381        {
    12941382        }
    12951383    }
    12961384    while( error );
    12971385 
    1298     rpc_dmsg("\n[INFO] %s on core %d in cluster %x sent RPC %p to cluster %x\n",
    1299               __FUNCTION__ , client_lid , client_cxy , rpc , server_cxy );
     1386    rpc_dmsg("\n[INFO] %s : RPC registered / client_cxy = %x / server_cxy = %x / first = %d\n",
     1387             __FUNCTION__ , local_cxy , server_cxy , first );
    13001388       
    1301     // send IPI if this is the first RPC in remote FIFO
    1302     // and no CPU is in kernel mode in server cluster.
    1303     // the selected CPU in server has the same lid as the client CPU.
     1389    // send IPI to remote CP0, if this is the first RPC in remote FIFO,
     1390    // and no CPU is in kernel mode in the server cluster.
    13041391        if( first )
    13051392        {
     
    13091396                if( cores == 0 ) // no core in kernel mode in server
    13101397                {
    1311                     dev_pic_send_ipi( server_cxy , client_lid );
    1312 
    1313                     rpc_dmsg("\n[INFO] %s : core %d in cluster %x send IPI to core %d in cluster %x\n",
    1314                       __FUNCTION__, client_lid , client_cxy , client_lid , server_cxy );
     1398                    dev_pic_send_ipi( server_cxy , 0 );
     1399
     1400                    rpc_dmsg("\n[INFO] %s : IPI sent / client_cxy = %x / server_cxy = %x\n",
     1401                     __FUNCTION__, local_cxy , server_cxy );
    13151402        }
    13161403        }
    13171404
    1318         // activate preemption to allow incoming RPC and avoid deadlock
     1405        // enable IRQs to allow incoming RPC and avoid deadlock
    13191406        if( this->type == THREAD_RPC ) hal_enable_irq( &sr_save );
    13201407
    1321     // the sending thread poll the response slot until RPC completed
     1408    // the client thread polls the response slot until the RPC is completed
     1409    // TODO this could be replaced by a descheduling policy... [AG]
    13221410        while( 1 )
    13231411    {
     
    13251413    }
    13261414
    1327     // restore preemption
     1415    // restore IRQs
    13281416        if( this->type == THREAD_RPC ) hal_restore_irq( sr_save );
     1417
     1418    rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x\n",
     1419             __FUNCTION__ , local_cxy , server_cxy );
    13291420
    13301421}  // end rpc_send_sync()
     
    13441435}
    13451436
    1346 ////////////////////////////////////////////////
    1347 error_t rpc_execute_all( rpc_fifo_t * rpc_fifo )
     1437/////////////////////////////////////////////
     1438void rpc_execute_all( rpc_fifo_t * rpc_fifo )
    13481439{
    13491440        xptr_t         xp;             // extended pointer on RPC descriptor
     
    13531444    rpc_desc_t   * desc;           // pointer on RPC descriptor
    13541445    uint32_t       index;          // RPC index
    1355     uint32_t       expected;       // number of expected responses
    13561446    cxy_t          client_cxy;     // client cluster identifier
    13571447        error_t        error;
     
    13701460                if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
    13711461        {
    1372             rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n"
     1462            rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n",
    13731463                                     __FUNCTION__ , this->trdid , core->lid , local_cxy , count );
    13741464
     
    13771467            desc       = (rpc_desc_t *)GET_PTR( xp );
    13781468
    1379             // get rpc index and expected responses from RPC descriptor
     1469            // get rpc index from RPC descriptor
    13801470                index     = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
    1381                 expected  = hal_remote_lw( XPTR( client_cxy , &desc->response ) );
    13821471
    13831472            // call the relevant server function
     
    13881477
    13891478            // notify RPC completion as required
    1390             if( expected == 1 ) hal_remote_sw( XPTR(client_cxy,&desc->response) , 0 );
    1391             if( expected >  1 ) hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 );
     1479            hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 );
    13921480                }
    13931481       
     
    14001488            (count > CONFIG_RPC_PENDING_MAX) ) break;
    14011489        }
    1402     while( 1 )
    1403 
    1404         rpc_dmsg("\n[INFO] %s running on core %d in cluster %x exit\n"
    1405               __FUNCTION__ , CURRENT_CORE->lid , local_cxy );
    1406                
     1490    while( 1 );
     1491
    14071492    // update RPC_FIFO global counter
    14081493        rpc_fifo->count += count;
    14091494
    1410         return 0;
    14111495}  // end rpc_execute_all()
    14121496
     
    14221506    reg_t         sr_save;
    14231507
     1508   
    14241509        this   = CURRENT_THREAD;
    14251510    core   = this->core;
     
    14271512    found  = false;
    14281513
    1429     // calling thread must be the RPC_FIFO owner
    1430     if( this->trdid != rpc_fifo->owner )
    1431     {
    1432         printk("\n[PANIC] in %s : calling thread is not RPC_FIFO owner\n", __FUNCTION__ );
    1433         hal_core_sleep();
    1434     }
     1514    assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,
     1515          "calling thread is not RPC_FIFO owner\n" );
    14351516
    14361517    // makes the calling thread not preemptable
     
    14431524    {
    14441525        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    1445         if( (thread->type == THREAD_RPC) && (thread->blocked ==  THREAD_BLOCKED_IDLE ) )
     1526        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    14461527        {
    14471528            found = true;
     
    14531534    {
    14541535        thread->blocked = 0;
     1536
     1537        rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core %x in cluster %x at cycle %d\n",
     1538                          __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    14551539    }
    14561540    else                           // create a new RPC thread
     
    14691553        }
    14701554
    1471         rpc_dmsg("\n[INFO] %s creates RPC thread %x on core %x in cluster %x at cycle %d\n",
     1555        rpc_dmsg("\n[INFO] %s : create RPC thread %x on core %x in cluster %x at cycle %d\n",
    14721556                          __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    14731557
    14741558        // update core descriptor counter 
    1475             hal_atomic_add( &core->rpc_threads , 1 );
     1559            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    14761560    }
    14771561
    14781562    // update owner in rpc_fifo
    14791563    rpc_fifo->owner = thread->trdid;
    1480 
    1481     rpc_dmsg ("\n[INFO] %s activates RPC thread %x on core %x in cluster %x at cycle %d\n",
    1482                       __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    14831564
    14841565    // current thread deschedules / RPC thread start execution
     
    15061587    }
    15071588
    1508         // calling thread tries to take the light lock,
    1509     // and activates an RPC thread if success
     1589        // try to take the light lock, and activate an RPC thread on success
    15101590    if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    15111591        {
     
    15431623
    15441624    // this infinite loop is not preemptable
    1545     // the RPC thread deschedule when the RPC_FIFO is empty
     1625    // the RPC thread deschedules only when the RPC_FIFO is empty
    15461626        while(1)
    15471627        {
     
    15611641
    15621642
    1563         // suicide if too much RPC threads for this core
    1564                 if( this->core->rpc_threads > CONFIG_RPC_THREADS_MAX )
     1643        // block and deschedule, or suicide
     1644                if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
    15651645                {
    15661646            rpc_dmsg("\n[INFO] RPC thread %x suicide on core %d in cluster %x at cycle %d\n",
     
    15681648
    15691649            // update core descriptor counter
    1570                 hal_atomic_add( &this->core->rpc_threads , -1 );
     1650                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
    15711651
    15721652            // suicide
    15731653                        thread_exit();
    15741654                }
    1575 
    1576         // block and deschedule
    1577         rpc_dmsg("\n[INFO] RPC thread %x deschedule on core %d in cluster %x at cycle %d\n",
    1578                               this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
    1579 
    1580                 thread_block( this , THREAD_BLOCKED_IDLE );
    1581         sched_yield();
    1582 
    1583                 rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
    1584                               this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
    1585         }
     1655        else
     1656        {
     1657            rpc_dmsg("\n[INFO] RPC thread %x blocks on core %d in cluster %x at cycle %d\n",
     1658                                 this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1659
     1660                     thread_block( this , THREAD_BLOCKED_IDLE );
     1661             sched_yield();
     1662
     1663                     rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
     1664                          this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1665        }
     1666        } // end while
    15861667} // end rpc_thread_func()
    15871668
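
The client-side wait loop elided from the rpc_send_sync() hunk above presumably spins on the response slot that rpc_execute_all() now decrements unconditionally with hal_remote_atomic_add(). A minimal sketch, assuming the client initialises rpc->response to 1 before posting (the TODO in the hunk suggests this busy wait may later become a descheduling policy):

    // client side : poll until the server clears the response slot
    while( 1 )
    {
        if( rpc->response == 0 ) break;   // server signalled completion
        hal_fence();                      // assumed barrier between two polls
    }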
  • trunk/kernel/kern/rpc.h

    r265 r279  
    158158 * This function is the entry point for RPC handling on the server side.
    159159 * It can be executed by any thread running (in kernel mode) on any core.
    160  * It first checks the core private RPC fifo, an then the cluster shared RPC fifo.
    161  * It calls the rpc_activate_thread() function to activate a dedicated RPC thread.
    162  ***********************************************************************************
    163  * @ returns true if at least one RPC found / false otherwise.
     160 * It checks the RPC fifo, tries to take the light-lock, and activates (or creates)
     161 * an RPC thread in case of success.
     162 ***********************************************************************************
     163 * @ returns true if success / false otherwise.
    164164 **********************************************************************************/
    165165bool_t rpc_check();
     
    170170 ***********************************************************************************
    171171 * @ rpc_fifo  : pointer on the local RPC fifo
    172  * @ returns 0 if success
    173  **********************************************************************************/
    174 error_t rpc_execute_all( rpc_fifo_t * rpc_fifo );
     172 **********************************************************************************/
     173void rpc_execute_all( rpc_fifo_t * rpc_fifo );
    175174
    176175/**********************************************************************************
  • trunk/kernel/kern/scheduler.c

    r278 r279  
    4141    sched->k_threads_nr   = 0;
    4242
    43     sched->current        = NULL;
    44     sched->idle           = NULL;
    45     sched->u_last         = NULL;
    46     sched->k_last         = NULL;
     43    sched->current        = CURRENT_THREAD;
     44    sched->idle           = NULL;             // initialized in kernel_init()
     45    sched->u_last         = NULL;             // initialized in sched_register_thread()
     46    sched->k_last         = NULL;             // initialized in sched_register_thread()
    4747
    4848    // initialise threads lists
     
    6262    spinlock_lock( &sched->lock );
    6363
    64     // register thread
    6564    if( type == THREAD_USER )
    6665    {
     66        // register thread in scheduler user list
    6767        list_add_last( &sched->u_root , &thread->sched_list );
    6868        sched->u_threads_nr++;
     69
     70        // initialize u_last field if first user thread
     71        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    6972    }
    7073    else // kernel thread
    7174    {
     75        // register thread in scheduler kernel list
    7276        list_add_last( &sched->k_root , &thread->sched_list );
    7377        sched->k_threads_nr++;
     78
     79        // initialize k_last field if first kernel thread
     80        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    7481    }
    7582
     
    8996    spinlock_lock( &sched->lock );
    9097
    91     // remove thread
    9298    if( type == THREAD_USER )
    9399    {
     100        // remove thread from user list
    94101        list_unlink( &thread->sched_list );
    95102        sched->u_threads_nr--;
     103
     104        // reset the u_last field if list empty
     105        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    96106    }
    97107    else // kernel thread
    98108    {
     109        // remove thread from kernel list
    99110        list_unlink( &thread->sched_list );
    100111        sched->k_threads_nr--;
     112
     113        // reset the k_last field if list empty
     114        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    101115    }
    102116
     
    140154    list_entry_t * last;
    141155
    142     // first scan the kernel threads
    143     last    = sched->k_last;
    144     current = sched->k_last;
    145     do
    146     {
    147         // get next entry in kernel list
    148         current = list_next( &sched->k_root , current );
    149 
    150         // skip the list root that does not contain a thread
    151         if( current == NULL ) continue;
    152 
    153         // get thread pointer
    154         thread = LIST_ELEMENT( current , thread_t , sched_list );
    155 
    156         // return thread if not blocked
    157         if( thread->blocked == 0 )
     156    // first : scan the kernel threads list,
     157    // only if this list is not empty
     158    if( list_is_empty( &sched->k_root ) == false )
     159    {
     160        last    = sched->k_last;
     161        current = sched->k_last;
     162        do
    158163        {
    159             // release lock
    160             spinlock_unlock( &sched->lock );
    161             return thread;
     164            // get next entry in kernel list
     165            current = list_next( &sched->k_root , current );
     166
     167            // skip the root that does not contain a thread
     168            if( current == NULL ) current = sched->k_root.next;
     169
     170            // get thread pointer for this entry
     171            thread = LIST_ELEMENT( current , thread_t , sched_list );
     172
     173            // return thread if runnable
     174            if( thread->blocked == 0 )
     175            {
     176                // release lock
     177                spinlock_unlock( &sched->lock );
     178                return thread;
     179            }
    162180        }
    163     }
    164     while( current != last );
    165 
    166     // second scan the user threads
    167     last    = sched->u_last;
    168     current = sched->u_last;
    169     do
    170     {
    171         // get next entry in user list
    172         current = list_next( &sched->u_root , current );
    173 
    174         // skip the list root that does not contain a thread
    175         if( current == NULL ) continue;
    176 
    177         // get thread pointer
    178         thread = LIST_ELEMENT( current , thread_t , sched_list );
    179 
    180         // return thread if not blocked
    181         if( thread->blocked == 0 )
     181        while( current != last );
     182    }
     183
     184    // second : scan the user threads list,
     185    // only if this list is not empty
     186    if( list_is_empty( &sched->u_root ) == false )
     187    {
     188        last    = sched->u_last;
     189        current = sched->u_last;
     190        do
    182191        {
    183             // release lock
    184             spinlock_unlock( &sched->lock );
    185             return thread;
     192            // get next entry in user list
     193            current = list_next( &sched->u_root , current );
     194
     195            // skip the root that does not contain a thread
     196            if( current == NULL ) current = sched->u_root.next;
     197
     198            // get thread pointer for this entry
     199            thread = LIST_ELEMENT( current , thread_t , sched_list );
     200
     201            // return thread if runnable
     202            if( thread->blocked == 0 )
     203            {
     204                // release lock
     205                spinlock_unlock( &sched->lock );
     206                return thread;
     207            }
    186208        }
    187     }
    188     while( current != last );
     209        while( current != last );
     210    }
    189211
    190212    // release lock
    191213    spinlock_unlock( &sched->lock );
    192214
    193     // third, return idle thread if no runnable thread
     215    // third : return idle thread if no runnable thread
    194216    return sched->idle;
    195217
     
    234256    thread_t    * current = CURRENT_THREAD;
    235257    core_t      * core    = current->core;
     258    scheduler_t * sched   = &core->scheduler;
    236259
    237260    if( thread_can_yield() == false )
     
    265288               __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );
    266289
    267     // switch contexts if new thread
     290    // switch contexts and update scheduler state if new thread
    268291        if( next != current ) 
    269292        {
    270293        hal_cpu_context_save( current );
    271294        hal_cpu_context_restore( next );
     295
     296        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
     297        else                               sched->k_last = &current->sched_list;
     298
     299        sched->current = next;
    272300        }
    273301
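
For clarity on the sched_select() bug fixed above: the old scan did "if( current == NULL ) continue;", which re-tested the loop condition with current still NULL instead of wrapping past the list root, and an empty list (k_last == NULL) was not handled at all. The patched scan contract, condensed (assuming list_next( root , entry ) returns NULL when entry is the last element of the circular list):

    // round-robin scan, starting right after the last served entry;
    // the list root carries no thread and is skipped by an explicit wrap
    if( list_is_empty( &sched->k_root ) == false )
    {
        last = current = sched->k_last;
        do
        {
            current = list_next( &sched->k_root , current );      // may reach the root
            if( current == NULL ) current = sched->k_root.next;   // wrap past the root
            thread  = LIST_ELEMENT( current , thread_t , sched_list );
            if( thread->blocked == 0 ) return thread;             // first runnable (lock release elided)
        }
        while( current != last );
    }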
  • trunk/kernel/kern/scheduler.h

    r14 r279  
    3434struct thread_s;
    3535
    36 /***********************************************************************************************
     36/*********************************************************************************************
    3737 * This structure define the scheduler associated to a given core.
    3838 * WARNING : the idle thread is executed when there is no runnable thread in the list
    3939 * of attached threads, but is NOT part of the list of attached threads.
    40  **********************************************************************************************/
     40 ********************************************************************************************/
    4141
    4242typedef struct scheduler_s
    4343{
    44     spinlock_t        lock;         /*! readlock protecting lists of threads                  */
    45     uint16_t          u_threads_nr; /*! total numbre of attached user threads                 */
    46     uint16_t          k_threads_nr; /*! total number of attached kernel threads               */
    47     list_entry_t      u_root;       /*! root of list of user threads for this scheduler       */
    48     list_entry_t      k_root;       /*! root of list of kernel threads for this scheduler     */
    49     list_entry_t    * u_last;       /*! pointer on list_entry for last executed kernel thread */
    50     list_entry_t    * k_last;       /*! pointer on list entry for last executed user thread   */
    51     struct thread_s * idle;         /*! pointer on idle thread                                */
    52     struct thread_s * current;      /*! pointer on current running thread                     */
     44    spinlock_t        lock;         /*! readlock protecting lists of threads                */
     45    uint16_t          u_threads_nr; /*! total number of attached user threads               */
     46    uint16_t          k_threads_nr; /*! total number of attached kernel threads             */
     47    list_entry_t      u_root;       /*! root of list of user threads for this scheduler     */
     48    list_entry_t      k_root;       /*! root of list of kernel threads for this scheduler   */
     49    list_entry_t    * u_last;       /*! pointer on list_entry for last executed k_thread    */
     50    list_entry_t    * k_last;       /*! pointer on list entry for last executed u_thread    */
     51    struct thread_s * idle;         /*! pointer on idle thread                              */
     52    struct thread_s * current;      /*! pointer on current running thread                   */
    5353}
    5454scheduler_t;
    5555
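
Given the fields above, registering a thread reduces to picking the proper list and bumping the matching counter. A minimal sketch, assuming list_add_last() is the tail-insertion primitive and spinlock_lock() is the counterpart of the spinlock_unlock() visible in the hunks above:

    // Sketch of thread registration; list_add_last() is an assumed helper.
    void sched_register_thread_sketch( core_t * core , thread_t * thread )
    {
        scheduler_t * sched = &core->scheduler;

        spinlock_lock( &sched->lock );           // protect both lists

        if( thread->type == THREAD_USER )
        {
            list_add_last( &sched->u_root , &thread->sched_list );
            sched->u_threads_nr++;
        }
        else
        {
            list_add_last( &sched->k_root , &thread->sched_list );
            sched->k_threads_nr++;
        }

        spinlock_unlock( &sched->lock );
    }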
    56 /***********************************************************************************************
     56/*********************************************************************************************
    5757 *  This function initialises the scheduler for a given core.
    58  **********************************************************************************************/
     58 ********************************************************************************************/
    5959void sched_init( struct core_s * core );
    6060
    61 /***********************************************************************************************
     61/*********************************************************************************************
    6262 * This function registers a new thread in a given core scheduler.
    63  ***********************************************************************************************
     63 *********************************************************************************************
    6464 * @ core    : local pointer on the core descriptor.
    6565 * @ thread  : local pointer on the thread descriptor.
    66  **********************************************************************************************/
     66 ********************************************************************************************/
    6767void sched_register_thread( struct core_s   * core,
    6868                            struct thread_s * thread );
    6969
    70 /***********************************************************************************************
     70/*********************************************************************************************
    7171 *  This function removes a thread from the set of threads attached to a given core.
    72  ***********************************************************************************************
     72 *********************************************************************************************
    7373 * @ thread  : local pointer on the thread descriptor.
    74  **********************************************************************************************
    74  **********************************************************************************************/
     74 ********************************************************************************************/
    7676
    77 /***********************************************************************************************
     77/*********************************************************************************************
    7878 * This function handles pending signals for all registered threads, and tries to make
    7979 * a context switch for the core running the calling thread.
     
    8282 * - If there is no other runnable thread, the calling thread continues execution.
    8383 * - If there is no runnable thread, the idle thread is executed.
    84  **********************************************************************************************/
     84 ********************************************************************************************/
    8585void sched_yield();
    8686
    87 /***********************************************************************************************
     87/*********************************************************************************************
    8888 * This function handles pending signals for all registered threads, and makes
    8989 * a context switch to the thread defined by the <thread> argument.
    9090 * If the selected thread is not attached to the same core as the calling thread,
    9191 * or is blocked, it causes a kernel panic.
    92  ***********************************************************************************************
     92 *********************************************************************************************
    9393 * @ new   : local pointer on the thread to run.
    94  **********************************************************************************************/
     94 ********************************************************************************************/
    9595void sched_switch_to( struct thread_s * new );
    9696
    97 /***********************************************************************************************
     97/*********************************************************************************************
    9898 * This function scans all threads attached to a given core scheduler, and executes
    9999 * the relevant actions for pending signals, such as the THREAD_SIG_KILL signal.
    100  ***********************************************************************************************
     100 *********************************************************************************************
    101101 * @ core    : local pointer on the core descriptor.
    102  **********************************************************************************************/
     102 ********************************************************************************************/
    103103void sched_handle_signals( struct core_s * core );
    104104
    105 /***********************************************************************************************
     105/*********************************************************************************************
    106106 * This function is used by the scheduler of a given core to actually kill a thread that has
    107107 * the SIG_KILL signal set (following a thread_exit() or a thread_kill() event).
     
    110110 * - It removes the thread from the scheduler.
    111111 * - It releases the physical memory allocated for the thread descriptor.
    112  ***********************************************************************************************
     112 *********************************************************************************************
    113113 * @ thread  : local pointer on the thread descriptor.
    114  **********************************************************************************************/
     114 ********************************************************************************************/
    115115void sched_kill_thread( struct thread_s * thread );
    116116
    117 /***********************************************************************************************
     117/*********************************************************************************************
    118118 * This function does NOT modify the scheduler state.
    119119 * It just selects a thread in the list of attached threads, implementing the following policy:
     
    123123 *    the last executed one, and returns the first runnable thread found (can be the current thread).
    124124 * 3) if no runnable thread is found, it returns the idle thread.
    125  ***********************************************************************************************
     125 *********************************************************************************************
    126126 * @ core    : local pointer on the core descriptor.
    127127 * @ returns pointer on selected thread descriptor
    128  **********************************************************************************************/
     128 ********************************************************************************************/
    129129struct thread_s * sched_select( struct core_s * core );
    130130
    131 /***********************************************************************************************
     131/*********************************************************************************************
    132132 * This function scans the list of kernel threads to find an idle (blocked) RPC thread.
    133  ***********************************************************************************************
     133 *********************************************************************************************
    134134 * @ core    : local pointer on the core descriptor.
    135135 * @ returns pointer on RPC thread descriptor / returns NULL if no idle RPC thread.
    136  **********************************************************************************************/
     136 ********************************************************************************************/
    137137struct thread_s * sched_get_rpc_thead( struct core_s * core );
    138138
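
An idle RPC thread is, per the comment above, a kernel thread of RPC type that is currently blocked. A minimal sketch of the lookup, assuming a THREAD_RPC type constant and a next link field in list_entry_t (neither is shown in this changeset):

    // Sketch of the idle-RPC-thread lookup over the kernel threads list.
    thread_t * get_rpc_thread_sketch( core_t * core )
    {
        scheduler_t  * sched = &core->scheduler;
        list_entry_t * iter;

        for( iter = sched->k_root.next ; iter != &sched->k_root ; iter = iter->next )
        {
            thread_t * thread = LIST_ELEMENT( iter , thread_t , sched_list );

            if( (thread->type == THREAD_RPC) && (thread->blocked != 0) )
                return thread;                   // idle (blocked) RPC thread
        }
        return NULL;                             // no idle RPC thread on this core
    }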
  • trunk/kernel/kern/thread.h

    r174 r279  
    213213
    214214    uint32_t            dev_channel;     /*! device channel for a DEV thread          */
    215     union                                /*! embedded command for a DEV thread        */
    216     {
    217         ioc_command_t   ioc;             /*! IOC device generic command               */
    218         txt_command_t   txt;             /*! TXT device generic command               */
    219         nic_command_t   nic;             /*! NIC device generic command               */
    220         mmc_command_t   mmc;             /*! MMC device generic command               */
    221         dma_command_t   dma;             /*! DMA device generic command               */
    222     }
    223     command;
     215
     216    ioc_command_t       ioc_cmd;         /*! IOC device generic command               */
     217    txt_command_t       txt_cmd;         /*! TXT device generic command               */
     218    nic_command_t       nic_cmd;         /*! NIC device generic command               */
     219    mmc_command_t       mmc_cmd;         /*! MMC device generic command               */
     220    dma_command_t       dma_cmd;         /*! DMA device generic command               */
    224221
    225222        cxy_t               rpc_client_cxy;  /*! client cluster index (for a RPC thread)  */
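
The practical effect of replacing the union with independent fields (point 1 of the commit message) is that setting up one device command no longer clobbers another command still pending in the same thread descriptor. Illustration only; the member names inside the command structures are hypothetical:

    // With the union, the second assignment would have overwritten the first;
    // with separate fields each command keeps its own storage.
    // .channel and .type are hypothetical members, not from this changeset.
    thread_t * this = CURRENT_THREAD;
    this->txt_cmd.channel = 0;          // pending TXT command ...
    this->mmc_cmd.type    = 1;          // ... survives this MMC command setup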