Changeset 279 for trunk/kernel


Ignore:
Timestamp:
Jul 27, 2017, 12:23:29 AM (7 years ago)
Author:
alain
Message:

1) Introduce independent command fields for the various devices in the thread descriptor.
2) Introduce a new dev_pic_enable_ipi() function in the generic PIC device
3) Fix two bugs identified by Maxime in the scheduler initialisation, and in the sched_select().
4) Fix several bugs in the TSAR hal_kentry.S.
5) Introduce a third kgiet segment (besides kdata and kcode) in the TSAR bootloader.

Location:
trunk/kernel
Files:
31 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/devices/dev_dma.c

    r262 r279  
    9999
    100100    // register command in calling thread descriptor
    101     this->command.dma.dev_xp  = dev_xp;
    102     this->command.dma.dst_xp  = dst_xp;
    103     this->command.dma.src_xp  = src_xp;
    104     this->command.dma.size    = size;
     101    this->dma_cmd.dev_xp  = dev_xp;
     102    this->dma_cmd.dst_xp  = dst_xp;
     103    this->dma_cmd.src_xp  = src_xp;
     104    this->dma_cmd.size    = size;
    105105
    106106    // register client thread in waiting queue, activate server thread
     
    110110
    111111    dma_dmsg("\n[INFO] %s : completes for thread %x / error = %d\n",
    112              __FUNCTION__ ,  this->trdid , this->command.dma.error );
     112             __FUNCTION__ ,  this->trdid , this->dma_cmd.error );
    113113
    114114    // return I/O operation status from calling thread descriptor
    115     return this->command.dma.error; 
     115    return this->dma_cmd.error; 
    116116
    117117}  // dev_dma_remote_memcpy()
  • trunk/kernel/devices/dev_fbf.c

    r214 r279  
    120120// It builds and registers the command in the calling thread descriptor, after
    121121// translation of buffer virtual address to physical address.
    122 // Then, it registers the calling thead in chdev waiting queue.
     122// Then, it registers the calling thead in the relevant DMA chdev waiting queue.
    123123// Finally it blocks on the THREAD_BLOCKED_DEV condition and deschedule.
    124124////////////////////////////////////i/////////////////////////////////////////////
  • trunk/kernel/devices/dev_iob.h

    r14 r279  
    3434 * The IOB device is used to access external peripherals. It implements an IO-MMU service
    3535 * for DMA transactions launched by DMA capable external peripherals.
     36 *
    3637 * This IOB peripheral is acting as a dynamically configurable bridge, used for others
    3738 * I/O operations. Therefore, ALMOS-MKH does not use the IOB device waiting queue,
  • trunk/kernel/devices/dev_ioc.c

    r238 r279  
    116116
    117117    // register command in calling thread descriptor
    118     this->command.ioc.dev_xp    = dev_xp;
    119     this->command.ioc.type      = cmd_type;
    120     this->command.ioc.buf_xp    = XPTR( local_cxy , buffer );
    121     this->command.ioc.lba       = lba;
    122     this->command.ioc.count     = count;
     118    this->ioc_cmd.dev_xp    = dev_xp;
     119    this->ioc_cmd.type      = cmd_type;
     120    this->ioc_cmd.buf_xp    = XPTR( local_cxy , buffer );
     121    this->ioc_cmd.lba       = lba;
     122    this->ioc_cmd.count     = count;
    123123
    124124    // register client thread in IOC chdev waiting queue, activate server thread,
     
    130130             " completes / error = %d / at cycle %d\n",
    131131             __FUNCTION__ , this->trdid , this->process->pid ,
    132              this->command.ioc.error , hal_get_cycles() );
     132             this->ioc_cmd.error , hal_get_cycles() );
    133133
    134134    // return I/O operation status
    135     return this->command.ioc.error;
     135    return this->ioc_cmd.error;
    136136
    137137}  // end dev_ioc_access()
     
    158158                           uint32_t   count )
    159159{
     160    ioc_dmsg("\n[INFO] %s : enter in cluster %x\n",
     161             __FUNCTION__ , local_cxy );
     162
    160163    // get pointer on calling thread
    161164    thread_t * this = CURRENT_THREAD;
     
    165168
    166169    // get extended pointer on IOC[0] chdev
    167     xptr_t  dev_xp = chdev_dir.ioc[0];
    168 
    169     assert( (dev_xp != XPTR_NULL) , __FUNCTION__ , "undefined IOC chdev descriptor" );
     170    xptr_t  ioc_xp = chdev_dir.ioc[0];
     171
     172    assert( (ioc_xp != XPTR_NULL) , __FUNCTION__ , "undefined IOC chdev descriptor" );
    170173
    171174    // register command in calling thread descriptor
    172     this->command.ioc.dev_xp    = dev_xp;
    173     this->command.ioc.type      = IOC_SYNC_READ;
    174     this->command.ioc.buf_xp    = XPTR( local_cxy , buffer );
    175     this->command.ioc.lba       = lba;
    176     this->command.ioc.count     = count;
     175    this->ioc_cmd.dev_xp    = ioc_xp;
     176    this->ioc_cmd.type      = IOC_SYNC_READ;
     177    this->ioc_cmd.buf_xp    = XPTR( local_cxy , buffer );
     178    this->ioc_cmd.lba       = lba;
     179    this->ioc_cmd.count     = count;
    177180
    178181    // get driver command function
    179     cxy_t       dev_cxy = GET_CXY( dev_xp );
    180     chdev_t   * dev_ptr = (chdev_t *)GET_PTR( dev_xp );
    181     dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) );
     182    cxy_t       ioc_cxy = GET_CXY( ioc_xp );
     183    chdev_t   * ioc_ptr = (chdev_t *)GET_PTR( ioc_xp );
     184    dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) );
     185
     186    // get core local index for the core handling the IOC IRQ
     187    thread_t * server = (thread_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->server ) );
     188    core_t   * core   = (core_t *)hal_remote_lpt( XPTR( ioc_cxy , &server->core ) );
     189    lid_t      lid    = (lid_t)hal_remote_lw( XPTR( ioc_cxy , &core->lid ) );
    182190
    183191    // mask the IRQ
    184     thread_t * server = (thread_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->server ) );
    185     core_t   * core = (core_t *)hal_remote_lpt( XPTR( dev_cxy , &server->core ) );
    186     lid_t      lid = (lid_t)hal_remote_lw( XPTR( dev_cxy , &core->lid ) );
    187     dev_pic_disable_irq( lid , dev_xp );
    188 
    189     // call directly driver command
     192    dev_pic_disable_irq( lid , ioc_xp );
     193
     194    ioc_dmsg("\n[INFO] %s : coucou 3\n",
     195             __FUNCTION__ );
     196
     197    // call driver function
    190198    cmd( XPTR( local_cxy , this ) );
    191199
    192200    // unmask the IRQ
    193     dev_pic_enable_irq( lid , dev_xp );
     201    dev_pic_enable_irq( lid , ioc_xp );
     202
     203    ioc_dmsg("\n[INFO] %s : exit in cluster %x\n",
     204             __FUNCTION__ , local_cxy );
    194205
    195206    // return I/O operation status from calling thread descriptor
    196     return this->command.ioc.error;
    197 }
    198 
     207    return this->ioc_cmd.error;
     208
     209}  // end ioc_sync_read()
     210
  • trunk/kernel/devices/dev_mmc.c

    r257 r279  
    6565{
    6666    // get extended pointer on MMC device descriptor
    67     xptr_t  dev_xp = this->command.mmc.dev_xp;
     67    xptr_t  dev_xp = this->mmc_cmd.dev_xp;
    6868
    6969    assert( (dev_xp != XPTR_NULL) , __FUNCTION__ , "target MMC device undefined" );
     
    8686
    8787    // return operation status
    88     return this->command.mmc.error; 
     88    return this->mmc_cmd.error; 
    8989
    9090}  // end dev_mmc_access()
     
    116116
    117117    // store command arguments in thread descriptor
    118     this->command.mmc.dev_xp    = chdev_dir.mmc[buf_cxy];
    119     this->command.mmc.type      = MMC_CC_INVAL;
    120     this->command.mmc.buf_paddr = buf_paddr;
    121     this->command.mmc.buf_size  = buf_size;
     118    this->mmc_cmd.dev_xp    = chdev_dir.mmc[buf_cxy];
     119    this->mmc_cmd.type      = MMC_CC_INVAL;
     120    this->mmc_cmd.buf_paddr = buf_paddr;
     121    this->mmc_cmd.buf_size  = buf_size;
    122122
    123123    // call MMC driver
     
    156156
    157157    // store command arguments in thread descriptor
    158     this->command.mmc.dev_xp    = chdev_dir.mmc[buf_cxy];
    159     this->command.mmc.type      = MMC_CC_SYNC;
    160     this->command.mmc.buf_paddr = buf_paddr;
    161     this->command.mmc.buf_size  = buf_size;
     158    this->mmc_cmd.dev_xp    = chdev_dir.mmc[buf_cxy];
     159    this->mmc_cmd.type      = MMC_CC_SYNC;
     160    this->mmc_cmd.buf_paddr = buf_paddr;
     161    this->mmc_cmd.buf_size  = buf_size;
    162162
    163163    // call MMC driver
     
    179179
    180180    // store command arguments in thread descriptor
    181     this->command.mmc.dev_xp    = chdev_dir.mmc[cxy];
    182     this->command.mmc.type      = MMC_SET_ERROR;
    183     this->command.mmc.reg_index = index;
    184     this->command.mmc.reg_ptr   = &wdata;
     181    this->mmc_cmd.dev_xp    = chdev_dir.mmc[cxy];
     182    this->mmc_cmd.type      = MMC_SET_ERROR;
     183    this->mmc_cmd.reg_index = index;
     184    this->mmc_cmd.reg_ptr   = &wdata;
    185185
    186186    // execute operation
     
    197197
    198198    // store command arguments in thread descriptor
    199     this->command.mmc.dev_xp    = chdev_dir.mmc[cxy];
    200     this->command.mmc.type      = MMC_GET_ERROR;
    201     this->command.mmc.reg_index = index;
    202     this->command.mmc.reg_ptr   = rdata;
     199    this->mmc_cmd.dev_xp    = chdev_dir.mmc[cxy];
     200    this->mmc_cmd.type      = MMC_GET_ERROR;
     201    this->mmc_cmd.reg_index = index;
     202    this->mmc_cmd.reg_ptr   = rdata;
    203203
    204204    // execute operation
     
    215215
    216216    // store command arguments in thread descriptor
    217     this->command.mmc.dev_xp    = chdev_dir.mmc[cxy];
    218     this->command.mmc.type      = MMC_GET_INSTRU;
    219     this->command.mmc.reg_index = index;
    220     this->command.mmc.reg_ptr   = rdata;
     217    this->mmc_cmd.dev_xp    = chdev_dir.mmc[cxy];
     218    this->mmc_cmd.type      = MMC_GET_INSTRU;
     219    this->mmc_cmd.reg_index = index;
     220    this->mmc_cmd.reg_ptr   = rdata;
    221221
    222222    // execute operation
  • trunk/kernel/devices/dev_nic.c

    r259 r279  
    110110
    111111    // initialize command in thread descriptor
    112     thread_ptr->command.nic.dev_xp = dev_xp;
     112    thread_ptr->nic_cmd.dev_xp = dev_xp;
    113113
    114114    // call driver to test readable
    115     thread_ptr->command.nic.cmd = NIC_CMD_READABLE;
    116     dev_ptr->cmd( thread_xp );
    117 
    118     // check error
    119     error = thread_ptr->command.nic.error;
     115    thread_ptr->nic_cmd.cmd = NIC_CMD_READABLE;
     116    dev_ptr->cmd( thread_xp );
     117
     118    // check error
     119    error = thread_ptr->nic_cmd.error;
    120120    if( error ) return error;
    121121
    122122    // block and deschedule if queue non readable
    123     if( thread_ptr->command.nic.status == false ) 
     123    if( thread_ptr->nic_cmd.status == false ) 
    124124    {
    125125        // enable NIC-RX IRQ
     
    135135
    136136    // call driver for actual read
    137     thread_ptr->command.nic.cmd     = NIC_CMD_READ;
    138     thread_ptr->command.nic.buffer  = pkd->buffer;
    139     dev_ptr->cmd( thread_xp );
    140 
    141     // check error
    142     error = thread_ptr->command.nic.error;
     137    thread_ptr->nic_cmd.cmd     = NIC_CMD_READ;
     138    thread_ptr->nic_cmd.buffer  = pkd->buffer;
     139    dev_ptr->cmd( thread_xp );
     140
     141    // check error
     142    error = thread_ptr->nic_cmd.error;
    143143    if( error ) return error;
    144144
    145145    // returns packet length   
    146     pkd->length = thread_ptr->command.nic.length;
     146    pkd->length = thread_ptr->nic_cmd.length;
    147147
    148148    nic_dmsg("\n[INFO] %s exit for NIC-RX thread on core %d in cluster %x\n",
     
    180180
    181181    // initialize command in thread descriptor
    182     thread_ptr->command.nic.dev_xp = dev_xp;
     182    thread_ptr->nic_cmd.dev_xp = dev_xp;
    183183
    184184    // call driver to test writable
    185     thread_ptr->command.nic.cmd = NIC_CMD_WRITABLE;
    186     dev_ptr->cmd( thread_xp );
    187 
    188     // check error
    189     error = thread_ptr->command.nic.error;
     185    thread_ptr->nic_cmd.cmd = NIC_CMD_WRITABLE;
     186    dev_ptr->cmd( thread_xp );
     187
     188    // check error
     189    error = thread_ptr->nic_cmd.error;
    190190    if( error ) return error;
    191191
    192192    // block and deschedule if queue non writable
    193     if( thread_ptr->command.nic.status == false ) 
     193    if( thread_ptr->nic_cmd.status == false ) 
    194194    {
    195195        // enable NIC-TX IRQ
     
    205205
    206206    // call driver for actual write
    207     thread_ptr->command.nic.cmd    = NIC_CMD_WRITE;
    208     thread_ptr->command.nic.buffer = pkd->buffer;
    209     thread_ptr->command.nic.length = pkd->length;
    210     dev_ptr->cmd( thread_xp );
    211 
    212     // check error
    213     error = thread_ptr->command.nic.error;
     207    thread_ptr->nic_cmd.cmd    = NIC_CMD_WRITE;
     208    thread_ptr->nic_cmd.buffer = pkd->buffer;
     209    thread_ptr->nic_cmd.length = pkd->length;
     210    dev_ptr->cmd( thread_xp );
     211
     212    // check error
     213    error = thread_ptr->nic_cmd.error;
    214214    if( error ) return error;
    215215
  • trunk/kernel/devices/dev_pic.c

    r252 r279  
    2626#include <chdev.h>
    2727#include <printk.h>
     28#include <thread.h>
    2829#include <hal_drivers.h>
    2930#include <dev_pic.h>
     
    8283                         xptr_t  src_chdev_xp )
    8384{
     85    irq_dmsg("\n[INFO] %s : core = [%x,%d] / source_chdev_xp = %l\n",
     86             __FUNCTION__ , local_cxy , lid , src_chdev_xp );
     87
    8488    // get pointer on PIC chdev
    8589    chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
     
    97101                          xptr_t  src_chdev_xp )
    98102{
     103    irq_dmsg("\n[INFO] %s : core = [%x,%d] / source_chdev_xp = %l\n",
     104             __FUNCTION__ , local_cxy , lid , src_chdev_xp );
     105
    99106    // get pointer on PIC chdev
    100107    chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
     
    111118void dev_pic_enable_timer( uint32_t period )
    112119{
     120    irq_dmsg("\n[INFO] %s : core = [%x,%d] / period = %d\n",
     121             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , period );
     122
    113123    // get pointer on PIC chdev
    114124    chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
     
    122132}
    123133
     134/////////////////////////
     135void dev_pic_enable_ipi()
     136{
     137    irq_dmsg("\n[INFO] %s : core = [%x,%d]\n",
     138             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     139
     140    // get pointer on PIC chdev
     141    chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
     142    cxy_t     pic_cxy = GET_CXY( chdev_dir.pic );
     143
     144    // get pointer on enable_timer function
     145    enable_ipi_t * f = hal_remote_lpt( XPTR( pic_cxy , &pic_ptr->ext.pic.enable_ipi ) );
     146
     147    // call relevant driver function
     148    f();
     149}
     150
    124151//////////////////////////////////
    125152void dev_pic_send_ipi( cxy_t  cxy,
    126153                       lid_t  lid )
    127154{
     155    irq_dmsg("\n[INFO] %s : enter / src_core = [%x,%d] / dst_core = [%x,%d] / cycle = %d\n",
     156             __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, hal_time_stamp() );
     157
    128158    // get pointer on PIC chdev
    129159    chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
     
    135165    // call relevant driver function
    136166    f( cxy , lid );
     167
     168    irq_dmsg("\n[INFO] %s : exit / src_core = [%x,%d] / dst_core = [%x,%d] / cycle = %d\n",
     169             __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, hal_time_stamp() );
    137170}
    138171
  • trunk/kernel/devices/dev_pic.h

    r205 r279  
    3434 * to route a given IRQ to a given core, in a given cluster, and to help the interrupt
    3535 * handler to select  and execute the relevant ISR (Interrupt Service Routine).
    36  * It handles the following type of interrupts:
    37  * - External IRQs generated by the external (shared) peripherals.
    38  * - Internal IRQs generated by the internal (replicated) peripherals.
    39  * - Timer IRQs generated by the timers (one timer per core).
    40  * - Inter Processor IRQs (IPI) generated by software.
    41  *
    42  * In most supported manycores architectures, the PIC device contains two types
     36 * It handles the four following types of interrupts:
     37 *
     38 * 1) EXT_IRQ (External IRQ) generated by the external (shared) peripherals.
     39 * 2) INT_IRQ (Internal IRQ) generated by the internal (replicated) peripherals.
     40 * 3) TIM_IRQ (Timer IRQ) generated by the timers (one timer per core).
     41 * 4) IPI_IRQ (Inter Processor IRQ) generated by software (one IPI per core).
     42 *
     43 * In supported manycores architectures, the PIC device contains two types
    4344 * of hardware components:
    4445 * - the IOPIC is an external component, handling all external peripherals IRQs.
     
    5455 * at kernel initialization.
    5556 *
    56  * The PIC device defines 4 generic commands that can be used by each kernel instance,
     57 * The PIC device defines generic commands that can be used by each kernel instance,
    5758 * - to create in local cluster the PIC implementation specific interupt vector(s),
    5859 * - to bind a given IRQ (internal or external IRQ to a given core in the local cluster,
     
    6465 * cluster manager or to the core descriptors to register the interrupt vectors
    6566 * used by the kernel to select the relevant ISR when an interrupt is received
    66  * by a given core in agiven cluster.
     67 * by a given core in a given cluster.
    6768 
    6869 * This PIC device does not execute itself I/O operations. It is just acting as a
     
    8788typedef void   (disable_irq_t)  ( lid_t lid , xptr_t src_chdev_xp );   
    8889typedef void   (enable_timer_t) ( uint32_t period );   
     90typedef void   (enable_ipi_t)   ( );   
    8991typedef void   (send_ipi_t)     ( cxy_t cxy , lid_t lid );
    9092typedef void   (extend_init_t)  ( uint32_t * lapic_base );
     
    9698    disable_irq_t   * disable_irq;   /*! pointer on the driver "disable_irq" function   */
    9799    enable_timer_t  * enable_timer;  /*! pointer on the driver "enable_timer" function  */
     100    enable_timer_t  * enable_ipi;    /*! pointer on the driver "enable_ipi" function    */
    98101    send_ipi_t      * send_ipi;      /*! pointer on the driver "send_ipi" function      */
    99102    extend_init_t   * extend_init;   /*! pointer on the driver "init_extend" function   */
     
    186189
    187190/*****************************************************************************************
    188  * This function enables remote IRQ generated by a remote chdev, defined by the
     191 * This function enables the IRQ generated by a remote chdev, defined by the
    189192 * <src_chdev_xp> argument. It can be called by any thread running in any cluster,
    190193 * and can be used for both internal & external IRQs.
     
    199202 * This function disables remote IRQ generated by a remote chdev, defined by the
    200203 * <src_chdev_xp> argument. It can be called by any thread running in any cluster,
    201  * and can be used for both internal & external IRQs.
     204 * and can be used for both INT_IRq & EXT_IRQ.
    202205 *****************************************************************************************
    203206 * @ lid           : target core local index (in cluster containing the source chdev).
     
    208211
    209212/*****************************************************************************************
    210  * This function activates the TICK timer for the calling core.
    211  * The <period> argument define the number of cycles between IRQs.
     213 * This function activates the TIM_IRQ for the calling core.
     214 * The <period> argument define the number of cycles between twoo successive IRQs.
    212215 *****************************************************************************************
    213216 * @ period      : number of cycles between IRQs.
    214217 ****************************************************************************************/
    215218void dev_pic_enable_timer( uint32_t period );
     219
     220/*****************************************************************************************
     221 * This function activates the IPI_IRQ for the calling core.
     222 ****************************************************************************************/
     223void dev_pic_enable_ipi();
    216224
    217225/*****************************************************************************************
  • trunk/kernel/devices/dev_txt.c

    r255 r279  
    112112
    113113    // register command in calling thread descriptor
    114     this->command.txt.dev_xp  = dev_xp;
    115     this->command.txt.type    = type;
    116     this->command.txt.buf_xp  = XPTR( local_cxy , buffer );
    117     this->command.txt.count   = count;
     114    this->txt_cmd.dev_xp  = dev_xp;
     115    this->txt_cmd.type    = type;
     116    this->txt_cmd.buf_xp  = XPTR( local_cxy , buffer );
     117    this->txt_cmd.count   = count;
    118118
    119119    // register client thread in waiting queue, activate server thread
     
    123123
    124124    txt_dmsg("\n[INFO] in %s : thread %x in process %x completes / error = %d\n",
    125              __FUNCTION__ , this->trdid , this->process->pid , this->command.txt.error );
     125             __FUNCTION__ , this->trdid , this->process->pid , this->txt_cmd.error );
    126126
    127127    // return I/O operation status from calling thread descriptor
    128     return this->command.txt.error;
     128    return this->txt_cmd.error;
    129129}
    130130
     
    157157    assert( (dev_xp != XPTR_NULL) , __FUNCTION__ , "undefined TXT0 chdev descriptor" );
    158158
    159     // register command in calling thread
    160     this->command.txt.dev_xp  = dev_xp;
    161     this->command.txt.type    = TXT_SYNC_WRITE;
    162     this->command.txt.buf_xp  = XPTR( local_cxy , buffer );
    163     this->command.txt.count   = count;
     159    // register command in calling thread descriptor
     160    this->txt_cmd.dev_xp  = dev_xp;
     161    this->txt_cmd.type    = TXT_SYNC_WRITE;
     162    this->txt_cmd.buf_xp  = XPTR( local_cxy , buffer );
     163    this->txt_cmd.count   = count;
    164164
    165165    // get driver command function
     
    168168    dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) );
    169169
    170     // call directly driver command
     170    // call driver function
    171171    cmd( XPTR( local_cxy , this ) );
    172172
    173173    // return I/O operation status from calling thread descriptor
    174     return this->command.txt.error;
     174    return this->txt_cmd.error;
    175175}
    176176
  • trunk/kernel/kern/chdev.h

    r249 r279  
    169169    xptr_t   nic_tx[CONFIG_MAX_NIC_CHANNELS];    // external / multi-channels / shared
    170170
    171     xptr_t   icu[CONFIG_MAX_CLUSTERS];           // internal / single channel / shared
    172171    xptr_t   mmc[CONFIG_MAX_CLUSTERS];           // internal / single channel / shared
    173172
  • trunk/kernel/kern/cluster.c

    r124 r279  
    7777    // initialize cluster local parameters
    7878        cluster->cores_nr        = info->cores_nr;
    79     cluster->cores_in_kernel = info->cores_nr; // all cpus start in kernel mode
     79    cluster->cores_in_kernel = 0;
    8080
    8181    // initialize the lock protecting the embedded kcm allocator
     
    130130    // initialises RPC fifo
    131131        rpc_fifo_init( &cluster->rpc_fifo );
     132    cluster->rpc_threads = 0;
    132133
    133134    cluster_dmsg("\n[INFO] %s : RPC fifo inialized in cluster %x at cycle %d\n",
  • trunk/kernel/kern/cluster.h

    r188 r279  
    9191 * This structure defines a cluster manager.
    9292 * It contains both global platform information, and cluster specific resources
    93  * managed by the local kernel instance.
     93 * controled by the local kernel instance.
    9494 ******************************************************************************************/
    9595
     
    9999
    100100    // global parameters
    101 
    102101        uint32_t          paddr_width;     /*! numer of bits in physical address              */
    103102    uint32_t          x_width;         /*! number of bits to code x_size  (can be 0)      */
     
    109108
    110109    // local parameters
    111 
    112110        uint32_t          cores_nr;        /*! number of cores in cluster                     */
    113111    uint32_t          cores_in_kernel; /*! number of cores currently in kernel mode       */
    114112
     113    uint32_t          ram_size;        /*! physical memory size                           */
     114    uint32_t          ram_base;        /*! physical memory base (local address)           */
     115
    115116        core_t            core_tbl[CONFIG_MAX_LOCAL_CORES];         /*! embedded cores        */
    116117
     118        list_entry_t      dev_root;        /*! root of list of devices in cluster             */
     119
     120    // memory allocators
    117121        ppm_t             ppm;             /*! embedded kernel page manager                   */
    118122        khm_t             khm;             /*! embedded kernel heap manager                   */
    119123        kcm_t             kcm;             /*! embedded kernel cache manager (for KCMs)       */
    120 
    121124    kcm_t           * kcm_tbl[KMEM_TYPES_NR];         /*! pointers on allocated KCMs      */
    122125
    123     uint32_t          ram_size;        /*! physical memory size                           */
    124     uint32_t          ram_base;        /*! physical memory base (local address)           */
    125 
    126         rpc_fifo_t        rpc_fifo;        /*! cluster RPC fifo (shared)                      */
    127         list_entry_t      devlist;         /*! root of list of devices in cluster             */
    128 
     126    // RPC
     127        rpc_fifo_t        rpc_fifo;        /*! RPC fifo                                       */
     128    uint32_t          rpc_threads;     /*! current number of RPC threads                  */
     129
     130    // DQDT
    129131    int32_t           pages_var;       /*! pages number increment from last DQQT update   */
    130132    int32_t           threads_var;     /*! threads number increment from last DQDT update */
     
    132134        dqdt_node_t       dqdt_tbl[CONFIG_MAX_DQDT_DEPTH];     /*! embedded DQDT nodes        */
    133135
     136    // Local process manager
    134137    pmgr_t            pmgr;            /*! embedded process manager                       */
    135138
  • trunk/kernel/kern/core.c

    r188 r279  
    5050        core->usage             = 0;
    5151        core->spurious_irqs     = 0;
    52     core->rpc_threads       = 0;
    5352        core->thread_idle       = NULL;
    5453        core->fpu_owner         = NULL;
  • trunk/kernel/kern/core.h

    r188 r279  
    5656        uint32_t            usage;          /*! cumulated busy_percent (idle / total)      */
    5757        uint32_t            spurious_irqs;  /*! for instrumentation...                     */
    58     uint32_t            rpc_threads;    /*! current RPC threads number for this core   */
    59         struct thread_s   * thread_rpc;     /*! pointer on current RPC thread descriptor   */
    6058        struct thread_s   * thread_idle;    /*! pointer on idle thread descriptor          */
    6159        struct thread_s   * fpu_owner;      /*! pointer on current FPU owner thread        */
  • trunk/kernel/kern/kernel_init.c

    r265 r279  
    2828#include <hal_special.h>
    2929#include <hal_context.h>
     30#include <hal_irqmask.h>
    3031#include <barrier.h>
    3132#include <remote_barrier.h>
     
    5960
    6061///////////////////////////////////////////////////////////////////////////////////////////
    61 // All these global variables are replicated in all clusters.
     62// All the following global variables are replicated in all clusters.
    6263// They are initialised by the kernel_init() function.
    6364//
     
    135136           "    /_/        \\_\\ |______| |_|    |_|   \\_____/  |______/        |_|    |_|  |_|  \\_\\ |_|   |_|  \n"
    136137           "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n"
    137            "\n\n\t\t\t Version 0.0   :   %d clusters   /   %d cores per cluster\n\n", nclusters , ncores );
     138           "\n\n\t\t\t Version 0.0 : %d cluster(s)   /   %d core(s) per cluster\n\n", nclusters , ncores );
    138139}
    139140
     
    274275            }
    275276
    276             if( local_cxy == 0 )
    277             kinit_dmsg("\n[INFO] %s created MMC chdev in cluster 0 at cycle %d\n",
    278                        __FUNCTION__ , local_cxy , (uint32_t)hal_time_stamp() );
     277            kinit_dmsg("\n[INFO] %s created MMC in cluster %x / chdev = %x\n",
     278                       __FUNCTION__ , channel , local_cxy , chdev_ptr );
    279279        }
    280280        ///////////////////////////////
     
    301301                chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );
    302302
    303                 kinit_dmsg("\n[INFO] %s created DMA[%d] chdev in cluster 0 at cycle %d\n",
    304                            __FUNCTION__ , channel , (uint32_t)hal_time_stamp() );
     303                kinit_dmsg("\n[INFO] %s created DMA[%d] in cluster %x / chdev = %x\n",
     304                           __FUNCTION__ , channel , local_cxy , chdev_ptr );
    305305            }
    306306        }
     
    433433                    }
    434434
    435                             kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x at cycle %d\n",
    436                                __FUNCTION__ , chdev_func_str( func ), channel,
    437                                local_cxy , (uint32_t)hal_time_stamp() );
     435                            kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x / chdev = %x\n",
     436                    __FUNCTION__ , chdev_func_str( func ), channel , local_cxy , chdev );
    438437
    439438                }  // end if match
     
    658657}
    659658
     659////////////////////////////////////////////////////////////////////////////////////////////
     660// This function display on TXT0 the content of the external chdev directory,
     661// in the local cluster.
     662////////////////////////////////////////////////////////////////////////////////////////////
     663static void chdev_dir_display( )
     664{
     665    cxy_t     iob_cxy  = GET_CXY( chdev_dir.iob );
     666    chdev_t * iob_ptr  = (chdev_t *)GET_PTR( chdev_dir.iob );
     667    xptr_t    iob_base = hal_remote_lwd( XPTR( iob_cxy , &iob_ptr->base ) );
     668
     669    cxy_t     pic_cxy  = GET_CXY( chdev_dir.pic );
     670    chdev_t * pic_ptr  = (chdev_t *)GET_PTR( chdev_dir.pic );
     671    xptr_t    pic_base = hal_remote_lwd( XPTR( pic_cxy , &pic_ptr->base ) );
     672
     673    cxy_t     txt0_cxy  = GET_CXY( chdev_dir.txt[0] );
     674    chdev_t * txt0_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[0] );
     675    xptr_t    txt0_base = hal_remote_lwd( XPTR( txt0_cxy , &txt0_ptr->base ) );
     676
     677    cxy_t     txt1_cxy  = GET_CXY( chdev_dir.txt[1] );
     678    chdev_t * txt1_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[1] );
     679    xptr_t    txt1_base = hal_remote_lwd( XPTR( txt1_cxy , &txt1_ptr->base ) );
     680
     681    cxy_t     txt2_cxy  = GET_CXY( chdev_dir.txt[2] );
     682    chdev_t * txt2_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[2] );
     683    xptr_t    txt2_base = hal_remote_lwd( XPTR( txt2_cxy , &txt2_ptr->base ) );
     684
     685    cxy_t     ioc_cxy  = GET_CXY( chdev_dir.ioc[0] );
     686    chdev_t * ioc_ptr  = (chdev_t *)GET_PTR( chdev_dir.ioc[0] );
     687    xptr_t    ioc_base = hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) );
     688
     689    cxy_t     fbf_cxy  = GET_CXY( chdev_dir.fbf[0] );
     690    chdev_t * fbf_ptr  = (chdev_t *)GET_PTR( chdev_dir.fbf[0] );
     691    xptr_t    fbf_base = hal_remote_lwd( XPTR( fbf_cxy , &fbf_ptr->base ) );
     692
     693    cxy_t     nic_rx_cxy  = GET_CXY( chdev_dir.nic_rx[0] );
     694    chdev_t * nic_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_rx[0] );
     695    xptr_t    nic_rx_base = hal_remote_lwd( XPTR( nic_rx_cxy , &nic_rx_ptr->base ) );
     696
     697    cxy_t     nic_tx_cxy  = GET_CXY( chdev_dir.nic_tx[0] );
     698    chdev_t * nic_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_tx[0] );
     699    xptr_t    nic_tx_base = hal_remote_lwd( XPTR( nic_tx_cxy , &nic_tx_ptr->base ) );
     700
     701    printk("\n*** external chdev directory in cluster %x\n"
     702           "  - iob       = %l / base = %l\n"
     703           "  - pic       = %l / base = %l\n"
     704           "  - txt[0]    = %l / base = %l\n"
     705           "  - txt[1]    = %l / base = %l\n"
     706           "  - txt[2]    = %l / base = %l\n"
     707           "  - ioc[0]    = %l / base = %l\n"
     708           "  - fbf[0]    = %l / base = %l\n"
     709           "  - nic_rx[0] = %l / base = %l\n"
     710           "  - nic_tx[0] = %l / base = %l\n",
     711           local_cxy,
     712           chdev_dir.iob, iob_base,
     713           chdev_dir.pic, pic_base,
     714           chdev_dir.txt[0], txt0_base,
     715           chdev_dir.txt[1], txt1_base,
     716           chdev_dir.txt[2], txt2_base,
     717           chdev_dir.ioc[0], ioc_base,
     718           chdev_dir.fbf[0], fbf_base,
     719           chdev_dir.nic_rx[0], nic_rx_base,
     720           chdev_dir.nic_tx[0], nic_tx_base );
     721}
     722
    660723///////////////////////////////////////////////////////////////////////////////////////////
    661724// This function is the entry point for the kernel initialisation.
     
    683746
    684747    error_t      error;
     748    uint32_t     status;                    // running core status register
    685749
    686750    cxy_t        io_cxy = info->io_cxy;
     
    732796    if( error )
    733797    {
    734         nolock_printk("\n[PANIC] in %s : illegal core identifiers"
     798        printk("\n[PANIC] in %s : illegal core identifiers"
    735799               " gid = %x / cxy = %x / lid = %d\n",
    736800               __FUNCTION__ , core_lid , core_cxy , core_lid );
     
    745809        if( error )
    746810        {
    747             nolock_printk("\n[PANIC] in %s : cannot initialise cluster %x",
     811            printk("\n[PANIC] in %s : cannot initialise cluster %x",
    748812                   __FUNCTION__ , local_cxy );
    749813            hal_core_sleep();
     
    764828    // STEP 2 : all CP0s initialize the process_zero descriptor.
    765829    //          CP0 in cluster 0 initialises the IOPIC device.
    766     //          all CP0s complete the distibuted LAPIC initialization.
    767830    /////////////////////////////////////////////////////////////////////////////////
    768831
     
    777840    if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info );
    778841   
    779     // all CP0s initialize their local LAPIC extension,
    780     if( core_lid == 0 ) lapic_init( info );
    781 
    782842    ////////////////////////////////////////////////////////////////////////////////
    783843    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
     
    791851
    792852    ////////////////////////////////////////////////////////////////////////////////
    793     // STEP 3 : all CP0s initialize their local chdev descriptors
    794     //          (both internal devices and external devices).
     853    // STEP 3 : all CP0s complete the distibuted LAPIC initialization.
     854    //          all CP0s initialize their internal chdev descriptors
     855    //          all CP0s initialize their local external chdev descriptors
    795856    ////////////////////////////////////////////////////////////////////////////////
     857
     858    // all CP0s initialize their local LAPIC extension,
     859    if( core_lid == 0 ) lapic_init( info );
    796860
    797861    // CP0 scan the internal (private) peripherals,
     
    818882
    819883    /////////////////////////////////////////////////////////////////////////////////
    820     // STEP 4 : Alls cores initialize their private IDLE thread.
     884    // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
     885    //          Alh cores initialize IDLE thread.
    821886    //          Only CP0 in cluster 0 creates the VFS root inode.
    822887    //          It access the boot device to initialize the file system context.
    823888    /////////////////////////////////////////////////////////////////////////////////
    824889
    825     // all cores create idle thread descriptor
     890    if( CONFIG_KINIT_DEBUG ) chdev_dir_display();
     891   
     892    // All cores enable the shared IPI channel
     893
     894// @@@
     895    hal_set_ebase( 0x1000 );
     896// @@@
     897
     898    dev_pic_enable_ipi();
     899    hal_enable_irq( &status );
     900
     901    kinit_dmsg("\n[INFO] %s : IRQs enabled for core[%x,%d] / SR = %x\n",
     902               __FUNCTION__ , local_cxy , core_lid , hal_get_sr() );
     903
     904    // all cores create the idle thread descriptor
    826905    error = thread_kernel_init( thread,
    827906                                THREAD_IDLE,
     
    831910    if( error )
    832911    {
    833         nolock_printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",
     912        printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",
    834913                      __FUNCTION__ , local_cxy , core_lid );
    835914        hal_core_sleep();
     
    860939            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    861940
    862             nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
    863                            "cannot create FATFS context in cluster 0\n" );
     941            assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
     942                    "cannot create FATFS context in cluster 0\n" );
    864943
    865944            // 2. access boot device to initialize FATFS context
     
    883962                                      &vfs_root_inode_xp );                // return
    884963
    885             nolock_assert( (error == 0) , __FUNCTION__ ,
    886                            "cannot create VFS root inode\n" );
     964            assert( (error == 0) , __FUNCTION__ ,
     965                    "cannot create VFS root inode\n" );
    887966
    888967            // 5. initialize VFS context for FAT in cluster 0
     
    896975        else
    897976        {
    898             nolock_printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );
     977            printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );
    899978            hal_core_sleep();
    900979        }
     
    9311010            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    9321011
    933             nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
    934                            "cannot create FATFS context\n" );
     1012            assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
     1013                    "cannot create FATFS context\n" );
    9351014
    9361015            // get local pointer on VFS context for FATFS
     
    9651044    /////////////////////////////////////////////////////////////////////////////////
    9661045
    967     if( (core_lid ==  0) && (local_cxy == 0) )
     1046//    if( (core_lid ==  0) && (local_cxy == 0) )
    9681047    kinit_dmsg("\n[INFO] %s exit barrier 5 at cycle %d : VFS OK in all clusters\n",
    9691048               __FUNCTION__, (uint32_t)hal_time_stamp());
     
    9861065        devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();
    9871066
    988         nolock_assert( (devfs_ctx != NULL) , __FUNCTION__ ,
    989                        "cannot create DEVFS context in cluster IO\n");
     1067        assert( (devfs_ctx != NULL) , __FUNCTION__ ,
     1068                "cannot create DEVFS context in cluster IO\n");
    9901069
    9911070        // register DEVFS root and external directories
     
    9931072    }   
    9941073
     1074printk("\n@@@ %s : cluster %x reach barrier 6\n", __FUNCTION__ , local_cxy );
     1075
    9951076    /////////////////////////////////////////////////////////////////////////////////
    9961077    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
     
    9991080    /////////////////////////////////////////////////////////////////////////////////
    10001081
    1001     if( (core_lid ==  0) && (local_cxy == 0) )
     1082//    if( (core_lid ==  0) && (local_cxy == 0) )
    10021083    kinit_dmsg("\n[INFO] %s exit barrier 6 at cycle %d : DEVFS OK in cluster IO\n",
    10031084               __FUNCTION__, (uint32_t)hal_time_stamp());
     
    10711152        print_banner( (info->x_size * info->y_size) , info->cores_nr );
    10721153
    1073         kinit_dmsg("\n\n*** memory fooprint of main kernet objects ***\n"
     1154        kinit_dmsg("\n\n*** memory fooprint for main kernet objects ***\n\n"
    10741155                   " - thread descriptor  : %d bytes\n"
    10751156                   " - process descriptor : %d bytes\n"
     
    11141195    }
    11151196
    1116     // each core activates its private PTI IRQ
     1197    // each core activates its private TICK IRQ
    11171198    dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD );
    11181199
  • trunk/kernel/kern/printk.c

    r246 r279  
    401401}
    402402
    403 ////////////////////////////////////////
    404 void nolock_printk( char * format , ...)
    405 {
    406     va_list   args;
    407 
    408     // call kernel_printf on TXT0, in busy waiting mode
    409     va_start( args , format );
    410     kernel_printf( 0 , 1 , format , &args );
    411     va_end( args );
    412 }
    413 
    414403///////////////////////////////////////////
    415404inline void assert( bool_t       condition,
     
    424413}
    425414
    426 //////////////////////////////////////////////////
    427 inline void nolock_assert( bool_t       condition,
    428                            const char * function_name,
    429                            char       * string )
    430 {
    431     if( condition == false )
    432     {
    433         nolock_printk("\n[PANIC] in %s : %s\n" , function_name , string );
    434         hal_core_sleep();
    435     }
    436 }
    437 
    438 
    439415
    440416// Local Variables:
  • trunk/kernel/kern/printk.h

    r188 r279  
    7474
    7575/**********************************************************************************
    76  * This function displays a formated string on the kernel terminal TXT0,
    77  * using a busy waiting policy: It calls directly the relevant TXT driver,
    78  * without taking the the lock protecting exclusive access to TXT0 terminal.
    79  **********************************************************************************
    80  * @ format     : formated string.
    81  *********************************************************************************/
    82 void nolock_printk( char* format, ... );
    83 
    84 /**********************************************************************************
    8576 * This function displays a "PANIC" message and force the calling core in
    8677 * sleeping mode if a Boolean condition is false.
     
    9586                    char       * string );
    9687
    97 /**********************************************************************************
    98  * This function displays a "PANIC" message and force the calling core in
    99  * sleeping mode if a Boolean condition is false,
    100  * without taking the the lock protecting exclusive access to TXT0 terminal.
    101  **********************************************************************************
    102  * @ condition     : condition that must be true.
    103  * @ function_name : name of the calling function.
    104  * @ string        : error message if condition is false.
    105  *********************************************************************************/
    106 inline void nolock_assert( bool_t       condition,
    107                            const char * function_name,
    108                            char       * string );
    109 
    11088///////////////////////////////////////////////////////////////////////////////////
    11189//       Conditionnal debug macros
     
    215193
    216194#if CONFIG_KINIT_DEBUG
    217 #define kinit_dmsg(...) nolock_printk(__VA_ARGS__)
     195#define kinit_dmsg(...) printk(__VA_ARGS__)
    218196#else
    219197#define kinit_dmsg(...)
  • trunk/kernel/kern/process.c

    r204 r279  
    9090    pid_t       parent_pid;
    9191
    92     process_dmsg("\n[INFO] %s : enters for process %x in cluster %x / parent_xp = %l\n",
    93                  __FUNCTION__ , pid , parent_xp );
     92    process_dmsg("\n[INFO] %s : enters for process %x in cluster %x\n",
     93                 __FUNCTION__ , pid , local_cxy );
    9494
    9595    // get parent process cluster, local pointer, and pid
     
    198198    local_process->ref_xp = reference_process_xp;
    199199
     200    process_dmsg("\n[INFO] %s : enter for process %x in cluster %x\n",
     201                 __FUNCTION__ , local_process->pid );
     202
    200203    // reset children list root (not used in a process descriptor copy)
    201204    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
     
    229232
    230233        hal_fence();
     234
     235    process_dmsg("\n[INFO] %s : exit for process %x in cluster %x\n",
     236                 __FUNCTION__ , local_process->pid );
    231237
    232238    return 0;
  • trunk/kernel/kern/rpc.c

    r265 r279  
    101101    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    102102
     103    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     104
    103105    // initialise RPC descriptor header
    104106    rpc_desc_t  rpc;
     
    115117    *error  = (error_t)rpc.args[0];     
    116118    *ppn    = (uint32_t)rpc.args[1];
     119
     120    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    117121}
    118122
     
    153157    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    154158
     159    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     160
    155161    // initialise RPC descriptor header
    156162    rpc_desc_t  rpc;
     
    167173    *pid    = (pid_t)rpc.args[1];
    168174    *error  = (error_t)rpc.args[2];     
     175
     176    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    169177}
    170178
     
    204212    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    205213
     214    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     215
    206216    // initialise RPC descriptor header
    207217    rpc_desc_t  rpc;
     
    217227    // get output arguments from RPC descriptor
    218228    *error  = (error_t)rpc.args[1];     
     229
     230    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    219231}
    220232
     
    256268    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ ,
    257269            "caller must be reference process cluster\n");
     270
     271    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
    258272
    259273    // get local process index in reference cluster
     
    282296        if( target_cxy != local_cxy ) rpc_send_sync( target_cxy , &rpc );
    283297    }
     298
     299    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    284300
    285301
     
    327343    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    328344
     345    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     346
    329347    // initialise RPC descriptor header
    330348    rpc_desc_t  rpc;
     
    344362    *thread_xp = (xptr_t)rpc.args[4];
    345363    *error     = (error_t)rpc.args[5];
     364
     365    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    346366}
    347367
     
    405425    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    406426
     427    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     428
    407429    // initialise RPC descriptor header
    408430    rpc_desc_t  rpc;
     
    421443    *thread_xp = (xptr_t)rpc.args[3];
    422444    *error     = (error_t)rpc.args[4];
     445
     446    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    423447}
    424448
     
    463487    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    464488
     489    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     490
    465491    // initialise RPC descriptor header
    466492    rpc_desc_t  rpc;
     
    474500    // register RPC request in remote RPC fifo
    475501    rpc_send_sync( cxy , &rpc );
     502
     503    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    476504}
    477505
     
    513541    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    514542
     543    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     544
    515545    // initialise RPC descriptor header
    516546    rpc_desc_t  rpc;
     
    534564    *inode_xp = (xptr_t)rpc.args[8];
    535565    *error    = (error_t)rpc.args[9];
     566
     567    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    536568}
    537569
     
    590622    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    591623
     624    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     625
    592626    // initialise RPC descriptor header
    593627    rpc_desc_t  rpc;
     
    600634    // register RPC request in remote RPC fifo (blocking function)
    601635    rpc_send_sync( cxy , &rpc );
     636
     637    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    602638}
    603639
     
    632668    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    633669
     670    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     671
    634672    // initialise RPC descriptor header
    635673    rpc_desc_t  rpc;
     
    648686    *dentry_xp = (xptr_t)rpc.args[3];
    649687    *error     = (error_t)rpc.args[4];
     688
     689    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    650690}
    651691
     
    695735    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    696736
     737    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     738
    697739    // initialise RPC descriptor header
    698740    rpc_desc_t  rpc;
     
    705747    // register RPC request in remote RPC fifo (blocking function)
    706748    rpc_send_sync( cxy , &rpc );
     749
     750    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    707751}
    708752
     
    737781    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    738782
     783    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     784
    739785    // initialise RPC descriptor header
    740786    rpc_desc_t  rpc;
     
    752798    *file_xp = (xptr_t)rpc.args[2];
    753799    *error   = (error_t)rpc.args[3];
     800
     801    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    754802}
    755803
     
    790838    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    791839
     840    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     841
    792842    // initialise RPC descriptor header
    793843    rpc_desc_t  rpc;
     
    800850    // register RPC request in remote RPC fifo (blocking function)
    801851    rpc_send_sync( cxy , &rpc );
     852
     853    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    802854}
    803855
     
    831883    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    832884
     885    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     886
    833887    // initialise RPC descriptor header
    834888    rpc_desc_t  rpc;
     
    846900    // get output values from RPC descriptor
    847901    *error   = (error_t)rpc.args[3];
     902
     903    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    848904}
    849905
     
    889945    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    890946
     947    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     948
    891949    // initialise RPC descriptor header
    892950    rpc_desc_t  rpc;
     
    902960    // get output values from RPC descriptor
    903961    *error   = (error_t)rpc.args[1];
     962
     963    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    904964}
    905965
     
    938998    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    939999
     1000    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1001
    9401002    // initialise RPC descriptor header
    9411003    rpc_desc_t  rpc;
     
    9541016    *cluster = (uint32_t)rpc.args[3];
    9551017    *error   = (error_t)rpc.args[4];
     1018
     1019    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    9561020}
    9571021
     
    9941058    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    9951059
     1060    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1061
    9961062    // initialise RPC descriptor header
    9971063    rpc_desc_t  rpc;
     
    10081074    // get output argument from rpc descriptor
    10091075    *vseg_xp = rpc.args[2];
     1076
     1077    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    10101078}
    10111079
     
    10501118    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    10511119
     1120    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1121
    10521122    // initialise RPC descriptor header
    10531123    rpc_desc_t  rpc;
     
    10661136    *ppn   = (ppn_t)rpc.args[3];
    10671137    *error = (error_t)rpc.args[4];
     1138
     1139    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    10681140}
    10691141
     
    11051177    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11061178
     1179    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1180
    11071181    // initialise RPC descriptor header
    11081182    rpc_desc_t  rpc;
     
    11181192    // get output arguments from RPC descriptor
    11191193    *buf_xp = (xptr_t)rpc.args[1];
     1194
     1195    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    11201196}
    11211197
     
    11521228    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11531229
     1230    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1231
    11541232    // initialise RPC descriptor header
    11551233    rpc_desc_t  rpc;
     
    11631241    // register RPC request in remote RPC fifo
    11641242    rpc_send_sync( cxy , &rpc );
     1243
     1244    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    11651245}
    11661246
     
    11991279    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    12001280
     1281    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1282
    12011283    // initialise RPC descriptor header
    12021284    rpc_desc_t  rpc;
     
    12171299    // get output values from RPC descriptor
    12181300    *error     = (error_t)rpc.args[6];
     1301
     1302    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    12191303}
    12201304
     
    12621346                    rpc_desc_t * rpc )
    12631347{
    1264         thread_t * this = CURRENT_THREAD;
    12651348    uint32_t   cores;
    12661349    error_t    error;
     
    12681351    reg_t      sr_save;
    12691352
    1270     // get client CPU and cluster coordinates
    1271     cxy_t      client_cxy = local_cxy;   
    1272     lid_t      client_lid = CURRENT_CORE->lid;
     1353    thread_t * this = CURRENT_THREAD;
     1354
     1355    rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x\n",
     1356             __FUNCTION__ , local_cxy , server_cxy );
    12731357
    12741358    // allocate and initialise an extended pointer on the RPC descriptor
    1275         xptr_t   xp = XPTR( client_cxy , rpc );
    1276 
    1277     // get local pointer on rpc_fifo in remote cluster with the
    1278     // assumption that addresses are identical in all clusters
     1359        xptr_t   desc_xp = XPTR( local_cxy , rpc );
     1360
     1361    // get local pointer on rpc_fifo in remote cluster, with the
     1362    // assumption that rpc_fifo pddresses are identical in all clusters
    12791363    rpc_fifo_t * rf = &LOCAL_CLUSTER->rpc_fifo;
    12801364
     
    12841368    {
    12851369        error = remote_fifo_put_item( XPTR( server_cxy , &rf->fifo ),
    1286                                       (uint64_t *)&xp,
     1370                                      (uint64_t )desc_xp,
    12871371                                      &first );
    12881372
    12891373            if ( error )
    12901374        {
    1291             printk("\n[WARNING] %s : core %d in cluster %x cannot post RPC to cluster %x\n",
    1292                    __FUNCTION__ , client_lid , client_cxy , server_cxy );
     1375            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
     1376                   __FUNCTION__ , local_cxy , server_cxy );
     1377
    12931378            if( thread_can_yield() ) sched_yield();
     1379        }
     1380        else
     1381        {
    12941382        }
    12951383    }
    12961384    while( error );
    12971385 
    1298     rpc_dmsg("\n[INFO] %s on core %d in cluster %x sent RPC %p to cluster %x\n",
    1299               __FUNCTION__ , client_lid , client_cxy , rpc , server_cxy );
     1386    rpc_dmsg("\n[INFO] %s : RPC registered / client_cxy = %x / server_cxy = %x\n",
     1387             __FUNCTION__ , local_cxy , server_cxy , first );
    13001388       
    1301     // send IPI if this is the first RPC in remote FIFO
    1302     // and no CPU is in kernel mode in server cluster.
    1303     // the selected CPU in server has the same lid as the client CPU.
     1389    // send IPI to remote CP0, if this is the first RPC in remote FIFO,
     1390    // and there is no CPU is in kernel mode in server cluster.
    13041391        if( first )
    13051392        {
     
    13091396                if( cores == 0 ) // no core in kernel mode in server
    13101397                {
    1311                     dev_pic_send_ipi( server_cxy , client_lid );
    1312 
    1313                     rpc_dmsg("\n[INFO] %s : core %d in cluster %x send IPI to core %d in cluster %x\n",
    1314                       __FUNCTION__, client_lid , client_cxy , client_lid , server_cxy );
     1398                    dev_pic_send_ipi( server_cxy , 0 );
     1399
     1400                    rpc_dmsg("\n[INFO] %s : IPI sent / client_cxy = %x / server_cxy = %x\n",
     1401                     __FUNCTION__, local_cxy , server_cxy );
    13151402        }
    13161403        }
    13171404
    1318         // activate preemption to allow incoming RPC and avoid deadlock
     1405        // enable IRQs to allow incoming RPC and avoid deadlock
    13191406        if( this->type == THREAD_RPC ) hal_enable_irq( &sr_save );
    13201407
    1321     // the sending thread poll the response slot until RPC completed
     1408    // the server thread poll the response slot until RPC completed
     1409    // TODO this could be replaced by a descheduling policy... [AG]
    13221410        while( 1 )
    13231411    {
     
    13251413    }
    13261414
    1327     // restore preemption
     1415    // restore IRQs
    13281416        if( this->type == THREAD_RPC ) hal_restore_irq( sr_save );
     1417
     1418    rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x\n",
     1419             __FUNCTION__ , local_cxy , server_cxy );
    13291420
    13301421}  // end rpc_send_sync()
     
    13441435}
    13451436
    1346 ////////////////////////////////////////////////
    1347 error_t rpc_execute_all( rpc_fifo_t * rpc_fifo )
     1437/////////////////////////////////////////////
     1438void rpc_execute_all( rpc_fifo_t * rpc_fifo )
    13481439{
    13491440        xptr_t         xp;             // extended pointer on RPC descriptor
     
    13531444    rpc_desc_t   * desc;           // pointer on RPC descriptor
    13541445    uint32_t       index;          // RPC index
    1355     uint32_t       expected;       // number of expected responses
    13561446    cxy_t          client_cxy;     // client cluster identifier
    13571447        error_t        error;
     
    13701460                if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
    13711461        {
    1372             rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n"
     1462            rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n",
    13731463                                     __FUNCTION__ , this->trdid , core->lid , local_cxy , count );
    13741464
     
    13771467            desc       = (rpc_desc_t *)GET_PTR( xp );
    13781468
    1379             // get rpc index and expected responses from RPC descriptor
     1469            // get rpc index from RPC descriptor
    13801470                index     = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
    1381                 expected  = hal_remote_lw( XPTR( client_cxy , &desc->response ) );
    13821471
    13831472            // call the relevant server function
     
    13881477
    13891478            // notify RPC completion as required
    1390             if( expected == 1 ) hal_remote_sw( XPTR(client_cxy,&desc->response) , 0 );
    1391             if( expected >  1 ) hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 );
     1479            hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 );
    13921480                }
    13931481       
     
    14001488            (count > CONFIG_RPC_PENDING_MAX) ) break;
    14011489        }
    1402     while( 1 )
    1403 
    1404         rpc_dmsg("\n[INFO] %s running on core %d in cluster %x exit\n"
    1405               __FUNCTION__ , CURRENT_CORE->lid , local_cxy );
    1406                
     1490    while( 1 );
     1491
    14071492    // update RPC_FIFO global counter
    14081493        rpc_fifo->count += count;
    14091494
    1410         return 0;
    14111495}  // end rpc_execute_all()
    14121496
     
    14221506    reg_t         sr_save;
    14231507
     1508   
    14241509        this   = CURRENT_THREAD;
    14251510    core   = this->core;
     
    14271512    found  = false;
    14281513
    1429     // calling thread must be the RPC_FIFO owner
    1430     if( this->trdid != rpc_fifo->owner )
    1431     {
    1432         printk("\n[PANIC] in %s : calling thread is not RPC_FIFO owner\n", __FUNCTION__ );
    1433         hal_core_sleep();
    1434     }
     1514    assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,
     1515          "calling thread is not RPC_FIFO owner\n" );
    14351516
    14361517    // makes the calling thread not preemptable
     
    14431524    {
    14441525        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    1445         if( (thread->type == THREAD_RPC) && (thread->blocked ==  THREAD_BLOCKED_IDLE ) )
     1526        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    14461527        {
    14471528            found = true;
     
    14531534    {
    14541535        thread->blocked = 0;
     1536
     1537        rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core %x in cluster %x at cycle %d\n",
     1538                          __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    14551539    }
    14561540    else                           // create a new RPC thread
     
    14691553        }
    14701554
    1471         rpc_dmsg("\n[INFO] %s creates RPC thread %x on core %x in cluster %x at cycle %d\n",
     1555        rpc_dmsg("\n[INFO] %s : create RPC thread %x on core %x in cluster %x at cycle %d\n",
    14721556                          __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    14731557
    14741558        // update core descriptor counter 
    1475             hal_atomic_add( &core->rpc_threads , 1 );
     1559            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    14761560    }
    14771561
    14781562    // update owner in rpc_fifo
    14791563    rpc_fifo->owner = thread->trdid;
    1480 
    1481     rpc_dmsg ("\n[INFO] %s activates RPC thread %x on core %x in cluster %x at cycle %d\n",
    1482                       __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    14831564
    14841565    // current thread deschedules / RPC thread start execution
     
    15061587    }
    15071588
    1508         // calling thread tries to take the light lock,
    1509     // and activates an RPC thread if success
     1589        // try to take the light lock, and activates an RPC thread if success
    15101590    if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    15111591        {
     
    15431623
    15441624    // this infinite loop is not preemptable
    1545     // the RPC thread deschedule when the RPC_FIFO is empty
     1625    // the RPC thread deschedule only when the RPC_FIFO is empty
    15461626        while(1)
    15471627        {
     
    15611641
    15621642
    1563         // suicide if too much RPC threads for this core
    1564                 if( this->core->rpc_threads > CONFIG_RPC_THREADS_MAX )
     1643        //  block and deschedule or suicide
     1644                if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
    15651645                {
    15661646            rpc_dmsg("\n[INFO] RPC thread %x suicide on core %d in cluster %x at cycle %d\n",
     
    15681648
    15691649            // update core descriptor counter
    1570                 hal_atomic_add( &this->core->rpc_threads , -1 );
     1650                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
    15711651
    15721652            // suicide
    15731653                        thread_exit();
    15741654                }
    1575 
    1576         // block and deschedule
    1577         rpc_dmsg("\n[INFO] RPC thread %x deschedule on core %d in cluster %x at cycle %d\n",
    1578                               this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
    1579 
    1580                 thread_block( this , THREAD_BLOCKED_IDLE );
    1581         sched_yield();
    1582 
    1583                 rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
    1584                               this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
    1585         }
     1655        else
     1656        {
     1657            rpc_dmsg("\n[INFO] RPC thread %x blocks on core %d in cluster %x at cycle %d\n",
     1658                                 this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1659
     1660                     thread_block( this , THREAD_BLOCKED_IDLE );
     1661             sched_yield();
     1662
     1663                     rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
     1664                          this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1665        }
     1666        } // end while
    15861667} // end rpc_thread_func()
    15871668
  • trunk/kernel/kern/rpc.h

    r265 r279  
    158158 * This function is the entry point for RPC handling on the server side.
    159159 * It can be executed by any thread running (in kernel mode) on any core.
    160  * It first checks the core private RPC fifo, an then the cluster shared RPC fifo.
    161  * It calls the rpc_activate_thread() function to activate a dedicated RPC thread.
    162  ***********************************************************************************
    163  * @ returns true if at least one RPC found / false otherwise.
     160 * It checks the RPC fifo, try to take the light-lock and activates (or creates)
     161 * an RPC thread in case of success.
     162 ***********************************************************************************
     163 * @ returns true if success / false otherwise.
    164164 **********************************************************************************/
    165165bool_t rpc_check();
     
    170170 ***********************************************************************************
    171171 * @ rpc_fifo  : pointer on the local RPC fifo
    172  * @ returns 0 if success
    173  **********************************************************************************/
    174 error_t rpc_execute_all( rpc_fifo_t * rpc_fifo );
     172 **********************************************************************************/
     173void rpc_execute_all( rpc_fifo_t * rpc_fifo );
    175174
    176175/**********************************************************************************
  • trunk/kernel/kern/scheduler.c

    r278 r279  
    4141    sched->k_threads_nr   = 0;
    4242
    43     sched->current        = NULL;
    44     sched->idle           = NULL;
    45     sched->u_last         = NULL;
    46     sched->k_last         = NULL;
     43    sched->current        = CURRENT_THREAD;
     44    sched->idle           = NULL;             // initialized in kernel_init()
     45    sched->u_last         = NULL;             // initialized in sched_register_thread()
     46    sched->k_last         = NULL;             // initialized in sched_register_thread()
    4747
    4848    // initialise threads lists
     
    6262    spinlock_lock( &sched->lock );
    6363
    64     // register thread
    6564    if( type == THREAD_USER )
    6665    {
     66        // register thread in scheduler user list
    6767        list_add_last( &sched->u_root , &thread->sched_list );
    6868        sched->u_threads_nr++;
     69
     70        // initialize u_last field if first user thread
     71        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    6972    }
    7073    else // kernel thread
    7174    {
     75        // register thread in scheduler kernel list
    7276        list_add_last( &sched->k_root , &thread->sched_list );
    7377        sched->k_threads_nr++;
     78
     79        // initialize k_last field if first kernel thread
     80        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    7481    }
    7582
     
    8996    spinlock_lock( &sched->lock );
    9097
    91     // remove thread
    9298    if( type == THREAD_USER )
    9399    {
     100        // remove thread from user list
    94101        list_unlink( &thread->sched_list );
    95102        sched->u_threads_nr--;
     103
     104        // reset the u_last field if list empty
     105        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    96106    }
    97107    else // kernel thread
    98108    {
     109        // remove thread from kernel list
    99110        list_unlink( &thread->sched_list );
    100111        sched->k_threads_nr--;
     112
     113        // reset the k_last field if list empty
     114        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    101115    }
    102116
     
    140154    list_entry_t * last;
    141155
    142     // first scan the kernel threads
    143     last    = sched->k_last;
    144     current = sched->k_last;
    145     do
    146     {
    147         // get next entry in kernel list
    148         current = list_next( &sched->k_root , current );
    149 
    150         // skip the list root that does not contain a thread
    151         if( current == NULL ) continue;
    152 
    153         // get thread pointer
    154         thread = LIST_ELEMENT( current , thread_t , sched_list );
    155 
    156         // return thread if not blocked
    157         if( thread->blocked == 0 )
     156    // first : scan the kernel threads list,
     157    // only if this list is not empty
     158    if( list_is_empty( &sched->k_root ) == false )
     159    {
     160        last    = sched->k_last;
     161        current = sched->k_last;
     162        do
    158163        {
    159             // release lock
    160             spinlock_unlock( &sched->lock );
    161             return thread;
     164            // get next entry in kernel list
     165            current = list_next( &sched->k_root , current );
     166
     167            // skip the root that does not contain a thread
     168            if( current == NULL ) current = sched->k_root.next;
     169
     170            // get thread pointer for this entry
     171            thread = LIST_ELEMENT( current , thread_t , sched_list );
     172
     173            // return thread if runnable
     174            if( thread->blocked == 0 )
     175            {
     176                // release lock
     177                spinlock_unlock( &sched->lock );
     178                return thread;
     179            }
    162180        }
    163     }
    164     while( current != last );
    165 
    166     // second scan the user threads
    167     last    = sched->u_last;
    168     current = sched->u_last;
    169     do
    170     {
    171         // get next entry in user list
    172         current = list_next( &sched->u_root , current );
    173 
    174         // skip the list root that does not contain a thread
    175         if( current == NULL ) continue;
    176 
    177         // get thread pointer
    178         thread = LIST_ELEMENT( current , thread_t , sched_list );
    179 
    180         // return thread if not blocked
    181         if( thread->blocked == 0 )
     181        while( current != last );
     182    }
     183
     184    // second : scan the user threads list,
     185    // only if this list is not empty
     186    if( list_is_empty( &sched->u_root ) == false )
     187    {
     188        last    = sched->u_last;
     189        current = sched->u_last;
     190        do
    182191        {
    183             // release lock
    184             spinlock_unlock( &sched->lock );
    185             return thread;
     192            // get next entry in user list
     193            current = list_next( &sched->u_root , current );
     194
     195            // skip the root that does not contain a thread
     196            if( current == NULL ) current = sched->u_root.next;
     197
     198            // get thread pointer for this entry
     199            thread = LIST_ELEMENT( current , thread_t , sched_list );
     200
     201            // return thread if runnable
     202            if( thread->blocked == 0 )
     203            {
     204                // release lock
     205                spinlock_unlock( &sched->lock );
     206                return thread;
     207            }
    186208        }
    187     }
    188     while( current != last );
     209        while( current != last );
     210    }
    189211
    190212    // release lock
    191213    spinlock_unlock( &sched->lock );
    192214
    193     // third, return idle thread if no runnable thread
     215    // third : return idle thread if no runnable thread
    194216    return sched->idle;
    195217
     
    234256    thread_t    * current = CURRENT_THREAD;
    235257    core_t      * core    = current->core;
     258    scheduler_t * sched   = &core->scheduler;
    236259
    237260    if( thread_can_yield() == false )
     
    265288               __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );
    266289
    267     // switch contexts if new thread
     290    // switch contexts and update scheduler state if new thread
    268291        if( next != current ) 
    269292        {
    270293        hal_cpu_context_save( current );
    271294        hal_cpu_context_restore( next );
     295
     296        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
     297        else                               sched->k_last = &current->sched_list;
     298
     299        sched->current = next;
    272300        }
    273301
  • trunk/kernel/kern/scheduler.h

    r14 r279  
    3434struct thread_s;
    3535
    36 /***********************************************************************************************
     36/*********************************************************************************************
    3737 * This structure define the scheduler associated to a given core.
    3838 * WARNING : the idle thread is executed when there is no runable thread in the list
    3939 * of attached threads, but is NOT part of the list of attached threads.
    40  **********************************************************************************************/
     40 ********************************************************************************************/
    4141
    4242typedef struct scheduler_s
    4343{
    44     spinlock_t        lock;         /*! readlock protecting lists of threads                  */
    45     uint16_t          u_threads_nr; /*! total numbre of attached user threads                 */
    46     uint16_t          k_threads_nr; /*! total number of attached kernel threads               */
    47     list_entry_t      u_root;       /*! root of list of user threads for this scheduler       */
    48     list_entry_t      k_root;       /*! root of list of kernel threads for this scheduler     */
    49     list_entry_t    * u_last;       /*! pointer on list_entry for last executed kernel thread */
    50     list_entry_t    * k_last;       /*! pointer on list entry for last executed user thread   */
    51     struct thread_s * idle;         /*! pointer on idle thread                                */
    52     struct thread_s * current;      /*! pointer on current running thread                     */
     44    spinlock_t        lock;         /*! readlock protecting lists of threads                */
     45    uint16_t          u_threads_nr; /*! total number of attached user threads               */
     46    uint16_t          k_threads_nr; /*! total number of attached kernel threads             */
     47    list_entry_t      u_root;       /*! root of list of user threads for this scheduler     */
     48    list_entry_t      k_root;       /*! root of list of kernel threads for this scheduler   */
     49    list_entry_t    * u_last;       /*! pointer on list_entry for last executed u_thread    */
     50    list_entry_t    * k_last;       /*! pointer on list entry for last executed k_thread    */
     51    struct thread_s * idle;         /*! pointer on idle thread                              */
     52    struct thread_s * current;      /*! pointer on current running thread                   */
    5353}
    5454scheduler_t;
    5555
    56 /***********************************************************************************************
     56/*********************************************************************************************
    5757 *  This function initialises the scheduler for a given core.
    58  **********************************************************************************************
     58 ********************************************************************************************
    5959void sched_init( struct core_s * core );
    6060
    61 /***********************************************************************************************
     61/*********************************************************************************************
    6262 * This function register a new thread in a given core scheduler.
    63  ***********************************************************************************************
     63 *********************************************************************************************
    6464 * @ core    : local pointer on the core descriptor.
    6565 * @ thread  : local pointer on the thread descriptor.
    66  **********************************************************************************************
     66 ********************************************************************************************
    6767void sched_register_thread( struct core_s   * core,
    6868                            struct thread_s * thread );
    6969
    70 /***********************************************************************************************
     70/*********************************************************************************************
    7171 *  This function removes a thread from the set of threads attached to a given core.
    72  ***********************************************************************************************
     72 *********************************************************************************************
    7373 * @ thread  : local pointer on the thread descriptor.
    74  **********************************************************************************************
     74 ********************************************************************************************
    7575void sched_remove_thread( struct thread_s * thread );
    7676
    77 /***********************************************************************************************
     77/*********************************************************************************************
    7878 * This function handles pending signals for all registered threads, and tries to make
    7979 * a context switch for the core running the calling thread.
     
    8282 * - If there is no other runable thread, the calling thread continues execution.
    8383 * - If there is no runable thread, the idle thread is executed.
    84  **********************************************************************************************/
     84 ********************************************************************************************/
    8585void sched_yield();
    8686
    87 /***********************************************************************************************
     87/*********************************************************************************************
    8888 * This function handles pending signals for all registered threads, and make
    8989 * a context switch to the thread defined by the <thread> argument.
    9090 * If the selected thread is not attached to the same core as the calling thread,
    9191 * or is blocked, it causes a kernel panic.
    92  ***********************************************************************************************
     92 *********************************************************************************************
    9393 * @ new   : local pointer on the thread to run.
    94  **********************************************************************************************/
     94 ********************************************************************************************/
    9595void sched_switch_to( struct thread_s * new );
    9696
    97 /***********************************************************************************************
     97/*********************************************************************************************
    9898 * This function scan all threads attached to a given core scheduler, and executes
    9999 * the relevant actions for pending signals, such as the THREAD_SIG_KILL signal.
    100  ***********************************************************************************************
     100 *********************************************************************************************
    101101 * @ core    : local pointer on the core descriptor.
    102  **********************************************************************************************/
     102 ********************************************************************************************/
    103103void sched_handle_signals( struct core_s * core );
    104104
    105 /***********************************************************************************************
     105/*********************************************************************************************
    106106 * This function is used by the scheduler of a given core to actually kill a thread that has
    107107 * the SIG_KILL signal set (following a thread_exit() or a thread_kill() event).
     
    110110 * - It removes the thread from the scheduler.
    111111 * - It release physical memory allocated for thread descriptor.
    112  ***********************************************************************************************
     112 *********************************************************************************************
    113113 * @ thread  : local pointer on the thread descriptor.
    114  **********************************************************************************************/
     114 ********************************************************************************************/
    115115void sched_kill_thread( struct thread_s * thread );
    116116
    117 /***********************************************************************************************
     117/*********************************************************************************************
    118118 * This function does NOT modify the scheduler state.
    119119 * It just select a thread in the list of attached threads, implementing the following policy:
     
    123123 *    the last executed one, and returns the first runable found (can be the current thread).
    124124 * 3) if no runable thread found, it returns the idle thread.
    125  ***********************************************************************************************
     125 *********************************************************************************************
    126126 * @ core    : local pointer on the core descriptor.
    127127 * @ returns pointer on selected thread descriptor
    128  **********************************************************************************************/
     128 ********************************************************************************************/
    129129struct thread_s * sched_select( struct core_s * core );
    130130
    131 /***********************************************************************************************
     131/*********************************************************************************************
    132132 * This function scan the list of kernel threads to find an idle (blocked) RPC thread.
    133  ***********************************************************************************************
     133 *********************************************************************************************
    134134 * @ core    : local pointer on the core descriptor.
    135135 * @ returns pointer on RPC thread descriptor / returns NULL if no idle RPC thread.
    136  **********************************************************************************************/
     136 ********************************************************************************************/
    137137struct thread_s * sched_get_rpc_thead( struct core_s * core );
    138138
  • trunk/kernel/kern/thread.h

    r174 r279  
    213213
    214214    uint32_t            dev_channel;     /*! device channel for a DEV thread          */
    215     union                                /*! embedded command for a DEV thread        */
    216     {
    217         ioc_command_t   ioc;             /*! IOC device generic command               */
    218         txt_command_t   txt;             /*! TXT device generic command               */
    219         nic_command_t   nic;             /*! NIC device generic command               */
    220         mmc_command_t   mmc;             /*! MMC device generic command               */
    221         dma_command_t   dma;             /*! DMA device generic command               */
    222     }
    223     command;
     215
     216    ioc_command_t       ioc_cmd;         /*! IOC device generic command               */
     217    txt_command_t       txt_cmd;         /*! TXT device generic command               */
     218    nic_command_t       nic_cmd;         /*! NIC device generic command               */
     219    mmc_command_t       mmc_cmd;         /*! MMC device generic command               */
     220    dma_command_t       dma_cmd;         /*! DMA device generic command               */
    224221
    225222        cxy_t               rpc_client_cxy;  /*! client cluster index (for a RPC thread)  */
  • trunk/kernel/libk/elf.c

    r270 r279  
    191191                        process->vmm.code_vpn_base = start >> CONFIG_PPM_PAGE_SHIFT;
    192192
    193                         elf_dmsg("\n[INFO] %s found CODE vseg / base = %x / size = %x\n",
     193                        elf_dmsg("\n[INFO] %s : found CODE vseg / base = %x / size = %x\n",
    194194                                 __FUNCTION__ , start , mem_size );
    195195                }
     
    199199                        process->vmm.data_vpn_base = start >> CONFIG_PPM_PAGE_SHIFT;
    200200
    201                         elf_dmsg("\n[INFO] %s found DATA vseg / base = %x / size = %x\n",
     201                        elf_dmsg("\n[INFO] %s : found DATA vseg / base = %x / size = %x\n",
    202202                                 __FUNCTION__, start , mem_size );
    203203                }
  • trunk/kernel/libk/remote_fifo.c

    r124 r279  
    4949//////////////////////////////////////////////
    5050error_t remote_fifo_put_item( xptr_t     fifo,
    51                               uint64_t * item,
     51                              uint64_t   item,
    5252                              bool_t   * first )
    5353{
     
    112112
    113113    // copy item to fifo
    114         hal_remote_swd( XPTR( cxy , &ptr->data[ptw] ), *item );
     114        hal_remote_swd( XPTR( cxy , &ptr->data[ptw] ), item );
     115
    115116        hal_fence();
    116117
  • trunk/kernel/libk/remote_fifo.h

    r68 r279  
    4040 *
    4141 * WARNING : the number of slots is statically defined by the global
    42  * configuration parameter CONFIG_REMOTE_FIFO_SLOTS for all fifos, requiring
    43  * 12 * CONFIG_REMOTE_FIFO_SLOTS bytes for each FIFO.
     42 * configuration parameter CONFIG_REMOTE_FIFO_SLOTS for all fifos.
     43 * Each FIFO requires 8 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes.
    4444 ***********************************************************************************/
    4545
     
    6767 ************************************************************************************
    6868 * @ fifo    : pointer to the local fifo.
    69  * @ item    : pointer on destination buffer for extracted item.
    70  * @ size    : actual number of bytes in one item.
     69 * @ item    : [out] pointer on buffer for extracted item.
    7170 * @ return  0 on success, EAGAIN if the buffer is empty.
    7271 ***********************************************************************************/
     
    7877 * by an extended pointer.
    7978 * This function gets a write ticket using a remote_atomic_increment on the
    80  * write slot index and waits until the slot is empty, using a descheduling
    81  * policy (without blocking).
     79 * write slot. Then, it waits until the slot is empty, using a descheduling
     80 * policy without blocking.
    8281 ************************************************************************************
    8382 * @ fifo    : extended pointer to the fifo in remote cluster.
    84  * @ item    : pointer on a local buffer containing the item to be stored.
     83 * @ item    : item to be stored.
    8584 * @ first   : [out] true if first item registered in remote fifo.
    8685 * @ return  0 on success / EBUSY if a contention has been detected.
    8786 ***********************************************************************************/
    8887error_t remote_fifo_put_item( xptr_t     fifo,
    89                               uint64_t * item,
     88                              uint64_t   item,
    9089                              bool_t   * first );
    9190
  • trunk/kernel/mm/mapper.c

    r265 r279  
    3030#include <rwlock.h>
    3131#include <printk.h>
     32#include <memcpy.h>
    3233#include <thread.h>
    3334#include <core.h>
  • trunk/kernel/syscalls/syscalls.h

    r50 r279  
    401401 * @ return 0 if success / returns -1 if failure.
    402402 ********************************************************************************************/
    403 int sys_mkdir( char      pathname,
     403int sys_mkdir( char    * pathname,
    404404               uint32_t  mode );
    405405
  • trunk/kernel/vfs/devfs.c

    r204 r279  
    7979    error_t  error;
    8080
     81    devfs_dmsg("\n[INFO] %s : enter in cluster %x\n",
     82               __FUNCTION__ , local_cxy );
     83
    8184    // creates DEVFS "dev" inode in cluster IO
    8285    error = vfs_add_child_in_parent( LOCAL_CLUSTER->io_cxy,
     
    8891                                     devfs_dev_inode_xp );
    8992
    90     nolock_assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );
     93    assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );
     94
     95    devfs_dmsg("\n[INFO] %s : <dev> created in cluster %x\n",
     96               __FUNCTION__ , local_cxy );
    9197
    9298    // create DEVFS "external" inode in cluster IO
     
    99105                                     devfs_external_inode_xp );
    100106
    101     nolock_assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );
     107    assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );
     108
     109    devfs_dmsg("\n[INFO] %s : <external> created in cluster %x\n",
     110               __FUNCTION__ , local_cxy );
    102111}
    103112
  • trunk/kernel/vfs/fatfs.c

    r265 r279  
    217217} // end get_name_from_long()
    218218
    219 //////////////////////////////////////////////////////////////////////////////////////////
    220 // This function returns the FATFS cluster index of a page identified by its page
    221 // index in the file, using the FAT mapper. It scans the FAT mapper, starting from the
    222 // FATFS cluster index allocated to the first page of the file, until it reaches the
    223 // searched page. The FAT mapper is automatically updated in case of miss.
    224 // This function can be called by any thread running in any cluster, as it uses the
    225 // RPC_FATFS_GET_CLUSTER to access the remote FAT mapper if required.
    226 // We use a RPC to scan the FAT because the RPC_FIFO will avoid contention
    227 // in the cluster containing the FAT mapper, and the RPC latency is not critical
    228 // compared to the device access latency.
    229 //////////////////////////////////////////////////////////////////////////////////////////
    230 // @ ctx               : pointer on local FATFS context.
    231 // @ first_cluster : first cluster allocated to a file in FATFS.
    232 // @ page_index    : index of searched page in file (one page occupies one cluster).
    233 // @ cluster_index : [out] pointer on buffer for FATFS cluster index.
    234 // @ return 0 if success / return EIO if a FAT cluster miss cannot be solved.
    235 //////////////////////////////////////////////////////////////////////////////////////////
    236 static error_t fatfs_cluster_from_index( fatfs_ctx_t * ctx,
    237                                          uint32_t      first_cluster,
    238                                          uint32_t      page_index,
    239                                          uint32_t    * cluster_index )
    240 {
    241     uint32_t searched_cluster;   // searched FATFS cluster index
    242     error_t  error;
    243 
    244     // get extended pointer on FAT mapper
    245     xptr_t fat_mapper_xp = ctx->fat_mapper_xp;
    246 
    247     // get cluster cxy and local pointer on FAT mapper
    248     cxy_t      fat_mapper_cxy = GET_CXY( fat_mapper_xp );
    249     mapper_t * fat_mapper_ptr = (mapper_t *)GET_PTR( fat_mapper_xp );
    250 
    251     if( fat_mapper_cxy == local_cxy )    // FAT mapper is local
    252     {
    253         error = fatfs_get_cluster( fat_mapper_ptr,
    254                                    first_cluster,
    255                                    page_index,
    256                                    &searched_cluster );
    257     }
    258     else                                 // FAT mapper is remote
    259     {
    260         rpc_fatfs_get_cluster_client( fat_mapper_cxy,
    261                                       fat_mapper_ptr,
    262                                       first_cluster,
    263                                       page_index,
    264                                       &searched_cluster,
    265                                       &error );
    266     }
    267    
    268     if( error )
    269     {
    270         printk("\n[ERROR] in %s : cannot access FAT\n", __FUNCTION__ );
    271         return error;
    272     }
    273 
    274     // return success
    275     *cluster_index = searched_cluster;
    276     return 0;
    277 
    278 }  // end fatfs_cluster_from_index()
    279219
    280220//////////////////////////////////////////////////////////////////////////////////////////
     
    400340    uint8_t     * buffer;
    401341
    402     fatfs_dmsg("\n[INFO] %s : enters for fatfs_ctx = %x\n",
     342    fatfs_dmsg("\n[INFO] %s : enter for fatfs_ctx = %x\n",
    403343               __FUNCTION__ , fatfs_ctx );
    404344
     
    414354                   "cannot allocate memory for 512 bytes buffer\n" );
    415355     
     356    fatfs_dmsg("\n[INFO] %s : allocated 512 bytes buffer\n", __FUNCTION__ );
     357
    416358    // load the boot record from device
    417359    // using a synchronous access to IOC device 
    418360    error = dev_ioc_sync_read( buffer , 0 , 1 );
     361
     362    fatfs_dmsg("\n[INFO] %s : buffer loaded\n", __FUNCTION__ );
    419363
    420364    assert( (error == 0) , __FUNCTION__ ,
     
    441385    uint32_t sector_size = fatfs_get_record( BPB_BYTSPERSEC , buffer , 1 );
    442386
    443     nolock_assert( (sector_size == 512) , __FUNCTION__ ,
    444                    "sector size must be 512 bytes\n" );
     387    assert( (sector_size == 512) , __FUNCTION__ ,
     388            "sector size must be 512 bytes\n" );
    445389
    446390    // check cluster size from boot record
    447391    uint32_t nb_sectors = fatfs_get_record( BPB_SECPERCLUS , buffer , 1 );
    448392
    449     nolock_assert( (nb_sectors == 8) , __FUNCTION__ ,
    450                    "cluster size must be 8 sectors\n" );
     393    assert( (nb_sectors == 8) , __FUNCTION__ ,
     394            "cluster size must be 8 sectors\n" );
    451395
    452396    // check number of FAT copies from boot record
    453397    uint32_t nb_fats = fatfs_get_record( BPB_NUMFATS , buffer , 1 );
    454398
    455     nolock_assert( (nb_fats == 1) , __FUNCTION__ ,
    456                    "number of FAT copies must be 1\n" );
     399    assert( (nb_fats == 1) , __FUNCTION__ ,
     400            "number of FAT copies must be 1\n" );
    457401
    458402    // get & check number of sectors in FAT from boot record
    459403    uint32_t fat_sectors = fatfs_get_record( BPB_FAT32_FATSZ32 , buffer , 1 );
    460404
    461     nolock_assert( ((fat_sectors & 0xF) == 0) , __FUNCTION__ ,
    462                    "FAT not multiple of 16 sectors\n");
     405    assert( ((fat_sectors & 0xF) == 0) , __FUNCTION__ ,
     406            "FAT not multiple of 16 sectors\n");
    463407
    464408    // get and check root cluster from boot record
    465409    uint32_t root_cluster = fatfs_get_record( BPB_FAT32_ROOTCLUS , buffer , 1 );
    466410
    467     nolock_assert( (root_cluster == 2) , __FUNCTION__ ,
    468                    "root cluster index must be  2\n");
     411    assert( (root_cluster == 2) , __FUNCTION__ ,
     412            "root cluster index must be  2\n");
    469413
    470414    // get FAT lba from boot record
     
    475419    req.ptr  = buffer;
    476420    kmem_free( &req );
     421
     422    fatfs_dmsg("\n[INFO] %s : boot record read & released\n",
     423               __FUNCTION__ );
    477424
    478425    // allocate a mapper for the FAT itself
     
    494441    fatfs_ctx->last_allocated_index  = 0;    // TODO ???
    495442    fatfs_ctx->fat_mapper_xp         = XPTR( local_cxy , fat_mapper );
     443
     444    fatfs_dmsg("\n[INFO] %s : exit for fatfs_ctx = %x\n",
     445               __FUNCTION__ , fatfs_ctx );
    496446
    497447}  // end fatfs_ctx_init()
  • trunk/kernel/vfs/vfs.c

    r271 r279  
    154154    error_t            error;
    155155
     156    vfs_dmsg("\n[INFO] %s : enter / local_cluster = %x / parent_cluster = %x\n",
     157             __FUNCTION__ , local_cxy , GET_CXY( dentry_xp ) );
     158 
    156159    // check fs type and get pointer on context
    157160    if     ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
     
    224227    remote_rwlock_init( XPTR( local_cxy , &inode->data_lock ) );
    225228    remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );
     229
     230    vfs_dmsg("\n[INFO] %s : exit / local_cluster = %x / parent_cluster = %x\n",
     231             __FUNCTION__ , local_cxy , GET_CXY( dentry_xp ) );
    226232
    227233    // return extended pointer on inode
     
    15161522    parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );
    15171523
     1524    vfs_dmsg("\n[INFO] %s : enter in cluster %x / child_cxy = %x / parent_cxy = %x\n",
     1525             __FUNCTION__ , local_cxy , child_cxy , parent_cxy );
     1526
    15181527    // 1. create dentry
    15191528    if( parent_cxy == local_cxy )      // parent cluster is the local cluster
     
    15231532                                   parent_ptr,
    15241533                                   &dentry_xp );
     1534
     1535        vfs_dmsg("\n[INFO] %s : dentry created in local cluster %x\n",
     1536                 __FUNCTION__ , local_cxy );
    15251537    }
    15261538    else                               // parent cluster is remote
     
    15321544                                      &dentry_xp,
    15331545                                      &error );
     1546
     1547        vfs_dmsg("\n[INFO] %s : dentry created in remote cluster %x\n",
     1548                 __FUNCTION__ , parent_cxy );
    15341549    }
    15351550                                     
     
    15581573                                  gid,
    15591574                                  &inode_xp );
     1575
     1576        vfs_dmsg("\n[INFO] %s : inode created in local cluster %x\n",
     1577                 __FUNCTION__ , local_cxy );
    15601578    }
    15611579    else                              // child cluster is remote
     
    15721590                                     &inode_xp,
    15731591                                     &error );
     1592
     1593        vfs_dmsg("\n[INFO] %s : inode created in remote cluster %x\n",
     1594                 __FUNCTION__ , child_cxy );
    15741595    }
    15751596                                     
Note: See TracChangeset for help on using the changeset viewer.