Changeset 296 for trunk/kernel/kern


Timestamp: Jul 31, 2017, 1:59:52 PM (7 years ago)
Author:    alain
Message:   Several modifications in the generic scheduler and in the hal_context
           to fix the context switch mechanism.

Location:  trunk/kernel/kern
Files:     11 edited

  • trunk/kernel/kern/chdev.c

    r23 r296  
    129129    // client thread goes to blocked state and deschedule
    130130    thread_block( thread_ptr , THREAD_BLOCKED_IO );
    131     sched_yield();
     131    sched_yield( NULL );
    132132
    133133}  // end chdev_register_command()
     
    162162            // block and deschedule
    163163            thread_block( server , THREAD_BLOCKED_DEV_QUEUE );
    164             sched_yield();
     164            sched_yield( NULL );
    165165        }
    166166        else
  • trunk/kernel/kern/cluster.c

    r279 r296  
    117117                 __FUNCTION__ , local_cxy , hal_get_cycles() );
    118118
    119     // initialises all cores descriptors
     119    // initialises all cores descriptors 
    120120        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    121121        {
  • trunk/kernel/kern/core.c

    r279 r296  
    115115
    116116        // handle scheduler TODO  improve the scheduling condition ... AG
    117         if( (ticks % 10) == 0 ) sched_yield();
     117        if( (ticks % 10) == 0 ) sched_yield( NULL );
    118118
    119119        // update DQDT TODO  This update should depend on the cluster identifier,
  • trunk/kernel/kern/kernel_init.c

    r285 r296  
    2929#include <hal_context.h>
    3030#include <hal_irqmask.h>
     31#include <hal_ppm.h>
    3132#include <barrier.h>
    3233#include <remote_barrier.h>
     
    784785
    785786    if( (core_lid ==  0) && (local_cxy == 0) )
    786     kinit_dmsg("\n[INFO] %s exit barrier 0 at cycle %d : TXT0 initialized\n",
    787                __FUNCTION__, (uint32_t)hal_time_stamp());
     787    kinit_dmsg("\n[INFO] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
     788    __FUNCTION__, hal_time_stamp() );
    788789
    789790    /////////////////////////////////////////////////////////////////////////////
     
    822823
    823824    if( (core_lid ==  0) && (local_cxy == 0) )
    824     kinit_dmsg("\n[INFO] %s exit barrier 1 at cycle %d : clusters initialised\n",
    825                __FUNCTION__, (uint32_t)hal_time_stamp());
     825    kinit_dmsg("\n[INFO] %s : exit barrier 1 : clusters initialised / cycle %d\n",
     826    __FUNCTION__, hal_time_stamp() );
    826827
    827828    /////////////////////////////////////////////////////////////////////////////////
    828829    // STEP 2 : all CP0s initialize the process_zero descriptor.
    829     //          CP0 in cluster 0 initialises the IOPIC device.
     830    //          CP0 in cluster 0 initializes the IOPIC device.
    830831    /////////////////////////////////////////////////////////////////////////////////
    831832
     
    847848
    848849    if( (core_lid ==  0) && (local_cxy == 0) )
    849     kinit_dmsg("\n[INFO] %s exit barrier 2 at cycle %d : PIC initialised\n",
    850                __FUNCTION__, (uint32_t)hal_time_stamp());
     850    kinit_dmsg("\n[INFO] %s : exit barrier 2 : PIC initialised / cycle %d\n",
     851    __FUNCTION__, hal_time_stamp() );
    851852
    852853    ////////////////////////////////////////////////////////////////////////////////
    853     // STEP 3 : all CP0s complete the distributed LAPIC initialization.
    854     //          all CP0s initialize their internal chdev descriptors
    855     //          all CP0s initialize their local external chdev descriptors
     854    // STEP 3 : all CP0s initialize the distributed LAPIC descriptor.
     855    //          all CP0s initialize the internal chdev descriptors
     856    //          all CP0s initialize the local external chdev descriptors
    856857    ////////////////////////////////////////////////////////////////////////////////
    857858
     
    878879
    879880    if( (core_lid ==  0) && (local_cxy == 0) )
    880     kinit_dmsg("\n[INFO] %s exit barrier 3 at cycle %d : all chdev initialised\n",
    881                __FUNCTION__, (uint32_t)hal_time_stamp());
     881    kinit_dmsg("\n[INFO] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
     882               __FUNCTION__, hal_time_stamp());
    882883
    883884    /////////////////////////////////////////////////////////////////////////////////
    884885    // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
     886    //          All cores initialise specific core registers
    885887    //          Alh cores initialize IDLE thread.
    886888    //          Only CP0 in cluster 0 creates the VFS root inode.
     
    891893   
    892894    // All cores enable the shared IPI channel
    893 
    894 // @@@
    895     hal_set_ebase( 0x1000 );
    896 // @@@
    897 
    898895    dev_pic_enable_ipi();
    899896    hal_enable_irq( &status );
    900897
     898    // All cores initialize specific core registers
     899    hal_core_init( info );
     900
    901901    kinit_dmsg("\n[INFO] %s : IRQs enabled for core[%x,%d] / SR = %x\n",
    902902               __FUNCTION__ , local_cxy , core_lid , hal_get_sr() );
    903903
    904     // all cores create the idle thread descriptor
     904    // all cores initialize the idle thread descriptor
    905905    error = thread_kernel_init( thread,
    906906                                THREAD_IDLE,
     
    915915    }
    916916
    917     // all cores register idle thread in scheduler
     917    // all cores unblock idle thread, and register it in scheduler
     918    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
    918919    core->scheduler.idle = thread;
    919920
    920     // all core activate the idle thread
    921     thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
    922 
    923921    if( (core_lid ==  0) && (local_cxy == 0) )
    924922    {
    925         kinit_dmsg("\n[INFO] %s : created idle thread %x at cycle %d\n",
    926                    __FUNCTION__ , thread , (uint32_t)hal_time_stamp());
    927     }
     923        kinit_dmsg("\n[INFO] %s : initialized idle thread %x on core[%x,%d] / cycle %d\n",
     924        __FUNCTION__ , thread->trdid , local_cxy, core_lid, (uint32_t)hal_time_stamp());
     925    }
     926
     927    #if CONFIG_KINIT_DEBUG
     928    sched_display();
     929    #endif
    928930
    929931    // CP0 in cluster 0 creates the VFS root
     
    991993
    992994    if( (core_lid ==  0) && (local_cxy == 0) )
    993     kinit_dmsg("\n[INFO] %s exit barrier 4 at cycle %d : VFS OK in cluster 0\n",
    994                __FUNCTION__, (uint32_t)hal_time_stamp());
     995    kinit_dmsg("\n[INFO] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
     996               __FUNCTION__, vfs_root_inode_xp , hal_time_stamp());
    995997
    996998    /////////////////////////////////////////////////////////////////////////////////
     
    10101012            fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();
    10111013
    1012             assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
    1013                     "cannot create FATFS context\n" );
     1014            assert( (fatfs_ctx != NULL) , __FUNCTION__ , "cannot create FATFS context\n" );
    10141015
    10151016            // get local pointer on VFS context for FATFS
     
    10311032
    10321033        // get extended pointer on VFS root inode from cluster 0
    1033         vfs_root_inode_xp = hal_remote_lwd( XPTR( 0 , process_zero.vfs_root_xp ) );
     1034        vfs_root_inode_xp = hal_remote_lwd( XPTR( 0 , &process_zero.vfs_root_xp ) );
    10341035
    10351036        // update local process_zero descriptor
     
    10441045    /////////////////////////////////////////////////////////////////////////////////
    10451046
    1046 //    if( (core_lid ==  0) && (local_cxy == 0) )
    1047     kinit_dmsg("\n[INFO] %s exit barrier 5 at cycle %d : VFS OK in all clusters\n",
    1048                __FUNCTION__, (uint32_t)hal_time_stamp());
    1049 
     1047    if( (core_lid ==  0) && (local_cxy == io_cxy) )
     1048    kinit_dmsg("\n[INFO] %s : exit barrier 5 : VFS_root = %l in cluster IO / cycle %d\n",
     1049    __FUNCTION__, vfs_root_inode_xp , hal_time_stamp() );
    10501050
    10511051    /////////////////////////////////////////////////////////////////////////////////
     
    10721072    }   
    10731073
    1074 printk("\n@@@ %s : cluster %x reach barrier 6\n", __FUNCTION__ , local_cxy );
    1075 
    10761074    /////////////////////////////////////////////////////////////////////////////////
    10771075    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
     
    10801078    /////////////////////////////////////////////////////////////////////////////////
    10811079
    1082 //    if( (core_lid ==  0) && (local_cxy == 0) )
    1083     kinit_dmsg("\n[INFO] %s exit barrier 6 at cycle %d : DEVFS OK in cluster IO\n",
    1084                __FUNCTION__, (uint32_t)hal_time_stamp());
     1080    if( (core_lid ==  0) && (local_cxy == io_cxy) )
     1081    kinit_dmsg("\n[INFO] %s : exit barrier 6 : dev_root = %l in cluster IO / cycle %d\n",
     1082    __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
    10851083
    10861084    /////////////////////////////////////////////////////////////////////////////////
     
    11181116
    11191117    if( (core_lid ==  0) && (local_cxy == 0) )
    1120     kinit_dmsg("\n[INFO] %s exit barrier 7 at cycle %d : DEVFS OK in all clusters\n",
    1121                __FUNCTION__, (uint32_t)hal_time_stamp());
     1118    kinit_dmsg("\n[INFO] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
     1119    __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
    11221120
    11231121    #if CONFIG_KINIT_DEBUG
     
    11411139
    11421140    if( (core_lid ==  0) && (local_cxy == 0) )
    1143     kinit_dmsg("\n[INFO] %s exit barrier 8 at cycle %d : process init created\n",
    1144                __FUNCTION__ , (uint32_t)hal_time_stamp() );
     1141    kinit_dmsg("\n[INFO] %s : exit barrier 8 : process init created / cycle %d\n",
     1142    __FUNCTION__ , hal_time_stamp() );
    11451143
    11461144    /////////////////////////////////////////////////////////////////////////////////
     
    11981196    dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD );
    11991197
    1200     if( (core_lid ==  0) && (local_cxy == io_cxy) )
    1201     thread_dmsg("\n[INFO] %s complete kernel init in cluster 0 at cycle %d\n"
    1202                 __FUNCTION__ , (uint32_t)hal_time_stamp() )
    1203 
    12041198    // each core jump to idle thread
    12051199    thread_idle_func();
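
    In STEP 4 the hard-coded hal_set_ebase( 0x1000 ) call is replaced by
    hal_core_init( info ), and the idle thread is now initialized, unblocked,
    and then registered in the local scheduler. A condensed sketch of the new
    per-core sequence, assuming only the names shown in the hunks above (the
    full thread_kernel_init() argument list is not visible in this diff):

        // per-core end of kernel_init (condensed from the hunks above)
        dev_pic_enable_ipi();                    // enable the shared IPI channel
        hal_enable_irq( &status );               // enable IRQs for this core
        hal_core_init( info );                   // core-specific registers, replaces
                                                 // the hard-coded hal_set_ebase( 0x1000 )

        // initialize the idle thread descriptor (remaining arguments not shown here)
        error = thread_kernel_init( thread , THREAD_IDLE , /* ... */ );

        // unblock the idle thread, then register it in the local scheduler
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
        core->scheduler.idle = thread;
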
  • trunk/kernel/kern/printk.c

    r279 r296  
    401401}
    402402
     403////////////////////////////////////////
     404void nolock_printk( char * format , ...)
     405{
     406    va_list       args;
     407
     408    // call kernel_printf on TXT0, in busy waiting mode
     409    va_start( args , format );
     410    kernel_printf( 0 , 1 , format , &args );
     411    va_end( args );
     412}
     413
    403414///////////////////////////////////////////
    404415inline void assert( bool_t       condition,
  • trunk/kernel/kern/printk.h

    r279 r296  
    6767 * This function displays a formatted string on the kernel terminal TXT0,
    6868 * using a busy waiting policy: It calls directly the relevant TXT driver,
    69  * after taking the lock for exclusive access to the TXT0 terminal.
     69 * after taking the TXT0 lock.
    7070 **********************************************************************************
    7171 * @ format     : formatted string.
    7272 *********************************************************************************/
    7373void printk( char* format, ... );
     74
     75/**********************************************************************************
     76 * This function displays a formatted string on the kernel terminal TXT0,
     77 * using a busy waiting policy: It calls directly the relevant TXT driver,
     78 * without taking the TXT0 lock.
     79 **********************************************************************************
     80 * @ format     : formatted string.
     81 *********************************************************************************/
     82void nolock_printk( char* format, ... );
    7483
    7584/**********************************************************************************
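
    The new nolock_printk() follows the same busy-waiting TXT0 output path as
    printk(), but does not take the TXT0 lock, so it is usable from code that
    already holds that lock or from contexts where the lock cannot be taken.
    A hedged usage sketch; the dump_core_state() helper is hypothetical and only
    illustrates the intended split between the two variants:

        // hypothetical helper called from a section that already owns the TXT0 lock :
        // calling printk() here would deadlock, nolock_printk() will not
        static void dump_core_state( uint32_t cxy , uint32_t lid )
        {
            nolock_printk( "core[%x,%d] : state dump\n" , cxy , lid );
        }

        // outside any TXT0 critical section the locking variant remains the default :
        // printk( "[INFO] cluster %x initialised\n" , local_cxy );
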
  • trunk/kernel/kern/rpc.c

    r279 r296  
    9999                                uint32_t * ppn )       // out
    100100{
    101     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    102 
    103     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     101    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     102
     103    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    104104
    105105    // initialise RPC descriptor header
     
    118118    *ppn    = (uint32_t)rpc.args[1];
    119119
    120     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     120    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    121121}
    122122
     
    127127    error_t  error;  // output
    128128    uint32_t ppn;    // output
     129
     130    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    129131
    130132    // get client cluster identifier and pointer on RPC descriptor
     
    143145    hal_remote_sw( XPTR( cxy , &desc->args[0] ) , error );
    144146    hal_remote_sw( XPTR( cxy , &desc->args[1] ) , ppn );
     147
     148    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    145149}
    146150
     
    155159                                   pid_t     * pid )     // out
    156160{
    157     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    158 
    159     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     161    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     162
     163    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    160164
    161165    // initialise RPC descriptor header
     
    174178    *error  = (error_t)rpc.args[2];     
    175179
    176     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     180    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    177181}
    178182
     
    183187    error_t     error;     // output : error status
    184188    pid_t       pid;       // output : process identifier
     189
     190    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    185191
    186192    // get client cluster identifier and pointer on RPC descriptor
     
    198204    hal_remote_sw( XPTR( client_cxy , &desc->args[0] ) , (uint64_t)error );
    199205    hal_remote_sw( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)pid );
     206
     207    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    200208}
    201209
     
    210218                              error_t     * error )   // out
    211219{
    212     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    213 
    214     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     220    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     221
     222    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    215223
    216224    // initialise RPC descriptor header
     
    228236    *error  = (error_t)rpc.args[1];     
    229237
    230     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     238    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    231239}
    232240
     
    237245    exec_info_t   info;      // local copy of exec_info structure
    238246    error_t       error;     // local error status
     247
     248    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    239249
    240250    // get client cluster identifier and pointer on RPC descriptor
     
    255265    // set output argument into client RPC descriptor
    256266    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     267
     268    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    257269}
    258270
     
    265277void rpc_process_kill_client( process_t * process )
    266278{
     279    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     280
    267281    // only reference cluster can send this RPC
    268282    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ ,
    269283            "caller must be reference process cluster\n");
    270 
    271     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
    272284
    273285    // get local process index in reference cluster
     
    297309    }
    298310
    299     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     311    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    300312
    301313
     
    305317    pid_t       pid;
    306318    process_t * process; 
     319
     320    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    307321
    308322    // get client cluster identifier and pointer on RPC descriptor
     
    325339        process_kill( process );
    326340    }
     341
     342    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    327343}
    328344
     
    341357                                    error_t        * error )      // out
    342358{
    343     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    344 
    345     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     359    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     360
     361    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    346362
    347363    // initialise RPC descriptor header
     
    363379    *error     = (error_t)rpc.args[5];
    364380
    365     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     381    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    366382}
    367383
     
    378394    void           * start_arg;
    379395    error_t          error;
     396
     397    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    380398
    381399    // get client cluster identifier and pointer on RPC descriptor
     
    409427    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    410428    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
     429
     430    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    411431}
    412432
     
    423443                                      error_t * error )      // out
    424444{
    425     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    426 
    427     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     445    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     446
     447    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    428448
    429449    // initialise RPC descriptor header
     
    444464    *error     = (error_t)rpc.args[4];
    445465
    446     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     466    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    447467}
    448468
     
    455475    error_t          error;   
    456476
     477    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     478
    457479    // get client cluster identifier and pointer on RPC descriptor
    458480    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    474496    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    475497    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
     498
     499    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    476500}
    477501
     
    485509                             uint32_t    sig_id )    // in
    486510{
    487     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    488 
    489     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     511    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     512
     513    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    490514
    491515    // initialise RPC descriptor header
     
    501525    rpc_send_sync( cxy , &rpc );
    502526
    503     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     527    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    504528}
    505529
     
    509533    process_t  * process;  // local pointer on process descriptor
    510534    uint32_t     sig_id;   // signal index
     535
     536    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    511537
    512538    // get client cluster identifier and pointer on RPC descriptor
     
    520546    // call local kernel function
    521547    signal_rise( process , sig_id );
     548
     549    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    522550}
    523551
     
    539567                                  error_t      * error )     // out
    540568{
    541     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    542 
    543     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     569    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     570
     571    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    544572
    545573    // initialise RPC descriptor header
     
    565593    *error    = (error_t)rpc.args[9];
    566594
    567     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     595    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    568596}
    569597
     
    582610    error_t          error;
    583611
     612    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     613
    584614    // get client cluster identifier and pointer on RPC descriptor
    585615    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    610640    hal_remote_swd( XPTR( client_cxy , &desc->args[8] ) , (uint64_t)inode_xp );
    611641    hal_remote_swd( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error );
     642
     643    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    612644}
    613645
     
    620652                                   struct vfs_inode_s * inode )
    621653{
    622     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    623 
    624     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     654    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     655
     656    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    625657
    626658    // initialise RPC descriptor header
     
    635667    rpc_send_sync( cxy , &rpc );
    636668
    637     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     669    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    638670}
    639671
     
    642674{
    643675    vfs_inode_t * inode;
     676
     677    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    644678
    645679    // get client cluster identifier and pointer on RPC descriptor
     
    652686    // call local kernel function
    653687    vfs_inode_destroy( inode );
     688
     689    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    654690}
    655691
     
    666702                                   error_t              * error )       // out
    667703{
    668     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    669 
    670     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     704    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     705
     706    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    671707
    672708    // initialise RPC descriptor header
     
    687723    *error     = (error_t)rpc.args[4];
    688724
    689     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     725    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    690726}
    691727
     
    701737    char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
    702738
     739    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     740
    703741    // get client cluster identifier and pointer on RPC descriptor
    704742    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    709747    name   = (char *)(intptr_t)       hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    710748    parent = (vfs_inode_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
    711                        
     749
    712750    // makes a local copy of  name
    713751    hal_remote_strcpy( XPTR( local_cxy , name_copy ),
     
    719757                               parent,
    720758                               &dentry_xp );
    721  
    722759    // set output arguments
    723760    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)dentry_xp );
    724761    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
     762
     763    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    725764}
    726765
     
    728767// [13]          Marshaling functions attached to RPC_VFS_DENTRY_DESTROY
    729768/////////////////////////////////////////////////////////////////////////////////////////
     769
    730770
    731771///////////////////////////////////////////////////////
     
    733773                                    vfs_dentry_t * dentry )
    734774{
    735     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    736 
    737     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     775    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     776
     777    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    738778
    739779    // initialise RPC descriptor header
     
    748788    rpc_send_sync( cxy , &rpc );
    749789
    750     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     790    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    751791}
    752792
     
    755795{
    756796    vfs_dentry_t * dentry;
     797
     798    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    757799
    758800    // get client cluster identifier and pointer on RPC descriptor
     
    765807    // call local kernel function
    766808    vfs_dentry_destroy( dentry );
     809
     810    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    767811}
    768812
     
    779823                                 error_t              * error )      // out
    780824{
    781     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    782 
    783     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     825    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     826
     827    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    784828
    785829    // initialise RPC descriptor header
     
    799843    *error   = (error_t)rpc.args[3];
    800844
    801     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     845    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    802846}
    803847
     
    809853    xptr_t        file_xp;
    810854    error_t       error;
     855
     856    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    811857
    812858    // get client cluster identifier and pointer on RPC descriptor
     
    826872    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp );
    827873    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     874
     875    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    828876}
    829877
     
    836884                                  vfs_file_t * file )
    837885{
    838     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    839 
    840     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     886    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     887
     888    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    841889
    842890    // initialise RPC descriptor header
     
    851899    rpc_send_sync( cxy , &rpc );
    852900
    853     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     901    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    854902}
    855903
     
    858906{
    859907    vfs_file_t * file;
     908
     909    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    860910
    861911    // get client cluster identifier and pointer on RPC descriptor
     
    868918    // call local kernel function
    869919    vfs_file_destroy( file );
     920
     921    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    870922}
    871923
     
    881933                                error_t     * error )          // out
    882934{
    883     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    884 
    885     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     935    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     936
     937    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    886938
    887939    // initialise RPC descriptor header
     
    901953    *error   = (error_t)rpc.args[3];
    902954
    903     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     955    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    904956}
    905957
     
    914966    char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
    915967
     968    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     969
    916970    // get client cluster identifier and pointer on RPC descriptor
    917971    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    932986    // set output argument
    933987    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     988
     989    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    934990}
    935991
     
    943999                                     error_t     * error )     // out
    9441000{
    945     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    946 
    947     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1001    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1002
     1003    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    9481004
    9491005    // initialise RPC descriptor header
     
    9611017    *error   = (error_t)rpc.args[1];
    9621018
    963     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1019    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    9641020}
    9651021
     
    9701026    vfs_inode_t * inode;
    9711027
     1028    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1029
    9721030    // get client cluster identifier and pointer on RPC descriptor
    9731031    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    9821040    // set output argument
    9831041    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     1042
     1043    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    9841044}
    9851045
     
    9961056                                   error_t  * error )    // out
    9971057{
    998     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    999 
    1000     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1058    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1059
     1060    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    10011061
    10021062    // initialise RPC descriptor header
     
    10171077    *error   = (error_t)rpc.args[4];
    10181078
    1019     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1079    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    10201080}
    10211081
     
    10291089    error_t       error;
    10301090
     1091    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1092
    10311093    // get client cluster identifier and pointer on RPC descriptor
    10321094    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    10441106    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)cluster );
    10451107    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
     1108
     1109    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    10461110}
    10471111
     
    10561120                                  xptr_t    * vseg_xp )    // out
    10571121{
    1058     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1059 
    1060     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1122    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1123
     1124    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    10611125
    10621126    // initialise RPC descriptor header
     
    10751139    *vseg_xp = rpc.args[2];
    10761140
    1077     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1141    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    10781142}
    10791143
     
    10851149    vseg_t      * vseg_ptr;
    10861150    xptr_t        vseg_xp;
     1151
     1152    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    10871153
    10881154    // get client cluster identifier and pointer on RPC descriptor
     
    11011167    else                   vseg_xp = XPTR( local_cxy , vseg_ptr );
    11021168    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp );
     1169
     1170    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    11031171}
    11041172
     
    11161184                             error_t   * error )   // out
    11171185{
    1118     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1119 
    1120     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1186    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1187
     1188    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11211189
    11221190    // initialise RPC descriptor header
     
    11371205    *error = (error_t)rpc.args[4];
    11381206
    1139     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1207    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    11401208}
    11411209
     
    11481216    ppn_t         ppn;
    11491217    error_t       error;
     1218
     1219    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    11501220
    11511221    // get client cluster identifier and pointer on RPC descriptor
     
    11641234    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)ppn );
    11651235    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
     1236
     1237    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    11661238}
    11671239
     
    11751247                           xptr_t *   buf_xp )     // out
    11761248{
    1177     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1178 
    1179     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1249    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1250
     1251    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11801252
    11811253    // initialise RPC descriptor header
     
    11931265    *buf_xp = (xptr_t)rpc.args[1];
    11941266
    1195     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1267    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    11961268}
    11971269
     
    11991271void rpc_kcm_alloc_server( xptr_t xp )
    12001272{
     1273    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1274
    12011275    // get client cluster identifier and pointer on RPC descriptor
    12021276    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    12151289    xptr_t buf_xp = XPTR( local_cxy , buf_ptr );
    12161290    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp );
     1291
     1292    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    12171293}   
    12181294
     
    12261302                          uint32_t   kmem_type )   // in
    12271303{
    1228     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1229 
    1230     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1304    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1305
     1306    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    12311307
    12321308    // initialise RPC descriptor header
     
    12421318    rpc_send_sync( cxy , &rpc );
    12431319
    1244     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1320    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    12451321}
    12461322
     
    12481324void rpc_kcm_free_server( xptr_t xp )
    12491325{
     1326    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1327
    12501328    // get client cluster identifier and pointer on RPC descriptor
    12511329    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    12611339    req.ptr  = buf;
    12621340    kmem_free( &req );
     1341
     1342    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    12631343}   
    12641344
     
    12771357                                    error_t  * error )        // out
    12781358{
    1279     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1280 
    1281     rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
     1359    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1360
     1361    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    12821362
    12831363    // initialise RPC descriptor header
     
    13001380    *error     = (error_t)rpc.args[6];
    13011381
    1302     rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
     1382    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    13031383}
    13041384
     
    13141394    error_t    error;
    13151395
     1396    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     1397
    13161398    // get client cluster identifier and pointer on RPC descriptor
    13171399    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    13361418    // set output argument to client RPC descriptor
    13371419    hal_remote_swd( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );
     1420
     1421    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    13381422}
    13391423
     
    13531437    thread_t * this = CURRENT_THREAD;
    13541438
    1355     rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x\n",
    1356              __FUNCTION__ , local_cxy , server_cxy );
     1439    rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x / cycle %d\n",
     1440    __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() );
    13571441
    13581442    // allocate and initialise an extended pointer on the RPC descriptor
     
    13741458        {
    13751459            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
    1376                    __FUNCTION__ , local_cxy , server_cxy );
    1377 
    1378             if( thread_can_yield() ) sched_yield();
    1379         }
    1380         else
    1381         {
     1460            __FUNCTION__ , local_cxy , server_cxy );
     1461
     1462            if( thread_can_yield() ) sched_yield( NULL );
    13821463        }
    13831464    }
    13841465    while( error );
    13851466 
    1386     rpc_dmsg("\n[INFO] %s : RPC registered / client_cxy = %x / server_cxy = %x\n",
    1387              __FUNCTION__ , local_cxy , server_cxy , first );
     1467    rpc_dmsg("\n[INFO] %s : RPC %l registered / server_cxy = %x / cycle %d\n",
     1468    __FUNCTION__ , desc_xp , server_cxy , hal_time_stamp() );
    13881469       
    13891470    // send IPI to remote CP0, if this is the first RPC in remote FIFO,
     
    13991480
    14001481                    rpc_dmsg("\n[INFO] %s : IPI sent / client_cxy = %x / server_cxy = %x\n",
    1401                      __FUNCTION__, local_cxy , server_cxy );
     1482            __FUNCTION__, local_cxy , server_cxy );
    14021483        }
    14031484        }
     
    14161497        if( this->type == THREAD_RPC ) hal_restore_irq( sr_save );
    14171498
    1418     rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x\n",
    1419              __FUNCTION__ , local_cxy , server_cxy );
     1499    rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x / cycle %d\n",
     1500    __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() );
    14201501
    14211502}  // end rpc_send_sync()
     
    14471528        error_t        error;
    14481529     
    1449         this = CURRENT_THREAD;
    1450     core = this->core;   
    1451 
    1452 
     1530        this  = CURRENT_THREAD;
     1531    core  = this->core;   
     1532        count = 0;
     1533
     1534    rpc_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d] / fifo = %x / cycle %d\n",
     1535    __FUNCTION__, this->trdid, local_cxy, core->lid , rpc_fifo , hal_time_stamp() );
     1536 
    14531537    // handle up to CONFIG_RPC_PENDING_MAX requests before exit
    1454         count = 0;
    14551538        do
    14561539    {
    1457             error = local_fifo_get_item( &rpc_fifo->fifo,
    1458                                      (uint64_t *)&xp );
     1540            error = local_fifo_get_item( &rpc_fifo->fifo, (uint64_t *)&xp );
    14591541
    14601542                if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
    14611543        {
    1462             rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n",
    1463                                      __FUNCTION__ , this->trdid , core->lid , local_cxy , count );
    1464 
    14651544            // get client cluster identifier and pointer on RPC descriptor
    14661545            client_cxy = (cxy_t)GET_CXY( xp );
     
    14681547
    14691548            // get rpc index from RPC descriptor
    1470                 index     = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
     1549                index = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
     1550
     1551            rpc_dmsg("\n[INFO] %s : thread %x on core [%x,%d] / index = %d / &rpc = %x\n",
     1552                     __FUNCTION__ , this->trdid , local_cxy , core->lid , index , rpc_server[index] );
    14711553
    14721554            // call the relevant server function
     
    15331615    if( found )                    // activate this idle RPC thread     
    15341616    {
     1617        // unblock it
    15351618        thread->blocked = 0;
    15361619
    1537         rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core %x in cluster %x at cycle %d\n",
    1538                           __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
     1620        rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core [%x,%d] / cycle %d\n",
     1621                          __FUNCTION__ , thread , core->gid , local_cxy , hal_time_stamp() );
    15391622    }
    15401623    else                           // create a new RPC thread
    15411624    {
     1625        // create new thread
    15421626        error = thread_kernel_create( &thread,
    15431627                                      THREAD_RPC,
     
    15531637        }
    15541638
    1555         rpc_dmsg("\n[INFO] %s : create RPC thread %x on core %x in cluster %x at cycle %d\n",
    1556                           __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
     1639        // unblock new thread
     1640        thread->blocked = 0;
    15571641
    15581642        // update core descriptor counter 
    15591643            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
     1644
     1645        rpc_dmsg("\n[INFO] %s : create RPC thread %x on core [%x,%d] / cycle %d\n",
     1646                          __FUNCTION__ , thread->trdid, local_cxy, core->lid, hal_time_stamp() );
    15601647    }
    15611648
     
    15631650    rpc_fifo->owner = thread->trdid;
    15641651
    1565     // current thread deschedules / RPC thread start execution
    1566         sched_switch_to( thread );
     1652    // current thread switches to the RPC thread
     1653        sched_yield( thread );
    15671654
    15681655    // restore IRQs for the calling thread
     
    15811668    error_t      error;
    15821669
     1670    rpc_dmsg("\n[INFO] %s : enter in cluster %x\n",
     1671             __FUNCTION__ , local_cxy );
     1672
    15831673    // calling thread does nothing if light lock already taken or FIFO empty 
    15841674        if( (rpc_fifo->owner != 0) || (local_fifo_is_empty( &rpc_fifo->fifo )) )
    15851675    {
     1676        rpc_dmsg("\n[INFO] %s : exit but do nothing in cluster %x\n",
     1677                 __FUNCTION__ , local_cxy );
     1678
    15861679        return false;
    15871680    }
     
    16011694        }
    16021695
     1696        rpc_dmsg("\n[INFO] %s : exit after activating an RPC thread in cluster %x\n",
     1697                 __FUNCTION__ , local_cxy );
     1698
    16031699        return true;
    16041700    }
    16051701    else  // light lock taken by another thread
    16061702    {
     1703        rpc_dmsg("\n[INFO] %s : exit but do nothing in cluster %x\n",
     1704                 __FUNCTION__ , local_cxy );
     1705
    16071706        return false;
    16081707    }
     
    16131712void rpc_thread_func()
    16141713{
    1615     // makes the calling thread not preemptable
     1714    // makes the RPC thread not preemptable
    16161715        hal_disable_irq( NULL );
    16171716 
     
    16191718        rpc_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    16201719
    1621     rpc_dmsg("\n[INFO] RPC thread %x created on core %d in cluster %x at cycle %d\n",
    1622              this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1720    rpc_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1721             __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
    16231722
    16241723    // this infinite loop is not preemptable
     
    16291728        if( this->trdid != rpc_fifo->owner )
    16301729        {
    1631             printk("\n[PANIC] in %s : RPC_THREAD %x not owner of RPC_FIFO in cluster %x\n",
    1632                    __FUNCTION__ , this->trdid , local_cxy );
     1730            printk("\n[PANIC] in %s : thread %x on core[%x,%d] not owner of RPC_FIFO\n",
     1731            __FUNCTION__, this->trdid, local_cxy, this->core->lid );
    16331732            hal_core_sleep();
    16341733        }
     
    16441743                if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
    16451744                {
    1646             rpc_dmsg("\n[INFO] RPC thread %x suicide on core %d in cluster %x at cycle %d\n",
    1647                              this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1745            rpc_dmsg("\n[INFO] %s : thread %x on core[%x,%d] suicide / cycle %d\n",
     1746                    __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
    16481747
    16491748            // update core descriptor counter
     
    16551754        else
    16561755        {
    1657             rpc_dmsg("\n[INFO] RPC thread %x blocks on core %d in cluster %x at cycle %d\n",
    1658                                  this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
    1659 
    1660                      thread_block( this , THREAD_BLOCKED_IDLE );
    1661              sched_yield();
    1662 
    1663                      rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
    1664                           this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
     1756            rpc_dmsg("\n[INFO] %s : thread %x on core[%x,%d] blocks / cycle %d\n",
     1757                        __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
     1758
     1759                    thread_block( this , THREAD_BLOCKED_IDLE );
     1760            sched_yield( NULL );
     1761
     1762                    rpc_dmsg("\n[INFO] %s : RPC thread %x wake up on core[%x,%d] / cycle %d\n",
     1763                __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
    16651764        }
    16661765        } // end while
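
    Taken together, the rpc.c hunks sketch the server-side life cycle: the
    FIFO-checking entry point (documented in rpc.h below) wakes an idle RPC
    thread or creates a new one, then hands the core over to it with
    sched_yield( thread ); the RPC thread drains the FIFO and, when nothing is
    left, either exits (if the per-cluster RPC thread count is too high) or
    blocks and yields. A condensed, non-verbatim sketch of that idle path,
    using only names visible in the diff:

        // end of the RPC thread service loop (condensed from rpc_thread_func above)
        if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
        {
            // too many RPC threads in this cluster : update the rpc_threads counter
            // and exit (the actual exit call is outside the visible hunk)
        }
        else
        {
            // nothing left in the FIFO : block on IDLE and let the scheduler run
            thread_block( this , THREAD_BLOCKED_IDLE );
            sched_yield( NULL );

            // execution resumes here when the thread is unblocked by a new request
        }
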
  • trunk/kernel/kern/rpc.h

    r279 r296  
    157157/***********************************************************************************
    158158 * This function is the entry point for RPC handling on the server side.
    159  * It can be executed by any thread running (in kernel mode) on any core.
     159 * It is executed by a core receiving an IPI.
    160160 * It checks the RPC fifo, tries to take the light-lock and activates (or creates)
    161161 * an RPC thread in case of success.
  • trunk/kernel/kern/scheduler.c

    r279 r296  
    3030#include <core.h>
    3131#include <thread.h>
     32#include <chdev.h>
    3233#include <scheduler.h>
     34
     35///////////////////////////////////////////////////////////////////////////////////////////
     36// Extern global variables
     37///////////////////////////////////////////////////////////////////////////////////////////
     38
     39extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
    3340
    3441
     
    144151thread_t * sched_select( core_t * core )
    145152{
    146     thread_t * thread;
     153    thread_t    * thread;
    147154
    148155    scheduler_t * sched = &core->scheduler;
     156
     157    sched_dmsg("\n[INFO] %s : enter core[%x,%d] / cycle %d\n",
     158    __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );
    149159
    150160    // take lock protecting scheduler lists
     
    154164    list_entry_t * last;
    155165
    156     // first : scan the kernel threads list,
    157     // only if this list is not empty
     166    // first : scan the kernel threads list if not empty
    158167    if( list_is_empty( &sched->k_root ) == false )
    159168    {
     
    171180            thread = LIST_ELEMENT( current , thread_t , sched_list );
    172181
    173             // return thread if runnable
    174             if( thread->blocked == 0 )
     182            // return thread if not idle_thread and runnable
     183            if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) )
    175184            {
    176185                // release lock
    177186                spinlock_unlock( &sched->lock );
     187
     188                sched_dmsg("\n[INFO] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
     189                __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
     190
    178191                return thread;
    179192            }
     
    182195    }
    183196
    184     // second : scan the user threads list,
    185     // only if this list is not empty
     197    // second : scan the user threads list if not empty
    186198    if( list_is_empty( &sched->u_root ) == false )
    187199    {
     
    204216                // release lock
    205217                spinlock_unlock( &sched->lock );
     218
     219                sched_dmsg("\n[INFO] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
     220                __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
    206221                return thread;
    207222            }
     
    213228    spinlock_unlock( &sched->lock );
    214229
     230    sched_dmsg("\n[INFO] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
     231    __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );
     232
    215233    // third : return idle thread if no runnable thread
    216234    return sched->idle;
    217235
    218 }  // end sched_elect()
     236}  // end sched_select()
    219237
    220238//////////////////////////////////////////
     
    223241    list_entry_t * iter;
    224242    thread_t     * thread;
    225 
    226243    scheduler_t  * sched = &core->scheduler;
     244
     245    sched_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d]\n",
     246    __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
    227247
    228248    // take lock protecting threads lists
     
    246266    spinlock_unlock( &sched->lock );
    247267
     268    sched_dmsg("\n[INFO] %s : exit / thread %x on core[%x,%d]\n",
     269    __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
     270
    248271} // end sched_handle_signals()
    249272
    250 //////////////////
    251 void sched_yield()
     273///////////////////////////////////
     274void sched_yield( thread_t * next )
    252275{
    253276    reg_t         sr_save;
    254     thread_t    * next;
    255277
    256278    thread_t    * current = CURRENT_THREAD;
     
    258280    scheduler_t * sched   = &core->scheduler;
    259281
    260     if( thread_can_yield() == false )
    261     {
    262         printk("\n[PANIC] in %s : thread %x for process %x on core_gid %x"
    263                " has not released all locks at cycle %d\n",
    264                __FUNCTION__, current->trdid, current->process->pid,
    265                local_cxy , core->lid , hal_get_cycles() );
    266         hal_core_sleep();
    267     }
    268 
    269     // desactivate IRQs
    270     hal_disable_irq( &sr_save );
     282    sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] enter / cycle %d\n",
     283    __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
     284
     285    // check calling thread released all locks
     286    assert( (thread_can_yield() == true), __FUNCTION__, "locks not released\n");
    271287
    272288    // first loop on all threads to handle pending signals
    273289    sched_handle_signals( core );
    274290
    275     // second loop on threads to select next thread
    276     next = sched_select( core );
    277 
    278     // check stack overflow for selected thread
    279     if( next->signature != THREAD_SIGNATURE )
    280     {
    281         printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
    282                " on core [%x][%d]\n",
    283                __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid );
    284         hal_core_sleep();
    285         }
    286        
    287         sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
    288                __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );
    289 
    290     // switch contexts and update scheduler state if new thread
    291         if( next != current ) 
    292         {
    293         hal_cpu_context_save( current );
    294         hal_cpu_context_restore( next );
    295 
     291    // second loop on threads to select next thread if required
     292    if( next == NULL ) next = sched_select( core );
     293
     294    // check next thread attached to same core as the calling thread
     295    assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");
     296
     297    // check next thread not blocked
     298    assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");
     299
     300    // switch contexts and update scheduler state if next != current
     301        if( next != current )
     302    {
     303        sched_dmsg("\n[INFO] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
     304        __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid,
     305        next->trdid, thread_type_str(next->type), hal_time_stamp() );
     306
      307        // calling thread deactivates IRQs
     308        hal_disable_irq( &sr_save );
     309
     310        // update scheduler
    296311        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
    297312        else                               sched->k_last = &current->sched_list;
    298 
    299313        sched->current = next;
    300         }
    301 
    302     // restore IRQs
    303     hal_restore_irq( sr_save );
    304 
    305         if( current->type != THREAD_USER ) return;
    306 
    307         if( next == core->fpu_owner ) hal_fpu_enable();
    308         else                          hal_fpu_disable();
    309 
     314
     315        // handle FPU
      316            if( next->type == THREAD_USER )
     317        {
     318                if( next == core->fpu_owner )  hal_fpu_enable();
     319                else                           hal_fpu_disable();
     320        }
     321
     322        // switch contexts
     323        hal_cpu_context_save( current->cpu_context );
     324        hal_cpu_context_restore( next->cpu_context );
     325
     326        // restore IRQs when calling thread resume
     327        hal_restore_irq( sr_save );
     328
     329        sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] / cycle %d\n",
     330        __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
     331    }
     332    else
     333    {
     334        sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] continue / cycle %d\n",
     335        __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
     336    }
    310337}  // end sched_yield()
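
The FPU lines above implement lazy FPU switching: before the context switch, the FPU is left enabled only when the incoming user thread is already the core's fpu_owner, and disabled otherwise, so that the new owner's first FPU instruction traps and the FPU registers are transferred on demand. The sketch below shows the trap-side half of that scheme; the handler name, the hal_fpu_context_save() / hal_fpu_context_restore() calls, and the fpu_context field are assumptions, not part of this changeset.

    // Hypothetical "FPU unavailable" trap handler pairing with the
    // hal_fpu_enable() / hal_fpu_disable() calls in sched_yield().
    void fpu_unavailable_handler_sketch( void )        // (assumed) handler name
    {
        thread_t * this = CURRENT_THREAD;
        core_t   * core = this->core;

        // give the FPU back to the faulting thread
        hal_fpu_enable();

        if( core->fpu_owner != this )
        {
            // save the FPU registers of the previous owner, if any
            if( core->fpu_owner != NULL )
                hal_fpu_context_save( core->fpu_owner->fpu_context );     // (assumed)

            // restore the FPU registers of the new owner and record ownership
            hal_fpu_context_restore( this->fpu_context );                 // (assumed)
            core->fpu_owner = this;
        }
    }
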
    311338
    312 //////////////////////////////////////
    313 void sched_switch_to( thread_t * new )
    314 {
    315     reg_t         sr_save;
    316 
    317     thread_t    * current = CURRENT_THREAD;
    318     core_t      * core    = current->core;
    319     process_t   * process = current->process;
    320 
    321     // check calling thread released all locks
    322     if( thread_can_yield() == false )
    323     {
    324         printk("\n[PANIC] in %s : thread %x for process %x on core %d in cluster %x"
    325                " has not released all locks\n",
    326                __FUNCTION__, current->trdid, process->pid, core->lid, local_cxy );
    327         hal_core_sleep();
    328     }
    329 
    330     // check new thread attached to same core as the calling thread
    331     if( new->core != current->core )
    332     {
    333         printk("\n[PANIC] in %s : new thread %x is attached to core %d"
    334                " different from core %d of current thread\n",
    335                __FUNCTION__, new->trdid, new->core->lid, core->lid , current->trdid );
    336         hal_core_sleep();
    337     }
    338 
    339     // check new thread not blocked
    340     if( new->blocked == 0 )
    341     {
    342         printk("\n[PANIC] in %s for thread %x of process %x on core %d in cluster %x"
    343                " : new thread %x is blocked\n",
    344                __FUNCTION__, current->trdid, process->pid , core->lid, local_cxy , new->trdid );
    345         hal_core_sleep();
    346     }
    347 
    348     // check stack overflow for new thread
    349     if( new->signature != THREAD_SIGNATURE )
    350     {
    351         printk("\n[PANIC] in %s : stack overflow for new thread %x of process %x"
    352                " on core %d in cluster %x\n",
    353                __FUNCTION__, new->trdid, process->pid , core->lid , local_cxy );
    354         hal_core_sleep();
    355         }
    356 
    357     // desactivate IRQs
    358     hal_disable_irq( &sr_save );
    359 
    360     // loop on all threads to handle pending signals
    361     sched_handle_signals( core );
    362 
    363     // check stack overflow for new thread
    364     if( new->signature != THREAD_SIGNATURE )
    365     {
    366         printk("PANIC %s detected stack overflow for thread %x of process %x"
    367                " on core %d in cluster %x\n",
    368                __FUNCTION__, new->trdid, new->process->pid, core->lid, local_cxy);
    369         hal_core_sleep();
    370         }
    371        
    372         sched_dmsg("INFO : %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
    373                __FUNCTION__, core->lid, local_cxy, current->trdid, new->trdid );
    374 
    375     // switch contexts if new thread
    376     hal_cpu_context_save( current );
    377     hal_cpu_context_restore( new );
    378 
    379     // restore IRQs
    380     hal_restore_irq( sr_save );
    381 
    382         if( current->type != THREAD_USER ) return;
    383 
    384         if( current == core->fpu_owner )  hal_fpu_enable();
    385         else                              hal_fpu_disable();
    386 
    387 }  // end sched_switch_to()
    388 
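
sched_switch_to() disappears with this changeset: its printk() / hal_core_sleep() checks survive as the assert() calls now placed inside sched_yield(). A call site that performed a direct hand-off is therefore rewritten as in this sketch (the wake_and_handoff() helper and its argument are hypothetical, shown only to illustrate the migration):

    // Hypothetical call site : 'server' must be runnable and attached to the calling
    // core, exactly as sched_switch_to() required and as the new asserts still enforce.
    static void wake_and_handoff( thread_t * server )
    {
        // before r296 :  sched_switch_to( server );
        // after  r296 :
        sched_yield( server );
    }
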
     339////////////////////
     340void sched_display()
     341{
     342    list_entry_t * iter;
     343    thread_t     * thread;
     344    uint32_t       save_sr;
     345
     346    thread_t     * current = CURRENT_THREAD;
     347    core_t       * core    = current->core;
     348    scheduler_t  * sched   = &core->scheduler;
     349   
     350    // get pointers on TXT0 chdev
     351    xptr_t    txt0_xp  = chdev_dir.txt[0];
     352    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     353    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     354
     355    // get extended pointer on remote TXT0 chdev lock
     356    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     357
     358    // get TXT0 lock in busy waiting mode
     359    remote_spinlock_lock_busy( lock_xp , &save_sr );
     360
     361    nolock_printk("\n********** scheduler state for core[%x,%d] **********************\n"
     362           "kernel_threads = %d / user_threads = %d / current = %x\n",
     363            local_cxy , core->lid,
     364            sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
     365
     366    // display kernel threads
     367    LIST_FOREACH( &sched->k_root , iter )
     368    {
     369        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     370        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
     371        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
     372        thread->entry_func, thread->blocked );
     373    }
     374
     375    // display user threads
     376    LIST_FOREACH( &sched->u_root , iter )
     377    {
     378        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     379        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
     380        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
     381        thread->entry_func, thread->blocked );
     382    }
     383
     384    // release TXT0 lock
     385    remote_spinlock_unlock_busy( lock_xp , save_sr );
     386
     387}  // end sched_display()
     388
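
sched_display() takes the remote TXT0 lock once, in busy-waiting mode, and prints every line with nolock_printk() before releasing it, so that dumps issued by different cores cannot interleave on the console. The same pattern can be reused for any multi-line debug dump, as in this sketch (the helper name and its arguments are hypothetical):

    // Hypothetical multi-line dump protected by the TXT0 busy-waiting lock,
    // following the same sequence as sched_display() above.
    static void txt0_dump_sketch( const char * tag , uint32_t value )
    {
        uint32_t    save_sr;

        // get pointers on the TXT0 chdev and on its remote lock
        xptr_t      txt0_xp  = chdev_dir.txt[0];
        cxy_t       txt0_cxy = GET_CXY( txt0_xp );
        chdev_t   * txt0_ptr = GET_PTR( txt0_xp );
        xptr_t      lock_xp  = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

        // get TXT0 lock in busy waiting mode
        remote_spinlock_lock_busy( lock_xp , &save_sr );

        nolock_printk("\n********** %s **********\n", tag );
        nolock_printk(" - value = %x\n", value );

        // release TXT0 lock
        remote_spinlock_unlock_busy( lock_xp , save_sr );
    }
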
  • trunk/kernel/kern/scheduler.h

    r279 r296  
    3636/*********************************************************************************************
     3737 * This structure defines the scheduler associated with a given core.
    38  * WARNING : the idle thread is executed when there is no runable thread in the list
    39  * of attached threads, but is NOT part of the list of attached threads.
    4038 ********************************************************************************************/
    4139
     
    7876 * This function handles pending signals for all registered threads, and tries to make
    7977 * a context switch for the core running the calling thread.
    80  * - If there is a runable thread (other than the current thread or the idle thread),
    81  *   the calling thread is descheduled, but its state is not modified.
     78 * - If the <next> argument is not NULL, this next thread starts execution.
      79 * - If <next> is NULL, it calls the sched_select() function. If there is a runnable thread
      80 *   (other than current thread or idle thread), this selected thread starts execution.
     8281 * - If there is no other runnable thread, the calling thread continues execution.
     8382 * - If there is no runnable thread, the idle thread is executed.
     83 *********************************************************************************************
     84 * @ next  : local pointer on next thread to run / call sched_select() if NULL.
    8485 ********************************************************************************************/
    85 void sched_yield();
    86 
    87 /*********************************************************************************************
    88  * This function handles pending signals for all registered threads, and make
    89  * a context switch to the thread defined by the <thread> argument.
    90  * If the selected thread is not attached to the same core as the calling thread,
    91  * or is blocked, it causes a kernel panic.
    92  *********************************************************************************************
    93  * @ new   : local pointer on the thread to run.
    94  ********************************************************************************************/
    95 void sched_switch_to( struct thread_s * new );
     86void sched_yield( struct thread_s * next );
    9687
    9788/*********************************************************************************************
     
    130121
    131122/*********************************************************************************************
    132  * This function scan the list of kernel threads to find an idle (blocked) RPC thread.
    133  *********************************************************************************************
    134  * @ core    : local pointer on the core descriptor.
    135  * @ returns pointer on RPC thread descriptor / returns NULL if no idle RPC thread.
      123 * This function displays the internal state of the calling core scheduler.
    136124 ********************************************************************************************/
    137 struct thread_s * sched_get_rpc_thead( struct core_s * core );
    138 
     125void sched_display();
    139126
    140127
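
For reference, the two calling modes documented for sched_yield() above look like this at a call site; the sketch is illustrative only, and 'target' is a hypothetical thread that, as the asserts in sched_yield() require, must be runnable and attached to the calling core.

    // Usage sketch for the new sched_yield() prototype.
    static void yield_usage_sketch( thread_t * target )
    {
        sched_yield( NULL );      // mode 1 : let sched_select() choose the next thread
        sched_yield( target );    // mode 2 : direct hand-off to a caller-chosen thread
    }
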
  • trunk/kernel/kern/thread.c

    r286 r296  
    5454char * thread_type_str( uint32_t type )
    5555{
    56     if     ( type == THREAD_USER   ) return "USER";
     56    if     ( type == THREAD_USER   ) return "USR";
    5757    else if( type == THREAD_RPC    ) return "RPC";
    5858    else if( type == THREAD_DEV    ) return "DEV";
    59     else if( type == THREAD_KERNEL ) return "KERNEL";
    60     else if( type == THREAD_IDLE   ) return "IDLE";
     59    else if( type == THREAD_KERNEL ) return "KER";
     60    else if( type == THREAD_IDLE   ) return "IDL";
    6161    else                             return "undefined";
    6262}
     
    199199
    200200        return 0;
    201 }
     201
     202} // end thread_init()
    202203
    203204/////////////////////////////////////////////////////////
     
    309310    *new_thread = thread;
    310311        return 0;
    311 }
     312
     313}  // end thread_user_create()
    312314
    313315//////////////////////////////////////////////
     
    395397    *new_thread = thread;
    396398        return 0;
    397 }
     399
     400}  // end thread_user_fork()
    398401
    399402/////////////////////////////////////////////////////////
     
    407410        thread_t     * thread;       // pointer on new thread descriptor
    408411
    409     thread_dmsg("\n[INFO] %s : enters for type %s in cluster %x\n",
    410                 __FUNCTION__ , thread_type_str( type ) , local_cxy );
     412    thread_dmsg("\n[INFO] %s : enter / for type %s on core[%x,%d] / cycle %d\n",
     413    __FUNCTION__ , thread_type_str( type ) , local_cxy , core_lid , hal_time_stamp() );
    411414
    412415    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
     
    440443        hal_cpu_context_create( thread );
    441444
    442     thread_dmsg("\n[INFO] %s : exit in cluster %x / trdid = %x / core_lid = %d\n",
    443                  __FUNCTION__ , local_cxy , thread->trdid , core_lid );
     445    thread_dmsg("\n[INFO] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n",
     446    __FUNCTION__ , thread->trdid , thread_type_str(type) ,
     447    local_cxy , core_lid , hal_time_stamp() );
    444448
    445449    *new_thread = thread;
    446450        return 0;
    447 }
     451
     452} // end thread_kernel_create()
    448453
    449454///////////////////////////////////////////////////
     
    685690
    686691    // deschedule
    687     sched_yield();
     692    sched_yield( NULL );
    688693    return 0;
    689694}
     
    741746
    742747        // force scheduling
    743         sched_yield();
     748        sched_yield( NULL );
    744749   }
    745750}