Timestamp:
Oct 4, 2018, 11:47:36 PM (6 years ago)
Author:
alain
Message:

Complete restructuring of kernel locks.

File:
1 edited

  • trunk/kernel/kern/scheduler.c

    r551 r564  
    22 * scheduler.c - Core scheduler implementation.
    33 *
    4  * Author    Alain Greiner (2016)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    3636
    3737///////////////////////////////////////////////////////////////////////////////////////////
    38 // Extern global variables
     38//         global variables
    3939///////////////////////////////////////////////////////////////////////////////////////////
    4040
    41 uint32_t   idle_thread_count;
    42 uint32_t   idle_thread_count_active;
    43 
    44 extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
    45 extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file
    46 
    47 ////////////////////////////////
    48 void sched_init( core_t * core )
    49 {
    50     scheduler_t * sched = &core->scheduler;
    51 
    52     sched->u_threads_nr   = 0;
    53     sched->k_threads_nr   = 0;
    54 
    55     sched->current        = CURRENT_THREAD;
    56     sched->idle           = NULL;               // initialized in kernel_init()
    57     sched->u_last         = NULL;               // initialized in sched_register_thread()
    58     sched->k_last         = NULL;               // initialized in sched_register_thread()
    59 
    60     // initialise threads lists
    61     list_root_init( &sched->u_root );
    62     list_root_init( &sched->k_root );
    63 
    64     // init spinlock
    65     spinlock_init( &sched->lock );
    66 
    67     sched->req_ack_pending = false;             // no pending request
    68     sched->trace           = false;             // context switches trace desactivated
    69 
    70 }  // end sched_init()
    71 
    72 ////////////////////////////////////////////
    73 void sched_register_thread( core_t   * core,
    74                             thread_t * thread )
    75 {
    76     scheduler_t * sched = &core->scheduler;
    77     thread_type_t type  = thread->type;
    78 
    79     // take lock protecting sheduler lists
    80     uint32_t       irq_state;
    81     spinlock_lock_busy( &sched->lock, &irq_state );
    82 
    83     if( type == THREAD_USER )
    84     {
    85         list_add_last( &sched->u_root , &thread->sched_list );
    86         sched->u_threads_nr++;
    87         if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    88     }
    89     else // kernel thread
    90     {
    91         list_add_last( &sched->k_root , &thread->sched_list );
    92         sched->k_threads_nr++;
    93         if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    94     }
    95 
    96     // release lock
    97     hal_fence();
    98     spinlock_unlock_busy( &sched->lock, irq_state);
    99 
    100 }  // end sched_register_thread()
    101 
    102 //////////////////////////////////////////////
     41extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
     42
     43///////////////////////////////////////////////////////////////////////////////////////////
     44//         private functions
     45///////////////////////////////////////////////////////////////////////////////////////////
     46
     47
     48////////////////////////////////////////////////////////////////////////////////////////////
     49// This static function does NOT modify the scheduler state.
      50// It just selects a thread from the lists of attached threads, implementing the following
      51// three-step policy:
      52// 1) It scans the list of kernel threads, from the next thread after the last executed one,
      53//    and returns the first runnable thread found: not IDLE, not blocked, client queue not empty.
      54//    It can be the current thread.
      55// 2) If no kernel thread is found, it scans the list of user threads, from the next thread after
      56//    the last executed one, and returns the first runnable thread found: not blocked.
      57//    It can be the current thread.
      58// 3) If no runnable thread is found, it returns the idle thread.
     59////////////////////////////////////////////////////////////////////////////////////////////
     60// @ sched   : local pointer on scheduler.
     61// @ returns pointer on selected thread descriptor
     62////////////////////////////////////////////////////////////////////////////////////////////
    10363thread_t * sched_select( scheduler_t * sched )
    10464{
     
    11070    uint32_t       count;
    11171
    112     // take lock protecting sheduler lists
    113     spinlock_lock( &sched->lock );
    114 
    11572    // first : scan the kernel threads list if not empty
    11673    if( list_is_empty( &sched->k_root ) == false )
     
    12481        while( done == false )
    12582        {
    126             assert( (count < sched->k_threads_nr), "bad kernel threads list" );
     83
     84// check kernel threads list
     85assert( (count < sched->k_threads_nr),
     86"bad kernel threads list" );
    12787
    12888            // get next entry in kernel list
     
    140100
    141101            // select kernel thread if non blocked and non THREAD_IDLE
    142             if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
    143             {
    144                 spinlock_unlock( &sched->lock );
    145                 return thread;
    146             }
     102            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) ) return thread;
     103
    147104        } // end loop on kernel threads
    148105    } // end kernel threads
     
    159116        while( done == false )
    160117        {
    161             assert( (count < sched->u_threads_nr), "bad user threads list" );
     118
     119// check user threads list
     120assert( (count < sched->u_threads_nr),
     121"bad user threads list" );
    162122
    163123            // get next entry in user list
     
    175135
    176136            // select thread if non blocked
    177             if( thread->blocked == 0 )
    178             {
    179                 spinlock_unlock( &sched->lock );
    180                 return thread;
    181             }
     137            if( thread->blocked == 0 )  return thread;
     138
    182139        } // end loop on user threads
    183140    } // end user threads
    184141
    185142    // third : return idle thread if no other runnable thread
    186     spinlock_unlock( &sched->lock );
    187143    return sched->idle;
    188144
    189145}  // end sched_select()
    190146
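The three-step policy described in the header comment above can be modelled outside the kernel. The sketch below is a stand-alone round-robin model using plain C arrays instead of the kernel's list_entry_t lists; mock_thread, pick_round_robin and the sample thread names are invented for the illustration and are not part of scheduler.c.

    /* Stand-alone model of the selection policy: scan round-robin starting
     * after the last executed entry, skip blocked and idle threads, and fall
     * back to the idle thread when nothing is runnable. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct { const char *name; bool blocked; bool is_idle; } mock_thread;

    /* return the first runnable thread found after index *last, or NULL */
    static mock_thread * pick_round_robin( mock_thread *t, size_t n, size_t *last )
    {
        for( size_t i = 1; i <= n; i++ )
        {
            size_t idx = (*last + i) % n;
            if( !t[idx].blocked && !t[idx].is_idle ) { *last = idx; return &t[idx]; }
        }
        return NULL;
    }

    int main( void )
    {
        mock_thread kernel[] = { {"RPC", true, false}, {"DEV", false, false} };
        mock_thread user[]   = { {"main", false, false} };
        mock_thread idle     = { "IDLE", false, true };
        size_t k_last = 0, u_last = 0;

        /* step 1: kernel threads / step 2: user threads / step 3: idle thread */
        mock_thread *next = pick_round_robin( kernel, 2, &k_last );
        if( next == NULL ) next = pick_round_robin( user, 1, &u_last );
        if( next == NULL ) next = &idle;

        printf( "selected: %s\n", next->name );   /* prints "selected: DEV" */
        return 0;
    }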
    191 ///////////////////////////////////////////
    192 void sched_handle_signals( core_t * core )
     147////////////////////////////////////////////////////////////////////////////////////////////
     148// This static function is the only function that can remove a thread from the scheduler.
     149// It is private, because it is called by the sched_yield() public function.
      150// It scans all threads attached to a given scheduler, and executes the relevant
      151// actions for pending requests:
      152// - REQ_ACK : it checks that the target thread is blocked, decrements the response counter
      153//   to acknowledge the client thread, and resets the pending request.
      154// - REQ_DELETE : it detaches the target thread from its parent if attached, detaches it from
      155//   the process, removes it from the scheduler, releases the memory allocated to the thread
      156//   descriptor, and destroys the process descriptor if the target thread was the last thread.
     157////////////////////////////////////////////////////////////////////////////////////////////
     158// @ core    : local pointer on the core descriptor.
     159////////////////////////////////////////////////////////////////////////////////////////////
     160static void sched_handle_signals( core_t * core )
    193161{
    194162
     
    197165    thread_t     * thread;
    198166    process_t    * process;
    199     bool_t         last_thread;
     167    scheduler_t  * sched;
     168    bool_t         last;
    200169
    201170    // get pointer on scheduler
    202     scheduler_t  * sched = &core->scheduler;
     171    sched = &core->scheduler;
    203172
    204173    // get pointer on user threads root
    205174    root = &sched->u_root;
    206175
    207     // take lock protecting threads lists
    208     spinlock_lock( &sched->lock );
    209 
    210176    // We use a while to scan the user threads, to control the iterator increment,
    211     // because some threads will be destroyed, and we cannot use a LIST_FOREACH()
      177    // because some threads will be destroyed, and we do not want to use a LIST_FOREACH()
    212178
    213179    // initialise list iterator
     
    226192        if( thread->flags & THREAD_FLAG_REQ_ACK )
    227193        {
    228             // check thread blocked
    229             assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
    230             "thread not blocked" );
     194
     195// check thread blocked
     196assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
     197"thread not blocked" );
    231198 
    232199            // decrement response counter
     
    237204        }
    238205
    239         // handle REQ_DELETE
    240         if( thread->flags & THREAD_FLAG_REQ_DELETE )
     206        // handle REQ_DELETE only if target thread != calling thread
     207        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
    241208        {
    242209            // get thread process descriptor
     
    246213                if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
    247214
    248             // remove thread from scheduler (scheduler lock already taken)
      215            // take lock protecting scheduler state
     216            busylock_acquire( &sched->lock );
     217
     218            // update scheduler state
    249219            uint32_t threads_nr = sched->u_threads_nr;
    250 
    251             assert( (threads_nr != 0) , "u_threads_nr cannot be 0\n" );
    252 
    253220            sched->u_threads_nr = threads_nr - 1;
    254221            list_unlink( &thread->sched_list );
     
    269236            }
    270237
     238            // release lock protecting scheduler state
     239            busylock_release( &sched->lock );
     240
    271241            // delete thread descriptor
    272             last_thread = thread_destroy( thread );
     242            last = thread_destroy( thread );
    273243
    274244#if DEBUG_SCHED_HANDLE_SIGNALS
     
    279249#endif
    280250            // destroy process descriptor if no more threads
    281             if( last_thread )
     251            if( last )
    282252            {
    283253                // delete process   
     
    293263        }
    294264    }
     265} // end sched_handle_signals()
     266
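The scan above uses an explicit while loop because the current element may be deleted during the traversal, as noted in the in-line comment. The stand-alone sketch below illustrates that save-the-successor-before-deleting pattern on an ordinary singly linked list; node_t, delete_marked and the sample values are invented names, not kernel types.

    /* Deleting elements while scanning a list: the successor is saved BEFORE
     * the current node may be freed, which a LIST_FOREACH-style loop cannot do. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node { int value; bool marked; struct node *next; } node_t;

    static void delete_marked( node_t **head )
    {
        node_t **link    = head;
        node_t  *current = *head;
        while( current != NULL )
        {
            node_t *next = current->next;   /* save the iterator before any free() */
            if( current->marked )
            {
                *link = next;               /* unlink the node ...                 */
                free( current );            /* ... then release it                 */
            }
            else
            {
                link = &current->next;
            }
            current = next;                 /* the saved successor is still valid  */
        }
    }

    int main( void )
    {
        /* build a small list: 1 -> 2(marked) -> 3 */
        node_t *n3 = malloc( sizeof(node_t) ); *n3 = (node_t){ 3, false, NULL };
        node_t *n2 = malloc( sizeof(node_t) ); *n2 = (node_t){ 2, true , n3   };
        node_t *n1 = malloc( sizeof(node_t) ); *n1 = (node_t){ 1, false, n2   };

        delete_marked( &n1 );
        for( node_t *p = n1; p != NULL; p = p->next ) printf( "%d ", p->value );  /* prints "1 3" */
        printf( "\n" );
        return 0;
    }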
     267////////////////////////////////////////////////////////////////////////////////////////////
      268// This static function is called by the sched_yield() function when the RPC_FIFO
      269// associated with the core is not empty.
      270// It checks whether an idle (blocked) RPC thread exists for this core, and unblocks
      271// it if found. It creates a new RPC thread if no idle RPC thread is found.
     272////////////////////////////////////////////////////////////////////////////////////////////
     273// @ sched   : local pointer on scheduler.
     274////////////////////////////////////////////////////////////////////////////////////////////
     275void sched_rpc_activate( scheduler_t * sched )
     276{
     277    error_t         error;
     278    thread_t      * thread; 
     279    list_entry_t  * iter;
     280    lid_t           lid = CURRENT_THREAD->core->lid;
     281    bool_t          found = false;
     282
      283    // search one IDLE RPC thread associated with the selected core
     284    LIST_FOREACH( &sched->k_root , iter )
     285    {
     286        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     287        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
     288        {
     289            // exit loop
     290            found = true;
     291            break;
     292        }
     293    }
     294
     295    if( found == false )     // create new RPC thread     
     296    {
     297        error = thread_kernel_create( &thread,
     298                                      THREAD_RPC,
      299                                      &rpc_thread_func,
      300                                      NULL,
      301                                      lid );
     302        // check memory
     303        if ( error )
     304        {
     305            printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
     306            __FUNCTION__, local_cxy );
     307        }
     308        else
     309        {
     310            // unblock created RPC thread
     311            thread->blocked = 0;
     312
     313            // update RPC threads counter 
     314            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );
     315
     316#if DEBUG_SCHED_RPC_ACTIVATE
     317uint32_t cycle = (uint32_t)hal_get_cycles();
     318if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
     319printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
     320__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
     321#endif
     322        }
     323    }
     324    else                 // RPC thread found => unblock it
     325    {
     326        // unblock found RPC thread
     327        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
     328
     329#if DEBUG_SCHED_RPC_ACTIVATE
     330uint32_t cycle = (uint32_t)hal_get_cycles();
     331if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
     332printk("\n[DBG] %s : idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
     333__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
     334#endif
     335
     336    }
     337
     338} // end sched_rpc_activate()
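The reuse-or-create policy implemented by sched_rpc_activate() (unblock an idle RPC thread when one exists, otherwise create a new one) can be shown on its own. The sketch below models it with a small fixed-size worker pool; worker_t, pool_t and pool_activate are names invented for the example and do not exist in the kernel.

    /* Stand-alone model of "reuse an idle worker, else create one". */
    #include <stdbool.h>
    #include <stdio.h>

    #define POOL_MAX 4

    typedef struct { int id; bool idle; } worker_t;
    typedef struct { worker_t slot[POOL_MAX]; int nr; } pool_t;

    /* wake an idle worker if one exists, otherwise create a new one (if room) */
    static worker_t * pool_activate( pool_t *pool )
    {
        for( int i = 0; i < pool->nr; i++ )              /* search an idle worker */
        {
            if( pool->slot[i].idle )
            {
                pool->slot[i].idle = false;              /* "unblock" it          */
                return &pool->slot[i];
            }
        }
        if( pool->nr == POOL_MAX ) return NULL;          /* creation failed       */
        worker_t *w = &pool->slot[pool->nr];             /* "create" a new worker */
        w->id   = pool->nr++;
        w->idle = false;
        return w;
    }

    int main( void )
    {
        pool_t pool = { .nr = 0 };
        worker_t *w1 = pool_activate( &pool );             /* creates worker 0     */
        w1->idle = true;                                    /* worker goes idle     */
        worker_t *w2 = pool_activate( &pool );             /* reuses worker 0      */
        printf( "w1 id %d / w2 id %d\n", w1->id, w2->id );  /* prints "w1 id 0 / w2 id 0" */
        return 0;
    }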
     339
     340
     341
     342///////////////////////////////////////////////////////////////////////////////////////////
     343//         public functions
     344///////////////////////////////////////////////////////////////////////////////////////////
     345
     346////////////////////////////////
     347void sched_init( core_t * core )
     348{
     349    scheduler_t * sched = &core->scheduler;
     350
     351    sched->u_threads_nr   = 0;
     352    sched->k_threads_nr   = 0;
     353
     354    sched->current        = CURRENT_THREAD;
     355    sched->idle           = NULL;               // initialized in kernel_init()
     356    sched->u_last         = NULL;               // initialized in sched_register_thread()
     357    sched->k_last         = NULL;               // initialized in sched_register_thread()
     358
     359    // initialise threads lists
     360    list_root_init( &sched->u_root );
     361    list_root_init( &sched->k_root );
     362
     363    // init lock
     364    busylock_init( &sched->lock , LOCK_SCHED_STATE );
     365
     366    sched->req_ack_pending = false;             // no pending request
      367    sched->trace           = false;             // context switches trace deactivated
     368
     369}  // end sched_init()
     370
     371////////////////////////////////////////////
     372void sched_register_thread( core_t   * core,
     373                            thread_t * thread )
     374{
     375    scheduler_t * sched = &core->scheduler;
     376    thread_type_t type  = thread->type;
     377
      378    // take lock protecting scheduler state
     379    busylock_acquire( &sched->lock );
     380
     381    if( type == THREAD_USER )
     382    {
     383        list_add_last( &sched->u_root , &thread->sched_list );
     384        sched->u_threads_nr++;
     385        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
     386    }
     387    else // kernel thread
     388    {
     389        list_add_last( &sched->k_root , &thread->sched_list );
     390        sched->k_threads_nr++;
     391        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
     392    }
    295393
    296394    // release lock
    297     hal_fence();
    298     spinlock_unlock( &sched->lock );
    299 
    300 } // end sched_handle_signals()
    301 
    302 ////////////////////////////////
     395    busylock_release( &sched->lock );
     396
     397}  // end sched_register_thread()
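The busylock_acquire() / busylock_release() pair used above is the pattern this changeset introduces wherever a short scheduler update must be protected. As a rough illustration only, the stand-alone model below builds a similar busy-waiting lock on C11 atomics; toy_busylock and its functions are invented names, and unlike the kernel busylock this model does not mask IRQs while the lock is held.

    /* Busy-waiting lock protecting a very short critical section. */
    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_flag taken; } toy_busylock;

    static void toy_busylock_acquire( toy_busylock *lock )
    {
        /* spin until the flag is obtained (the kernel version disables IRQs first) */
        while( atomic_flag_test_and_set_explicit( &lock->taken, memory_order_acquire ) )
            ;
    }

    static void toy_busylock_release( toy_busylock *lock )
    {
        atomic_flag_clear_explicit( &lock->taken, memory_order_release );
    }

    int main( void )
    {
        toy_busylock lock = { ATOMIC_FLAG_INIT };
        unsigned u_threads_nr = 0;              /* stands in for sched->u_threads_nr */

        toy_busylock_acquire( &lock );          /* enter critical section            */
        u_threads_nr++;                         /* short update of scheduler state   */
        toy_busylock_release( &lock );          /* leave critical section            */

        printf( "registered user threads: %u\n", u_threads_nr );
        return 0;
    }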
     398
     399//////////////////////////////////////
    303400void sched_yield( const char * cause )
    304401{
    305     thread_t    * next;
    306     thread_t    * current = CURRENT_THREAD;
    307     core_t      * core    = current->core;
    308     scheduler_t * sched   = &core->scheduler;
     402    thread_t      * next;
     403    thread_t      * current = CURRENT_THREAD;
     404    core_t        * core    = current->core;
     405    lid_t           lid     = core->lid;
     406    scheduler_t   * sched   = &core->scheduler;
     407    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];
    309408 
    310409#if (DEBUG_SCHED_YIELD & 0x1)
    311 if( sched->trace )
    312 sched_display( core->lid );
     410if( sched->trace ) sched_display( lid );
    313411#endif
    314412
    315     // delay the yield if current thread has locks
    316     if( (current->local_locks != 0) || (current->remote_locks != 0) )
    317     {
    318         current->flags |= THREAD_FLAG_SCHED;
    319         return;
    320     }
    321 
    322     // enter critical section / save SR in current thread descriptor
    323     hal_disable_irq( &CURRENT_THREAD->save_sr );
    324 
    325     // loop on threads to select next thread
     413// check current thread busylocks counter
     414assert( (current->busylocks == 0),
     415"thread cannot yield : busylocks = %d\n", current->busylocks );
     416
      417    // activate or create an RPC thread if the RPC_FIFO is not empty
     418    if( remote_fifo_is_empty( fifo ) == false )  sched_rpc_activate( sched );
     419
     420    // disable IRQs / save SR in current thread descriptor
     421    hal_disable_irq( &current->save_sr );
     422
      423    // take lock protecting scheduler state
     424    busylock_acquire( &sched->lock );
     425   
     426    // select next thread
    326427    next = sched_select( sched );
    327428
    328     // check next thread kernel_stack overflow
    329     assert( (next->signature == THREAD_SIGNATURE),
    330     "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, core->lid );
    331 
    332     // check next thread attached to same core as the calling thread
    333     assert( (next->core == current->core),
    334     "next core %x != current core %x\n", next->core, current->core );
    335 
    336     // check next thread not blocked when type != IDLE
    337     assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
    338     "next thread %x (%s) is blocked on core[%x,%d]\n",
    339     next->trdid , thread_type_str(next->type) , local_cxy , core->lid );
     429// check next thread kernel_stack overflow
     430assert( (next->signature == THREAD_SIGNATURE),
     431"kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, lid );
     432
     433// check next thread attached to same core as the calling thread
     434assert( (next->core == current->core),
     435"next core %x != current core %x\n", next->core, current->core );
     436
     437// check next thread not blocked when type != IDLE
     438assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
     439"next thread %x (%s) is blocked on core[%x,%d]\n",
     440next->trdid , thread_type_str(next->type) , local_cxy , lid );
    340441
    341442    // switch contexts and update scheduler state if next != current
    342443        if( next != current )
    343444    {
     445        // update scheduler
     446        sched->current = next;
     447        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
     448        else                            sched->k_last = &next->sched_list;
     449
     450        // handle FPU ownership
     451            if( next->type == THREAD_USER )
     452        {
     453                if( next == current->core->fpu_owner )  hal_fpu_enable();
     454                else                                    hal_fpu_disable();
     455        }
     456
     457        // release lock protecting scheduler state
     458        busylock_release( &sched->lock );
    344459
    345460#if DEBUG_SCHED_YIELD
     
    347462printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    348463"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
    349 __FUNCTION__, local_cxy, core->lid, cause,
     464__FUNCTION__, local_cxy, lid, cause,
    350465current, thread_type_str(current->type), current->process->pid, current->trdid,next ,
    351466thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
    352467#endif
    353468
    354         // update scheduler
    355         sched->current = next;
    356         if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
    357         else                            sched->k_last = &next->sched_list;
    358 
    359         // handle FPU ownership
    360             if( next->type == THREAD_USER )
    361         {
    362                 if( next == current->core->fpu_owner )  hal_fpu_enable();
    363                 else                                    hal_fpu_disable();
    364         }
    365 
    366469        // switch CPU from current thread context to new thread context
    367470        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
     
    369472    else
    370473    {
     474        // release lock protecting scheduler state
     475        busylock_release( &sched->lock );
    371476
    372477#if DEBUG_SCHED_YIELD
     
    374479printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    375480"      thread %x (%s) (%x,%x) continue / cycle %d\n",
    376 __FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),
     481__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
    377482current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
    378483#endif
     
    394499    list_entry_t * iter;
    395500    thread_t     * thread;
    396     uint32_t       save_sr;
    397 
    398     assert( (lid < LOCAL_CLUSTER->cores_nr), "illegal core index %d\n", lid);
     501
     502// check lid
     503assert( (lid < LOCAL_CLUSTER->cores_nr),
     504"illegal core index %d\n", lid);
    399505
    400506    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
     
    406512    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    407513
    408     // get extended pointer on remote TXT0 chdev lock
     514    // get extended pointer on remote TXT0 lock
    409515    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    410516
    411     // get TXT0 lock in busy waiting mode
    412     remote_spinlock_lock_busy( lock_xp , &save_sr );
     517    // get TXT0 lock
     518    remote_busylock_acquire( lock_xp );
    413519
    414520    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
     
    443549
    444550    // release TXT0 lock
    445     remote_spinlock_unlock_busy( lock_xp , save_sr );
     551    remote_busylock_release( lock_xp );
    446552
    447553}  // end sched_display()
     
    452558{
    453559    thread_t     * thread;
    454     uint32_t       save_sr;
    455 
    456     // check cxy
    457     bool_t undefined = cluster_is_undefined( cxy );
    458     assert( (undefined == false), "illegal cluster %x\n", cxy );
    459 
    460     // check lid
    461     uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
    462     assert( (lid < cores), "illegal core index %d\n", lid);
     560
     561// check cxy
     562assert( (cluster_is_undefined( cxy ) == false),
     563"illegal cluster %x\n", cxy );
     564
     565// check lid
     566assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
     567"illegal core index %d\n", lid );
    463568
    464569    // get local pointer on target scheduler
     
    481586    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    482587
    483     // get TXT0 lock in busy waiting mode
    484     remote_spinlock_lock_busy( lock_xp , &save_sr );
     588    // get TXT0 lock
     589    remote_busylock_acquire( lock_xp );
    485590
    486591    // display header
     
    495600
    496601        // get relevant thread info
    497         thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
    498         trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
    499         uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
    500         uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
     602        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
     603        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
     604        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
     605        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
    501606        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
    502         pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
     607        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
    503608
    504609        // display thread info
     
    529634
    530635        // get relevant thread info
    531         thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
    532         trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
    533         uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
    534         uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
     636        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
     637        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
     638        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
     639        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
    535640        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
    536         pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
     641        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
    537642
    538643        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
     
    544649
    545650    // release TXT0 lock
    546     remote_spinlock_unlock_busy( lock_xp , save_sr );
     651    remote_busylock_release( lock_xp );
    547652
    548653}  // end sched_remote_display()
    549654
     655