source: trunk/kernel/kern/scheduler.c @ 640

Last change on this file: r640, checked in by alain, 5 years ago

Remove all RPCs in page-fault handling.

/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016,2017,2018)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <rpc.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>


///////////////////////////////////////////////////////////////////////////////////////////
//         global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
extern process_t            process_zero;       // allocated in kernel_init.c

///////////////////////////////////////////////////////////////////////////////////////////
//         private functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////////////////
// This static function does NOT modify the scheduler state.
// It just selects a thread in the list of attached threads, implementing the following
// three-step policy:
// 1) It scans the list of kernel threads, starting from the thread following the last
//    executed one, and returns the first runnable thread found : not IDLE, not blocked,
//    client queue not empty. It can be the current thread.
// 2) If no kernel thread is found, it scans the list of user threads, starting from the
//    thread following the last executed one, and returns the first runnable thread
//    found : not blocked. It can be the current thread.
// 3) If no runnable thread is found, it returns the idle thread.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
// @ returns pointer on selected thread descriptor
////////////////////////////////////////////////////////////////////////////////////////////
static thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;
    uint32_t       count;

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {

// check kernel threads list
assert( (count < sched->k_threads_nr), "bad kernel threads list" );

            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select kernel thread if non blocked and non THREAD_IDLE
            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) ) return thread;

        } // end loop on kernel threads
    } // end kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {

// check user threads list
assert( (count < sched->u_threads_nr), "bad user threads list" );

            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select thread if non blocked
            if( thread->blocked == 0 )  return thread;

        } // end loop on user threads
    } // end user threads

    // third : return idle thread if no other runnable thread
    return sched->idle;

}  // end sched_select()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is the only function that can actually delete a thread
// (and the associated process descriptor, if required).
// It is private, because it is only called by the sched_yield() public function.
// It scans all threads attached to a given scheduler, and executes the relevant
// actions for two types of pending requests:
//
// - REQ_ACK : it checks that the target thread is blocked, decrements the response
//   counter to acknowledge the client thread, and resets the pending request.
// - REQ_DELETE : it removes the target thread from the process th_tbl[], removes it
//   from the scheduler list, and releases the memory allocated to the thread descriptor.
//   For a user thread, it destroys the process descriptor if the target thread is
//   the last thread in the local process descriptor.
//
// Implementation note:
// We use a while loop to scan the threads in the scheduler lists, because some threads
// can be destroyed during the scan, so we cannot use LIST_FOREACH().
////////////////////////////////////////////////////////////////////////////////////////////
// @ core    : local pointer on the core descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;
    scheduler_t  * sched;
    uint32_t       threads_nr;   // number of threads in scheduler list
    ltid_t         ltid;         // thread local index
    uint32_t       count;        // number of threads in local process

    // get pointer on scheduler
    sched = &core->scheduler;

    ////////////////// scan user threads to handle ACK and DELETE requests
    root = &sched->u_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_ACK
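        // (the client thread that posted the REQ_ACK waits for the ack_rsp_count
        //  counter to reach zero ; decrementing it confirms that this target
        //  thread is actually blocked)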
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {

// check target thread blocked
assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE (the target thread cannot be the calling thread)
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {

// check calling thread != target thread
assert( (thread != CURRENT_THREAD) , "calling thread cannot delete itself" );

            // get thread process descriptor
            process = thread->process;

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->u_threads_nr;
            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
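            // if u_last pointed to the removed thread, select a new u_last :
            // - the list becomes empty       => no last thread
            // - removed thread was the first => last entry becomes u_last
            // - otherwise                    => first entry becomes u_last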
            if( sched->u_last == &thread->sched_list )
            {
                if( threads_nr == 1 ) 
                {
                    sched->u_last = NULL;
                }
                else if( sched->u_root.next == &thread->sched_list )
                {
                    sched->u_last = sched->u_root.pred;
                }
                else
                {
                    sched->u_last = sched->u_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, cycle );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t local_nr    = thread->info.local_pgfault_nr;
uint32_t local_cost  = (local_nr == 0)  ? 0 : (thread->info.local_pgfault_cost / local_nr);
uint32_t global_nr   = thread->info.global_pgfault_nr;
uint32_t global_cost = (global_nr == 0) ? 0 : (thread->info.global_pgfault_cost / global_nr);
uint32_t false_nr    = thread->info.false_pgfault_nr;
uint32_t false_cost  = (false_nr == 0)  ? 0 : (thread->info.false_pgfault_cost / false_nr);
printk("\n***** page faults for thread[%x,%x]\n" 
       "  - %d local  : %d cycles\n"
       "  - %d global : %d cycles\n"
       "  - %d false  : %d cycles\n",
       process->pid, thread->trdid,
       local_nr,  local_cost,
       global_nr, global_cost,
       false_nr,  false_cost );
#endif

            // release memory allocated for thread
            // (the debug and instrumentation blocks above must read the thread
            //  descriptor before it is destroyed)
            count = thread_destroy( thread );

            hal_fence();

            // destroy process descriptor if last thread
            if( count == 1 ) 
            {

#if DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] process %x in cluster %x deleted / cycle %d\n",
__FUNCTION__ , process->pid , local_cxy , cycle );
#endif
                // delete process
                // (the debug block above reads process->pid before destruction)
                process_destroy( process );
            }
        }
    }  // end user threads

    ///////////// scan kernel threads for DELETE only
    root = &sched->k_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_DELETE only if target thread != calling thread
        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
        {

// check process descriptor is local kernel process
assert( ( thread->process == &process_zero ) , "illegal process descriptor");

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->k_threads_nr;
            sched->k_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
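            // if k_last pointed to the removed thread, select a new k_last :
            // - the list becomes empty       => no last thread
            // - removed thread was the first => last entry becomes k_last
            // - otherwise                    => first entry becomes k_last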
            if( sched->k_last == &thread->sched_list )
            {
                if( threads_nr == 1 ) 
                {
                    sched->k_last = NULL;
                }
                else if( sched->k_root.next == &thread->sched_list )
                {
                    sched->k_last = sched->k_root.pred;
                }
                else
                {
                    sched->k_last = sched->k_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

            // get number of threads in local kernel process
            count = process_zero.th_nr;

// check th_nr value
assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0" );

            // remove thread from process th_tbl[]
            process_zero.th_tbl[ltid] = NULL;
            hal_atomic_add( &process_zero.th_nr , - 1 );
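            // (the decrement must be atomic because process_zero is shared by
            //  all cores in the cluster, and several schedulers can update it
            //  concurrently)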

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
#endif
            // delete thread descriptor
            // (the debug block above must read the descriptor before destruction)
            thread_destroy( thread );
        }
    }
} // end sched_handle_signals()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the sched_yield() function when the RPC_FIFO
// associated to the core is not empty.
// It searches for an idle RPC thread on this core, and unblocks it if found.
// It creates a new RPC thread if no idle RPC thread is found.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_rpc_activate( scheduler_t * sched )
{
    error_t         error;
    thread_t      * thread; 
    list_entry_t  * iter;
    lid_t           lid = CURRENT_THREAD->core->lid;
    bool_t          found = false;

    // search one IDLE RPC thread associated to the selected core   
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        if( (thread->type == THREAD_RPC) && 
            (thread->blocked == THREAD_BLOCKED_IDLE ) ) 
        {
            found = true;
            break;
        }
    }

    if( found == false )     // create new RPC thread     
    {
        error = thread_kernel_create( &thread,
                                      THREAD_RPC,
                                      &rpc_server_func,
                                      NULL,
                                      lid );
        // check memory
        if ( error )
        {
            printk("\n[ERROR] in %s : no memory to create an RPC thread in cluster %x\n",
            __FUNCTION__, local_cxy );
        }
        else
        {
            // unblock created RPC thread
            thread->blocked = 0;

            // update RPC threads counter 
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 
printk("\n[%s] new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
#endif
        }
    }
    else                 // RPC thread found => unblock it
    {
        // unblock found RPC thread
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 
printk("\n[%s] idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
#endif

    }

} // end sched_rpc_activate()



///////////////////////////////////////////////////////////////////////////////////////////
//         public functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;               // initialized in kernel_init()
    sched->u_last         = NULL;               // initialized in sched_register_thread()
    sched->k_last         = NULL;               // initialized in sched_register_thread()

    // initialise threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    // init lock
    busylock_init( &sched->lock , LOCK_SCHED_STATE );

    sched->req_ack_pending = false;             // no pending request
    sched->trace           = false;             // context switches trace deactivated

}  // end sched_init()

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list; 
    }

    // release lock
    busylock_release( &sched->lock );

}  // end sched_register_thread()

//////////////////////////////////////////////////////////////////
void sched_yield( const char * cause __attribute__((__unused__)) )
{
    thread_t      * next;
    thread_t      * current = CURRENT_THREAD;
    core_t        * core    = current->core;
    lid_t           lid     = core->lid;
    scheduler_t   * sched   = &core->scheduler;
    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];

#if DEBUG_SCHED_YIELD
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

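// (when bit 0 of DEBUG_SCHED_YIELD is set, each traced yield can also display
//  the complete scheduler state on the TXT0 terminal)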
#if (DEBUG_SCHED_YIELD & 0x1)
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
sched_remote_display( local_cxy , lid );
#endif

// This assert should never be false, as this check has been
// done before, by any function that can possibly deschedule...
assert( (current->busylocks == 0),
"unexpected descheduling of thread holding %d busylocks\n", current->busylocks );

    // activate or create an RPC thread if RPC_FIFO non empty
    if( remote_fifo_is_empty( fifo ) == false )  sched_rpc_activate( sched );

    // disable IRQs / save SR in current thread descriptor
    hal_disable_irq( &current->save_sr );

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    // select next thread
    next = sched_select( sched );

// check next thread kernel_stack overflow
assert( (next->signature == THREAD_SIGNATURE),
"kernel stack overflow for thread %x on core[%x,%d]", next, local_cxy, lid );

// check next thread attached to same core as the calling thread
assert( (next->core == current->core),
"next core %x != current core %x", next->core, current->core );

// check next thread not blocked when type != IDLE
assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
"next thread %x (%s) is blocked on core[%x,%d]", 
next->trdid , thread_type_str(next->type) , local_cxy , lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {
        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
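        // (the FPU is kept enabled only for its current owner, so the FPU
        //  registers do not have to be saved/restored on every switch ;
        //  a non-owner thread must reclaim ownership on its first FPU access)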
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause, 
current, thread_type_str(current->type), current->process->pid, current->trdid, next,
thread_type_str(next->type), next->process->pid, next->trdid, cycle );
#endif

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {
        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()


/////////////////////////////////////
void sched_remote_display( cxy_t cxy,
                           lid_t lid )
{
    thread_t     * thread;

    // get local pointer on target scheduler
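    // (this pointer is never dereferenced directly : all accesses below go
    //  through hal_remote_* accessors, using an XPTR built from the target cxy)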
    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t * sched = &core->scheduler;

    // get local pointer on current thread in target scheduler
    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );

    // get local pointer on the first kernel and user threads list_entry
    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );
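    // (the TXT0 lock is held for the whole display, so that the output of
    //  concurrent cores cannot be interleaved)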

    // get rpc_threads
    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );

    // display header
    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
    nolock_printk("  type | pid        | trdid      | desc       | block      | flags      | func\n");

    // display kernel threads
    while( k_entry != &sched->k_root )
    {
        // get local pointer on kernel_thread
        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );

        // display thread info
        if (type == THREAD_DEV) 
        {
            char      name[16];
            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );

            nolock_printk(" - %s | %X | %X | %X | %X | %X | %s\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
        }
        else
        {
            nolock_printk(" - %s | %X | %X | %X | %X | %X |\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags );
        }

        // get next remote kernel thread list_entry
        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
    }

    // display user threads
    while( u_entry != &sched->u_root )
    {
        // get local pointer on user_thread
        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
        void      *   func    = hal_remote_lpt ( XPTR( cxy , &thread->entry_func ) );

        nolock_printk(" - %s | %X | %X | %X | %X | %X | %x\n",
        thread_type_str( type ), pid, trdid, thread, blocked, flags, (uint32_t)func );

        // get next user thread list_entry
        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end sched_remote_display()