source: trunk/kernel/kern/scheduler.c @ 624

Last change on this file since 624 was 624, checked in by alain, 5 years ago

Fix several bugs to use the instruction MMU in kernel mode,
replacing the instruction address extension register,
and remove the "kentry" segment.

This version is running on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase register defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor Interrupts (IPIs).
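
The ordering constraint can be sketched as follows. This is only an illustrative,
self-contained C sketch: the names set_kernel_entry_point(), initialise_vfs() and
kernel_init_sketch() are hypothetical placeholders, not the actual ALMOS-MKH identifiers.

/* illustrative sketch of the required ordering in kernel_init() */

void set_kernel_entry_point( void ) { /* program cp0_ebase here */ }
void initialise_vfs( void )         { /* issues RPCs; each RPC relies on an IPI */ }

void kernel_init_sketch( void )
{
    // 1) program the kernel entry point (cp0_ebase) first :
    //    interrupts (including IPIs), exceptions and syscalls all vector through it
    set_kernel_entry_point();

    // 2) only then initialise the VFS : this step sends RPCs to other clusters,
    //    and each RPC uses an IPI to activate the remote RPC server thread,
    //    which requires a valid kernel entry point
    initialise_vfs();
}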

File size: 25.8 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016,2017,2018)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH. is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <rpc.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>


///////////////////////////////////////////////////////////////////////////////////////////
//         global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
extern process_t            process_zero;       // allocated in kernel_init.c

///////////////////////////////////////////////////////////////////////////////////////////
//         private functions
///////////////////////////////////////////////////////////////////////////////////////////


////////////////////////////////////////////////////////////////////////////////////////////
// This static function does NOT modify the scheduler state.
// It only selects a thread in the list of attached threads, implementing the following
// three-step policy:
// 1) It scans the list of kernel threads, starting from the thread following the last
//    executed one, and returns the first runnable thread found : not IDLE, not blocked,
//    client queue not empty. It can be the current thread.
// 2) If no runnable kernel thread is found, it scans the list of user threads, starting
//    from the thread following the last executed one, and returns the first runnable
//    thread found : not blocked. It can be the current thread.
// 3) If no runnable thread is found, it returns the idle thread.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
// @ returns pointer on selected thread descriptor
////////////////////////////////////////////////////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;
    uint32_t       count;

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        done    = false;
        count   = 0;
        current = last;
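
        // the scan starts at the entry following the last executed thread,
        // so that the attached kernel threads are served in round-robin order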

        while( done == false )
        {

// check kernel threads list
assert( (count < sched->k_threads_nr), "bad kernel threads list" );

            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select kernel thread if non blocked and non THREAD_IDLE
            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) ) return thread;

        } // end loop on kernel threads
    } // end kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {

// check user threads list
assert( (count < sched->u_threads_nr), "bad user threads list" );

            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select thread if non blocked
            if( thread->blocked == 0 )  return thread;

        } // end loop on user threads
    } // end user threads

    // third : return idle thread if no other runnable thread
    return sched->idle;

}  // end sched_select()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is the only function that can actually delete a thread
// (and the associated process descriptor if required).
// It is private, because it is only called by the sched_yield() public function.
// It scans all threads attached to a given scheduler, and executes the relevant
// actions for two types of pending requests:
//
// - REQ_ACK : it checks that the target thread is blocked, decrements the response
//   counter to acknowledge the client thread, and resets the pending request.
// - REQ_DELETE : it removes the target thread from the process th_tbl[], removes it
//   from the scheduler list, and releases the memory allocated to the thread descriptor.
//   For a user thread, it also destroys the process descriptor if the target thread is
//   the last thread in the local process descriptor.
//
// Implementation note:
// We use a while loop to scan the threads in the scheduler lists, because some threads
// can be destroyed, so we cannot use a LIST_FOREACH().
////////////////////////////////////////////////////////////////////////////////////////////
// @ core    : local pointer on the core descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;
    scheduler_t  * sched;
    uint32_t       threads_nr;   // number of threads in scheduler list
    ltid_t         ltid;         // thread local index
    uint32_t       count;        // number of threads in local process

    // get pointer on scheduler
    sched = &core->scheduler;

    /////////////// scan user threads to handle both ACK and DELETE requests
    root = &sched->u_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {

// check thread blocked
assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE only if target thread != calling thread
        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
        {
            // get thread process descriptor
            process = thread->process;

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->u_threads_nr;
            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
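
            // the u_last pointer must be repaired if it pointed to the deleted
            // thread : set it to NULL when the list becomes empty, otherwise make
            // it point to another valid entry, so that the next sched_select()
            // scan starts from a legal list_entry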
            if( sched->u_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->u_last = NULL;
                }
                else if( sched->u_root.next == &thread->sched_list )
                {
                    sched->u_last = sched->u_root.pred;
                }
                else
                {
                    sched->u_last = sched->u_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

// check th_nr value
assert( (process->th_nr > 0) , "process th_nr cannot be 0\n" );

            // remove thread from process th_tbl[]
            process->th_tbl[ltid] = NULL;
            count = hal_atomic_add( &process->th_nr , - 1 );
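
            // note : hal_atomic_add() returns the value of th_nr before the
            // decrement, so (count == 1) means this was the last thread
            // of the local process descriptor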

            // release memory allocated for thread descriptor
            thread_destroy( thread );

            hal_fence();

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle );
#endif
            // destroy process descriptor if last thread
            if( count == 1 )
            {
                // delete process
                process_destroy( process );

#if DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] process %x in cluster %x deleted / cycle %d\n",
__FUNCTION__ , process->pid , local_cxy , cycle );
#endif
            }
        }
    }  // end user threads

    ////// scan kernel threads for DELETE only
    root = &sched->k_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_DELETE only if target thread != calling thread
        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
        {

// check process descriptor is local kernel process
assert( ( thread->process == &process_zero ) , "illegal process descriptor\n");

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->k_threads_nr;
            sched->k_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
            if( sched->k_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->k_last = NULL;
                }
                else if( sched->k_root.next == &thread->sched_list )
                {
                    sched->k_last = sched->k_root.pred;
                }
                else
                {
                    sched->k_last = sched->k_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

            // get number of threads in local kernel process
            count = process_zero.th_nr;

// check th_nr value
assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0\n" );

            // remove thread from process th_tbl[]
            process_zero.th_tbl[ltid] = NULL;
            hal_atomic_add( &process_zero.th_nr , - 1 );

            // delete thread descriptor
            thread_destroy( thread );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
#endif
        }
    }
} // end sched_handle_signals()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the sched_yield() function when the RPC_FIFO
// associated to the core is not empty.
// It searches for an idle RPC thread attached to this core, and unblocks it if found.
// It creates a new RPC thread if no idle RPC thread is found.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_rpc_activate( scheduler_t * sched )
{
    error_t         error;
    thread_t      * thread;
    list_entry_t  * iter;
    lid_t           lid = CURRENT_THREAD->core->lid;
    bool_t          found = false;

    // search one IDLE RPC thread associated to the selected core
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

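        // an idle RPC server thread has only the THREAD_BLOCKED_IDLE bit set
        // in its blocked bit-vector, hence the strict equality test below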
        if( (thread->type == THREAD_RPC) &&
            (thread->blocked == THREAD_BLOCKED_IDLE ) )
        {
            found = true;
            break;
        }
    }

    if( found == false )     // create new RPC thread
    {
        error = thread_kernel_create( &thread,
                                      THREAD_RPC,
                                      &rpc_server_func,
                                      NULL,
                                      lid );
        // check memory
        if ( error )
        {
            printk("\n[ERROR] in %s : no memory to create a RPC thread in cluster %x\n",
            __FUNCTION__, local_cxy );
        }
        else
        {
            // unblock created RPC thread
            thread->blocked = 0;

            // update RPC threads counter
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
printk("\n[%s] new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
#endif
        }
    }
    else                 // RPC thread found => unblock it
    {
        // unblock found RPC thread
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
printk("\n[%s] idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
#endif

    }

} // end sched_rpc_activate()



///////////////////////////////////////////////////////////////////////////////////////////
//         public functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;               // initialized in kernel_init()
    sched->u_last         = NULL;               // initialized in sched_register_thread()
    sched->k_last         = NULL;               // initialized in sched_register_thread()

    // initialise threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    // init lock
    busylock_init( &sched->lock , LOCK_SCHED_STATE );

    sched->req_ack_pending = false;             // no pending request
    sched->trace           = false;             // context switches trace deactivated

}  // end sched_init()

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    busylock_release( &sched->lock );

}  // end sched_register_thread()

//////////////////////////////////////
void sched_yield( const char * cause )
{
    thread_t      * next;
    thread_t      * current = CURRENT_THREAD;
    core_t        * core    = current->core;
    lid_t           lid     = core->lid;
    scheduler_t   * sched   = &core->scheduler;
    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];

#if (DEBUG_SCHED_YIELD & 0x1)
// if( sched->trace )
if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
sched_display( lid );
#endif

// This assert should never be false, as this check has been
// done before, by any function that can possibly deschedule...
assert( (current->busylocks == 0),
"unexpected descheduling of thread holding %d busylocks\n", current->busylocks );

    // activate or create an RPC thread if RPC_FIFO non empty
    if( remote_fifo_is_empty( fifo ) == false )  sched_rpc_activate( sched );

    // disable IRQs / save SR in current thread descriptor
    hal_disable_irq( &current->save_sr );

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    // select next thread
    next = sched_select( sched );

// check next thread kernel_stack overflow
assert( (next->signature == THREAD_SIGNATURE),
"kernel stack overflow for thread %x on core[%x,%d]\n", next, local_cxy, lid );

// check next thread attached to same core as the calling thread
assert( (next->core == current->core),
"next core %x != current core %x\n", next->core, current->core );

// check next thread not blocked when type != IDLE
assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
"next thread %x (%s) is blocked on core[%x,%d]\n",
next->trdid , thread_type_str(next->type) , local_cxy , lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {
        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;
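
        // (u_last / k_last record the newly elected thread, used as the
        //  starting point of the next sched_select() scan on this core)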

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
// if( sched->trace )
if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, next,
thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
#endif

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
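
        // NOTE : when this thread is selected again by a future sched_yield(),
        // execution resumes here, just after the hal_do_cpu_switch() call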
    }
    else
    {
        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if (DEBUG_SCHED_YIELD & 1)
// if( sched->trace )
if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()


///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;

// check lid
assert( (lid < LOCAL_CLUSTER->cores_nr),
"illegal core index %d\n", lid);

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    local_cxy , lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
    (uint32_t)hal_get_cycles() );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if (thread->type == THREAD_DEV)
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags, thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked, thread->flags );
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end sched_display()

/////////////////////////////////////
void sched_remote_display( cxy_t cxy,
                           lid_t lid )
{
    thread_t     * thread;

// check cxy
assert( (cluster_is_undefined( cxy ) == false),
"illegal cluster %x\n", cxy );

assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
"illegal core index %d\n", lid );

    // get local pointer on target scheduler
    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t * sched = &core->scheduler;

    // get local pointer on current thread in target scheduler
    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );

    // get local pointer on the first kernel and user threads list_entry
    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // get rpc_threads
    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );

    // display header
    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );

    // display kernel threads
    while( k_entry != &sched->k_root )
    {
        // get local pointer on kernel_thread
        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );

        // display thread info
        if (type == THREAD_DEV)
        {
            char      name[16];
            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );

            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags );
        }

        // get next remote kernel thread list_entry
        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
    }

    // display user threads
    while( u_entry != &sched->u_root )
    {
        // get local pointer on user_thread
        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );

        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( type ), pid, trdid, thread, blocked, flags );

        // get next user thread list_entry
        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end sched_remote_display()
