source: trunk/kernel/kern/scheduler.c @ 628

Last change on this file since 628 was 625, checked in by alain, 5 years ago

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel whenever
the target process descriptor was in the reference cluster.
This release of physical pages should be done only when the page
forks counter value is zero.
All other modifications are cosmetic.

File size: 25.4 KB
1/*
2 * scheduler.c - Core scheduler implementation.
3 *
4 * Author    Alain Greiner (2016,2017,2018)
5 *
6 * Copyright (c)  UPMC Sorbonne Universites
7 *
8 * This file is part of ALMOS-MKH.
9 *
10 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2.0 of the License.
13 *
14 * ALMOS-MKH. is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
21 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include <kernel_config.h>
25#include <hal_kernel_types.h>
26#include <hal_switch.h>
27#include <hal_irqmask.h>
28#include <hal_context.h>
29#include <printk.h>
30#include <list.h>
31#include <rpc.h>
32#include <core.h>
33#include <thread.h>
34#include <chdev.h>
35#include <scheduler.h>
36
37
38///////////////////////////////////////////////////////////////////////////////////////////
39//         global variables
40///////////////////////////////////////////////////////////////////////////////////////////
41
42extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
43extern process_t            process_zero;       // allocated in kernel_init.c
44
45///////////////////////////////////////////////////////////////////////////////////////////
46//         private functions
47///////////////////////////////////////////////////////////////////////////////////////////
48
49
50////////////////////////////////////////////////////////////////////////////////////////////
51// This static function does NOT modify the scheduler state.
52// It just selects a thread in the list of attached threads, implementing the following
53// three-step policy:
54// 1) It scans the list of kernel threads, starting from the thread following the last
55//    executed one, and returns the first runnable thread found: not IDLE, not blocked,
56//    client queue not empty. It can be the current thread.
57// 2) If no kernel thread is found, it scans the list of user threads, starting from the
58//    thread following the last executed one, and returns the first runnable thread
59//    found: not blocked. It can be the current thread.
60// 3) If no runnable thread is found, it returns the idle thread.
61////////////////////////////////////////////////////////////////////////////////////////////
62// @ sched   : local pointer on scheduler.
63// @ returns pointer on selected thread descriptor
64////////////////////////////////////////////////////////////////////////////////////////////
65thread_t * sched_select( scheduler_t * sched )
66{
67    thread_t     * thread;
68    list_entry_t * current;
69    list_entry_t * last;
70    list_entry_t * root;
71    bool_t         done;
72    uint32_t       count;
73
74    // first : scan the kernel threads list if not empty
75    if( list_is_empty( &sched->k_root ) == false )
76    {
77        root    = &sched->k_root;
78        last    = sched->k_last;
79        done    = false;
80        count   = 0;
81        current = last;
82
83        while( done == false )
84        {
85
86// check kernel threads list
87assert( (count < sched->k_threads_nr), "bad kernel threads list" );
88
89            // get next entry in kernel list
90            current = current->next;
91
92            // check exit condition
93            if( current == last ) done = true;
94
95            // skip the root that does not contain a thread
96            if( current == root ) continue;
97            else                  count++;
98
99            // get thread pointer for this entry
100            thread = LIST_ELEMENT( current , thread_t , sched_list );
101
102            // select kernel thread if non blocked and non THREAD_IDLE
103            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) ) return thread;
104
105        } // end loop on kernel threads
106    } // end kernel threads
107
108    // second : scan the user threads list if not empty
109    if( list_is_empty( &sched->u_root ) == false )
110    {
111        root    = &sched->u_root;
112        last    = sched->u_last;
113        done    = false;
114        count   = 0;
115        current = last;
116
117        while( done == false )
118        {
119
120// check user threads list
121assert( (count < sched->u_threads_nr), "bad user threads list" );
122
123            // get next entry in user list
124            current = current->next;
125
126            // check exit condition
127            if( current == last ) done = true;
128
129            // skip the root that does not contain a thread
130            if( current == root ) continue;
131            else                  count++;
132
133            // get thread pointer for this entry
134            thread = LIST_ELEMENT( current , thread_t , sched_list );
135
136            // select thread if non blocked
137            if( thread->blocked == 0 )  return thread;
138
139        } // end loop on user threads
140    } // end user threads
141
142    // third : return idle thread if no other runnable thread
143    return sched->idle;
144
145}  // end sched_select()
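
/*
 * Illustrative sketch (not part of the original file) : the round-robin scan used by
 * sched_select(), reduced to its core pattern. Starting from the last served entry, it
 * follows the next links, skips the list root (which holds no thread), and gives up
 * after one full turn. The round_robin_scan() name and the is_runnable() predicate are
 * hypothetical, introduced only to isolate the traversal logic.
 *
 *     static thread_t * round_robin_scan( list_entry_t * root,
 *                                         list_entry_t * last )
 *     {
 *         list_entry_t * current = last;
 *         do
 *         {
 *             current = current->next;
 *             if( current == root ) continue;      // root entry contains no thread
 *             thread_t * t = LIST_ELEMENT( current , thread_t , sched_list );
 *             if( is_runnable( t ) ) return t;     // hypothetical predicate
 *         }
 *         while( current != last );
 *         return NULL;                             // caller falls back to the idle thread
 *     }
 */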
146
147////////////////////////////////////////////////////////////////////////////////////////////
148// This static function is the only function that can actually delete a thread,
149// (and the associated process descriptor if required).
150// It is private, because it is only called by the sched_yield() public function.
151// It scans all threads attached to a given scheduler, and executes the relevant
152// actions for two types of pending requests:
153//
154// - REQ_ACK : it checks that the target thread is blocked, decrements the response
155//   counter to acknowledge the client thread, and resets the pending request.
156// - REQ_DELETE : it removes the target thread from the process th_tbl[], removes it
157//   from the scheduler list, and releases the memory allocated to the thread descriptor.
158//   For a user thread, it also destroys the process descriptor if the target thread is
159//   the last thread of the local process descriptor.
160//
161// Implementation note:
162// We use a while loop to scan the threads in the scheduler lists, because some threads
163// can be destroyed during the scan, so we cannot use a LIST_FOREACH().
164////////////////////////////////////////////////////////////////////////////////////////////
165// @ core    : local pointer on the core descriptor.
166////////////////////////////////////////////////////////////////////////////////////////////
167static void sched_handle_signals( core_t * core )
168{
169
170    list_entry_t * iter;
171    list_entry_t * root;
172    thread_t     * thread;
173    process_t    * process;
174    scheduler_t  * sched;
175    uint32_t       threads_nr;   // number of threads in scheduler list
176    ltid_t         ltid;         // thread local index
177    uint32_t       count;        // number of threads in local process
178
179    // get pointer on scheduler
180    sched = &core->scheduler;
181
182    ////////////////// scan user threads to handle both ACK and DELETE requests
183    root = &sched->u_root;
184    iter = root->next;
185    while( iter != root )
186    {
187        // get pointer on thread
188        thread = LIST_ELEMENT( iter , thread_t , sched_list );
189
190        // increment iterator
191        iter = iter->next;
192
193        // handle REQ_ACK
194        if( thread->flags & THREAD_FLAG_REQ_ACK )
195        {
196
197// check thread blocked
198assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" );
199 
200            // decrement response counter
201            hal_atomic_add( thread->ack_rsp_count , -1 );
202
203            // reset REQ_ACK in thread descriptor
204            thread_reset_req_ack( thread );
205        }
206
207        // handle REQ_DELETE only if target thread != calling thread
208        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
209        {
210            // get thread process descriptor
211            process = thread->process;
212
213            // get thread ltid
214            ltid = LTID_FROM_TRDID( thread->trdid);
215
216            // take the lock protecting scheduler state
217            busylock_acquire( &sched->lock );
218
219            // update scheduler state
220            threads_nr = sched->u_threads_nr;
221            sched->u_threads_nr = threads_nr - 1;
222            list_unlink( &thread->sched_list );
223            if( sched->u_last == &thread->sched_list )
224            {
225                if( threads_nr == 1 ) 
226                {
227                    sched->u_last = NULL;
228                }
229                else if( sched->u_root.next == &thread->sched_list )
230                {
231                    sched->u_last = sched->u_root.pred;
232                }
233                else
234                {
235                    sched->u_last = sched->u_root.next;
236                }
237            }
238
239            // release the lock protecting scheduler state
240            busylock_release( &sched->lock );
241
242            // release memory allocated for thread
243            count = thread_destroy( thread );
244
245            hal_fence();
246
247#if DEBUG_SCHED_HANDLE_SIGNALS
248uint32_t cycle = (uint32_t)hal_get_cycles();
249if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
250printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / %d threads / cycle %d\n",
251__FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, count, cycle );
252#endif
253            // destroy process descriptor if last thread
254            if( count == 1 ) 
255            {
256                // delete process   
257                process_destroy( process );
258
259#if DEBUG_SCHED_HANDLE_SIGNALS
260cycle = (uint32_t)hal_get_cycles();
261if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
262printk("\n[%s] process %x in cluster %x deleted / cycle %d\n",
263__FUNCTION__ , process->pid , local_cxy , cycle );
264#endif
265            }
266        }
267    }  // end user threads
268
269    ///////////// scan kernel threads for DELETE only
270    root = &sched->k_root;
271    iter = root->next;
272    while( iter != root )
273    {
274        // get pointer on thread
275        thread = LIST_ELEMENT( iter , thread_t , sched_list );
276
277        // increment iterator
278        iter = iter->next;
279
280        // handle REQ_DELETE only if target thread != calling thread
281        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
282        {
283
284// check process descriptor is local kernel process
285assert( ( thread->process == &process_zero ) , "illegal process descriptor");
286
287            // get thread ltid
288            ltid = LTID_FROM_TRDID( thread->trdid);
289
290            // take the lock protecting scheduler state
291            busylock_acquire( &sched->lock );
292
293            // update scheduler state
294            threads_nr = sched->k_threads_nr;
295            sched->k_threads_nr = threads_nr - 1;
296            list_unlink( &thread->sched_list );
297            if( sched->k_last == &thread->sched_list )
298            {
299                if( threads_nr == 1 ) 
300                {
301                    sched->k_last = NULL;
302                }
303                else if( sched->k_root.next == &thread->sched_list )
304                {
305                    sched->k_last = sched->k_root.pred;
306                }
307                else
308                {
309                    sched->k_last = sched->k_root.next;
310                }
311            }
312
313            // release the lock protecting scheduler state
314            busylock_release( &sched->lock );
315
316            // get number of threads in local kernel process
317            count = process_zero.th_nr;
318
319// check th_nr value
320assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0" );
321
322            // remove thread from process th_tbl[]
323            process_zero.th_tbl[ltid] = NULL;
324            hal_atomic_add( &process_zero.th_nr , - 1 );
325 
326            // delete thread descriptor
327            thread_destroy( thread );
328
329#if DEBUG_SCHED_HANDLE_SIGNALS
330uint32_t cycle = (uint32_t)hal_get_cycles();
331if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
332printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
333__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
334#endif
335        }
336    }
337} // end sched_handle_signals()
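
/*
 * Illustrative sketch (not part of the original file) : client side of the REQ_ACK
 * handshake handled above. A client thread first blocks the target globally, then posts
 * the request together with a pointer on its local response counter, and finally polls
 * this counter until the scheduler of the target core has run sched_handle_signals().
 * The thread_block() and thread_set_req_ack() helpers are assumed here, as counterparts
 * of the thread_unblock() and thread_reset_req_ack() calls found in this file; only the
 * polling pattern matters.
 *
 *     uint32_t ack_count = 1;
 *     thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );   // assumed API
 *     thread_set_req_ack( target , &ack_count );                            // assumed API
 *     while( ack_count != 0 ) sched_yield( "wait target ACK" );
 */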
338
339////////////////////////////////////////////////////////////////////////////////////////////
340// This static function is called by the sched_yield() function when the RPC_FIFO
341// associated with the core is not empty.
342// It searches for an idle RPC thread for this core, and unblocks it if found.
343// It creates a new RPC thread if no idle RPC thread is found.
344////////////////////////////////////////////////////////////////////////////////////////////
345// @ sched   : local pointer on scheduler.
346////////////////////////////////////////////////////////////////////////////////////////////
347static void sched_rpc_activate( scheduler_t * sched )
348{
349    error_t         error;
350    thread_t      * thread; 
351    list_entry_t  * iter;
352    lid_t           lid = CURRENT_THREAD->core->lid;
353    bool_t          found = false;
354
355    // search one IDLE RPC thread associated with the selected core
356    LIST_FOREACH( &sched->k_root , iter )
357    {
358        thread = LIST_ELEMENT( iter , thread_t , sched_list );
359
360        if( (thread->type == THREAD_RPC) && 
361            (thread->blocked == THREAD_BLOCKED_IDLE ) ) 
362        {
363            found = true;
364            break;
365        }
366    }
367
368    if( found == false )     // create new RPC thread     
369    {
370        error = thread_kernel_create( &thread,
371                                      THREAD_RPC,
372                                      &rpc_server_func,
373                                      NULL,
374                                      lid );
375        // check memory
376        if ( error )
377        {
378            printk("\n[ERROR] in %s : no memory to create an RPC thread in cluster %x\n",
379            __FUNCTION__, local_cxy );
380        }
381        else
382        {
383            // unblock created RPC thread
384            thread->blocked = 0;
385
386            // update RPC threads counter 
387            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );
388
389#if DEBUG_SCHED_RPC_ACTIVATE
390uint32_t cycle = (uint32_t)hal_get_cycles();
391if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 
392printk("\n[%s] new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
393__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
394#endif
395        }
396    }
397    else                 // RPC thread found => unblock it
398    {
399        // unblock found RPC thread
400        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
401
402#if DEBUG_SCHED_RPC_ACTIVATE
403uint32_t cycle = (uint32_t)hal_get_cycles();
404if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 
405printk("\n[%s] idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
406__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
407#endif
408
409    }
410
411} // end sched_rpc_activate()
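
/*
 * Illustrative sketch (not part of the original file) : the counterpart of the activation
 * above, i.e. what an RPC server thread is assumed to do when its FIFO becomes empty.
 * It blocks itself on THREAD_BLOCKED_IDLE and deschedules, so that a later call to
 * sched_rpc_activate() finds an idle thread to unblock instead of creating a new one.
 * This is a simplified view of the rpc_server_func() loop, not its actual code; the
 * thread_block() function is assumed.
 *
 *     while( 1 )
 *     {
 *         // ... consume all requests found in the local RPC FIFO ...
 *
 *         thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IDLE );
 *         sched_yield( "RPC fifo empty" );
 *     }
 */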
412
413
414
415///////////////////////////////////////////////////////////////////////////////////////////
416//         public functions
417///////////////////////////////////////////////////////////////////////////////////////////
418
419////////////////////////////////
420void sched_init( core_t * core )
421{
422    scheduler_t * sched = &core->scheduler;
423
424    sched->u_threads_nr   = 0;
425    sched->k_threads_nr   = 0;
426
427    sched->current        = CURRENT_THREAD;
428    sched->idle           = NULL;               // initialized in kernel_init()
429    sched->u_last         = NULL;               // initialized in sched_register_thread()
430    sched->k_last         = NULL;               // initialized in sched_register_thread()
431
432    // initialise the thread lists
433    list_root_init( &sched->u_root );
434    list_root_init( &sched->k_root );
435
436    // init lock
437    busylock_init( &sched->lock , LOCK_SCHED_STATE );
438
439    sched->req_ack_pending = false;             // no pending request
440    sched->trace           = false;             // context switch trace deactivated
441
442}  // end sched_init()
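
/*
 * Illustrative sketch (not part of the original file) : sched_init() is expected to be
 * called once per local core during kernel_init(), before any thread is registered.
 * The cores_nr field name is an assumption made for this sketch.
 *
 *     uint32_t lid;
 *     for( lid = 0 ; lid < LOCAL_CLUSTER->cores_nr ; lid++ )
 *     {
 *         sched_init( &LOCAL_CLUSTER->core_tbl[lid] );
 *     }
 */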
443
444////////////////////////////////////////////
445void sched_register_thread( core_t   * core,
446                            thread_t * thread )
447{
448    scheduler_t * sched = &core->scheduler;
449    thread_type_t type  = thread->type;
450
451    // take lock protecting scheduler state
452    busylock_acquire( &sched->lock );
453
454    if( type == THREAD_USER )
455    {
456        list_add_last( &sched->u_root , &thread->sched_list );
457        sched->u_threads_nr++;
458        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
459    }
460    else // kernel thread
461    {
462        list_add_last( &sched->k_root , &thread->sched_list );
463        sched->k_threads_nr++;
464        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list; 
465    }
466
467    // release lock
468    busylock_release( &sched->lock );
469
470}  // end sched_register_thread()
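
/*
 * Illustrative sketch (not part of the original file) : typical use of
 * sched_register_thread(). The thread creation path is expected to attach a fully
 * initialised thread descriptor to the scheduler of the core it has been placed on.
 * The surrounding code is hypothetical.
 *
 *     thread->core = &LOCAL_CLUSTER->core_tbl[lid];
 *     sched_register_thread( thread->core , thread );    // thread becomes schedulable
 */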
471
472//////////////////////////////////////////////////////////////////
473void sched_yield( const char * cause __attribute__((__unused__)) )
474{
475    thread_t      * next;
476    thread_t      * current = CURRENT_THREAD;
477    core_t        * core    = current->core;
478    lid_t           lid     = core->lid;
479    scheduler_t   * sched   = &core->scheduler;
480    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid]; 
481 
482#if (DEBUG_SCHED_YIELD & 0x1)
483// if( sched->trace )
484if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
485sched_display( lid );
486#endif
487
488// This assert should never be false, as this check has already been
489// done by any function that can possibly deschedule...
490assert( (current->busylocks == 0),
491"unexpected descheduling of thread holding %d busylocks\n", current->busylocks );
492
493    // activate or create an RPC thread if RPC_FIFO non empty
494    if( remote_fifo_is_empty( fifo ) == false )  sched_rpc_activate( sched );
495
496    // disable IRQs / save SR in current thread descriptor
497    hal_disable_irq( &current->save_sr );
498
499    // take lock protecting scheduler state
500    busylock_acquire( &sched->lock );
501   
502    // select next thread
503    next = sched_select( sched );
504
505// check next thread kernel_stack overflow
506assert( (next->signature == THREAD_SIGNATURE),
507"kernel stack overflow for thread %x on core[%x,%d]", next, local_cxy, lid );
508
509// check next thread attached to same core as the calling thread
510assert( (next->core == current->core),
511"next core %x != current core %x", next->core, current->core );
512
513// check next thread not blocked when type != IDLE
514assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
515"next thread %x (%s) is blocked on core[%x,%d]", 
516next->trdid , thread_type_str(next->type) , local_cxy , lid );
517
518    // switch contexts and update scheduler state if next != current
519    if( next != current )
520    {
521        // update scheduler
522        sched->current = next;
523        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
524        else                            sched->k_last = &next->sched_list;
525
526        // handle FPU ownership
527        if( next->type == THREAD_USER )
528        {
529            if( next == current->core->fpu_owner )  hal_fpu_enable();
530            else                                    hal_fpu_disable();
531        }
532
533        // release lock protecting scheduler state
534        busylock_release( &sched->lock );
535
536#if DEBUG_SCHED_YIELD
537// if( sched->trace )
538if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
539printk("\n[%s] core[%x,%d] / cause = %s\n"
540"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
541__FUNCTION__, local_cxy, lid, cause, 
542current, thread_type_str(current->type), current->process->pid, current->trdid,next ,
543thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
544#endif
545
546        // switch CPU from current thread context to new thread context
547        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
548    }
549    else
550    {
551        // release lock protecting scheduler state
552        busylock_release( &sched->lock );
553
554#if (DEBUG_SCHED_YIELD & 1)
555// if( sched->trace )
556if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
557printk("\n[%s] core[%x,%d] / cause = %s\n"
558"      thread %x (%s) (%x,%x) continue / cycle %d\n",
559__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
560current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
561#endif
562
563    }
564
565    // handle pending requests for all threads executing on this core.
566    sched_handle_signals( core );
567
568    // exit critical section / restore SR from current thread descriptor
569    hal_restore_irq( CURRENT_THREAD->save_sr );
570
571}  // end sched_yield()
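
/*
 * Illustrative sketch (not part of the original file) : the usual calling pattern for
 * sched_yield(). A thread that must wait for an event blocks itself first, then
 * deschedules; the cause string is only used by the debug trace. The thread_block()
 * function is assumed to exist as the counterpart of the thread_unblock() call used in
 * sched_rpc_activate() above.
 *
 *     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_GLOBAL );
 *     sched_yield( "waiting event" );
 *     // execution resumes here after another thread calls thread_unblock()
 */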
572
573
574///////////////////////////////
575void sched_display( lid_t lid )
576{
577    list_entry_t * iter;
578    thread_t     * thread;
579
580    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
581    scheduler_t  * sched   = &core->scheduler;
582   
583    // get pointers on TXT0 chdev
584    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
585    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
586    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
587
588    // get extended pointer on remote TXT0 lock
589    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
590
591    // get TXT0 lock
592    remote_busylock_acquire( lock_xp );
593
594    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
595    local_cxy , lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
596    (uint32_t)hal_get_cycles() );
597
598    // display kernel threads
599    LIST_FOREACH( &sched->k_root , iter )
600    {
601        thread = LIST_ELEMENT( iter , thread_t , sched_list );
602        if (thread->type == THREAD_DEV) 
603        {
604            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
605            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
606            thread, thread->blocked, thread->flags, thread->chdev->name );
607        }
608        else
609        {
610            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
611            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
612            thread, thread->blocked, thread->flags );
613        }
614    }
615
616    // display user threads
617    LIST_FOREACH( &sched->u_root , iter )
618    {
619        thread = LIST_ELEMENT( iter , thread_t , sched_list );
620        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
621        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
622        thread, thread->blocked, thread->flags );
623    }
624
625    // release TXT0 lock
626    remote_busylock_release( lock_xp );
627
628}  // end sched_display()
629
630/////////////////////////////////////
631void sched_remote_display( cxy_t cxy,
632                           lid_t lid )
633{
634    thread_t     * thread;
635
636    // get local pointer on target scheduler
637    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
638    scheduler_t * sched = &core->scheduler;
639
640    // get local pointer on current thread in target scheduler
641    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );
642
643    // get local pointer on the first kernel and user threads list_entry
644    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
645    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );
646   
647    // get pointers on TXT0 chdev
648    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
649    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
650    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
651
652    // get extended pointer on remote TXT0 chdev lock
653    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
654
655    // get TXT0 lock
656    remote_busylock_acquire( lock_xp );
657
658    // get rpc_threads
659    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );
660 
661    // display header
662    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
663    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
664
665    // display kernel threads
666    while( k_entry != &sched->k_root )
667    {
668        // get local pointer on kernel_thread
669        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );
670
671        // get relevant thread info
672        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
673        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
674        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
675        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
676        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
677        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
678
679        // display thread info
680        if (type == THREAD_DEV) 
681        {
682            char      name[16];
683            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
684            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );
685
686            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
687            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
688        }
689        else
690        {
691            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
692            thread_type_str( type ), pid, trdid, thread, blocked, flags );
693        }
694
695        // get next remote kernel thread list_entry
696        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
697    }
698
699    // display user threads
700    while( u_entry != &sched->u_root )
701    {
702        // get local pointer on user_thread
703        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );
704
705        // get relevant thread info
706        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
707        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
708        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
709        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
710        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
711        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
712
713        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
714        thread_type_str( type ), pid, trdid, thread, blocked, flags );
715
716        // get next user thread list_entry
717        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
718    }
719
720    // release TXT0 lock
721    remote_busylock_release( lock_xp );
722
723}  // end sched_remote_display()
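
/*
 * Illustrative sketch (not part of the original file) : sched_display() reports the
 * scheduler state of a local core, while sched_remote_display() lets any cluster inspect
 * another one through the hal_remote_* accessors. A debug hook could, for instance, dump
 * core 0 of cluster 0 from anywhere with:
 *
 *     sched_remote_display( 0 , 0 );     // ( cxy , lid )
 */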
724
725