source: trunk/kernel/kern/cluster.c @ 440

Last change on this file since 440 was 440, checked in by alain, 6 years ago

1/ Fix a bug in the Multithreaded "sort" application:
The pthread_create() arguments must be declared as global variables.
2/ The exit syscall can be called by any thread of a process.

File size: 18.5 KB
RevLine 
[1]1/*
2 * cluster.c - Cluster-Manager related operations
[19]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
[437]6 *         Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH..
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[1]27#include <hal_types.h>
28#include <hal_atomic.h>
29#include <hal_special.h>
[50]30#include <hal_ppm.h>
[407]31#include <remote_fifo.h>
[1]32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
36#include <scheduler.h>
37#include <list.h>
38#include <cluster.h>
39#include <boot_info.h>
40#include <bits.h>
41#include <ppm.h>
42#include <thread.h>
43#include <kmem.h>
44#include <process.h>
45#include <dqdt.h>
46
[408]47/////////////////////////////////////////////////////////////////////////////////////
[1]48// Extern global variables
[408]49/////////////////////////////////////////////////////////////////////////////////////
[1]50
[23]51extern process_t process_zero;     // allocated in kernel_init.c file
[1]52
53
54/////////////////////////////////////////////////
55error_t cluster_init( struct boot_info_s * info )
56{
[428]57    error_t         error;
58    lpid_t          lpid;     // local process_index
59    lid_t           lid;      // local core index
60    uint32_t        i;        // index in loop on external peripherals
61    boot_device_t * dev;      // pointer on external peripheral
62    uint32_t        func;     // external peripheral functionnal type
[1]63
64        cluster_t * cluster = LOCAL_CLUSTER;
65
66    // initialize cluster global parameters
[19]67        cluster->paddr_width     = info->paddr_width;
[1]68        cluster->x_width         = info->x_width;
69        cluster->y_width         = info->y_width;
70        cluster->x_size          = info->x_size;
71        cluster->y_size          = info->y_size;
72        cluster->io_cxy          = info->io_cxy;
73
[428]74    // initialize external peripherals channels
75    for( i = 0 ; i < info->ext_dev_nr ; i++ )
76    {
77        dev  = &info->ext_dev[i];
78        func = FUNC_FROM_TYPE( dev->type );   
79        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
80        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
81        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
82        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
83    }
84
[1]85    // initialize cluster local parameters
86        cluster->cores_nr        = info->cores_nr;
87
[19]88    // initialize the lock protecting the embedded kcm allocator
[1]89        spinlock_init( &cluster->kcm_lock );
90
[438]91#if DEBUG_CLUSTER_INIT
[433]92uint32_t cycle = (uint32_t)hal_get_cycles();
[438]93if( DEBUG_CLUSTER_INIT < cycle )
[437]94printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
95__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
[433]96#endif
[50]97
[19]98    // initialises DQDT
99    cluster->dqdt_root_level = dqdt_init( info->x_size,
100                                          info->y_size,
[438]101                                          info->y_width ) - 1;
[1]102
103    // initialises embedded PPM
[50]104        error = hal_ppm_init( info );
[1]105
[50]106    if( error )
107    {
108        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
109               __FUNCTION__ , local_cxy );
110        return ENOMEM;
111    }
112
[438]113#if( DEBUG_CLUSTER_INIT & 1 )
[433]114cycle = (uint32_t)hal_get_cycles();
[438]115if( DEBUG_CLUSTER_INIT < cycle )
[437]116printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
[433]117__FUNCTION__ , local_cxy , cycle );
118#endif
[50]119
[1]120    // initialises embedded KHM
121        khm_init( &cluster->khm );
[19]122
[438]123#if( DEBUG_CLUSTER_INIT & 1 )
[437]124uint32_t cycle = (uint32_t)hal_get_cycles();
[438]125if( DEBUG_CLUSTER_INIT < cycle )
[437]126printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
127__FUNCTION__ , local_cxy , hal_get_cycles() );
128#endif
[50]129
[19]130    // initialises embedded KCM
[5]131        kcm_init( &cluster->kcm , KMEM_KCM );
[1]132
[438]133#if( DEBUG_CLUSTER_INIT & 1 )
[437]134uint32_t cycle = (uint32_t)hal_get_cycles();
[438]135if( DEBUG_CLUSTER_INIT < cycle )
[437]136printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
137__FUNCTION__ , local_cxy , hal_get_cycles() );
138#endif
[50]139
[296]140    // initialises all cores descriptors
[1]141        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
142        {
143                core_init( &cluster->core_tbl[lid],    // target core descriptor
144                       lid,                        // local core index
145                       info->core[lid].gid );      // gid from boot_info_t
146        }
[19]147
[438]148#if( DEBUG_CLUSTER_INIT & 1 )
[433]149cycle = (uint32_t)hal_get_cycles();
[438]150if( DEBUG_CLUSTER_INIT < cycle )
[437]151printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
[433]152__FUNCTION__ , local_cxy , cycle );
153#endif
[50]154
[440]155    // initialises RPC FIFOs
156        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
157    {
158            local_fifo_init( &cluster->rpc_fifo[lid] );
159        cluster->rpc_threads[lid] = 0;
160    }
[1]161
[438]162#if( DEBUG_CLUSTER_INIT & 1 )
[437]163cycle = (uint32_t)hal_get_cycles();
[438]164if( DEBUG_CLUSTER_INIT < cycle )
[437]165printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
[407]166__FUNCTION__ , local_cxy , hal_get_cycles() );
[437]167#endif
[50]168
[1]169    // initialise pref_tbl[] in process manager
170        spinlock_init( &cluster->pmgr.pref_lock );
171    cluster->pmgr.pref_nr = 0;
[19]172    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
[1]173    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
174    {
175        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
176    }
177
178    // initialise local_list in process manager
[23]179        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
180    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
[1]181    cluster->pmgr.local_nr = 0;
182
183    // initialise copies_lists in process manager
[101]184    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
[1]185    {
186            remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
187        cluster->pmgr.copies_nr[lpid] = 0;
188        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
[19]189    }
[1]190
[438]191#if DEBUG_CLUSTER_INIT
[433]192cycle = (uint32_t)hal_get_cycles();
[438]193if( DEBUG_CLUSTER_INIT < cycle )
[437]194printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
195__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
[433]196#endif
[50]197
[124]198    hal_fence();
[1]199
200        return 0;
201} // end cluster_init()
202
203////////////////////////////////////////
204bool_t cluster_is_undefined( cxy_t cxy )
205{
206    cluster_t * cluster = LOCAL_CLUSTER;
207
208    uint32_t y_width = cluster->y_width;
209
210    uint32_t x = cxy >> y_width;
211    uint32_t y = cxy & ((1<<y_width)-1);
212
[19]213    if( x >= cluster->x_size ) return true;
214    if( y >= cluster->y_size ) return true;
[1]215
216    return false;
217}
218
219////////////////////////////////////////////////////////////////////////////////////
220//  Cores related functions
221////////////////////////////////////////////////////////////////////////////////////
222
223/////////////////////////////////
224lid_t cluster_select_local_core()
225{
[440]226    uint32_t      min = 1000;
227    lid_t         sel = 0;
228    uint32_t      nthreads;
229    lid_t         lid;
230    scheduler_t * sched;
[1]231
232    cluster_t * cluster = LOCAL_CLUSTER;
233
234    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
235    {
[440]236        sched    = &cluster->core_tbl[lid].scheduler;
237        nthreads = sched->u_threads_nr + sched->k_threads_nr;
238
239        if( nthreads < min )
[1]240        {
[440]241            min = nthreads;
[1]242            sel = lid;
243        }
[19]244    }
[1]245    return sel;
246}
247
248////////////////////////////////////////////////////////////////////////////////////
[428]249//  Process related functions
[1]250////////////////////////////////////////////////////////////////////////////////////
251
[433]252
253//////////////////////////////////////////////////////
254xptr_t cluster_get_owner_process_from_pid( pid_t pid )
255{
256    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
[436]257    xptr_t      lock_xp;       // xptr on lock protecting this list
[433]258    xptr_t      iter_xp;       // iterator
259    xptr_t      current_xp;    // xptr on current process descriptor
260    process_t * current_ptr;   // local pointer on current process
261    pid_t       current_pid;   // current process identifier
262    bool_t      found;
263
264    cluster_t * cluster = LOCAL_CLUSTER;
265
266    // get owner cluster and lpid
267    cxy_t  owner_cxy = CXY_FROM_PID( pid );
268
269    // get lock & root of list of process in owner cluster
270    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
271    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
272
273    // take the lock protecting the list of processes
274    remote_spinlock_lock( lock_xp );
275
276    // scan list of processes in owner cluster
277    found = false;
278    XLIST_FOREACH( root_xp , iter_xp )
279    {
280        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
281        current_ptr = GET_PTR( current_xp );
282        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
283
284        if( current_pid == pid )
285        {
286            found = true;
287            break;
288        }
289    }
290
291    // release the lock protecting the list of processes
292    remote_spinlock_unlock( lock_xp );
293
294    // return extended pointer on process descriptor in owner cluster
295    if( found ) return current_xp;
296    else        return XPTR_NULL;
297
[436]298}  // end cluster_get_owner_process_from_pid()
299
[1]300//////////////////////////////////////////////////////////
301xptr_t cluster_get_reference_process_from_pid( pid_t pid )
[19]302{
[23]303    xptr_t ref_xp;   // extended pointer on reference process descriptor
[1]304
305    cluster_t * cluster = LOCAL_CLUSTER;
306
307    // get owner cluster and lpid
308    cxy_t  owner_cxy = CXY_FROM_PID( pid );
309    lpid_t lpid      = LPID_FROM_PID( pid );
310
[19]311    // Check valid PID
[23]312    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
[1]313
314    if( local_cxy == owner_cxy )   // local cluster is owner cluster
[19]315    {
[23]316        ref_xp = cluster->pmgr.pref_tbl[lpid];
[1]317    }
318    else                              // use a remote_lwd to access owner cluster
319    {
[23]320        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
[1]321    }
322
[23]323    return ref_xp;
[1]324}
325
[416]326///////////////////////////////////////////////
327error_t cluster_pid_alloc( process_t * process,
328                           pid_t     * pid )
[1]329{
330    lpid_t      lpid;
331    bool_t      found;
332
[440]333#if DEBUG_CLUSTER_PID_ALLOC
334uint32_t cycle = (uint32_t)hal_get_cycles();
335if( DEBUG_CLUSTER_PID_ALLOC < cycle )
336printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
337__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
338#endif
339
[1]340    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
341
342    // get the process manager lock
343    spinlock_lock( &pm->pref_lock );
344
345    // search an empty slot
346    found = false;
347    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
348    {
349        if( pm->pref_tbl[lpid] == XPTR_NULL )
350        {
351            found = true;
352            break;
353        }
354    }
355
356    if( found )
357    {
358        // register process in pref_tbl[]
[416]359        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
[1]360        pm->pref_nr++;
361
362        // returns pid
363        *pid = PID( local_cxy , lpid );
364
[416]365        // release the processs_manager lock
366        spinlock_unlock( &pm->pref_lock );
367
368        return 0;
[1]369    }
370    else
371    {
[416]372        // release the processs_manager lock
373        spinlock_unlock( &pm->pref_lock );
374
375        return -1;
[19]376    }
[1]377
[440]378#if DEBUG_CLUSTER_PID_ALLOC
379cycle = (uint32_t)hal_get_cycles();
380if( DEBUG_CLUSTER_PID_ALLOC < cycle )
381printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
382__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
383#endif
384
[1]385} // end cluster_pid_alloc()
386
387/////////////////////////////////////
/////////////////////////////////////
// Releases the PID slot <pid> in the local process manager: clears the
// corresponding pref_tbl[] entry and decrements the registered-process count.
// Must be called in the owner cluster of this PID, with a legal local index
// (both conditions are enforced by asserts).
// @ pid : process identifier to release.
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
#endif

    // owner cluster and local index are both encoded in the PID
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER), __FUNCTION__ ,
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the processs_manager lock
    spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

} // end cluster_pid_release()
429
430///////////////////////////////////////////////////////////
431process_t * cluster_get_local_process_from_pid( pid_t pid )
432{
[23]433    xptr_t         process_xp;
434    process_t    * process_ptr;
435    xptr_t         root_xp;
436    xptr_t         iter_xp;
437    bool_t         found;
[19]438
[23]439    found   = false;
440    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
441
442    XLIST_FOREACH( root_xp , iter_xp )
[1]443    {
[23]444        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
445        process_ptr = (process_t *)GET_PTR( process_xp );
446        if( process_ptr->pid == pid )
[1]447        {
[23]448            found = true;
[1]449            break;
450        }
451    }
452
[23]453    if (found ) return process_ptr;
454    else        return NULL;
455
[1]456}  // end cluster_get_local_process_from_pid()
457
458//////////////////////////////////////////////////////
459void cluster_process_local_link( process_t * process )
460{
[407]461    uint32_t irq_state;
[1]462    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
463
464    // get lock protecting the process manager local list
[407]465    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , & irq_state );
[1]466
[428]467    xlist_add_last( XPTR( local_cxy , &pm->local_root ),
468                    XPTR( local_cxy , &process->local_list ) );
[1]469    pm->local_nr++;
470
471    // release lock protecting the process manager local list
[407]472    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
[1]473}
474
475////////////////////////////////////////////////////////
476void cluster_process_local_unlink( process_t * process )
477{
[407]478    uint32_t irq_state;
[1]479    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
480
481    // get lock protecting the process manager local list
[407]482    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , &irq_state );
[1]483
[23]484    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
[1]485    pm->local_nr--;
486
487    // release lock protecting the process manager local list
[407]488    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
[1]489}
490
491///////////////////////////////////////////////////////
///////////////////////////////////////////////////////
// Registers the local <process> copy in the copies_list associated to its
// LPID, rooted in the owner cluster of the process PID, and atomically
// increments the owner cluster's copies counter.
// NOTE(review): remote pointers are built from the local pmgr address —
// this assumes the cluster manager has the same local address in every
// cluster; confirm against the cluster_t placement.
void cluster_process_copies_link( process_t * process )
{
    reg_t    irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid] (busy-waiting, IRQs saved)
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // add copy to copies_list and bump the owner's counter atomically
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()
536
[1]537/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Removes the local <process> copy from the copies_list associated to its
// LPID, rooted in the owner cluster of the process PID, and atomically
// decrements the owner cluster's copies counter.
// NOTE(review): as for the link operation, remote pointers are built from
// the local pmgr address — assumes identical local placement in all clusters.
void cluster_process_copies_unlink( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid] (busy-waiting, IRQs saved)
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // remove copy from copies_list and decrement the owner's counter atomically
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_unlink()
579
[428]580///////////////////////////////////////////
581void cluster_processes_display( cxy_t cxy )
[1]582{
[428]583    xptr_t        root_xp;
584    xptr_t        iter_xp;
585    xptr_t        process_xp;     
[1]586
[428]587    // get extended pointer on root of process in cluster cxy
588    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
[1]589
[428]590    // skip one line
[433]591    printk("\n***** processes in cluster %x / cycle %d\n", cxy , (uint32_t)hal_get_cycles() );
[1]592
[428]593    // loop on all reference processes in cluster cxy
594    XLIST_FOREACH( root_xp , iter_xp )
595    {
596        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
597        process_display( process_xp );
598    }
599}  // end cluster_processes_display()
[1]600
[19]601
Note: See TracBrowser for help on using the repository browser.