source: trunk/kernel/kern/cluster.c @ 560

Last change on this file since 560 was 557, checked in by nicolas.van.phan@…, 6 years ago

Add cluster_info[][] array in cluster descriptor

File size: 21.5 KB
Line 
1/*
2 * cluster.c - Cluster-Manager related operations
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
6 *         Alain Greiner (2016,2017,2018)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH..
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_atomic.h>
29#include <hal_special.h>
30#include <hal_ppm.h>
31#include <remote_fifo.h>
32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
36#include <chdev.h>
37#include <scheduler.h>
38#include <list.h>
39#include <cluster.h>
40#include <boot_info.h>
41#include <bits.h>
42#include <ppm.h>
43#include <thread.h>
44#include <kmem.h>
45#include <process.h>
46#include <dqdt.h>
47
48/////////////////////////////////////////////////////////////////////////////////////
49// Extern global variables
50/////////////////////////////////////////////////////////////////////////////////////
51
52extern process_t           process_zero;     // allocated in kernel_init.c file
53extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
54
///////////////////////////////////////////////
// Initialize the local cluster descriptor from the boot_info structure:
// global topology parameters, the cluster_info[][] map, external peripheral
// channel counts, the DQDT, the embedded memory allocators (PPM / KHM / KCM),
// the core descriptors, the RPC FIFOs, and the embedded process manager.
// Called once per cluster during kernel initialization.
// @ info   : pointer on the boot_info structure built by the boot-loader.
// @ return : 0 on success / ENOMEM if the PPM cannot be initialized.
error_t cluster_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process_index
    lid_t           lid;      // local core index
    uint32_t        i;        // index in loop on external peripherals
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functionnal type

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->x_max           = info->x_max; // [FIXME]
    cluster->y_max           = info->y_max; // [FIXME]
    cluster->io_cxy          = info->io_cxy;

    // initialize the cluster_info[][] array
    // (copy of the boot-time map of active clusters)
    int x;
    int y;
    for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++) {
        for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++) {
            cluster->cluster_info[x][y] = info->cluster_info[x][y];
        }
    }
    // initialize external peripherals channels
    // (one scan of the external device list; last device of each
    //  functional type wins if several are present)
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded kcm allocator
    spinlock_init( &cluster->kcm_lock );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // initialises DQDT
    // NOTE(review): assumes dqdt_init() returns the number of tree levels,
    // so the root level index is (levels - 1) — confirm against dqdt.c
    cluster->dqdt_root_level = dqdt_init( info->x_max, // [FIXME]
                                          info->y_max, // [FIXME]
                                          info->y_width ) - 1;

    // initialises embedded PPM
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises embedded KHM
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises all cores descriptors
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises RPC FIFOs (one FIFO and one RPC thread counter per core)
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        local_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialise pref_tbl[] in process manager
    // (slot 0 is reserved for the kernel process_zero)
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialise local_list in process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialise copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    hal_fence();

    return 0;
} // end cluster_init()
213
214////////////////////////////////////////
215bool_t cluster_is_undefined( cxy_t cxy )
216{
217    cluster_t * cluster = LOCAL_CLUSTER;
218
219    uint32_t y_width = cluster->y_width;
220
221    uint32_t x = cxy >> y_width;
222    uint32_t y = cxy & ((1<<y_width)-1);
223
224    if( x >= cluster->x_size ) return true;
225    if( y >= cluster->y_size ) return true;
226
227    return false;
228}
229
230////////////////////////////////////////////////////////////////////////////////////
231//  Cores related functions
232////////////////////////////////////////////////////////////////////////////////////
233
234/////////////////////////////////
235lid_t cluster_select_local_core( void )
236{
237    uint32_t      min = 1000;
238    lid_t         sel = 0;
239    uint32_t      nthreads;
240    lid_t         lid;
241    scheduler_t * sched;
242
243    cluster_t * cluster = LOCAL_CLUSTER;
244
245    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
246    {
247        sched    = &cluster->core_tbl[lid].scheduler;
248        nthreads = sched->u_threads_nr + sched->k_threads_nr;
249
250        if( nthreads < min )
251        {
252            min = nthreads;
253            sel = lid;
254        }
255    }
256    return sel;
257}
258
259////////////////////////////////////////////////////////////////////////////////////
260//  Process related functions
261////////////////////////////////////////////////////////////////////////////////////
262
263
264//////////////////////////////////////////////////////
265xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
266                                            pid_t pid )
267{
268    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
269    xptr_t      lock_xp;       // xptr on lock protecting this list
270    xptr_t      iter_xp;       // iterator
271    xptr_t      current_xp;    // xptr on current process descriptor
272    bool_t      found;
273
274    cluster_t * cluster = LOCAL_CLUSTER;
275
276    // get owner cluster and lpid
277    cxy_t   owner_cxy = CXY_FROM_PID( pid );
278    lpid_t  lpid      = LPID_FROM_PID( pid );
279
280    // get lock & root of list of copies from owner cluster
281    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
282    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
283
284    // take the lock protecting the list of processes
285    remote_spinlock_lock( lock_xp );
286
287    // scan list of processes
288    found = false;
289    XLIST_FOREACH( root_xp , iter_xp )
290    {
291        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
292
293        if( GET_CXY( current_xp ) == cxy )
294        {
295            found = true;
296            break;
297        }
298    }
299
300    // release the lock protecting the list of processes
301    remote_spinlock_unlock( lock_xp );
302
303    // return extended pointer on process descriptor in owner cluster
304    if( found ) return current_xp;
305    else        return XPTR_NULL;
306
307}  // end cluster_get_process_from_pid_in_cxy()
308
309
310//////////////////////////////////////////////////////
311xptr_t cluster_get_owner_process_from_pid( pid_t pid )
312{
313    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
314    xptr_t      lock_xp;       // xptr on lock protecting this list
315    xptr_t      iter_xp;       // iterator
316    xptr_t      current_xp;    // xptr on current process descriptor
317    process_t * current_ptr;   // local pointer on current process
318    pid_t       current_pid;   // current process identifier
319    bool_t      found;
320
321    cluster_t * cluster = LOCAL_CLUSTER;
322
323    // get owner cluster and lpid
324    cxy_t  owner_cxy = CXY_FROM_PID( pid );
325
326    // get lock & root of list of process in owner cluster
327    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
328    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
329
330    // take the lock protecting the list of processes
331    remote_spinlock_lock( lock_xp );
332
333    // scan list of processes in owner cluster
334    found = false;
335    XLIST_FOREACH( root_xp , iter_xp )
336    {
337        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
338        current_ptr = GET_PTR( current_xp );
339        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
340
341        if( current_pid == pid )
342        {
343            found = true;
344            break;
345        }
346    }
347
348    // release the lock protecting the list of processes
349    remote_spinlock_unlock( lock_xp );
350
351    // return extended pointer on process descriptor in owner cluster
352    if( found ) return current_xp;
353    else        return XPTR_NULL;
354
355}  // end cluster_get_owner_process_from_pid()
356
357
358//////////////////////////////////////////////////////////
359xptr_t cluster_get_reference_process_from_pid( pid_t pid )
360{
361    xptr_t ref_xp;   // extended pointer on reference process descriptor
362
363    cluster_t * cluster = LOCAL_CLUSTER;
364
365    // get owner cluster and lpid
366    cxy_t  owner_cxy = CXY_FROM_PID( pid );
367    lpid_t lpid      = LPID_FROM_PID( pid );
368
369    // Check valid PID
370    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
371
372    if( local_cxy == owner_cxy )   // local cluster is owner cluster
373    {
374        ref_xp = cluster->pmgr.pref_tbl[lpid];
375    }
376    else                              // use a remote_lwd to access owner cluster
377    {
378        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
379    }
380
381    return ref_xp;
382}
383
384///////////////////////////////////////////////
385error_t cluster_pid_alloc( process_t * process,
386                           pid_t     * pid )
387{
388    lpid_t      lpid;
389    bool_t      found;
390
391#if DEBUG_CLUSTER_PID_ALLOC
392uint32_t cycle = (uint32_t)hal_get_cycles();
393if( DEBUG_CLUSTER_PID_ALLOC < cycle )
394printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
395__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
396#endif
397
398    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
399
400    // get the process manager lock
401    spinlock_lock( &pm->pref_lock );
402
403    // search an empty slot
404    found = false;
405    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
406    {
407        if( pm->pref_tbl[lpid] == XPTR_NULL )
408        {
409            found = true;
410            break;
411        }
412    }
413
414    if( found )
415    {
416        // register process in pref_tbl[]
417        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
418        pm->pref_nr++;
419
420        // returns pid
421        *pid = PID( local_cxy , lpid );
422
423        // release the processs_manager lock
424        spinlock_unlock( &pm->pref_lock );
425
426        return 0;
427    }
428    else
429    {
430        // release the processs_manager lock
431        spinlock_unlock( &pm->pref_lock );
432
433        return -1;
434    }
435
436#if DEBUG_CLUSTER_PID_ALLOC
437cycle = (uint32_t)hal_get_cycles();
438if( DEBUG_CLUSTER_PID_ALLOC < cycle )
439printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
440__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
441#endif
442
443} // end cluster_pid_alloc()
444
445/////////////////////////////////////
446void cluster_pid_release( pid_t pid )
447{
448
449#if DEBUG_CLUSTER_PID_RELEASE
450uint32_t cycle = (uint32_t)hal_get_cycles();
451if( DEBUG_CLUSTER_PID_RELEASE < cycle )
452printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
453__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
454#endif
455
456    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
457    lpid_t lpid       = LPID_FROM_PID( pid );
458
459    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
460
461    // check lpid
462    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
463    "illegal LPID = %d" , lpid );
464
465    // check owner cluster
466    assert( (owner_cxy == local_cxy) ,
467    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
468
469    // get the process manager lock
470    spinlock_lock( &pm->pref_lock );
471
472    // remove process from pref_tbl[]
473    pm->pref_tbl[lpid] = XPTR_NULL;
474    pm->pref_nr--;
475
476    // release the processs_manager lock
477    spinlock_unlock( &pm->pref_lock );
478
479#if DEBUG_CLUSTER_PID_RELEASE
480cycle = (uint32_t)hal_get_cycles();
481if( DEBUG_CLUSTER_PID_RELEASE < cycle )
482printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
483__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
484#endif
485
486} // end cluster_pid_release()
487
488///////////////////////////////////////////////////////////
489process_t * cluster_get_local_process_from_pid( pid_t pid )
490{
491    xptr_t         process_xp;
492    process_t    * process_ptr;
493    xptr_t         root_xp;
494    xptr_t         iter_xp;
495    bool_t         found;
496
497    found   = false;
498    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
499
500    XLIST_FOREACH( root_xp , iter_xp )
501    {
502        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
503        process_ptr = (process_t *)GET_PTR( process_xp );
504        if( process_ptr->pid == pid )
505        {
506            found = true;
507            break;
508        }
509    }
510
511    if (found ) return process_ptr;
512    else        return NULL;
513
514}  // end cluster_get_local_process_from_pid()
515
516//////////////////////////////////////////////////////
517void cluster_process_local_link( process_t * process )
518{
519    reg_t    save_sr;
520
521    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
522
523    // get extended pointers on local process list root & lock
524    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
525    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
526
527    // get lock protecting the process manager local list
528    remote_spinlock_lock_busy( lock_xp , &save_sr );
529
530    // register process in local list
531    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
532    pm->local_nr++;
533
534    // release lock protecting the process manager local list
535    remote_spinlock_unlock_busy( lock_xp , save_sr );
536}
537
538////////////////////////////////////////////////////////
539void cluster_process_local_unlink( process_t * process )
540{
541    reg_t save_sr;
542
543    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
544
545    // get extended pointers on local process list lock
546    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
547
548    // get lock protecting the process manager local list
549    remote_spinlock_lock_busy( lock_xp , &save_sr );
550
551    // remove process from local list
552    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
553    pm->local_nr--;
554
555    // release lock protecting the process manager local list
556    remote_spinlock_unlock_busy( lock_xp , save_sr );
557}
558
559///////////////////////////////////////////////////////
560void cluster_process_copies_link( process_t * process )
561{
562    reg_t    irq_state;
563    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
564
565#if DEBUG_CLUSTER_PROCESS_COPIES
566uint32_t cycle = (uint32_t)hal_get_cycles();
567if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
568printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
569__FUNCTION__ , local_cxy , process , cycle );
570#endif
571
572    // get owner cluster identifier CXY and process LPID
573    pid_t    pid        = process->pid;
574    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
575    lpid_t   lpid       = LPID_FROM_PID( pid );
576
577    // get extended pointer on lock protecting copies_list[lpid]
578    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
579
580    // get extended pointer on the copies_list[lpid] root
581    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );
582
583    // get extended pointer on the local copies_list entry
584    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
585
586    // get lock protecting copies_list[lpid]
587    remote_spinlock_lock_busy( copies_lock , &irq_state );
588
589    // add copy to copies_list
590    xlist_add_first( copies_root , copies_entry );
591    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
592
593    // release lock protecting copies_list[lpid]
594    remote_spinlock_unlock_busy( copies_lock , irq_state );
595
596#if DEBUG_CLUSTER_PROCESS_COPIES
597cycle = (uint32_t)hal_get_cycles();
598if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
599printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
600__FUNCTION__ , local_cxy , process , cycle );
601#endif
602
603}  // end cluster_process_copies_link()
604
605/////////////////////////////////////////////////////////
606void cluster_process_copies_unlink( process_t * process )
607{
608    uint32_t irq_state;
609    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
610
611#if DEBUG_CLUSTER_PROCESS_COPIES
612uint32_t cycle = (uint32_t)hal_get_cycles();
613if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
614printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
615__FUNCTION__ , local_cxy , process , cycle );
616#endif
617
618    // get owner cluster identifier CXY and process LPID
619    pid_t    pid        = process->pid;
620    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
621    lpid_t   lpid       = LPID_FROM_PID( pid );
622
623    // get extended pointer on lock protecting copies_list[lpid]
624    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
625
626    // get extended pointer on the local copies_list entry
627    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
628
629    // get lock protecting copies_list[lpid]
630    remote_spinlock_lock_busy( copies_lock , &irq_state );
631
632    // remove copy from copies_list
633    xlist_unlink( copies_entry );
634    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
635
636    // release lock protecting copies_list[lpid]
637    remote_spinlock_unlock_busy( copies_lock , irq_state );
638
639#if DEBUG_CLUSTER_PROCESS_COPIES
640cycle = (uint32_t)hal_get_cycles();
641if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
642printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
643__FUNCTION__ , local_cxy , process , cycle );
644#endif
645
646}  // end cluster_process_copies_unlink()
647
///////////////////////////////////////////
// Display on TXT0 all processes registered in cluster <cxy>.
// Takes the remote cluster process-list lock, then the TXT0 lock in busy
// waiting mode, so the whole listing is printed atomically.
// @ cxy : target cluster identifier (must be a defined cluster).
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;
    xptr_t        lock_xp;
    xptr_t        iter_xp;
    xptr_t        process_xp;
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode     

    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointer on root and lock for local process list in cluster
    // (the local pointer is combined with the remote cxy: the pmgr fields
    //  are presumably at the same local address in every cluster — the
    //  standard extended-pointer idiom used throughout this file)
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_spinlock_lock( lock_xp );

    // get TXT0 lock in busy waiting mode
    // (taken second: process-list lock first, TXT0 lock second)
    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
     
    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );

    // release lock on local process list
    remote_spinlock_unlock( lock_xp );

}  // end cluster_processes_display()
700
701
Note: See TracBrowser for help on using the repository browser.