source: trunk/kernel/kern/cluster.c @ 562

Last change on this file since 562 was 562, checked in by nicolas.van.phan@…, 6 years ago

Disable DQDT and remove y_max FOR GOOD

File size: 22.0 KB
Line 
1/*
2 * cluster.c - Cluster-Manager related operations
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
6 *         Alain Greiner (2016,2017,2018)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH..
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_atomic.h>
29#include <hal_special.h>
30#include <hal_ppm.h>
31#include <remote_fifo.h>
32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
36#include <chdev.h>
37#include <scheduler.h>
38#include <list.h>
39#include <cluster.h>
40#include <boot_info.h>
41#include <bits.h>
42#include <ppm.h>
43#include <thread.h>
44#include <kmem.h>
45#include <process.h>
46#include <dqdt.h>
47#include <cluster_info.h>
48
49/////////////////////////////////////////////////////////////////////////////////////
50// Extern global variables
51/////////////////////////////////////////////////////////////////////////////////////
52
53extern process_t           process_zero;     // allocated in kernel_init.c file
54extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
55
///////////////////////////////////////////////////////////////////////////////
// This function initialises the local cluster manager from information
// registered by the boot-loader in the boot_info structure.
// It successively initialises: the cluster global parameters, the
// cluster_info[][] activity array, the external peripheral channel counts,
// the DQDT, the embedded memory allocators (PPM / KHM / KCM), the core
// descriptors, the per-core RPC FIFOs, and the local process manager.
// @ info  : pointer on the local boot_info_t structure built by the boot-loader.
// @ return 0 if success / return ENOMEM if the PPM cannot be initialised.
///////////////////////////////////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process index
    lid_t           lid;      // local core index
    uint32_t        i;        // index in loop on external peripherals
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type

	cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
	cluster->paddr_width     = info->paddr_width;
	cluster->x_width         = info->x_width;
	cluster->y_width         = info->y_width;
	cluster->x_size          = info->x_size;
	cluster->y_size          = info->y_size;
	cluster->io_cxy          = info->io_cxy;

    // initialize the cluster_info[][] array (records which clusters are active)
    int x;
    int y;
    for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++) {
        for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++) {
            cluster->cluster_info[x][y] = info->cluster_info[x][y];
        }
    }
    // initialize external peripherals channels
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize cluster local parameters
	cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded kcm allocator
	spinlock_init( &cluster->kcm_lock );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // initialises DQDT (dqdt_init() returns the tree depth, root level is depth - 1)
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width ) - 1;

    // initialises embedded PPM (physical pages manager)
	error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises embedded KHM (kernel heap manager)
	khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises embedded KCM (kernel cache manager)
	kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises all cores descriptors
	for( lid = 0 ; lid < cluster->cores_nr; lid++ )
	{
		core_init( &cluster->core_tbl[lid],    // target core descriptor
		       lid,                        // local core index
		       info->core[lid].gid );      // gid from boot_info_t
	}

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises RPC FIFOs (one FIFO and one RPC-threads counter per core)
	for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
	    local_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialise pref_tbl[] in process manager
    // slot 0 is reserved for the kernel process_zero
	spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialise local_list in process manager
	remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialise copies_lists in process manager (one list per local PID)
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
	    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // make all initialisations visible to other cores before returning
    hal_fence();

	return 0;
} // end cluster_init()
212
213/////////////////////////////////
214cxy_t cluster_random_select( void )
215{
216    uint32_t  x_size;
217    uint32_t  y_size;
218    uint32_t  y_width;
219    uint32_t  index;
220    uint32_t  x;
221    uint32_t  y;
222
223    do {
224        x_size     = LOCAL_CLUSTER->x_size;
225        y_size     = LOCAL_CLUSTER->y_size;
226        y_width   = LOCAL_CLUSTER->y_width;
227        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
228        x         = index / y_size;
229        y         = index % y_size;
230    } while ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[x][y] ) == 0 );
231
232    return (x<<y_width) + y;
233}
234
235////////////////////////////////////////
236bool_t cluster_is_undefined( cxy_t cxy )
237{
238    cluster_t * cluster = LOCAL_CLUSTER;
239
240    uint32_t y_width = cluster->y_width;
241
242    uint32_t x = cxy >> y_width;
243    uint32_t y = cxy & ((1<<y_width)-1);
244
245    if( x >= cluster->x_size ) return true;
246    if( y >= cluster->y_size ) return true;
247
248    return false;
249}
250
251////////////////////////////////////////////////////////////////////////////////////
252//  Cores related functions
253////////////////////////////////////////////////////////////////////////////////////
254
255/////////////////////////////////
256lid_t cluster_select_local_core( void )
257{
258    uint32_t      min = 1000;
259    lid_t         sel = 0;
260    uint32_t      nthreads;
261    lid_t         lid;
262    scheduler_t * sched;
263
264    cluster_t * cluster = LOCAL_CLUSTER;
265
266    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
267    {
268        sched    = &cluster->core_tbl[lid].scheduler;
269        nthreads = sched->u_threads_nr + sched->k_threads_nr;
270
271        if( nthreads < min )
272        {
273            min = nthreads;
274            sel = lid;
275        }
276    }
277    return sel;
278}
279
280////////////////////////////////////////////////////////////////////////////////////
281//  Process related functions
282////////////////////////////////////////////////////////////////////////////////////
283
284
///////////////////////////////////////////////////////////////////////////////
// This function returns an extended pointer on the copy of the process
// identified by <pid> located in cluster <cxy>, by scanning the copies list
// rooted in the owner cluster of <pid>.
// The remote lock protecting this list is taken / released.
// NOTE : building XPTR(owner_cxy , &cluster->pmgr...) from the local cluster
// pointer works because the cluster manager has the same local address in
// all clusters.
// @ cxy : cluster where the searched process copy must reside.
// @ pid : searched process identifier.
// @ return extended pointer on the copy / XPTR_NULL if no copy in <cxy>.
///////////////////////////////////////////////////////////////////////////////
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes, looking for a copy located in cluster <cxy>
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()
329
330
///////////////////////////////////////////////////////////////////////////////
// This function returns an extended pointer on the process descriptor copy
// located in the owner cluster of <pid>, by scanning the list of processes
// attached to the owner cluster and comparing PIDs (read with a remote
// access, as the list lives in the owner cluster).
// The remote lock protecting this list is taken / released.
// @ pid : searched process identifier.
// @ return extended pointer on the owner copy / XPTR_NULL if not found.
///////////////////////////////////////////////////////////////////////////////
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of process in owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );
        // remote read of the candidate PID (descriptor is in owner cluster)
        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()
377
378
379//////////////////////////////////////////////////////////
380xptr_t cluster_get_reference_process_from_pid( pid_t pid )
381{
382    xptr_t ref_xp;   // extended pointer on reference process descriptor
383
384    cluster_t * cluster = LOCAL_CLUSTER;
385
386    // get owner cluster and lpid
387    cxy_t  owner_cxy = CXY_FROM_PID( pid );
388    lpid_t lpid      = LPID_FROM_PID( pid );
389
390    // Check valid PID
391    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
392
393    if( local_cxy == owner_cxy )   // local cluster is owner cluster
394    {
395        ref_xp = cluster->pmgr.pref_tbl[lpid];
396    }
397    else                              // use a remote_lwd to access owner cluster
398    {
399        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
400    }
401
402    return ref_xp;
403}
404
405///////////////////////////////////////////////
406error_t cluster_pid_alloc( process_t * process,
407                           pid_t     * pid )
408{
409    lpid_t      lpid;
410    bool_t      found;
411
412#if DEBUG_CLUSTER_PID_ALLOC
413uint32_t cycle = (uint32_t)hal_get_cycles();
414if( DEBUG_CLUSTER_PID_ALLOC < cycle )
415printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
416__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
417#endif
418
419    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
420
421    // get the process manager lock
422    spinlock_lock( &pm->pref_lock );
423
424    // search an empty slot
425    found = false;
426    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
427    {
428        if( pm->pref_tbl[lpid] == XPTR_NULL )
429        {
430            found = true;
431            break;
432        }
433    }
434
435    if( found )
436    {
437        // register process in pref_tbl[]
438        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
439        pm->pref_nr++;
440
441        // returns pid
442        *pid = PID( local_cxy , lpid );
443
444        // release the processs_manager lock
445        spinlock_unlock( &pm->pref_lock );
446
447        return 0;
448    }
449    else
450    {
451        // release the processs_manager lock
452        spinlock_unlock( &pm->pref_lock );
453
454        return -1;
455    }
456
457#if DEBUG_CLUSTER_PID_ALLOC
458cycle = (uint32_t)hal_get_cycles();
459if( DEBUG_CLUSTER_PID_ALLOC < cycle )
460printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
461__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
462#endif
463
464} // end cluster_pid_alloc()
465
///////////////////////////////////////////////////////////////////////////////
// This function releases a PID slot previously allocated by
// cluster_pid_alloc(). It must be executed in the owner cluster of <pid>
// (both conditions are enforced by assert), and clears the corresponding
// entry in the local pref_tbl[] array.
// The pref_lock protecting pref_tbl[] is taken / released.
// @ pid : released process identifier.
///////////////////////////////////////////////////////////////////////////////
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
#endif

    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) ,
    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

} // end cluster_pid_release()
508
509///////////////////////////////////////////////////////////
510process_t * cluster_get_local_process_from_pid( pid_t pid )
511{
512    xptr_t         process_xp;
513    process_t    * process_ptr;
514    xptr_t         root_xp;
515    xptr_t         iter_xp;
516    bool_t         found;
517
518    found   = false;
519    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
520
521    XLIST_FOREACH( root_xp , iter_xp )
522    {
523        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
524        process_ptr = (process_t *)GET_PTR( process_xp );
525        if( process_ptr->pid == pid )
526        {
527            found = true;
528            break;
529        }
530    }
531
532    if (found ) return process_ptr;
533    else        return NULL;
534
535}  // end cluster_get_local_process_from_pid()
536
537//////////////////////////////////////////////////////
538void cluster_process_local_link( process_t * process )
539{
540    reg_t    save_sr;
541
542    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
543
544    // get extended pointers on local process list root & lock
545    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
546    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
547
548    // get lock protecting the process manager local list
549    remote_spinlock_lock_busy( lock_xp , &save_sr );
550
551    // register process in local list
552    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
553    pm->local_nr++;
554
555    // release lock protecting the process manager local list
556    remote_spinlock_unlock_busy( lock_xp , save_sr );
557}
558
559////////////////////////////////////////////////////////
560void cluster_process_local_unlink( process_t * process )
561{
562    reg_t save_sr;
563
564    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
565
566    // get extended pointers on local process list lock
567    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
568
569    // get lock protecting the process manager local list
570    remote_spinlock_lock_busy( lock_xp , &save_sr );
571
572    // remove process from local list
573    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
574    pm->local_nr--;
575
576    // release lock protecting the process manager local list
577    remote_spinlock_unlock_busy( lock_xp , save_sr );
578}
579
///////////////////////////////////////////////////////////////////////////////
// This function registers the local <process> descriptor in the copies list
// rooted in the owner cluster of the process PID, and atomically increments
// the copies counter located in the owner cluster.
// The remote lock protecting the copies list is taken in busy-waiting mode.
// @ process : local pointer on the process descriptor to register.
///////////////////////////////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    reg_t    irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid] (busy waiting / IRQs masked)
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // add copy to copies_list and increment the remote copies counter
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()
625
///////////////////////////////////////////////////////////////////////////////
// This function removes the local <process> descriptor from the copies list
// rooted in the owner cluster of the process PID, and atomically decrements
// the copies counter located in the owner cluster.
// The remote lock protecting the copies list is taken in busy-waiting mode.
// @ process : local pointer on the process descriptor to remove.
///////////////////////////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid] (busy waiting / IRQs masked)
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // remove copy from copies_list and decrement the remote copies counter
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_unlink()
668
///////////////////////////////////////////////////////////////////////////////
// This function displays on the TXT0 terminal all processes registered in
// cluster <cxy>. It can be called by a thread running in any cluster.
// Lock ordering : the remote lock on the target process list is taken first,
// then the TXT0 lock in busy-waiting mode; both are released in reverse
// order after the display.
// @ cxy : target cluster identifier (must name an existing cluster).
///////////////////////////////////////////////////////////////////////////////
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;
    xptr_t        lock_xp;
    xptr_t        iter_xp;
    xptr_t        process_xp;
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode

    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointer on root and lock for local process list in cluster
    // (the cluster manager has the same local address in all clusters)
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_spinlock_lock( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );

    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );

    // release lock on local process list
    remote_spinlock_unlock( lock_xp );

}  // end cluster_processes_display()
721
722
Note: See TracBrowser for help on using the repository browser.