source: trunk/kernel/kern/cluster.c @ 561

Last change on this file since 561 was 561, checked in by nicolas.van.phan@…, 6 years ago

Remove y_max in all functions except dqdt_init()

File size: 22.2 KB
/*
 * cluster.c - Cluster-Manager related operations
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Mohamed Lamine Karaoui (2015)
 *         Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <remote_fifo.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <chdev.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>
#include <cluster_info.h>

/////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
/////////////////////////////////////////////////////////////////////////////////////

extern process_t           process_zero;     // allocated in kernel_init.c file
extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file

///////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process index
    lid_t           lid;      // local core index
    uint32_t        i;        // index in loop on external peripherals
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->x_max           = info->x_max; // [FIXME]
    cluster->y_max           = info->y_max; // [FIXME]
    cluster->io_cxy          = info->io_cxy;

    // initialize the cluster_info[][] array
    int x;
    int y;
    for( x = 0 ; x < CONFIG_MAX_CLUSTERS_X ; x++ )
    {
        for( y = 0 ; y < CONFIG_MAX_CLUSTERS_Y ; y++ )
        {
            cluster->cluster_info[x][y] = info->cluster_info[x][y];
        }
    }

    // initialize external peripheral channels
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded kcm allocator
    spinlock_init( &cluster->kcm_lock );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // initialize DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_max, // [FIXME]
                                          info->y_max, // [FIXME]
                                          info->y_width ) - 1;

    // initialize embedded PPM
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize embedded KHM
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize all core descriptors
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize RPC FIFOs
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        local_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifos initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize pref_tbl[] in process manager
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize local_list in process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialize copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x exit for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    hal_fence();

    return 0;

} // end cluster_init()
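
// Illustrative sketch (not code from kernel_init.c): cluster_init() is expected to
// be called once per cluster by the kernel initialization sequence, with the
// boot_info_t structure built by the boot loader, and a non-zero return value is
// fatal for that cluster. The panic handling below is an assumption, and
// hal_core_sleep() is used here as a hypothetical halt primitive.
//
//  error_t error = cluster_init( info );
//  if( error )
//  {
//      printk("\n[PANIC] %s : cannot initialize cluster %x\n", __FUNCTION__ , local_cxy );
//      hal_core_sleep();   // hypothetical halt primitive
//  }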

/////////////////////////////////////
cxy_t cluster_random_select( void )
{
    uint32_t  x_size;
    uint32_t  y_size;
    uint32_t  y_width;
    uint32_t  index;
    uint32_t  x;
    uint32_t  y;

    do
    {
        x_size    = LOCAL_CLUSTER->x_size;
        y_size    = LOCAL_CLUSTER->y_size;
        y_width   = LOCAL_CLUSTER->y_width;
        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
        x         = index / y_size;
        y         = index % y_size;
    }
    while( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[x][y] ) == 0 );

    return (x << y_width) + y;
}
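
// Worked example (values assumed, not taken from a real platform): with x_size = 4,
// y_size = 4 and y_width = 2, a pseudo-random index of 13 selects the cluster at
// coordinates x = 13 / 4 = 3 and y = 13 % 4 = 1, and the returned identifier is
// cxy = (3 << 2) + 1 = 0xD. The do/while loop simply retries while the selected
// (x,y) entry is not active in the cluster_info[][] array.
//
//  cxy_t target_cxy = cluster_random_select();
//  assert( (cluster_is_undefined( target_cxy ) == false) , "selected an undefined cluster" );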

////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1 << y_width) - 1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}
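
// Illustrative sketch: cluster_is_undefined() performs the inverse of the encoding
// used by cluster_random_select(), splitting cxy into x = cxy >> y_width and
// y = cxy & ((1 << y_width) - 1), and checking both coordinates against the mesh
// size. A typical caller uses it to reject a user-supplied cluster identifier
// before any remote access; the EINVAL return shown below is an assumption.
//
//  if( cluster_is_undefined( cxy ) ) return EINVAL;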

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////
lid_t cluster_select_local_core( void )
{
    uint32_t      min = 1000;
    lid_t         sel = 0;
    uint32_t      nthreads;
    lid_t         lid;
    scheduler_t * sched;

    cluster_t * cluster = LOCAL_CLUSTER;

    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        sched    = &cluster->core_tbl[lid].scheduler;
        nthreads = sched->u_threads_nr + sched->k_threads_nr;

        if( nthreads < min )
        {
            min = nthreads;
            sel = lid;
        }
    }
    return sel;
}
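
// Illustrative sketch (the thread placement call is assumed, not defined in this
// file): cluster_select_local_core() returns the index of the local core whose
// scheduler currently holds the fewest threads (user + kernel), so a typical caller
// uses it to place a new thread on the least loaded core of the local cluster.
//
//  lid_t lid = cluster_select_local_core();
//  // hypothetical placement call : thread_create_on_core( local_cxy , lid , ... );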

////////////////////////////////////////////////////////////////////////////////////
//  Process related functions
////////////////////////////////////////////////////////////////////////////////////


//////////////////////////////////////////////////////
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of copies list in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan the list of process copies
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in cluster cxy
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()
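
// Illustrative sketch: looking up the local copy of a given process from its PID.
// The error handling shown here is an assumption, not code from this file.
//
//  xptr_t process_xp = cluster_get_process_from_pid_in_cxy( local_cxy , pid );
//  if( process_xp == XPTR_NULL )
//  {
//      // no copy of process <pid> is registered in this cluster
//      printk("\n[ERROR] process %x has no copy in cluster %x\n", pid , local_cxy );
//  }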


//////////////////////////////////////////////////////
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of processes in owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );
        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()


//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check valid PID
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote_lwd to access owner cluster
    {
        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}
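
// Illustrative example of the PID encoding used by the lookup functions above (the
// exact bit layout is defined by the PID / CXY_FROM_PID / LPID_FROM_PID macros, not
// in this file): a PID packs the owner cluster identifier and a local process index,
// so that CXY_FROM_PID() recovers the owner cluster and LPID_FROM_PID() recovers the
// slot index in that cluster's pref_tbl[].
//
//  cxy_t  owner_cxy = CXY_FROM_PID( pid );      // cluster that allocated the PID
//  lpid_t lpid      = LPID_FROM_PID( pid );     // slot in pref_tbl[] of that cluster
//  assert( (pid == PID( owner_cxy , lpid )) , "PID encoding must be reversible" );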

///////////////////////////////////////////////
error_t cluster_pid_alloc( process_t * process,
                           pid_t     * pid )
{
    lpid_t      lpid;
    bool_t      found;
    error_t     error;

#if DEBUG_CLUSTER_PID_ALLOC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
        pm->pref_nr++;

        // return pid to caller
        *pid  = PID( local_cxy , lpid );
        error = 0;
    }
    else
    {
        error = -1;
    }

    // release the process_manager lock
    spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
#endif

    return error;

} // end cluster_pid_alloc()
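
// Illustrative sketch of the expected pairing between cluster_pid_alloc() and
// cluster_pid_release(); the surrounding process creation / destruction code is
// assumed, not shown in this file.
//
//  pid_t   pid;
//  error_t error = cluster_pid_alloc( process , &pid );
//  if( error )   // no free slot in pref_tbl[] : process creation must fail
//  {
//      printk("\n[ERROR] cannot allocate a PID in cluster %x\n", local_cxy );
//  }
//  ...
//  cluster_pid_release( pid );   // called by the owner cluster at process destruction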

/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
#endif

    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) ,
    "local_cluster %x != owner_cluster %x" , local_cxy , owner_cxy );

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process_manager lock
    spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

} // end cluster_pid_release()

///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    reg_t    save_sr;

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list root & lock
    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // register process in local list
    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    reg_t save_sr;

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointer on local process list lock
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // remove process from local list
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}

///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    reg_t    irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // add copy to copies_list
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    reg_t    irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // remove copy from copies_list
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_unlink()
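
// Illustrative sketch (process creation / destruction code is assumed, not shown in
// this file): every cluster holding a copy of a process registers this copy both in
// its own local list and in the copies_list[lpid] rooted in the owner cluster, so
// the link / unlink functions above are expected to be called symmetrically around
// the lifetime of a local process copy.
//
//  cluster_process_local_link( process );       // when the local copy is created
//  cluster_process_copies_link( process );
//  ...
//  cluster_process_copies_unlink( process );    // when the local copy is destroyed
//  cluster_process_local_unlink( process );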

///////////////////////////////////////////
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;
    xptr_t        lock_xp;
    xptr_t        iter_xp;
    xptr_t        process_xp;
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode

    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointers on root and lock for local process list in cluster
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_spinlock_lock( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );

    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );

    // release lock on local process list
    remote_spinlock_unlock( lock_xp );

}  // end cluster_processes_display()
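
// Illustrative sketch (an interactive debug context is assumed): dumping the
// processes of every active cluster, for instance from a kernel shell command.
//
//  uint32_t x , y;
//  for( x = 0 ; x < LOCAL_CLUSTER->x_size ; x++ )
//  {
//      for( y = 0 ; y < LOCAL_CLUSTER->y_size ; y++ )
//      {
//          if( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[x][y] ) )
//              cluster_processes_display( (x << LOCAL_CLUSTER->y_width) + y );
//      }
//  }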