source: trunk/kernel/kern/cluster.c @ 557

Last change on this file since 557 was 557, checked in by nicolas.van.phan@…, 6 years ago

Add cluster_info[][] array in cluster descriptor

File size: 21.5 KB
RevLine 
[1]1/*
2 * cluster.c - Cluster-Manager related operations
[19]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
[437]6 *         Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[456]27#include <hal_kernel_types.h>
[1]28#include <hal_atomic.h>
29#include <hal_special.h>
[50]30#include <hal_ppm.h>
[407]31#include <remote_fifo.h>
[1]32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
[443]36#include <chdev.h>
[1]37#include <scheduler.h>
38#include <list.h>
39#include <cluster.h>
40#include <boot_info.h>
41#include <bits.h>
42#include <ppm.h>
43#include <thread.h>
44#include <kmem.h>
45#include <process.h>
46#include <dqdt.h>
47
[408]48/////////////////////////////////////////////////////////////////////////////////////
[1]49// Extern global variables
[408]50/////////////////////////////////////////////////////////////////////////////////////
[1]51
[443]52extern process_t           process_zero;     // allocated in kernel_init.c file
53extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
[1]54
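// cluster_init() is presumably called once in each cluster during kernel
// initialization: it fills the cluster descriptor from the boot_info structure,
// then initializes the DQDT, the embedded memory allocators (PPM / KHM / KCM),
// the core descriptors, the RPC FIFOs, and the process manager.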
[443]55////////////////////////////////////////////////
[1]56error_t cluster_init( struct boot_info_s * info )
57{
[428]58    error_t         error;
59    lpid_t          lpid;     // local process_index
60    lid_t           lid;      // local core index
61    uint32_t        i;        // index in loop on external peripherals
62    boot_device_t * dev;      // pointer on external peripheral
63    uint32_t        func;     // external peripheral functional type
[1]64
65        cluster_t * cluster = LOCAL_CLUSTER;
66
67    // initialize cluster global parameters
[19]68        cluster->paddr_width     = info->paddr_width;
[1]69        cluster->x_width         = info->x_width;
70        cluster->y_width         = info->y_width;
71        cluster->x_size          = info->x_size;
72        cluster->y_size          = info->y_size;
[530]73    cluster->x_max           = info->x_max; // [FIXME]
74        cluster->y_max           = info->y_max; // [FIXME]
[1]75        cluster->io_cxy          = info->io_cxy;
76
[557]77    // initialize the cluster_info[][] array
78    int x;
79    int y;
80    for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++) {
81        for (y = 0; y < CONFIG_MAX_CLUSTERS_Y; y++) {
82            cluster->cluster_info[x][y] = info->cluster_info[x][y];
83        }
84    }
[428]85    // initialize external peripheral channels
86    for( i = 0 ; i < info->ext_dev_nr ; i++ )
87    {
88        dev  = &info->ext_dev[i];
89        func = FUNC_FROM_TYPE( dev->type );   
90        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
91        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
92        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
93        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
94    }
95
[1]96    // initialize cluster local parameters
97        cluster->cores_nr        = info->cores_nr;
98
[19]99    // initialize the lock protecting the embedded kcm allocator
[1]100        spinlock_init( &cluster->kcm_lock );
101
[438]102#if DEBUG_CLUSTER_INIT
[433]103uint32_t cycle = (uint32_t)hal_get_cycles();
[438]104if( DEBUG_CLUSTER_INIT < cycle )
[437]105printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
106__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
[433]107#endif
[50]108
[19]109    // initialises DQDT
[530]110    cluster->dqdt_root_level = dqdt_init( info->x_max, // [FIXME]
111                                          info->y_max, // [FIXME]
[438]112                                          info->y_width ) - 1;
[1]113
114    // initialises embedded PPM
[50]115        error = hal_ppm_init( info );
[1]116
[50]117    if( error )
118    {
119        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
120               __FUNCTION__ , local_cxy );
121        return ENOMEM;
122    }
123
[438]124#if( DEBUG_CLUSTER_INIT & 1 )
[433]125cycle = (uint32_t)hal_get_cycles();
[438]126if( DEBUG_CLUSTER_INIT < cycle )
[437]127printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
[433]128__FUNCTION__ , local_cxy , cycle );
129#endif
[50]130
[1]131    // initialises embedded KHM
132        khm_init( &cluster->khm );
[19]133
[438]134#if( DEBUG_CLUSTER_INIT & 1 )
[457]135cycle = (uint32_t)hal_get_cycles();
[438]136if( DEBUG_CLUSTER_INIT < cycle )
[437]137printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
138__FUNCTION__ , local_cxy , hal_get_cycles() );
139#endif
[50]140
[19]141    // initialises embedded KCM
[5]142        kcm_init( &cluster->kcm , KMEM_KCM );
[1]143
[438]144#if( DEBUG_CLUSTER_INIT & 1 )
[457]145cycle = (uint32_t)hal_get_cycles();
[438]146if( DEBUG_CLUSTER_INIT < cycle )
[437]147printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
148__FUNCTION__ , local_cxy , hal_get_cycles() );
149#endif
[50]150
[296]151    // initialises all core descriptors
[1]152        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
153        {
154                core_init( &cluster->core_tbl[lid],    // target core descriptor
155                       lid,                        // local core index
156                       info->core[lid].gid );      // gid from boot_info_t
157        }
[19]158
[438]159#if( DEBUG_CLUSTER_INIT & 1 )
[433]160cycle = (uint32_t)hal_get_cycles();
[438]161if( DEBUG_CLUSTER_INIT < cycle )
[437]162printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
[433]163__FUNCTION__ , local_cxy , cycle );
164#endif
[50]165
[440]166    // initialises RPC FIFOs
167        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
168    {
169            local_fifo_init( &cluster->rpc_fifo[lid] );
170        cluster->rpc_threads[lid] = 0;
171    }
[1]172
[438]173#if( DEBUG_CLUSTER_INIT & 1 )
[437]174cycle = (uint32_t)hal_get_cycles();
[438]175if( DEBUG_CLUSTER_INIT < cycle )
[437]176printk("\n[DBG] %s : RPC FIFOs initialized in cluster %x at cycle %d\n",
[407]177__FUNCTION__ , local_cxy , hal_get_cycles() );
[437]178#endif
[50]179
[1]180    // initialise pref_tbl[] in process manager
181        spinlock_init( &cluster->pmgr.pref_lock );
182    cluster->pmgr.pref_nr = 0;
[19]183    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
[1]184    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
185    {
186        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
187    }
188
189    // initialise local_list in process manager
[23]190        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
191    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
[1]192    cluster->pmgr.local_nr = 0;
193
194    // initialise copies_lists in process manager
[101]195    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
[1]196    {
197            remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
198        cluster->pmgr.copies_nr[lpid] = 0;
199        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
[19]200    }
[1]201
[438]202#if DEBUG_CLUSTER_INIT
[433]203cycle = (uint32_t)hal_get_cycles();
[438]204if( DEBUG_CLUSTER_INIT < cycle )
[437]205printk("\n[DBG] %s : thread %x exit for cluster %x / cycle %d\n",
206__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
[433]207#endif
[50]208
[124]209    hal_fence();
[1]210
211        return 0;
212} // end cluster_init()
213
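// cluster_is_undefined() checks whether a cluster identifier CXY maps to an
// existing cluster of the mesh: the Y coordinate occupies the y_width least
// significant bits of CXY, and the X coordinate occupies the bits above.
// Worked example (assuming y_width == 4): cxy == 0x25 gives x == 2 / y == 5,
// and this CXY is reported undefined as soon as x >= x_size or y >= y_size.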
214////////////////////////////////////////
215bool_t cluster_is_undefined( cxy_t cxy )
216{
217    cluster_t * cluster = LOCAL_CLUSTER;
218
219    uint32_t y_width = cluster->y_width;
220
221    uint32_t x = cxy >> y_width;
222    uint32_t y = cxy & ((1<<y_width)-1);
223
[19]224    if( x >= cluster->x_size ) return true;
225    if( y >= cluster->y_size ) return true;
[1]226
227    return false;
228}
229
230////////////////////////////////////////////////////////////////////////////////////
231//  Cores related functions
232////////////////////////////////////////////////////////////////////////////////////
233
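// cluster_select_local_core() returns the local index (lid) of the least loaded
// local core, i.e. the core whose scheduler currently holds the smallest number
// of attached threads (user + kernel); the initial value 1000 for <min> is
// simply assumed to exceed any realistic per-core thread count.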
234/////////////////////////////////
[485]235lid_t cluster_select_local_core( void )
[1]236{
[440]237    uint32_t      min = 1000;
238    lid_t         sel = 0;
239    uint32_t      nthreads;
240    lid_t         lid;
241    scheduler_t * sched;
[1]242
243    cluster_t * cluster = LOCAL_CLUSTER;
244
245    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
246    {
[440]247        sched    = &cluster->core_tbl[lid].scheduler;
248        nthreads = sched->u_threads_nr + sched->k_threads_nr;
249
250        if( nthreads < min )
[1]251        {
[440]252            min = nthreads;
[1]253            sel = lid;
254        }
[19]255    }
[1]256    return sel;
257}
258
259////////////////////////////////////////////////////////////////////////////////////
[428]260//  Process related functions
[1]261////////////////////////////////////////////////////////////////////////////////////
262
[433]263
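// cluster_get_process_from_pid_in_cxy() returns an extended pointer on the copy
// of process <pid> located in cluster <cxy>, or XPTR_NULL when this cluster does
// not contain such a copy: it scans the copies_list[lpid] rooted in the owner
// cluster, under the protection of the associated remote spinlock.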
264//////////////////////////////////////////////////////
[443]265xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
266                                            pid_t pid )
267{
268    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
269    xptr_t      lock_xp;       // xptr on lock protecting this list
270    xptr_t      iter_xp;       // iterator
271    xptr_t      current_xp;    // xptr on current process descriptor
272    bool_t      found;
273
274    cluster_t * cluster = LOCAL_CLUSTER;
275
276    // get owner cluster and lpid
277    cxy_t   owner_cxy = CXY_FROM_PID( pid );
278    lpid_t  lpid      = LPID_FROM_PID( pid );
279
280    // get lock & root of list of copies from owner cluster
281    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
282    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
283
284    // take the lock protecting the list of processes
285    remote_spinlock_lock( lock_xp );
286
287    // scan list of processes
288    found = false;
289    XLIST_FOREACH( root_xp , iter_xp )
290    {
291        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
292
293        if( GET_CXY( current_xp ) == cxy )
294        {
295            found = true;
296            break;
297        }
298    }
299
300    // release the lock protecting the list of processes
301    remote_spinlock_unlock( lock_xp );
302
303    // return extended pointer on process descriptor in owner cluster
304    if( found ) return current_xp;
305    else        return XPTR_NULL;
306
307}  // end cluster_get_process_from_pid_in_cxy()
308
309
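// cluster_get_owner_process_from_pid() returns an extended pointer on the copy
// of process <pid> located in its owner cluster (the cluster encoded in the PID),
// or XPTR_NULL when not found: it scans the owner cluster's list of local
// processes, reading the pid field of each entry with a remote access.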
310//////////////////////////////////////////////////////
[433]311xptr_t cluster_get_owner_process_from_pid( pid_t pid )
312{
313    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
[436]314    xptr_t      lock_xp;       // xptr on lock protecting this list
[433]315    xptr_t      iter_xp;       // iterator
316    xptr_t      current_xp;    // xptr on current process descriptor
317    process_t * current_ptr;   // local pointer on current process
318    pid_t       current_pid;   // current process identifier
319    bool_t      found;
320
321    cluster_t * cluster = LOCAL_CLUSTER;
322
323    // get owner cluster and lpid
324    cxy_t  owner_cxy = CXY_FROM_PID( pid );
325
326    // get lock & root of list of process in owner cluster
327    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
328    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
329
330    // take the lock protecting the list of processes
331    remote_spinlock_lock( lock_xp );
332
333    // scan list of processes in owner cluster
334    found = false;
335    XLIST_FOREACH( root_xp , iter_xp )
336    {
337        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
338        current_ptr = GET_PTR( current_xp );
339        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
340
341        if( current_pid == pid )
342        {
343            found = true;
344            break;
345        }
346    }
347
348    // release the lock protecting the list of processes
349    remote_spinlock_unlock( lock_xp );
350
351    // return extended pointer on process descriptor in owner cluster
352    if( found ) return current_xp;
353    else        return XPTR_NULL;
354
[436]355}  // end cluster_get_owner_process_from_pid()
356
[443]357
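// cluster_get_reference_process_from_pid() returns an extended pointer on the
// reference process descriptor registered in pref_tbl[lpid] of the owner cluster.
// The lookup is a simple table access: local when the calling cluster is the
// owner cluster, remote (hal_remote_lwd) otherwise. XPTR_NULL is returned for an
// illegal LPID or an unused slot.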
[1]358//////////////////////////////////////////////////////////
359xptr_t cluster_get_reference_process_from_pid( pid_t pid )
[19]360{
[23]361    xptr_t ref_xp;   // extended pointer on reference process descriptor
[1]362
363    cluster_t * cluster = LOCAL_CLUSTER;
364
365    // get owner cluster and lpid
366    cxy_t  owner_cxy = CXY_FROM_PID( pid );
367    lpid_t lpid      = LPID_FROM_PID( pid );
368
[19]369    // Check valid PID
[23]370    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
[1]371
372    if( local_cxy == owner_cxy )   // local cluster is owner cluster
[19]373    {
[23]374        ref_xp = cluster->pmgr.pref_tbl[lpid];
[1]375    }
376    else                              // use a remote_lwd to access owner cluster
377    {
[23]378        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
[1]379    }
380
[23]381    return ref_xp;
[1]382}
383
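// cluster_pid_alloc() builds a PID for a process whose owner is the local cluster:
// it searches a free slot in the local pref_tbl[], registers the process in this
// slot, and returns PID( local_cxy , lpid ) through the <pid> argument; a non-zero
// value is returned when pref_tbl[] is full.
// Typical usage (sketch only, hypothetical caller):
//     pid_t new_pid;
//     if( cluster_pid_alloc( process , &new_pid ) ) return -1;   // table full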
[416]384///////////////////////////////////////////////
385error_t cluster_pid_alloc( process_t * process,
386                           pid_t     * pid )
[1]387{
388    lpid_t      lpid;
389    bool_t      found;
390
[440]391#if DEBUG_CLUSTER_PID_ALLOC
392uint32_t cycle = (uint32_t)hal_get_cycles();
393if( DEBUG_CLUSTER_PID_ALLOC < cycle )
394printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
395__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
396#endif
397
[1]398    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
399
400    // get the process manager lock
401    spinlock_lock( &pm->pref_lock );
402
403    // search an empty slot
404    found = false;
405    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
406    {
407        if( pm->pref_tbl[lpid] == XPTR_NULL )
408        {
409            found = true;
410            break;
411        }
412    }
413
414    if( found )
415    {
416        // register process in pref_tbl[]
[416]417        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
[1]418        pm->pref_nr++;
419
420        // returns pid
421        *pid = PID( local_cxy , lpid );
422
[416]423        // release the process manager lock
424        spinlock_unlock( &pm->pref_lock );
425
426        return 0;
[1]427    }
428    else
429    {
[416]430        // release the process manager lock
431        spinlock_unlock( &pm->pref_lock );
432
433        return -1;
[19]434    }
[1]435
[440]436#if DEBUG_CLUSTER_PID_ALLOC
437cycle = (uint32_t)hal_get_cycles();
438if( DEBUG_CLUSTER_PID_ALLOC < cycle )
439printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
440__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
441#endif
442
[1]443} // end cluster_pid_alloc()
444
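// cluster_pid_release() releases the pref_tbl[] slot associated to <pid>.
// It must be called in the owner cluster encoded in the PID: both the LPID range
// and the owner cluster identity are checked by the assert() calls below, before
// the slot is reset to XPTR_NULL under the process manager lock.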
445/////////////////////////////////////
446void cluster_pid_release( pid_t pid )
447{
[440]448
449#if DEBUG_CLUSTER_PID_RELEASE
450uint32_t cycle = (uint32_t)hal_get_cycles();
451if( DEBUG_CLUSTER_PID_RELEASE < cycle )
452printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
453__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
454#endif
455
[1]456    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
457    lpid_t lpid       = LPID_FROM_PID( pid );
458
[409]459    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
460
[440]461    // check lpid
[492]462    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
[440]463    "illegal LPID = %d" , lpid );
[1]464
[440]465    // check owner cluster
[492]466    assert( (owner_cxy == local_cxy) ,
[440]467    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
468
[1]469    // get the process manager lock
470    spinlock_lock( &pm->pref_lock );
471
472    // remove process from pref_tbl[]
473    pm->pref_tbl[lpid] = XPTR_NULL;
474    pm->pref_nr--;
475
476    // release the process manager lock
477    spinlock_unlock( &pm->pref_lock );
478
[440]479#if DEBUG_CLUSTER_PID_RELEASE
480cycle = (uint32_t)hal_get_cycles();
481if( DEBUG_CLUSTER_PID_RELEASE < cycle )
482printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
483__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
484#endif
485
[1]486} // end cluster_pid_release()
487
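// cluster_get_local_process_from_pid() returns a local pointer on the copy of
// process <pid> registered in the local cluster, or NULL when no such copy
// exists; note that it scans the local list of processes without taking the
// local_lock, so it is presumably intended for lookups that tolerate a stale view.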
488///////////////////////////////////////////////////////////
489process_t * cluster_get_local_process_from_pid( pid_t pid )
490{
[23]491    xptr_t         process_xp;
492    process_t    * process_ptr;
493    xptr_t         root_xp;
494    xptr_t         iter_xp;
495    bool_t         found;
[19]496
[23]497    found   = false;
498    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
499
500    XLIST_FOREACH( root_xp , iter_xp )
[1]501    {
[23]502        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
503        process_ptr = (process_t *)GET_PTR( process_xp );
504        if( process_ptr->pid == pid )
[1]505        {
[23]506            found = true;
[1]507            break;
508        }
509    }
510
[23]511    if (found ) return process_ptr;
512    else        return NULL;
513
[1]514}  // end cluster_get_local_process_from_pid()
515
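// cluster_process_local_link() and cluster_process_local_unlink() below register
// and remove a process descriptor in the list of processes attached to the local
// cluster; the list is protected by a remote spinlock taken in busy waiting mode,
// the caller's status register being saved in save_sr and restored on release.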
516//////////////////////////////////////////////////////
517void cluster_process_local_link( process_t * process )
518{
[443]519    reg_t    save_sr;
520
[1]521    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
522
[443]523    // get extended pointers on local process list root & lock
524    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
525    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
526
[1]527    // get lock protecting the process manager local list
[443]528    remote_spinlock_lock_busy( lock_xp , &save_sr );
[1]529
[443]530    // register process in local list
531    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
[1]532    pm->local_nr++;
533
534    // release lock protecting the process manager local list
[443]535    remote_spinlock_unlock_busy( lock_xp , save_sr );
[1]536}
537
538////////////////////////////////////////////////////////
539void cluster_process_local_unlink( process_t * process )
540{
[443]541    reg_t save_sr;
542
[1]543    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
544
[443]545    // get extended pointers on local process list lock
546    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
547
[1]548    // get lock protecting the process manager local list
[443]549    remote_spinlock_lock_busy( lock_xp , &save_sr );
[1]550
[443]551    // remove process from local list
[23]552    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
[1]553    pm->local_nr--;
554
555    // release lock protecting the process manager local list
[443]556    remote_spinlock_unlock_busy( lock_xp , save_sr );
[1]557}
558
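// cluster_process_copies_link() adds the local copy of a process to the
// copies_list[lpid] rooted in its owner cluster, and atomically increments the
// copies_nr[lpid] counter in that cluster; cluster_process_copies_unlink()
// further below does the reverse. Both take the remote copies_lock[lpid] in busy
// waiting mode, restoring the saved IRQ state on release.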
559///////////////////////////////////////////////////////
560void cluster_process_copies_link( process_t * process )
561{
[436]562    reg_t    irq_state;
[1]563    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
564
[438]565#if DEBUG_CLUSTER_PROCESS_COPIES
[436]566uint32_t cycle = (uint32_t)hal_get_cycles();
[438]567if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]568printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
569__FUNCTION__ , local_cxy , process , cycle );
570#endif
571
[1]572    // get owner cluster identifier CXY and process LPID
573    pid_t    pid        = process->pid;
574    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
575    lpid_t   lpid       = LPID_FROM_PID( pid );
576
577    // get extended pointer on lock protecting copies_list[lpid]
[120]578    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]579
580    // get extended pointer on the copies_list[lpid] root
[120]581    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );
[1]582
583    // get extended pointer on the local copies_list entry
584    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
585
[19]586    // get lock protecting copies_list[lpid]
[407]587    remote_spinlock_lock_busy( copies_lock , &irq_state );
[1]588
[436]589    // add copy to copies_list
[1]590    xlist_add_first( copies_root , copies_entry );
591    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
592
[19]593    // release lock protecting copies_list[lpid]
[407]594    remote_spinlock_unlock_busy( copies_lock , irq_state );
[1]595
[438]596#if DEBUG_CLUSTER_PROCESS_COPIES
[436]597cycle = (uint32_t)hal_get_cycles();
[438]598if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]599printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
600__FUNCTION__ , local_cxy , process , cycle );
601#endif
602
603}  // end cluster_process_copies_link()
604
[1]605/////////////////////////////////////////////////////////
606void cluster_process_copies_unlink( process_t * process )
607{
[407]608    uint32_t irq_state;
[1]609    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
610
[438]611#if DEBUG_CLUSTER_PROCESS_COPIES
[436]612uint32_t cycle = (uint32_t)hal_get_cycles();
[438]613if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]614printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
615__FUNCTION__ , local_cxy , process , cycle );
616#endif
617
[1]618    // get owner cluster identifier CXY and process LPID
619    pid_t    pid        = process->pid;
620    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
621    lpid_t   lpid       = LPID_FROM_PID( pid );
622
623    // get extended pointer on lock protecting copies_list[lpid]
[436]624    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]625
626    // get extended pointer on the local copies_list entry
627    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
628
[19]629    // get lock protecting copies_list[lpid]
[407]630    remote_spinlock_lock_busy( copies_lock , &irq_state );
[1]631
[436]632    // remove copy from copies_list
[1]633    xlist_unlink( copies_entry );
634    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
635
[19]636    // release lock protecting copies_list[lpid]
[407]637    remote_spinlock_unlock_busy( copies_lock , irq_state );
[1]638
[438]639#if DEBUG_CLUSTER_PROCESS_COPIES
[436]640cycle = (uint32_t)hal_get_cycles();
[438]641if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]642printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
643__FUNCTION__ , local_cxy , process , cycle );
644#endif
645
646}  // end cluster_process_copies_unlink()
647
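// cluster_processes_display() prints on the TXT0 terminal the state of all
// processes registered in cluster <cxy>. It takes both the remote lock protecting
// that cluster's list of local processes and the TXT0 lock in busy waiting mode,
// so that the output of process_display() is not interleaved with other messages.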
[428]648///////////////////////////////////////////
649void cluster_processes_display( cxy_t cxy )
[1]650{
[428]651    xptr_t        root_xp;
[443]652    xptr_t        lock_xp;
[428]653    xptr_t        iter_xp;
[443]654    xptr_t        process_xp;
655    cxy_t         txt0_cxy;
656    chdev_t     * txt0_ptr;
657    xptr_t        txt0_xp;
658    xptr_t        txt0_lock_xp;
659    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode     
[1]660
[443]661    assert( (cluster_is_undefined( cxy ) == false),
[492]662    "illegal cluster index" );
[443]663
664    // get extended pointer on root and lock for local process list in cluster
[428]665    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
[443]666    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );
[1]667
[443]668    // get pointers on TXT0 chdev
669    txt0_xp  = chdev_dir.txt_tx[0];
670    txt0_cxy = GET_CXY( txt0_xp );
671    txt0_ptr = GET_PTR( txt0_xp );
[1]672
[443]673    // get extended pointer on TXT0 lock
674    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
675
676    // get lock on local process list
677    remote_spinlock_lock( lock_xp );
678
679    // get TXT0 lock in busy waiting mode
680    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
681     
682    // display header
683    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
684    cxy , (uint32_t)hal_get_cycles() );
685
686    // loop on all processes in cluster cxy
[428]687    XLIST_FOREACH( root_xp , iter_xp )
688    {
689        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
690        process_display( process_xp );
691    }
[443]692
693    // release TXT0 lock in busy waiting mode
694    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
695
696    // release lock on local process list
697    remote_spinlock_unlock( lock_xp );
698
[428]699}  // end cluster_processes_display()
[1]700
[19]701
Note: See TracBrowser for help on using the repository browser.