source: trunk/kernel/kern/cluster.c @ 562

Last change on this file since 562 was 562, checked in by nicolas.van.phan@…, 6 years ago

Disable DQDT and remove y_max FOR GOOD

File size: 22.0 KB
RevLine 
[1]1/*
2 * cluster.c - Cluster-Manager related operations
[19]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
[437]6 *         Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH..
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[456]27#include <hal_kernel_types.h>
[1]28#include <hal_atomic.h>
29#include <hal_special.h>
[50]30#include <hal_ppm.h>
[407]31#include <remote_fifo.h>
[1]32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
[443]36#include <chdev.h>
[1]37#include <scheduler.h>
38#include <list.h>
39#include <cluster.h>
40#include <boot_info.h>
41#include <bits.h>
42#include <ppm.h>
43#include <thread.h>
44#include <kmem.h>
45#include <process.h>
46#include <dqdt.h>
[561]47#include <cluster_info.h>
[1]48
[408]49/////////////////////////////////////////////////////////////////////////////////////
[1]50// Extern global variables
[408]51/////////////////////////////////////////////////////////////////////////////////////
[1]52
[443]53extern process_t           process_zero;     // allocated in kernel_init.c file
54extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
[1]55
[443]56///////////////////////////////////////////////
[1]57error_t cluster_init( struct boot_info_s * info )
58{
[428]59    error_t         error;
60    lpid_t          lpid;     // local process_index
61    lid_t           lid;      // local core index
62    uint32_t        i;        // index in loop on external peripherals
63    boot_device_t * dev;      // pointer on external peripheral
64    uint32_t        func;     // external peripheral functionnal type
[1]65
66        cluster_t * cluster = LOCAL_CLUSTER;
67
68    // initialize cluster global parameters
[19]69        cluster->paddr_width     = info->paddr_width;
[1]70        cluster->x_width         = info->x_width;
71        cluster->y_width         = info->y_width;
72        cluster->x_size          = info->x_size;
73        cluster->y_size          = info->y_size;
74        cluster->io_cxy          = info->io_cxy;
75
[557]76    // initialize the cluster_info[][] array
77    int x;
78    int y;
79    for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++) {
80        for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++) {
81            cluster->cluster_info[x][y] = info->cluster_info[x][y];
82        }
83    }
[428]84    // initialize external peripherals channels
85    for( i = 0 ; i < info->ext_dev_nr ; i++ )
86    {
87        dev  = &info->ext_dev[i];
88        func = FUNC_FROM_TYPE( dev->type );   
89        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
90        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
91        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
92        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
93    }
94
[1]95    // initialize cluster local parameters
96        cluster->cores_nr        = info->cores_nr;
97
[19]98    // initialize the lock protecting the embedded kcm allocator
[1]99        spinlock_init( &cluster->kcm_lock );
100
[438]101#if DEBUG_CLUSTER_INIT
[433]102uint32_t cycle = (uint32_t)hal_get_cycles();
[438]103if( DEBUG_CLUSTER_INIT < cycle )
[437]104printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
105__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
[433]106#endif
[50]107
[19]108    // initialises DQDT
[562]109    cluster->dqdt_root_level = dqdt_init( info->x_size,
110                                          info->y_size,
[438]111                                          info->y_width ) - 1;
[1]112
113    // initialises embedded PPM
[50]114        error = hal_ppm_init( info );
[1]115
[50]116    if( error )
117    {
118        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
119               __FUNCTION__ , local_cxy );
120        return ENOMEM;
121    }
122
[438]123#if( DEBUG_CLUSTER_INIT & 1 )
[433]124cycle = (uint32_t)hal_get_cycles();
[438]125if( DEBUG_CLUSTER_INIT < cycle )
[437]126printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
[433]127__FUNCTION__ , local_cxy , cycle );
128#endif
[50]129
[1]130    // initialises embedded KHM
131        khm_init( &cluster->khm );
[19]132
[438]133#if( DEBUG_CLUSTER_INIT & 1 )
[457]134cycle = (uint32_t)hal_get_cycles();
[438]135if( DEBUG_CLUSTER_INIT < cycle )
[437]136printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
137__FUNCTION__ , local_cxy , hal_get_cycles() );
138#endif
[50]139
[19]140    // initialises embedded KCM
[5]141        kcm_init( &cluster->kcm , KMEM_KCM );
[1]142
[438]143#if( DEBUG_CLUSTER_INIT & 1 )
[457]144cycle = (uint32_t)hal_get_cycles();
[438]145if( DEBUG_CLUSTER_INIT < cycle )
[437]146printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
147__FUNCTION__ , local_cxy , hal_get_cycles() );
148#endif
[50]149
[296]150    // initialises all cores descriptors
[1]151        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
152        {
153                core_init( &cluster->core_tbl[lid],    // target core descriptor
154                       lid,                        // local core index
155                       info->core[lid].gid );      // gid from boot_info_t
156        }
[19]157
[438]158#if( DEBUG_CLUSTER_INIT & 1 )
[433]159cycle = (uint32_t)hal_get_cycles();
[438]160if( DEBUG_CLUSTER_INIT < cycle )
[437]161printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
[433]162__FUNCTION__ , local_cxy , cycle );
163#endif
[50]164
[440]165    // initialises RPC FIFOs
166        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
167    {
168            local_fifo_init( &cluster->rpc_fifo[lid] );
169        cluster->rpc_threads[lid] = 0;
170    }
[1]171
[438]172#if( DEBUG_CLUSTER_INIT & 1 )
[437]173cycle = (uint32_t)hal_get_cycles();
[438]174if( DEBUG_CLUSTER_INIT < cycle )
[437]175printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
[407]176__FUNCTION__ , local_cxy , hal_get_cycles() );
[437]177#endif
[50]178
[1]179    // initialise pref_tbl[] in process manager
180        spinlock_init( &cluster->pmgr.pref_lock );
181    cluster->pmgr.pref_nr = 0;
[19]182    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
[1]183    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
184    {
185        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
186    }
187
188    // initialise local_list in process manager
[23]189        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
190    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
[1]191    cluster->pmgr.local_nr = 0;
192
193    // initialise copies_lists in process manager
[101]194    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
[1]195    {
196            remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
197        cluster->pmgr.copies_nr[lpid] = 0;
198        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
[19]199    }
[1]200
[438]201#if DEBUG_CLUSTER_INIT
[433]202cycle = (uint32_t)hal_get_cycles();
[438]203if( DEBUG_CLUSTER_INIT < cycle )
[437]204printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
205__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
[433]206#endif
[50]207
[124]208    hal_fence();
[1]209
210        return 0;
211} // end cluster_init()
212
[561]213/////////////////////////////////
214cxy_t cluster_random_select( void )
215{
216    uint32_t  x_size;
217    uint32_t  y_size;
218    uint32_t  y_width;
219    uint32_t  index;
220    uint32_t  x;
221    uint32_t  y;
222
223    do {
224        x_size     = LOCAL_CLUSTER->x_size;
225        y_size     = LOCAL_CLUSTER->y_size;
226        y_width   = LOCAL_CLUSTER->y_width;
227        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
228        x         = index / y_size;
229        y         = index % y_size;
230    } while ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[x][y] ) == 0 );
231
232    return (x<<y_width) + y;
233}
234
[1]235////////////////////////////////////////
236bool_t cluster_is_undefined( cxy_t cxy )
237{
238    cluster_t * cluster = LOCAL_CLUSTER;
239
240    uint32_t y_width = cluster->y_width;
241
242    uint32_t x = cxy >> y_width;
243    uint32_t y = cxy & ((1<<y_width)-1);
244
[19]245    if( x >= cluster->x_size ) return true;
246    if( y >= cluster->y_size ) return true;
[1]247
248    return false;
249}
250
251////////////////////////////////////////////////////////////////////////////////////
252//  Cores related functions
253////////////////////////////////////////////////////////////////////////////////////
254
255/////////////////////////////////
[485]256lid_t cluster_select_local_core( void )
[1]257{
[440]258    uint32_t      min = 1000;
259    lid_t         sel = 0;
260    uint32_t      nthreads;
261    lid_t         lid;
262    scheduler_t * sched;
[1]263
264    cluster_t * cluster = LOCAL_CLUSTER;
265
266    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
267    {
[440]268        sched    = &cluster->core_tbl[lid].scheduler;
269        nthreads = sched->u_threads_nr + sched->k_threads_nr;
270
271        if( nthreads < min )
[1]272        {
[440]273            min = nthreads;
[1]274            sel = lid;
275        }
[19]276    }
[1]277    return sel;
278}
279
280////////////////////////////////////////////////////////////////////////////////////
[428]281//  Process related functions
[1]282////////////////////////////////////////////////////////////////////////////////////
283
[433]284
285//////////////////////////////////////////////////////
[443]286xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
287                                            pid_t pid )
288{
289    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
290    xptr_t      lock_xp;       // xptr on lock protecting this list
291    xptr_t      iter_xp;       // iterator
292    xptr_t      current_xp;    // xptr on current process descriptor
293    bool_t      found;
294
295    cluster_t * cluster = LOCAL_CLUSTER;
296
297    // get owner cluster and lpid
298    cxy_t   owner_cxy = CXY_FROM_PID( pid );
299    lpid_t  lpid      = LPID_FROM_PID( pid );
300
301    // get lock & root of list of copies from owner cluster
302    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
303    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
304
305    // take the lock protecting the list of processes
306    remote_spinlock_lock( lock_xp );
307
308    // scan list of processes
309    found = false;
310    XLIST_FOREACH( root_xp , iter_xp )
311    {
312        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
313
314        if( GET_CXY( current_xp ) == cxy )
315        {
316            found = true;
317            break;
318        }
319    }
320
321    // release the lock protecting the list of processes
322    remote_spinlock_unlock( lock_xp );
323
324    // return extended pointer on process descriptor in owner cluster
325    if( found ) return current_xp;
326    else        return XPTR_NULL;
327
328}  // end cluster_get_process_from_pid_in_cxy()
329
330
331//////////////////////////////////////////////////////
[433]332xptr_t cluster_get_owner_process_from_pid( pid_t pid )
333{
334    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
[436]335    xptr_t      lock_xp;       // xptr on lock protecting this list
[433]336    xptr_t      iter_xp;       // iterator
337    xptr_t      current_xp;    // xptr on current process descriptor
338    process_t * current_ptr;   // local pointer on current process
339    pid_t       current_pid;   // current process identifier
340    bool_t      found;
341
342    cluster_t * cluster = LOCAL_CLUSTER;
343
344    // get owner cluster and lpid
345    cxy_t  owner_cxy = CXY_FROM_PID( pid );
346
347    // get lock & root of list of process in owner cluster
348    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
349    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
350
351    // take the lock protecting the list of processes
352    remote_spinlock_lock( lock_xp );
353
354    // scan list of processes in owner cluster
355    found = false;
356    XLIST_FOREACH( root_xp , iter_xp )
357    {
358        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
359        current_ptr = GET_PTR( current_xp );
360        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
361
362        if( current_pid == pid )
363        {
364            found = true;
365            break;
366        }
367    }
368
369    // release the lock protecting the list of processes
370    remote_spinlock_unlock( lock_xp );
371
372    // return extended pointer on process descriptor in owner cluster
373    if( found ) return current_xp;
374    else        return XPTR_NULL;
375
[436]376}  // end cluster_get_owner_process_from_pid()
377
[443]378
[1]379//////////////////////////////////////////////////////////
380xptr_t cluster_get_reference_process_from_pid( pid_t pid )
[19]381{
[23]382    xptr_t ref_xp;   // extended pointer on reference process descriptor
[1]383
384    cluster_t * cluster = LOCAL_CLUSTER;
385
386    // get owner cluster and lpid
387    cxy_t  owner_cxy = CXY_FROM_PID( pid );
388    lpid_t lpid      = LPID_FROM_PID( pid );
389
[19]390    // Check valid PID
[23]391    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
[1]392
393    if( local_cxy == owner_cxy )   // local cluster is owner cluster
[19]394    {
[23]395        ref_xp = cluster->pmgr.pref_tbl[lpid];
[1]396    }
397    else                              // use a remote_lwd to access owner cluster
398    {
[23]399        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
[1]400    }
401
[23]402    return ref_xp;
[1]403}
404
[416]405///////////////////////////////////////////////
406error_t cluster_pid_alloc( process_t * process,
407                           pid_t     * pid )
[1]408{
409    lpid_t      lpid;
410    bool_t      found;
411
[440]412#if DEBUG_CLUSTER_PID_ALLOC
413uint32_t cycle = (uint32_t)hal_get_cycles();
414if( DEBUG_CLUSTER_PID_ALLOC < cycle )
415printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
416__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
417#endif
418
[1]419    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
420
421    // get the process manager lock
422    spinlock_lock( &pm->pref_lock );
423
424    // search an empty slot
425    found = false;
426    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
427    {
428        if( pm->pref_tbl[lpid] == XPTR_NULL )
429        {
430            found = true;
431            break;
432        }
433    }
434
435    if( found )
436    {
437        // register process in pref_tbl[]
[416]438        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
[1]439        pm->pref_nr++;
440
441        // returns pid
442        *pid = PID( local_cxy , lpid );
443
[416]444        // release the processs_manager lock
445        spinlock_unlock( &pm->pref_lock );
446
447        return 0;
[1]448    }
449    else
450    {
[416]451        // release the processs_manager lock
452        spinlock_unlock( &pm->pref_lock );
453
454        return -1;
[19]455    }
[1]456
[440]457#if DEBUG_CLUSTER_PID_ALLOC
458cycle = (uint32_t)hal_get_cycles();
459if( DEBUG_CLUSTER_PID_ALLOC < cycle )
460printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
461__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
462#endif
463
[1]464} // end cluster_pid_alloc()
465
466/////////////////////////////////////
467void cluster_pid_release( pid_t pid )
468{
[440]469
470#if DEBUG_CLUSTER_PID_RELEASE
471uint32_t cycle = (uint32_t)hal_get_cycles();
472if( DEBUG_CLUSTER_PID_RELEASE < cycle )
473printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
474__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
475#endif
476
[1]477    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
478    lpid_t lpid       = LPID_FROM_PID( pid );
479
[409]480    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
481
[440]482    // check lpid
[492]483    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
[440]484    "illegal LPID = %d" , lpid );
[1]485
[440]486    // check owner cluster
[492]487    assert( (owner_cxy == local_cxy) ,
[440]488    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
489
[1]490    // get the process manager lock
491    spinlock_lock( &pm->pref_lock );
492
493    // remove process from pref_tbl[]
494    pm->pref_tbl[lpid] = XPTR_NULL;
495    pm->pref_nr--;
496
497    // release the processs_manager lock
498    spinlock_unlock( &pm->pref_lock );
499
[440]500#if DEBUG_CLUSTER_PID_RELEASE
501cycle = (uint32_t)hal_get_cycles();
502if( DEBUG_CLUSTER_PID_RELEASE < cycle )
503printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
504__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
505#endif
506
[1]507} // end cluster_pid_release()
508
509///////////////////////////////////////////////////////////
510process_t * cluster_get_local_process_from_pid( pid_t pid )
511{
[23]512    xptr_t         process_xp;
513    process_t    * process_ptr;
514    xptr_t         root_xp;
515    xptr_t         iter_xp;
516    bool_t         found;
[19]517
[23]518    found   = false;
519    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
520
521    XLIST_FOREACH( root_xp , iter_xp )
[1]522    {
[23]523        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
524        process_ptr = (process_t *)GET_PTR( process_xp );
525        if( process_ptr->pid == pid )
[1]526        {
[23]527            found = true;
[1]528            break;
529        }
530    }
531
[23]532    if (found ) return process_ptr;
533    else        return NULL;
534
[1]535}  // end cluster_get_local_process_from_pid()
536
537//////////////////////////////////////////////////////
538void cluster_process_local_link( process_t * process )
539{
[443]540    reg_t    save_sr;
541
[1]542    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
543
[443]544    // get extended pointers on local process list root & lock
545    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
546    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
547
[1]548    // get lock protecting the process manager local list
[443]549    remote_spinlock_lock_busy( lock_xp , &save_sr );
[1]550
[443]551    // register process in local list
552    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
[1]553    pm->local_nr++;
554
555    // release lock protecting the process manager local list
[443]556    remote_spinlock_unlock_busy( lock_xp , save_sr );
[1]557}
558
559////////////////////////////////////////////////////////
560void cluster_process_local_unlink( process_t * process )
561{
[443]562    reg_t save_sr;
563
[1]564    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
565
[443]566    // get extended pointers on local process list lock
567    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
568
[1]569    // get lock protecting the process manager local list
[443]570    remote_spinlock_lock_busy( lock_xp , &save_sr );
[1]571
[443]572    // remove process from local list
[23]573    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
[1]574    pm->local_nr--;
575
576    // release lock protecting the process manager local list
[443]577    remote_spinlock_unlock_busy( lock_xp , save_sr );
[1]578}
579
580///////////////////////////////////////////////////////
581void cluster_process_copies_link( process_t * process )
582{
[436]583    reg_t    irq_state;
[1]584    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
585
[438]586#if DEBUG_CLUSTER_PROCESS_COPIES
[436]587uint32_t cycle = (uint32_t)hal_get_cycles();
[438]588if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]589printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
590__FUNCTION__ , local_cxy , process , cycle );
591#endif
592
[1]593    // get owner cluster identifier CXY and process LPID
594    pid_t    pid        = process->pid;
595    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
596    lpid_t   lpid       = LPID_FROM_PID( pid );
597
598    // get extended pointer on lock protecting copies_list[lpid]
[120]599    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]600
601    // get extended pointer on the copies_list[lpid] root
[120]602    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );
[1]603
604    // get extended pointer on the local copies_list entry
605    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
606
[19]607    // get lock protecting copies_list[lpid]
[407]608    remote_spinlock_lock_busy( copies_lock , &irq_state );
[1]609
[436]610    // add copy to copies_list
[1]611    xlist_add_first( copies_root , copies_entry );
612    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
613
[19]614    // release lock protecting copies_list[lpid]
[407]615    remote_spinlock_unlock_busy( copies_lock , irq_state );
[1]616
[438]617#if DEBUG_CLUSTER_PROCESS_COPIES
[436]618cycle = (uint32_t)hal_get_cycles();
[438]619if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]620printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
621__FUNCTION__ , local_cxy , process , cycle );
622#endif
623
624}  // end cluster_process_copies_link()
625
[1]626/////////////////////////////////////////////////////////
627void cluster_process_copies_unlink( process_t * process )
628{
[407]629    uint32_t irq_state;
[1]630    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
631
[438]632#if DEBUG_CLUSTER_PROCESS_COPIES
[436]633uint32_t cycle = (uint32_t)hal_get_cycles();
[438]634if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]635printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
636__FUNCTION__ , local_cxy , process , cycle );
637#endif
638
[1]639    // get owner cluster identifier CXY and process LPID
640    pid_t    pid        = process->pid;
641    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
642    lpid_t   lpid       = LPID_FROM_PID( pid );
643
644    // get extended pointer on lock protecting copies_list[lpid]
[436]645    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]646
647    // get extended pointer on the local copies_list entry
648    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
649
[19]650    // get lock protecting copies_list[lpid]
[407]651    remote_spinlock_lock_busy( copies_lock , &irq_state );
[1]652
[436]653    // remove copy from copies_list
[1]654    xlist_unlink( copies_entry );
655    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
656
[19]657    // release lock protecting copies_list[lpid]
[407]658    remote_spinlock_unlock_busy( copies_lock , irq_state );
[1]659
[438]660#if DEBUG_CLUSTER_PROCESS_COPIES
[436]661cycle = (uint32_t)hal_get_cycles();
[438]662if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]663printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
664__FUNCTION__ , local_cxy , process , cycle );
665#endif
666
667}  // end cluster_process_copies_unlink()
668
[428]669///////////////////////////////////////////
670void cluster_processes_display( cxy_t cxy )
[1]671{
[428]672    xptr_t        root_xp;
[443]673    xptr_t        lock_xp;
[428]674    xptr_t        iter_xp;
[443]675    xptr_t        process_xp;
676    cxy_t         txt0_cxy;
677    chdev_t     * txt0_ptr;
678    xptr_t        txt0_xp;
679    xptr_t        txt0_lock_xp;
680    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode     
[1]681
[443]682    assert( (cluster_is_undefined( cxy ) == false),
[492]683    "illegal cluster index" );
[443]684
685    // get extended pointer on root and lock for local process list in cluster
[428]686    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
[443]687    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );
[1]688
[443]689    // get pointers on TXT0 chdev
690    txt0_xp  = chdev_dir.txt_tx[0];
691    txt0_cxy = GET_CXY( txt0_xp );
692    txt0_ptr = GET_PTR( txt0_xp );
[1]693
[443]694    // get extended pointer on TXT0 lock
695    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
696
697    // get lock on local process list
698    remote_spinlock_lock( lock_xp );
699
700    // get TXT0 lock in busy waiting mode
701    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
702     
703    // display header
704    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
705    cxy , (uint32_t)hal_get_cycles() );
706
707    // loop on all processes in cluster cxy
[428]708    XLIST_FOREACH( root_xp , iter_xp )
709    {
710        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
711        process_display( process_xp );
712    }
[443]713
714    // release TXT0 lock in busy waiting mode
715    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
716
717    // release lock on local process list
718    remote_spinlock_unlock( lock_xp );
719
[428]720}  // end cluster_processes_display()
[1]721
[19]722
Note: See TracBrowser for help on using the repository browser.