source: trunk/kernel/kern/cluster.c @ 511

Last change on this file since 511 was 492, checked in by viala@…, 6 years ago

Refactoring assert calling to conform with new assert macro.

Made with this command for the general case.
find ./kernel/ hal/ -name "*.c" | xargs sed -i -e '/assert(/ s/,[ ]*FUNCTION[ ]*,/,/'

And some done by hand.

File size: 21.2 KB
RevLine 
[1]1/*
2 * cluster.c - Cluster-Manager related operations
[19]3 *
[1]4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
[437]6 *         Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH..
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[456]27#include <hal_kernel_types.h>
[1]28#include <hal_atomic.h>
29#include <hal_special.h>
[50]30#include <hal_ppm.h>
[407]31#include <remote_fifo.h>
[1]32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
[443]36#include <chdev.h>
[1]37#include <scheduler.h>
38#include <list.h>
39#include <cluster.h>
40#include <boot_info.h>
41#include <bits.h>
42#include <ppm.h>
43#include <thread.h>
44#include <kmem.h>
45#include <process.h>
46#include <dqdt.h>
47
[408]48/////////////////////////////////////////////////////////////////////////////////////
[1]49// Extern global variables
[408]50/////////////////////////////////////////////////////////////////////////////////////
[1]51
[443]52extern process_t           process_zero;     // allocated in kernel_init.c file
53extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
[1]54
///////////////////////////////////////////////////////////////////////////////////
// Initialise the local cluster manager from the <info> structure built by the
// bootloader: global topology parameters, external peripheral channel counts,
// the embedded memory allocators (PPM / KHM / KCM), the DQDT, the core
// descriptors, the per-core RPC FIFOs, and the local process manager.
// Called once per cluster during kernel initialisation.
// Returns 0 on success / ENOMEM if the PPM cannot be initialised.
///////////////////////////////////////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process index
    lid_t           lid;      // local core index
    uint32_t        i;        // index in loop on external peripherals
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters from boot_info
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->io_cxy          = info->io_cxy;

    // initialize external peripherals channels counts
    // (one assignment per functional type found in boot_info)
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded kcm allocator
    spinlock_init( &cluster->kcm_lock );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // initialises DQDT (root level is derived from the mesh dimensions)
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width ) - 1;

    // initialises embedded PPM (physical pages manager)
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises embedded KHM (kernel heap manager)
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises embedded KCM (kernel cache manager)
    kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises all cores descriptors
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises RPC FIFOs and RPC thread counters (one per core)
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        local_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialise pref_tbl[] in process manager :
    // slot 0 is pre-allocated to the kernel process_zero
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialise local_list in process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialise copies_lists in process manager (one list per LPID)
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    hal_fence();

    return 0;
} // end cluster_init()
203
204////////////////////////////////////////
205bool_t cluster_is_undefined( cxy_t cxy )
206{
207    cluster_t * cluster = LOCAL_CLUSTER;
208
209    uint32_t y_width = cluster->y_width;
210
211    uint32_t x = cxy >> y_width;
212    uint32_t y = cxy & ((1<<y_width)-1);
213
[19]214    if( x >= cluster->x_size ) return true;
215    if( y >= cluster->y_size ) return true;
[1]216
217    return false;
218}
219
220////////////////////////////////////////////////////////////////////////////////////
221//  Cores related functions
222////////////////////////////////////////////////////////////////////////////////////
223
224/////////////////////////////////
[485]225lid_t cluster_select_local_core( void )
[1]226{
[440]227    uint32_t      min = 1000;
228    lid_t         sel = 0;
229    uint32_t      nthreads;
230    lid_t         lid;
231    scheduler_t * sched;
[1]232
233    cluster_t * cluster = LOCAL_CLUSTER;
234
235    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
236    {
[440]237        sched    = &cluster->core_tbl[lid].scheduler;
238        nthreads = sched->u_threads_nr + sched->k_threads_nr;
239
240        if( nthreads < min )
[1]241        {
[440]242            min = nthreads;
[1]243            sel = lid;
244        }
[19]245    }
[1]246    return sel;
247}
248
249////////////////////////////////////////////////////////////////////////////////////
[428]250//  Process related functions
[1]251////////////////////////////////////////////////////////////////////////////////////
252
[433]253
//////////////////////////////////////////////////////
// Returns an extended pointer on the copy of process <pid> located in
// cluster <cxy>, or XPTR_NULL if no such copy exists.
// It scans the copies_list rooted in the owner cluster (the list linking
// all copies of a given process), under the lock protecting this list.
// NOTE: the pmgr field offsets are identical in all clusters, so the local
// cluster pointer can safely be combined with the remote owner_cxy.
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of copies in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of copies
    remote_spinlock_lock( lock_xp );

    // scan list of copies, looking for the one located in cluster <cxy>
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of copies
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in cluster <cxy>
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()
298
299
//////////////////////////////////////////////////////
// Returns an extended pointer on the copy of process <pid> located in its
// owner cluster, or XPTR_NULL if not found.
// It scans the list of all processes attached to the owner cluster,
// reading each candidate PID with a remote access, under the lock
// protecting this list.
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster from PID
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of process in owner cluster
    // (pmgr field offsets are identical in all clusters)
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );
        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()
346
[443]347
[1]348//////////////////////////////////////////////////////////
349xptr_t cluster_get_reference_process_from_pid( pid_t pid )
[19]350{
[23]351    xptr_t ref_xp;   // extended pointer on reference process descriptor
[1]352
353    cluster_t * cluster = LOCAL_CLUSTER;
354
355    // get owner cluster and lpid
356    cxy_t  owner_cxy = CXY_FROM_PID( pid );
357    lpid_t lpid      = LPID_FROM_PID( pid );
358
[19]359    // Check valid PID
[23]360    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
[1]361
362    if( local_cxy == owner_cxy )   // local cluster is owner cluster
[19]363    {
[23]364        ref_xp = cluster->pmgr.pref_tbl[lpid];
[1]365    }
366    else                              // use a remote_lwd to access owner cluster
367    {
[23]368        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
[1]369    }
370
[23]371    return ref_xp;
[1]372}
373
[416]374///////////////////////////////////////////////
375error_t cluster_pid_alloc( process_t * process,
376                           pid_t     * pid )
[1]377{
378    lpid_t      lpid;
379    bool_t      found;
380
[440]381#if DEBUG_CLUSTER_PID_ALLOC
382uint32_t cycle = (uint32_t)hal_get_cycles();
383if( DEBUG_CLUSTER_PID_ALLOC < cycle )
384printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
385__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
386#endif
387
[1]388    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
389
390    // get the process manager lock
391    spinlock_lock( &pm->pref_lock );
392
393    // search an empty slot
394    found = false;
395    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
396    {
397        if( pm->pref_tbl[lpid] == XPTR_NULL )
398        {
399            found = true;
400            break;
401        }
402    }
403
404    if( found )
405    {
406        // register process in pref_tbl[]
[416]407        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
[1]408        pm->pref_nr++;
409
410        // returns pid
411        *pid = PID( local_cxy , lpid );
412
[416]413        // release the processs_manager lock
414        spinlock_unlock( &pm->pref_lock );
415
416        return 0;
[1]417    }
418    else
419    {
[416]420        // release the processs_manager lock
421        spinlock_unlock( &pm->pref_lock );
422
423        return -1;
[19]424    }
[1]425
[440]426#if DEBUG_CLUSTER_PID_ALLOC
427cycle = (uint32_t)hal_get_cycles();
428if( DEBUG_CLUSTER_PID_ALLOC < cycle )
429printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
430__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
431#endif
432
[1]433} // end cluster_pid_alloc()
434
/////////////////////////////////////
// Release the pref_tbl[] slot allocated to process <pid> in the local
// process manager. Must be called in the owner cluster, with a legal
// LPID (both conditions checked by assert).
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
#endif

    // decode owner cluster and local process index from PID
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) ,
    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the processs_manager lock
    spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

} // end cluster_pid_release()
477
478///////////////////////////////////////////////////////////
479process_t * cluster_get_local_process_from_pid( pid_t pid )
480{
[23]481    xptr_t         process_xp;
482    process_t    * process_ptr;
483    xptr_t         root_xp;
484    xptr_t         iter_xp;
485    bool_t         found;
[19]486
[23]487    found   = false;
488    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
489
490    XLIST_FOREACH( root_xp , iter_xp )
[1]491    {
[23]492        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
493        process_ptr = (process_t *)GET_PTR( process_xp );
494        if( process_ptr->pid == pid )
[1]495        {
[23]496            found = true;
[1]497            break;
498        }
499    }
500
[23]501    if (found ) return process_ptr;
502    else        return NULL;
503
[1]504}  // end cluster_get_local_process_from_pid()
505
//////////////////////////////////////////////////////
// Register <process> in the list of all processes attached to the local
// cluster, and increment the local process counter.
// The list lock is taken in busy-waiting mode, saving / restoring the
// status register around the critical section.
void cluster_process_local_link( process_t * process )
{
    reg_t    save_sr;   // saved status register for busy-waiting lock

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list root & lock
    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // register process in local list
    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}
527
////////////////////////////////////////////////////////
// Remove <process> from the list of all processes attached to the local
// cluster, and decrement the local process counter.
// The list lock is taken in busy-waiting mode, saving / restoring the
// status register around the critical section.
void cluster_process_local_unlink( process_t * process )
{
    reg_t save_sr;   // saved status register for busy-waiting lock

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list lock
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // remove process from local list
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}
548
///////////////////////////////////////////////////////
// Register the local <process> descriptor in the copies_list of its owner
// cluster (the list linking all copies of a given process across the
// clusters), and atomically increment the remote copies counter.
// The list lock is taken in busy-waiting mode, with IRQ state saved.
void cluster_process_copies_link( process_t * process )
{
    reg_t    irq_state;   // saved IRQ / status state for busy-waiting lock
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    // (pmgr field offsets are identical in all clusters)
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // add copy to copies_list and update the remote counter
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()
594
[1]595/////////////////////////////////////////////////////////
596void cluster_process_copies_unlink( process_t * process )
597{
[407]598    uint32_t irq_state;
[1]599    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
600
[438]601#if DEBUG_CLUSTER_PROCESS_COPIES
[436]602uint32_t cycle = (uint32_t)hal_get_cycles();
[438]603if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]604printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
605__FUNCTION__ , local_cxy , process , cycle );
606#endif
607
[1]608    // get owner cluster identifier CXY and process LPID
609    pid_t    pid        = process->pid;
610    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
611    lpid_t   lpid       = LPID_FROM_PID( pid );
612
613    // get extended pointer on lock protecting copies_list[lpid]
[436]614    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]615
616    // get extended pointer on the local copies_list entry
617    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
618
[19]619    // get lock protecting copies_list[lpid]
[407]620    remote_spinlock_lock_busy( copies_lock , &irq_state );
[1]621
[436]622    // remove copy from copies_list
[1]623    xlist_unlink( copies_entry );
624    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
625
[19]626    // release lock protecting copies_list[lpid]
[407]627    remote_spinlock_unlock_busy( copies_lock , irq_state );
[1]628
[438]629#if DEBUG_CLUSTER_PROCESS_COPIES
[436]630cycle = (uint32_t)hal_get_cycles();
[438]631if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[436]632printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
633__FUNCTION__ , local_cxy , process , cycle );
634#endif
635
636}  // end cluster_process_copies_unlink()
637
///////////////////////////////////////////
// Display on the TXT0 terminal all processes registered in cluster <cxy>.
// Takes the remote process list lock first, then the TXT0 lock in
// busy-waiting mode, so that the display of one cluster is not
// interleaved with output from other clusters.
// NOTE(review): any other code path taking both locks must use the same
// order (process list before TXT0) to avoid deadlock — confirm callers.
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;        // xptr on local process list root
    xptr_t        lock_xp;        // xptr on local process list lock
    xptr_t        iter_xp;        // iterator
    xptr_t        process_xp;     // xptr on current process descriptor
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode

    // check cluster coordinates
    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointer on root and lock for local process list in cluster
    // (pmgr field offsets are identical in all clusters)
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_spinlock_lock( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );

    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );

    // release lock on local process list
    remote_spinlock_unlock( lock_xp );

}  // end cluster_processes_display()
[1]690
[19]691
Note: See TracBrowser for help on using the repository browser.