source: trunk/kernel/kern/cluster.c

Last change on this file was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 24.0 KB
RevLine 
[1]1/*
2 * cluster.c - Cluster-Manager related operations
[19]3 *
[683]4 * Authors Ghassan Almaless       (2008,2009,2010,2011,2012)
[1]5 *         Mohamed Lamine Karaoui (2015)
[683]6 *         Alain Greiner          (2016,2017,2018,2019,2020)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[456]27#include <hal_kernel_types.h>
[1]28#include <hal_atomic.h>
29#include <hal_special.h>
[50]30#include <hal_ppm.h>
[564]31#include <hal_macros.h>
[407]32#include <remote_fifo.h>
[1]33#include <printk.h>
34#include <errno.h>
[564]35#include <queuelock.h>
[1]36#include <core.h>
[443]37#include <chdev.h>
[1]38#include <scheduler.h>
39#include <list.h>
40#include <cluster.h>
41#include <boot_info.h>
42#include <bits.h>
43#include <ppm.h>
44#include <thread.h>
45#include <kmem.h>
46#include <process.h>
47#include <dqdt.h>
48
[408]49/////////////////////////////////////////////////////////////////////////////////////
[1]50// Extern global variables
[408]51/////////////////////////////////////////////////////////////////////////////////////
[1]52
[564]53extern process_t           process_zero;     // allocated in kernel_init.c
54extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]55
[564]56
57
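// This function initialises the local cluster manager from the boot_info structure:
// global parameters, the cluster_info[][] array, the number of channels of the
// external peripherals, and the number of local cores.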
58///////////////////////////////////////////////////
59void cluster_info_init( struct boot_info_s * info )
[1]60{
[428]61    boot_device_t * dev;      // pointer on external peripheral
62    uint32_t        func;     // external peripheral functional type
[564]63    uint32_t        x;
64    uint32_t        y;
65    uint32_t        i;   
[1]66
67        cluster_t * cluster = LOCAL_CLUSTER;
68
69    // initialize cluster global parameters
[19]70        cluster->paddr_width     = info->paddr_width;
[1]71        cluster->x_width         = info->x_width;
72        cluster->y_width         = info->y_width;
73        cluster->x_size          = info->x_size;
74        cluster->y_size          = info->y_size;
75        cluster->io_cxy          = info->io_cxy;
[683]76        cluster->sys_clk         = info->sys_clk;
[1]77
[557]78    // initialize the cluster_info[][] array
[637]79    for( x = 0 ; x < CONFIG_MAX_CLUSTERS_X ; x++ ) 
[564]80    {
[637]81        for( y = 0; y < CONFIG_MAX_CLUSTERS_Y ; y++ ) 
[564]82        {
[557]83            cluster->cluster_info[x][y] = info->cluster_info[x][y];
84        }
85    }
[564]86
[428]87    // initialize external peripherals channels
88    for( i = 0 ; i < info->ext_dev_nr ; i++ )
89    {
90        dev  = &info->ext_dev[i];
91        func = FUNC_FROM_TYPE( dev->type );   
92        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
93        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
94        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
95        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
96    }
97
[637]98    // initialize number of local cores
[564]99        cluster->cores_nr  = info->cores_nr;
[1]100
[564]101}  // end cluster_info_init()
102
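// This function displays on the TXT0 terminal the number of cores registered in
// the cluster_info[][] array of the target cluster <cxy>, for all (x,y) clusters.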
[637]103//////////////////////////////////////
104void cluster_info_display( cxy_t cxy )
105{
106    uint32_t  x;
107    uint32_t  y;
108    uint32_t  ncores;
109
110    cluster_t * cluster = LOCAL_CLUSTER;
111
112    // get x_size & y_size from target cluster
113    uint32_t  x_size = hal_remote_l32( XPTR( cxy , &cluster->x_size ) );
114    uint32_t  y_size = hal_remote_l32( XPTR( cxy , &cluster->y_size ) );
115
116    // get pointers on TXT0 chdev
117    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
118    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
119    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
120
121    // get extended pointer on remote TXT0 lock
122    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
123
124    // get TXT0 lock
125    remote_busylock_acquire( lock_xp );
126
127    nolock_printk("\n***** cluster_info in cluster %x / x_size %d / y_size %d\n",
128    cxy, x_size, y_size );
129 
130    for( x = 0 ; x < x_size ; x++ )
131    {
132        for( y = 0 ; y < y_size ; y++ )
133        {
134            ncores = (uint32_t)hal_remote_lb( XPTR( cxy , &cluster->cluster_info[x][y] ) );
135            nolock_printk(" - ncores[%d][%d] = %d\n", x, y, ncores );
136        }
137    }
138
139    // release TXT0 lock
140    remote_busylock_release( lock_xp );
141
142}  // end cluster_info_display()
143
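// This function completes the cluster manager initialisation: the embedded PPM,
// the KCM allocators, the core descriptors, the RPC FIFOs, and the local process
// manager structures (pref_tbl[], local list, copies lists).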
[564]144/////////////////////////////////////////////////////////
145error_t cluster_manager_init( struct boot_info_s * info )
146{
147    error_t         error;
148    lpid_t          lpid;     // local process_index
149    lid_t           lid;      // local core index
150
151        cluster_t * cluster = LOCAL_CLUSTER;
152
[438]153#if DEBUG_CLUSTER_INIT
[593]154uint32_t   cycle = (uint32_t)hal_get_cycles();
155thread_t * this  = CURRENT_THREAD;
[438]156if( DEBUG_CLUSTER_INIT < cycle )
[593]157printk("\n[%s] thread[%x,%x] enters for cluster %x / cycle %d\n",
158__FUNCTION__, this->process->pid, this->trdid, local_cxy , cycle );
[433]159#endif
[50]160
[637]161#if (DEBUG_CLUSTER_INIT & 1)
162cluster_info_display( local_cxy );
163#endif
164
[1]165    // initialises embedded PPM
[50]166        error = hal_ppm_init( info );
[1]167
[50]168    if( error )
169    {
170        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
171               __FUNCTION__ , local_cxy );
172        return ENOMEM;
173    }
174
[438]175#if( DEBUG_CLUSTER_INIT & 1 )
[433]176cycle = (uint32_t)hal_get_cycles();
[438]177if( DEBUG_CLUSTER_INIT < cycle )
[593]178printk("\n[%s] PPM initialized in cluster %x / cycle %d\n",
[433]179__FUNCTION__ , local_cxy , cycle );
180#endif
[50]181
[19]182    // initialises embedded KCM
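    // (one kcm_init() call per allocator, with values 6 to 11 passed as second
    //  argument : presumably one allocator per supported object size order)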
[635]183    uint32_t  i;
184    for( i = 0 ; i < 6 ; i++ ) kcm_init( &cluster->kcm[i] , i+6 );
[1]185
[438]186#if( DEBUG_CLUSTER_INIT & 1 )
[457]187cycle = (uint32_t)hal_get_cycles();
[438]188if( DEBUG_CLUSTER_INIT < cycle )
[635]189printk("\n[%s] KCM[6:11] initialized in cluster %x at cycle %d\n",
[437]190__FUNCTION__ , local_cxy , cycle );
191#endif
[50]192
[296]193    // initialises all core descriptors
[1]194        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
195        {
196                core_init( &cluster->core_tbl[lid],    // target core descriptor
197                       lid,                        // local core index
198                       info->core[lid].gid );      // gid from boot_info_t
199        }
[19]200
[438]201#if( DEBUG_CLUSTER_INIT & 1 )
[433]202cycle = (uint32_t)hal_get_cycles();
[438]203if( DEBUG_CLUSTER_INIT < cycle )
[593]204printk("\n[%s] cores initialized in cluster %x / cycle %d\n",
[433]205__FUNCTION__ , local_cxy , cycle );
206#endif
[50]207
[440]208    // initialises RPC FIFOs
209        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
210    {
[564]211            remote_fifo_init( &cluster->rpc_fifo[lid] );
[440]212        cluster->rpc_threads[lid] = 0;
213    }
[1]214
[438]215#if( DEBUG_CLUSTER_INIT & 1 )
[437]216cycle = (uint32_t)hal_get_cycles();
[438]217if( DEBUG_CLUSTER_INIT < cycle )
[593]218printk("\n[%s] RPC FIFOs initialized in cluster %x at cycle %d\n",
[407]219__FUNCTION__ , local_cxy , cycle );
[437]220#endif
[50]221
[1]222    // initialise pref_tbl[] in process manager
[564]223        queuelock_init( &cluster->pmgr.pref_lock , LOCK_CLUSTER_PREFTBL );
[1]224    cluster->pmgr.pref_nr = 0;
[19]225    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
[580]226    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
[1]227    {
228        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
229    }
230
231    // initialise local_list in process manager
[23]232    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
[1]233    cluster->pmgr.local_nr = 0;
[564]234        remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) ,
235                           LOCK_CLUSTER_LOCALS );
[1]236
237    // initialise copies_lists in process manager
[101]238    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
[1]239    {
240        cluster->pmgr.copies_nr[lpid] = 0;
241        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
[564]242            remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ),
243                               LOCK_CLUSTER_COPIES );
[19]244    }
[1]245
[438]246#if DEBUG_CLUSTER_INIT
[433]247cycle = (uint32_t)hal_get_cycles();
[438]248if( DEBUG_CLUSTER_INIT < cycle )
[593]249printk("\n[%s] thread[%x,%x] exit for cluster %x / cycle %d\n",
250__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
[433]251#endif
[50]252
[124]253    hal_fence();
[1]254
255        return 0;
[564]256} // end cluster_manager_init()
[1]257
[564]258///////////////////////////////////
[561]259cxy_t cluster_random_select( void )
260{
261    uint32_t  index;
[564]262    uint32_t  x;   
[561]263    uint32_t  y;
[564]264    cxy_t     cxy;
[561]265
[564]266    uint32_t  x_size    = LOCAL_CLUSTER->x_size;
267    uint32_t  y_size    = LOCAL_CLUSTER->y_size;
268
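    // build a pseudo-random cluster index from the current cycle counter and the
    // core gid, and retry until the selected cluster is active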
269    do 
270    {
[561]271        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
272        x         = index / y_size;
273        y         = index % y_size;
[564]274        cxy       = HAL_CXY_FROM_XY( x , y );
275    }
276    while ( cluster_is_active( cxy ) == false );
[561]277
[564]278    return ( cxy );
[561]279}
280
[637]281/////////////////////////////////////////////
282inline bool_t cluster_is_active ( cxy_t cxy )
[1]283{
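    // the cluster_info[x][y] entry registers the number of cores in cluster (x,y) :
    // a non zero value means the cluster exists and is active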
[564]284    uint32_t x = HAL_X_FROM_CXY( cxy );
285    uint32_t y = HAL_Y_FROM_CXY( cxy );
286
287    return ( LOCAL_CLUSTER->cluster_info[x][y] != 0 );
288}
289
[1]290////////////////////////////////////////////////////////////////////////////////////
291//  Cores related functions
292////////////////////////////////////////////////////////////////////////////////////
293
[637]294/////////////////////////////////////////////
295lid_t cluster_select_local_core( cxy_t  cxy )
[1]296{
[637]297    uint32_t      min = 1000000;
[440]298    lid_t         sel = 0;
299    uint32_t      nthreads;
300    lid_t         lid;
301    scheduler_t * sched;
[637]302    cluster_t   * cluster = LOCAL_CLUSTER;
303    uint32_t      ncores = hal_remote_l32( XPTR( cxy , &cluster->cores_nr ) );
[1]304
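    // scan the schedulers of all cores in the target cluster, and select the core
    // currently running the smallest total number of threads (user + kernel)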
[637]305    for( lid = 0 ; lid < ncores ; lid++ )
[1]306    {
[637]307        sched  = &cluster->core_tbl[lid].scheduler;
[440]308
[637]309        nthreads = hal_remote_l32( XPTR( cxy , &sched->u_threads_nr ) ) +
310                   hal_remote_l32( XPTR( cxy , &sched->k_threads_nr ) );
311
[440]312        if( nthreads < min )
[1]313        {
[440]314            min = nthreads;
[1]315            sel = lid;
316        }
[19]317    }
[1]318    return sel;
319}
320
321////////////////////////////////////////////////////////////////////////////////////
[428]322//  Process related functions
[1]323////////////////////////////////////////////////////////////////////////////////////
324
[433]325
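// This function returns an extended pointer on the copy of process <pid> located
// in cluster <cxy>. It scans the copies list rooted in the owner cluster, and
// returns XPTR_NULL if no copy is registered in cluster <cxy>.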
326//////////////////////////////////////////////////////
[443]327xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
328                                            pid_t pid )
329{
330    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
331    xptr_t      lock_xp;       // xptr on lock protecting this list
332    xptr_t      iter_xp;       // iterator
333    xptr_t      current_xp;    // xptr on current process descriptor
334    bool_t      found;
335
336    cluster_t * cluster = LOCAL_CLUSTER;
337
338    // get owner cluster and lpid
339    cxy_t   owner_cxy = CXY_FROM_PID( pid );
340    lpid_t  lpid      = LPID_FROM_PID( pid );
341
342    // get lock & root of list of copies from owner cluster
343    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
344    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
345
346    // take the lock protecting the list of processes
[564]347    remote_queuelock_acquire( lock_xp );
[443]348
349    // scan list of processes
350    found = false;
351    XLIST_FOREACH( root_xp , iter_xp )
352    {
353        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
354
355        if( GET_CXY( current_xp ) == cxy )
356        {
357            found = true;
358            break;
359        }
360    }
361
362    // release the lock protecting the list of processes
[564]363    remote_queuelock_release( lock_xp );
[443]364
365    // return extended pointer on process descriptor in owner cluster
366    if( found ) return current_xp;
367    else        return XPTR_NULL;
368
369}  // end cluster_get_process_from_pid_in_cxy()
370
371
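// This function returns an extended pointer on the process descriptor registered
// in the owner cluster of <pid>. It scans the list of processes attached to the
// owner cluster, and returns XPTR_NULL if the PID is not found.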
372//////////////////////////////////////////////////////
[433]373xptr_t cluster_get_owner_process_from_pid( pid_t pid )
374{
375    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
[436]376    xptr_t      lock_xp;       // xptr on lock protecting this list
[433]377    xptr_t      iter_xp;       // iterator
378    xptr_t      current_xp;    // xptr on current process descriptor
379    process_t * current_ptr;   // local pointer on current process
380    pid_t       current_pid;   // current process identifier
381    bool_t      found;
382
383    cluster_t * cluster = LOCAL_CLUSTER;
384
385    // get owner cluster and lpid
386    cxy_t  owner_cxy = CXY_FROM_PID( pid );
387
388    // get lock & root of list of process in owner cluster
389    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
390    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
391
392    // take the lock protecting the list of processes
[564]393    remote_queuelock_acquire( lock_xp );
[433]394
395    // scan list of processes in owner cluster
396    found = false;
397    XLIST_FOREACH( root_xp , iter_xp )
398    {
399        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
400        current_ptr = GET_PTR( current_xp );
[564]401        current_pid = hal_remote_l32( XPTR( owner_cxy , &current_ptr->pid ) );
[433]402
403        if( current_pid == pid )
404        {
405            found = true;
406            break;
407        }
408    }
409
410    // release the lock protecting the list of processes
[564]411    remote_queuelock_release( lock_xp );
[433]412
413    // return extended pointer on process descriptor in owner cluster
414    if( found ) return current_xp;
415    else        return XPTR_NULL;
416
[436]417}  // end cluster_get_owner_process_from_pid()
418
[443]419
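// This function returns an extended pointer on the reference process descriptor,
// taken from the pref_tbl[] of the owner cluster : a local access when the owner
// cluster is the local cluster, a remote read (hal_remote_l64) otherwise.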
[1]420//////////////////////////////////////////////////////////
421xptr_t cluster_get_reference_process_from_pid( pid_t pid )
[19]422{
[23]423    xptr_t ref_xp;   // extended pointer on reference process descriptor
[1]424
425    cluster_t * cluster = LOCAL_CLUSTER;
426
427    // get owner cluster and lpid
428    cxy_t  owner_cxy = CXY_FROM_PID( pid );
429    lpid_t lpid      = LPID_FROM_PID( pid );
430
[19]431    // Check valid PID
[23]432    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
[1]433
434    if( local_cxy == owner_cxy )   // local cluster is owner cluster
[19]435    {
[23]436        ref_xp = cluster->pmgr.pref_tbl[lpid];
[1]437    }
438    else                              // use a remote read to access owner cluster
439    {
[564]440        ref_xp = (xptr_t)hal_remote_l64( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
[1]441    }
442
[23]443    return ref_xp;
[1]444}
445
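// This function allocates a new PID for a process in the local cluster : it
// searches an empty slot in the local pref_tbl[], registers the process, and
// builds the returned PID from the local cluster identifier and the slot index.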
[416]446///////////////////////////////////////////////
447error_t cluster_pid_alloc( process_t * process,
448                           pid_t     * pid )
[1]449{
450    lpid_t      lpid;
451    bool_t      found;
452
[440]453#if DEBUG_CLUSTER_PID_ALLOC
[593]454uint32_t   cycle = (uint32_t)hal_get_cycles();
455thread_t * this  = CURRENT_THREAD;
[440]456if( DEBUG_CLUSTER_PID_ALLOC < cycle )
[593]457printk("\n[%s] thread[%x,%x] enters in cluster %x / cycle %d\n",
458__FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle );
[440]459#endif
460
[1]461    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
462
[564]463    // get the lock protecting pref_tbl
464    queuelock_acquire( &pm->pref_lock );
[1]465
466    // search an empty slot
467    found = false;
468    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
469    {
470        if( pm->pref_tbl[lpid] == XPTR_NULL )
471        {
472            found = true;
473            break;
474        }
475    }
476
477    if( found )
478    {
479        // register process in pref_tbl[]
[416]480        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
[1]481        pm->pref_nr++;
482
483        // returns pid
484        *pid = PID( local_cxy , lpid );
485
[416]486        // release the lock protecting pref_tbl
[564]487        queuelock_release( &pm->pref_lock );
[416]488
489        return 0;
[1]490    }
491    else
492    {
[564]493        // release the lock
494        queuelock_release( &pm->pref_lock );
[416]495
[564]496        return 0xFFFFFFFF;
[19]497    }
[1]498
[440]499#if DEBUG_CLUSTER_PID_ALLOC
500cycle = (uint32_t)hal_get_cycles();
501if( DEBUG_CLUSTER_PID_ALLOC < cycle )
[593]502printk("\n[%s] thread[%x,%x] exit in cluster %x / cycle %d\n",
503__FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle );
[440]504#endif
505
[1]506} // end cluster_pid_alloc()
507
508/////////////////////////////////////
509void cluster_pid_release( pid_t pid )
510{
[440]511
512#if DEBUG_CLUSTER_PID_RELEASE
[593]513uint32_t   cycle = (uint32_t)hal_get_cycles();
514thread_t * this  = CURRENT_THREAD;
515if( DEBUG_CLUSTER_PID_RELEASE < cycle )
516printk("\n[%s] thread[%x,%x] enters in cluster %x / pid %x / cycle %d\n",
517__FUNCTION__ , this->process->pid , this->trdid , local_cxy , pid, cycle );
[440]518#endif
519
[1]520    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
521    lpid_t lpid       = LPID_FROM_PID( pid );
522
[409]523    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
524
[440]525    // check lpid
[669]526    assert( __FUNCTION__, (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
[440]527    "illegal LPID = %d" , lpid );
[1]528
[440]529    // check owner cluster
[669]530    assert( __FUNCTION__, (owner_cxy == local_cxy) ,
[440]531    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
532
[564]533    // get the lock protecting pref_tbl
534    queuelock_acquire( &pm->pref_lock );
[1]535
536    // remove process from pref_tbl[]
537    pm->pref_tbl[lpid] = XPTR_NULL;
538    pm->pref_nr--;
539
540    // release the lock protecting pref_tbl
[564]541    queuelock_release( &pm->pref_lock );
[1]542
[440]543#if DEBUG_CLUSTER_PID_RELEASE
544cycle = (uint32_t)hal_get_cycles();
[593]545if( DEBUG_CLUSTER_PID_RELEASE < cycle )
546printk("\n[%s] thread[%x,%x] exit in cluster %x / cycle %d\n",
547__FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle );
[440]548#endif
549
[1]550} // end cluster_pid_release()
551
552///////////////////////////////////////////////////////////
553process_t * cluster_get_local_process_from_pid( pid_t pid )
554{
[23]555    xptr_t         process_xp;
556    process_t    * process_ptr;
557    xptr_t         root_xp;
558    xptr_t         iter_xp;
559    bool_t         found;
[19]560
[23]561    found   = false;
562    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
563
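    // scan the list of processes attached to the local cluster, looking for a
    // process descriptor whose PID matches the argument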
564    XLIST_FOREACH( root_xp , iter_xp )
[1]565    {
[23]566        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
567        process_ptr = (process_t *)GET_PTR( process_xp );
568        if( process_ptr->pid == pid )
[1]569        {
[23]570            found = true;
[1]571            break;
572        }
573    }
574
[23]575    if (found ) return process_ptr;
576    else        return NULL;
577
[1]578}  // end cluster_get_local_process_from_pid()
579
580//////////////////////////////////////////////////////
581void cluster_process_local_link( process_t * process )
582{
583    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
584
[443]585    // get extended pointers on local process list root & lock
586    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
587    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
588
[564]589    // get lock protecting the local list
590    remote_queuelock_acquire( lock_xp );
[1]591
[443]592    // register process in local list
593    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
[1]594    pm->local_nr++;
595
[564]596    // release lock protecting the local list
597    remote_queuelock_release( lock_xp );
[1]598}
599
600////////////////////////////////////////////////////////
601void cluster_process_local_unlink( process_t * process )
602{
603    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
604
[443]605    // get extended pointers on local process list lock
606    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );
607
[564]608    // get lock protecting the local list
609    remote_queuelock_acquire( lock_xp );
[1]610
[443]611    // remove process from local list
[23]612    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
[1]613    pm->local_nr--;
614
[564]615    // release lock protecting the local list
616    remote_queuelock_release( lock_xp );
[1]617}
618
619///////////////////////////////////////////////////////
620void cluster_process_copies_link( process_t * process )
621{
622    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
623
[438]624#if DEBUG_CLUSTER_PROCESS_COPIES
[593]625uint32_t   cycle = (uint32_t)hal_get_cycles();
626thread_t * this  = CURRENT_THREAD;
[438]627if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[593]628printk("\n[%s] thread[%x,%x] enters for process %x / cycle %d\n",
629__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
[436]630#endif
631
[1]632    // get owner cluster identifier CXY and process LPID
633    pid_t    pid        = process->pid;
634    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
635    lpid_t   lpid       = LPID_FROM_PID( pid );
636
637    // get extended pointer on lock protecting copies_list[lpid]
[120]638    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]639
640    // get extended pointer on the copies_list[lpid] root
[120]641    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );
[1]642
643    // get extended pointer on the local copies_list entry
644    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
645
[19]646    // get lock protecting copies_list[lpid]
[564]647    remote_queuelock_acquire( copies_lock );
[1]648
[436]649    // add copy to copies_list
[1]650    xlist_add_first( copies_root , copies_entry );
651    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
652
[19]653    // release lock protecting copies_list[lpid]
[564]654    remote_queuelock_release( copies_lock );
[1]655
[438]656#if DEBUG_CLUSTER_PROCESS_COPIES
[436]657cycle = (uint32_t)hal_get_cycles();
[438]658if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[593]659printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
660__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
[436]661#endif
662
663}  // end cluster_process_copies_link()
664
[1]665/////////////////////////////////////////////////////////
666void cluster_process_copies_unlink( process_t * process )
667{
668    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
669
[438]670#if DEBUG_CLUSTER_PROCESS_COPIES
[593]671uint32_t   cycle = (uint32_t)hal_get_cycles();
672thread_t * this  = CURRENT_THREAD;
[438]673if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[593]674printk("\n[%s] thread[%x,%x] enters for process %x / cycle %d\n",
675__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
[436]676#endif
677
[1]678    // get owner cluster identifier CXY and process LPID
679    pid_t    pid        = process->pid;
680    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
681    lpid_t   lpid       = LPID_FROM_PID( pid );
682
683    // get extended pointer on lock protecting copies_list[lpid]
[436]684    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );
[1]685
686    // get extended pointer on the local copies_list entry
687    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
688
[19]689    // get lock protecting copies_list[lpid]
[564]690    remote_queuelock_acquire( copies_lock );
[1]691
[436]692    // remove copy from copies_list
[1]693    xlist_unlink( copies_entry );
694    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
695
[19]696    // release lock protecting copies_list[lpid]
[564]697    remote_queuelock_release( copies_lock );
[1]698
[438]699#if DEBUG_CLUSTER_PROCESS_COPIES
[436]700cycle = (uint32_t)hal_get_cycles();
[438]701if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
[593]702printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
703__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
[436]704#endif
705
706}  // end cluster_process_copies_unlink()
707
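// This function displays on the TXT0 terminal the processes attached to cluster
// <cxy> : all local processes when <owned> is false, or only the user processes
// owned by cluster <cxy> when <owned> is true.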
[583]708////////////////////////////////////////////
709void cluster_processes_display( cxy_t   cxy,
710                                bool_t  owned )
[1]711{
[428]712    xptr_t        root_xp;
[443]713    xptr_t        lock_xp;
[428]714    xptr_t        iter_xp;
[443]715    xptr_t        process_xp;
[583]716    process_t   * process_ptr;
717    cxy_t         process_cxy;
718    pid_t         pid;
[443]719    cxy_t         txt0_cxy;
720    chdev_t     * txt0_ptr;
721    xptr_t        txt0_xp;
722    xptr_t        txt0_lock_xp;
[627]723    uint32_t      pref_nr;       // number of owned processes in cluster cxy
[1]724
[669]725assert( __FUNCTION__, (cluster_is_active( cxy ) ), "illegal cluster index" );
[443]726
727    // get extended pointer on root and lock for local process list in cluster
[428]728    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
[443]729    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );
[1]730
[627]731    // get number of owned processes in cluster cxy
732    pref_nr = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->pmgr.pref_nr ) );
733
734    // display nothing if no user process in cluster cxy
735    if( (owned != false) && (pref_nr < 2) ) return;
736   
[443]737    // get pointers on TXT0 chdev
738    txt0_xp  = chdev_dir.txt_tx[0];
739    txt0_cxy = GET_CXY( txt0_xp );
740    txt0_ptr = GET_PTR( txt0_xp );
[1]741
[443]742    // get extended pointer on TXT0 lock
743    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
744
745    // get lock on local process list
[564]746    remote_queuelock_acquire( lock_xp );
[443]747
[564]748    // get TXT0 lock
749    remote_busylock_acquire( txt0_lock_xp );
[443]750     
751    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
752    cxy , (uint32_t)hal_get_cycles() );
753
754    // loop on all processes in cluster cxy
[428]755    XLIST_FOREACH( root_xp , iter_xp )
756    {
[583]757        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
758        process_ptr = GET_PTR( process_xp );
759        process_cxy = GET_CXY( process_xp );
760
761        // get process PID
762        pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
763
764        if( owned )  // display only user & owned processes
765        {
766            if( (CXY_FROM_PID( pid ) == cxy) && (LPID_FROM_PID( pid ) != 0) )
767            {
768                process_display( process_xp );
769            }
770        }
771        else         // display all local processes
772        {
773            process_display( process_xp );
774        }
[428]775    }
[443]776
[564]777    // release TXT0 lock
778    remote_busylock_release( txt0_lock_xp );
[443]779
780    // release lock on local process list
[564]781    remote_queuelock_release( lock_xp );
[443]782
[428]783}  // end cluster_processes_display()
[1]784
[19]785