/* * cluster.c - Cluster-Manager related operations * * Author Ghassan Almaless (2008,2009,2010,2011,2012) * Mohamed Lamine Karaoui (2015) * Alain Greiner (2016,2017,2018,2019,2020) * * Copyright (c) UPMC Sorbonne Universites * * This file is part of ALMOS-MKH.. * * ALMOS-MKH. is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2.0 of the License. * * ALMOS-MKH. is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with ALMOS-MKH.; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ///////////////////////////////////////////////////////////////////////////////////// // Extern global variables ///////////////////////////////////////////////////////////////////////////////////// extern process_t process_zero; // allocated in kernel_init.c extern chdev_directory_t chdev_dir; // allocated in kernel_init.c /////////////////////////////////////////////////// void cluster_info_init( struct boot_info_s * info ) { boot_device_t * dev; // pointer on external peripheral uint32_t func; // external peripheral functionnal type uint32_t x; uint32_t y; uint32_t i; cluster_t * cluster = LOCAL_CLUSTER; // initialize cluster global parameters cluster->paddr_width = info->paddr_width; cluster->x_width = info->x_width; cluster->y_width = info->y_width; cluster->x_size = info->x_size; cluster->y_size = info->y_size; cluster->io_cxy = info->io_cxy; // initialize the cluster_info[][] array for( x = 0 ; x < CONFIG_MAX_CLUSTERS_X ; x++ ) { for( y = 0; y < CONFIG_MAX_CLUSTERS_Y ; y++ ) { cluster->cluster_info[x][y] = info->cluster_info[x][y]; } } // initialize external peripherals channels for( i = 0 ; i < info->ext_dev_nr ; i++ ) { dev = &info->ext_dev[i]; func = FUNC_FROM_TYPE( dev->type ); if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels; if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels; if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels; if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels; } // initialize number of local cores cluster->cores_nr = info->cores_nr; } // end cluster_info_init() ////////////////////////////////////// void cluster_info_display( cxy_t cxy ) { uint32_t x; uint32_t y; uint32_t ncores; cluster_t * cluster = LOCAL_CLUSTER; // get x_size & y_size from target cluster uint32_t x_size = hal_remote_l32( XPTR( cxy , &cluster->x_size ) ); uint32_t y_size = hal_remote_l32( XPTR( cxy , &cluster->y_size ) ); // get pointers on TXT0 chdev xptr_t txt0_xp = chdev_dir.txt_tx[0]; cxy_t txt0_cxy = GET_CXY( txt0_xp ); chdev_t * txt0_ptr = GET_PTR( txt0_xp ); // get extended pointer on remote TXT0 lock xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); // get TXT0 lock remote_busylock_acquire( lock_xp ); nolock_printk("\n***** cluster_info in cluster %x / x_size %d / y_size %d\n", cxy, x_size, y_size ); for( x = 0 ; x < x_size ; x++ ) { for( y = 0 ; y < y_size ; y++ ) { ncores = (uint32_t)hal_remote_lb( XPTR( cxy , &cluster->cluster_info[x][y] ) ); nolock_printk(" - ncores[%d][%d] = %d\n", x, y, ncores ); } } // release TXT0 lock remote_busylock_release( lock_xp ); } // end cluster_info_display() ///////////////////////////////////////////////////////// error_t cluster_manager_init( struct boot_info_s * info ) { error_t error; lpid_t lpid; // local process_index lid_t lid; // local core index cluster_t * cluster = LOCAL_CLUSTER; #if DEBUG_CLUSTER_INIT uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] thread[%x,%x] enters for cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, local_cxy , cycle ); #endif #if (DEBUG_CLUSTER_INIT & 1) cluster_info_display( local_cxy ); #endif // initialises embedded PPM error = hal_ppm_init( info ); if( error ) { printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n", __FUNCTION__ , local_cxy ); return ENOMEM; } #if( DEBUG_CLUSTER_INIT & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] PPM initialized in cluster %x / cycle %d\n", __FUNCTION__ , local_cxy , cycle ); #endif // initialises embedded KHM khm_init( &cluster->khm ); #if( DEBUG_CLUSTER_INIT & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] KHM initialized in cluster %x at cycle %d\n", __FUNCTION__ , local_cxy , hal_get_cycles() ); #endif // initialises embedded KCM uint32_t i; for( i = 0 ; i < 6 ; i++ ) kcm_init( &cluster->kcm[i] , i+6 ); #if( DEBUG_CLUSTER_INIT & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] KCM[6:11] initialized in cluster %x at cycle %d\n", __FUNCTION__ , local_cxy , hal_get_cycles() ); #endif // initialises all cores descriptors for( lid = 0 ; lid < cluster->cores_nr; lid++ ) { core_init( &cluster->core_tbl[lid], // target core descriptor lid, // local core index info->core[lid].gid ); // gid from boot_info_t } #if( DEBUG_CLUSTER_INIT & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] cores initialized in cluster %x / cycle %d\n", __FUNCTION__ , local_cxy , cycle ); #endif // initialises RPC FIFOs for( lid = 0 ; lid < cluster->cores_nr; lid++ ) { remote_fifo_init( &cluster->rpc_fifo[lid] ); cluster->rpc_threads[lid] = 0; } #if( DEBUG_CLUSTER_INIT & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] RPC fifo inialized in cluster %x at cycle %d\n", __FUNCTION__ , local_cxy , hal_get_cycles() ); #endif // initialise pref_tbl[] in process manager queuelock_init( &cluster->pmgr.pref_lock , LOCK_CLUSTER_PREFTBL ); cluster->pmgr.pref_nr = 0; cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero ); for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ ) { cluster->pmgr.pref_tbl[lpid] = XPTR_NULL; } // initialise local_list in process manager xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) ); cluster->pmgr.local_nr = 0; remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) , LOCK_CLUSTER_LOCALS ); // initialise copies_lists in process manager for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ ) { cluster->pmgr.copies_nr[lpid] = 0; xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) ); remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ), LOCK_CLUSTER_COPIES ); } #if DEBUG_CLUSTER_INIT cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_INIT < cycle ) printk("\n[%s] thread[%x,%x] exit for cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); #endif hal_fence(); return 0; } // end cluster_manager_init() /////////////////////////////////// cxy_t cluster_random_select( void ) { uint32_t index; uint32_t x; uint32_t y; cxy_t cxy; uint32_t x_size = LOCAL_CLUSTER->x_size; uint32_t y_size = LOCAL_CLUSTER->y_size; do { index = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size); x = index / y_size; y = index % y_size; cxy = HAL_CXY_FROM_XY( x , y ); } while ( cluster_is_active( cxy ) == false ); return ( cxy ); } ///////////////////////////////////////////// inline bool_t cluster_is_active ( cxy_t cxy ) { uint32_t x = HAL_X_FROM_CXY( cxy ); uint32_t y = HAL_Y_FROM_CXY( cxy ); return ( LOCAL_CLUSTER->cluster_info[x][y] != 0 ); } //////////////////////////////////////////////////////////////////////////////////// // Cores related functions //////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// lid_t cluster_select_local_core( cxy_t cxy ) { uint32_t min = 1000000; lid_t sel = 0; uint32_t nthreads; lid_t lid; scheduler_t * sched; cluster_t * cluster = LOCAL_CLUSTER; uint32_t ncores = hal_remote_l32( XPTR( cxy , &cluster->cores_nr ) ); for( lid = 0 ; lid < ncores ; lid++ ) { sched = &cluster->core_tbl[lid].scheduler; nthreads = hal_remote_l32( XPTR( cxy , &sched->u_threads_nr ) ) + hal_remote_l32( XPTR( cxy , &sched->k_threads_nr ) ); if( nthreads < min ) { min = nthreads; sel = lid; } } return sel; } //////////////////////////////////////////////////////////////////////////////////// // Process related functions //////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////// xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy, pid_t pid ) { xptr_t root_xp; // xptr on root of list of processes in owner cluster xptr_t lock_xp; // xptr on lock protecting this list xptr_t iter_xp; // iterator xptr_t current_xp; // xptr on current process descriptor bool_t found; cluster_t * cluster = LOCAL_CLUSTER; // get owner cluster and lpid cxy_t owner_cxy = CXY_FROM_PID( pid ); lpid_t lpid = LPID_FROM_PID( pid ); // get lock & root of list of copies from owner cluster root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] ); lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] ); // take the lock protecting the list of processes remote_queuelock_acquire( lock_xp ); // scan list of processes found = false; XLIST_FOREACH( root_xp , iter_xp ) { current_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); if( GET_CXY( current_xp ) == cxy ) { found = true; break; } } // release the lock protecting the list of processes remote_queuelock_release( lock_xp ); // return extended pointer on process descriptor in owner cluster if( found ) return current_xp; else return XPTR_NULL; } // end cluster_get_process_from_pid_in_cxy() ////////////////////////////////////////////////////// xptr_t cluster_get_owner_process_from_pid( pid_t pid ) { xptr_t root_xp; // xptr on root of list of processes in owner cluster xptr_t lock_xp; // xptr on lock protecting this list xptr_t iter_xp; // iterator xptr_t current_xp; // xptr on current process descriptor process_t * current_ptr; // local pointer on current process pid_t current_pid; // current process identifier bool_t found; cluster_t * cluster = LOCAL_CLUSTER; // get owner cluster and lpid cxy_t owner_cxy = CXY_FROM_PID( pid ); // get lock & root of list of process in owner cluster root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root ); lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock ); // take the lock protecting the list of processes remote_queuelock_acquire( lock_xp ); // scan list of processes in owner cluster found = false; XLIST_FOREACH( root_xp , iter_xp ) { current_xp = XLIST_ELEMENT( iter_xp , process_t , local_list ); current_ptr = GET_PTR( current_xp ); current_pid = hal_remote_l32( XPTR( owner_cxy , ¤t_ptr->pid ) ); if( current_pid == pid ) { found = true; break; } } // release the lock protecting the list of processes remote_queuelock_release( lock_xp ); // return extended pointer on process descriptor in owner cluster if( found ) return current_xp; else return XPTR_NULL; } // end cluster_get_owner_process_from_pid() ////////////////////////////////////////////////////////// xptr_t cluster_get_reference_process_from_pid( pid_t pid ) { xptr_t ref_xp; // extended pointer on reference process descriptor cluster_t * cluster = LOCAL_CLUSTER; // get owner cluster and lpid cxy_t owner_cxy = CXY_FROM_PID( pid ); lpid_t lpid = LPID_FROM_PID( pid ); // Check valid PID if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER ) return XPTR_NULL; if( local_cxy == owner_cxy ) // local cluster is owner cluster { ref_xp = cluster->pmgr.pref_tbl[lpid]; } else // use a remote_lwd to access owner cluster { ref_xp = (xptr_t)hal_remote_l64( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) ); } return ref_xp; } /////////////////////////////////////////////// error_t cluster_pid_alloc( process_t * process, pid_t * pid ) { lpid_t lpid; bool_t found; #if DEBUG_CLUSTER_PID_ALLOC uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; if( DEBUG_CLUSTER_PID_ALLOC < cycle ) printk("\n[%s] thread[%x,%x] enters in cluster %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle ); #endif pmgr_t * pm = &LOCAL_CLUSTER->pmgr; // get the lock protecting pref_tbl queuelock_acquire( &pm->pref_lock ); // search an empty slot found = false; for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ ) { if( pm->pref_tbl[lpid] == XPTR_NULL ) { found = true; break; } } if( found ) { // register process in pref_tbl[] pm->pref_tbl[lpid] = XPTR( local_cxy , process ); pm->pref_nr++; // returns pid *pid = PID( local_cxy , lpid ); // release the processs_manager lock queuelock_release( &pm->pref_lock ); return 0; } else { // release the lock queuelock_release( &pm->pref_lock ); return 0xFFFFFFFF; } #if DEBUG_CLUSTER_PID_ALLOC cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_PID_ALLOC < cycle ) printk("\n[%s] thread[%x,%x] exit in cluster %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle ); #endif } // end cluster_pid_alloc() ///////////////////////////////////// void cluster_pid_release( pid_t pid ) { #if DEBUG_CLUSTER_PID_RELEASE uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; if( DEBUG_CLUSTER_PID_ALLOC < cycle ) printk("\n[%s] thread[%x,%x] enters in cluster %x / pid %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , local_cxy , pid, cycle ); #endif cxy_t owner_cxy = CXY_FROM_PID( pid ); lpid_t lpid = LPID_FROM_PID( pid ); pmgr_t * pm = &LOCAL_CLUSTER->pmgr; // check lpid assert( __FUNCTION__, (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER), "illegal LPID = %d" , lpid ); // check owner cluster assert( __FUNCTION__, (owner_cxy == local_cxy) , "local_cluster %x != owner_cluster %x" , local_cxy , owner_cxy ); // get the lock protecting pref_tbl queuelock_acquire( &pm->pref_lock ); // remove process from pref_tbl[] pm->pref_tbl[lpid] = XPTR_NULL; pm->pref_nr--; // release the processs_manager lock queuelock_release( &pm->pref_lock ); #if DEBUG_CLUSTER_PID_RELEASE cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_PID_ALLOC < cycle ) printk("\n[%s] thread[%x,%x] exit in cluster %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle ); #endif } // end cluster_pid_release() /////////////////////////////////////////////////////////// process_t * cluster_get_local_process_from_pid( pid_t pid ) { xptr_t process_xp; process_t * process_ptr; xptr_t root_xp; xptr_t iter_xp; bool_t found; found = false; root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root ); XLIST_FOREACH( root_xp , iter_xp ) { process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list ); process_ptr = (process_t *)GET_PTR( process_xp ); if( process_ptr->pid == pid ) { found = true; break; } } if (found ) return process_ptr; else return NULL; } // end cluster_get_local_process_from_pid() ////////////////////////////////////////////////////// void cluster_process_local_link( process_t * process ) { pmgr_t * pm = &LOCAL_CLUSTER->pmgr; // get extended pointers on local process list root & lock xptr_t root_xp = XPTR( local_cxy , &pm->local_root ); xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock ); // get lock protecting the local list remote_queuelock_acquire( lock_xp ); // register process in local list xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) ); pm->local_nr++; // release lock protecting the local list remote_queuelock_release( lock_xp ); } //////////////////////////////////////////////////////// void cluster_process_local_unlink( process_t * process ) { pmgr_t * pm = &LOCAL_CLUSTER->pmgr; // get extended pointers on local process list lock xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock ); // get lock protecting the local list remote_queuelock_acquire( lock_xp ); // remove process from local list xlist_unlink( XPTR( local_cxy , &process->local_list ) ); pm->local_nr--; // release lock protecting the local list remote_queuelock_release( lock_xp ); } /////////////////////////////////////////////////////// void cluster_process_copies_link( process_t * process ) { pmgr_t * pm = &LOCAL_CLUSTER->pmgr; #if DEBUG_CLUSTER_PROCESS_COPIES uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) printk("\n[%s] thread[%x,%x] enters for process %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle ); #endif // get owner cluster identifier CXY and process LPID pid_t pid = process->pid; cxy_t owner_cxy = CXY_FROM_PID( pid ); lpid_t lpid = LPID_FROM_PID( pid ); // get extended pointer on lock protecting copies_list[lpid] xptr_t copies_lock = XPTR( owner_cxy , &pm->copies_lock[lpid] ); // get extended pointer on the copies_list[lpid] root xptr_t copies_root = XPTR( owner_cxy , &pm->copies_root[lpid] ); // get extended pointer on the local copies_list entry xptr_t copies_entry = XPTR( local_cxy , &process->copies_list ); // get lock protecting copies_list[lpid] remote_queuelock_acquire( copies_lock ); // add copy to copies_list xlist_add_first( copies_root , copies_entry ); hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 ); // release lock protecting copies_list[lpid] remote_queuelock_release( copies_lock ); #if DEBUG_CLUSTER_PROCESS_COPIES cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle ); #endif } // end cluster_process_copies_link() ///////////////////////////////////////////////////////// void cluster_process_copies_unlink( process_t * process ) { pmgr_t * pm = &LOCAL_CLUSTER->pmgr; #if DEBUG_CLUSTER_PROCESS_COPIES uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) printk("\n[%s] thread[%x,%x] enters for process %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle ); #endif // get owner cluster identifier CXY and process LPID pid_t pid = process->pid; cxy_t owner_cxy = CXY_FROM_PID( pid ); lpid_t lpid = LPID_FROM_PID( pid ); // get extended pointer on lock protecting copies_list[lpid] xptr_t copies_lock = XPTR( owner_cxy , &pm->copies_lock[lpid] ); // get extended pointer on the local copies_list entry xptr_t copies_entry = XPTR( local_cxy , &process->copies_list ); // get lock protecting copies_list[lpid] remote_queuelock_acquire( copies_lock ); // remove copy from copies_list xlist_unlink( copies_entry ); hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 ); // release lock protecting copies_list[lpid] remote_queuelock_release( copies_lock ); #if DEBUG_CLUSTER_PROCESS_COPIES cycle = (uint32_t)hal_get_cycles(); if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", __FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle ); #endif } // end cluster_process_copies_unlink() //////////////////////////////////////////// void cluster_processes_display( cxy_t cxy, bool_t owned ) { xptr_t root_xp; xptr_t lock_xp; xptr_t iter_xp; xptr_t process_xp; process_t * process_ptr; cxy_t process_cxy; pid_t pid; cxy_t txt0_cxy; chdev_t * txt0_ptr; xptr_t txt0_xp; xptr_t txt0_lock_xp; uint32_t pref_nr; // number of owned processes in cluster cxy assert( __FUNCTION__, (cluster_is_active( cxy ) ), "illegal cluster index" ); // get extended pointer on root and lock for local process list in cluster root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root ); lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock ); // get number of owned processes in cluster cxy pref_nr = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->pmgr.pref_nr ) ); // display nothing if no user process in cluster cxy if( (owned != false) && (pref_nr < 2) ) return; // get pointers on TXT0 chdev txt0_xp = chdev_dir.txt_tx[0]; txt0_cxy = GET_CXY( txt0_xp ); txt0_ptr = GET_PTR( txt0_xp ); // get extended pointer on TXT0 lock txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); // get lock on local process list remote_queuelock_acquire( lock_xp ); // get TXT0 lock remote_busylock_acquire( txt0_lock_xp ); nolock_printk("\n***** processes in cluster %x / cycle %d\n", cxy , (uint32_t)hal_get_cycles() ); // loop on all processes in cluster cxy XLIST_FOREACH( root_xp , iter_xp ) { process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list ); process_ptr = GET_PTR( process_xp ); process_cxy = GET_CXY( process_xp ); // get process PID pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); if( owned ) // display only user & owned processes { if( (CXY_FROM_PID( pid ) == cxy) && (LPID_FROM_PID( pid ) != 0) ) { process_display( process_xp ); } } else // display all local processes { process_display( process_xp ); } } // release TXT0 lock remote_busylock_release( txt0_lock_xp ); // release lock on local process list remote_queuelock_release( lock_xp ); } // end cluster_processes_display()