/*
 * process.c - process related functions definition.
 *
 * Authors   Ghassan Almaless       (2008,2009,2010,2011,2012)
 *           Mohamed Lamine Karaoui (2015)
 *           Alain Greiner          (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

//////////////////////////////////////////////////////////////////////////////////////////
//   Extern global variables
//////////////////////////////////////////////////////////////////////////////////////////

extern process_t           process_zero;     // allocated in kernel_init.c
extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c

//////////////////////////////////////////////////////////////////////////////////////////
//   Process initialisation related functions
//////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////
process_t * process_alloc( void )
{
    kmem_req_t req;

    req.type  = KMEM_PROCESS;
    req.size  = sizeof(process_t);
    req.flags = AF_KERNEL;

    return (process_t *)kmem_alloc( &req );
}

////////////////////////////////////////
void process_free( process_t * process )
{
    kmem_req_t req;

    req.type = KMEM_PROCESS;
    req.ptr  = process;
    kmem_free( &req );
}

/////////////////////////////////////////////////
void process_reference_init( process_t * process,
                             pid_t       pid,
                             xptr_t      parent_xp )
{
    xptr_t      process_xp;
    cxy_t       parent_cxy;
    process_t * parent_ptr;
    xptr_t      stdin_xp;
    xptr_t      stdout_xp;
    xptr_t      stderr_xp;
    uint32_t    stdin_id;
    uint32_t    stdout_id;
    uint32_t    stderr_id;
    error_t     error;
    uint32_t    txt_id;
    char        rx_path[40];
    char        tx_path[40];
    xptr_t      file_xp;
    xptr_t      chdev_xp;
    chdev_t   * chdev_ptr;
    cxy_t       chdev_cxy;
    pid_t       parent_pid;

    // build extended pointer on this reference process
    process_xp = XPTR( local_cxy , process );

    // get parent process cluster and local pointer
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );

    // get parent_pid
    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );

#if DEBUG_PROCESS_REFERENCE_INIT
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
    process->pid        = pid;
    process->ref_xp     = XPTR( local_cxy , process );
    process->owner_xp   = XPTR( local_cxy , process );
    process->parent_xp  = parent_xp;
    process->term_state = 0;

    // initialize VFS root inode and CWD inode
    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
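    // (vfs_root_xp and cwd_xp are 64-bit extended pointers, so they are read from
    //  the parent descriptor with remote 64-bit accesses: the parent process may
    //  reside in another cluster)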
process->cwd_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) ); // initialize vmm as empty error = vmm_init( process ); assert( (error == 0) , "cannot initialize VMM\n" ); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid, cycle ); #endif // initialize fd_array as empty process_fd_init( process ); // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal. if( (pid == 1) || (parent_pid == 1) ) // INIT or KSH process { // select a TXT channel if( pid == 1 ) txt_id = 0; // INIT else txt_id = process_txt_alloc(); // KSH // attach process to TXT process_txt_attach( process , txt_id ); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle ); #endif // build path to TXT_RX[i] and TXT_TX[i] chdevs snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id ); snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id ); // create stdin pseudo file error = vfs_open( process->vfs_root_xp, rx_path, process_xp, O_RDONLY, 0, // FIXME chmod &stdin_xp, &stdin_id ); assert( (error == 0) , "cannot open stdin pseudo file" ); assert( (stdin_id == 0) , "stdin index must be 0" ); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid, cycle ); #endif // create stdout pseudo file error = vfs_open( process->vfs_root_xp, tx_path, process_xp, O_WRONLY, 0, // FIXME chmod &stdout_xp, &stdout_id ); assert( (error == 0) , "cannot open stdout pseudo file" ); assert( (stdout_id == 1) , "stdout index must be 1" ); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid, cycle ); #endif // create stderr pseudo file error = vfs_open( process->vfs_root_xp, tx_path, process_xp, O_WRONLY, 0, // FIXME chmod &stderr_xp, &stderr_id ); assert( (error == 0) , "cannot open stderr pseudo file" ); assert( (stderr_id == 2) , "stderr index must be 2" ); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid, cycle ); #endif } else // normal user process { // get extended pointer on stdin pseudo file in parent process file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) ); // get extended pointer on parent process TXT chdev chdev_xp = chdev_from_file( file_xp ); // get cluster and local pointer on chdev chdev_cxy = GET_CXY( chdev_xp ); chdev_ptr = GET_PTR( chdev_xp ); // get parent process TXT terminal index txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) ); // attach child process to parent process TXT terminal process_txt_attach( process , txt_id ); // copy all open files from parent process fd_array to this process process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ), XPTR( parent_cxy , &parent_ptr->fd_array ) ); } // initialize lock protecting CWD 
changes remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD ); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid , cycle ); #endif // reset children list root xlist_root_init( XPTR( local_cxy , &process->children_root ) ); process->children_nr = 0; remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN ); // reset semaphore / mutex / barrier / condvar list roots and lock xlist_root_init( XPTR( local_cxy , &process->sem_root ) ); xlist_root_init( XPTR( local_cxy , &process->mutex_root ) ); xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC ); // reset open directories root and lock xlist_root_init( XPTR( local_cxy , &process->dir_root ) ); remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR ); // register new process in the local cluster manager pref_tbl[] lpid_t lpid = LPID_FROM_PID( pid ); LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process ); // register new process descriptor in local cluster manager local_list cluster_process_local_link( process ); // register new process descriptor in local cluster manager copies_list cluster_process_copies_link( process ); // initialize th_tbl[] array and associated threads uint32_t i; for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ ) { process->th_tbl[i] = NULL; } process->th_nr = 0; rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL ); hal_fence(); #if (DEBUG_PROCESS_REFERENCE_INIT & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", __FUNCTION__, parent_pid, this->trdid, pid, cycle ); #endif } // process_reference_init() ///////////////////////////////////////////////////// error_t process_copy_init( process_t * local_process, xptr_t reference_process_xp ) { error_t error; // get reference process cluster and local pointer cxy_t ref_cxy = GET_CXY( reference_process_xp ); process_t * ref_ptr = GET_PTR( reference_process_xp ); // initialize PID, REF_XP, PARENT_XP, and STATE local_process->pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) ); local_process->parent_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) ); local_process->ref_xp = reference_process_xp; local_process->owner_xp = reference_process_xp; local_process->term_state = 0; #if DEBUG_PROCESS_COPY_INIT thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_COPY_INIT < cycle ) printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle ); #endif // check user process assert( (local_process->pid != 0), "PID cannot be 0" ); // reset local process vmm error = vmm_init( local_process ); assert( (error == 0) , "cannot initialize VMM\n"); // reset process file descriptors array process_fd_init( local_process ); // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) ); local_process->vfs_bin_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) ); local_process->cwd_xp = XPTR_NULL; // reset children list root (not used in a process descriptor 
copy) xlist_root_init( XPTR( local_cxy , &local_process->children_root ) ); local_process->children_nr = 0; remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ), LOCK_PROCESS_CHILDREN ); // reset children_list (not used in a process descriptor copy) xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) ); // reset semaphores list root (not used in a process descriptor copy) xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) ); xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) ); xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) ); xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) ); // initialize th_tbl[] array and associated fields uint32_t i; for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ ) { local_process->th_tbl[i] = NULL; } local_process->th_nr = 0; rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL ); // register new process descriptor in local cluster manager local_list cluster_process_local_link( local_process ); // register new process descriptor in owner cluster manager copies_list cluster_process_copies_link( local_process ); hal_fence(); #if DEBUG_PROCESS_COPY_INIT cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_COPY_INIT < cycle ) printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle ); #endif return 0; } // end process_copy_init() /////////////////////////////////////////// void process_destroy( process_t * process ) { xptr_t parent_xp; process_t * parent_ptr; cxy_t parent_cxy; xptr_t children_lock_xp; xptr_t children_nr_xp; pid_t pid = process->pid; // check no more threads assert( (process->th_nr == 0), "process %x in cluster %x contains threads", pid , local_cxy ); #if DEBUG_PROCESS_DESTROY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle ); #endif // Destroy VMM vmm_destroy( process ); #if (DEBUG_PROCESS_DESTROY & 1) if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy ); #endif // remove process from local_list in local cluster manager cluster_process_local_unlink( process ); #if (DEBUG_PROCESS_DESTROY & 1) if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy ); #endif // remove process from copies_list in owner cluster manager cluster_process_copies_unlink( process ); #if (DEBUG_PROCESS_DESTROY & 1) if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy ); #endif // remove process from children_list // and release PID if owner cluster if( CXY_FROM_PID( pid ) == local_cxy ) { // get pointers on parent process parent_xp = process->parent_xp; parent_cxy = GET_CXY( parent_xp ); parent_ptr = GET_PTR( parent_xp ); // get extended pointer on children_lock in parent process children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock ); children_nr_xp = XPTR( parent_cxy , &parent_ptr->children_nr ); // remove process from children_list remote_queuelock_acquire( children_lock_xp ); xlist_unlink( XPTR( 
local_cxy , &process->children_list ) ); hal_remote_atomic_add( children_nr_xp , -1 ); remote_queuelock_release( children_lock_xp ); #if (DEBUG_PROCESS_DESTROY & 1) if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from children list\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy ); #endif // release the process PID to cluster manager cluster_pid_release( pid ); #if (DEBUG_PROCESS_DESTROY & 1) if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy ); #endif } // FIXME decrement the refcount on file pointer by vfs_bin_xp [AG] // FIXME close all open files [AG] // FIXME synchronize dirty files [AG] // release memory allocated to process descriptor process_free( process ); #if DEBUG_PROCESS_DESTROY cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_DESTROY < cycle ) printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle ); #endif } // end process_destroy() /////////////////////////////////////////////////////////////////// const char * process_action_str( process_sigactions_t action_type ) { switch ( action_type ) { case BLOCK_ALL_THREADS: return "BLOCK"; case UNBLOCK_ALL_THREADS: return "UNBLOCK"; case DELETE_ALL_THREADS: return "DELETE"; default: return "undefined"; } } //////////////////////////////////////// void process_sigaction( pid_t pid, uint32_t type ) { cxy_t owner_cxy; // owner cluster identifier lpid_t lpid; // process index in owner cluster cluster_t * cluster; // pointer on cluster manager xptr_t root_xp; // extended pointer on root of copies xptr_t lock_xp; // extended pointer on lock protecting copies xptr_t iter_xp; // iterator on copies list xptr_t process_xp; // extended pointer on process copy cxy_t process_cxy; // process copy cluster identifier process_t * process_ptr; // local pointer on process copy reg_t save_sr; // for critical section thread_t * client; // pointer on client thread xptr_t client_xp; // extended pointer on client thread process_t * local; // pointer on process copy in local cluster uint32_t remote_nr; // number of remote process copies rpc_desc_t rpc; // shared RPC descriptor uint32_t responses; // shared RPC responses counter client = CURRENT_THREAD; client_xp = XPTR( local_cxy , client ); local = NULL; remote_nr = 0; // check calling thread can yield thread_assert_can_yield( client , __FUNCTION__ ); #if DEBUG_PROCESS_SIGACTION uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_SIGACTION < cycle ) printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n", __FUNCTION__ , client->process->pid, client->trdid, process_action_str( type ) , pid , cycle ); #endif // get pointer on local cluster manager cluster = LOCAL_CLUSTER; // get owner cluster identifier and process lpid owner_cxy = CXY_FROM_PID( pid ); lpid = LPID_FROM_PID( pid ); // get root of list of copies and lock from owner cluster root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] ); lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] ); // check action type assert( ((type == DELETE_ALL_THREADS ) || (type == BLOCK_ALL_THREADS ) || (type == UNBLOCK_ALL_THREADS )), "illegal action type" ); // This client thread send parallel RPCs to all remote clusters containing // target process copies, wait all responses, and then handles directly // the threads in local cluster, when required. 
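    // One RPC_PROCESS_SIGACTION request is posted per remote cluster registered in
    // the copies list; the local copy, if any, is handled in place by the client
    // thread itself once all RPCs have been posted.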
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field.

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        {
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there are remote process copies, the client thread deschedules
    //   (it will be unblocked by the last RPC server thread);
    // - if there are no remote copies, the client thread unblocks itself.
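    // note: the client blocked itself *before* posting the RPCs, so an unblock
    // performed by the last server thread cannot be lost, even if it happens
    // before the yield below.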
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    }
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp );
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local );
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()

/////////////////////////////////////////////////
void process_block_threads( process_t * process )
{
    thread_t          * target;       // pointer on target thread
    thread_t          * this;         // pointer on calling thread
    uint32_t            ltid;         // index in process th_tbl[]
    cxy_t               owner_cxy;    // target process owner cluster
    uint32_t            count;        // requests counter
    volatile uint32_t   ack_count;    // acknowledges counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

    // check target process is a user process
    assert( (LPID_FROM_PID( process->pid ) != 0 ),
    "process %x is not a user process\n", process->pid );

    // get target process owner cluster
    owner_cxy = CXY_FROM_PID( process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    // - if the calling thread and the target thread are not running on the same
    //   core, we ask the target scheduler to acknowledge the blocking,
    //   to be sure that the target thread is not running.
    // - if the calling thread and the target thread are running on the same core,
    //   we don't need confirmation from the scheduler.
    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )             // thread exists
        {
            count++;

            // set the global blocked bit in target thread descriptor.
            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );

            if( this->core->lid != target->core->lid )
            {
                // increment responses counter
                hal_atomic_add( (void*)&ack_count , 1 );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (uint32_t *)&ack_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

    // wait for the schedulers' acknowledgements    TODO: this could be improved...
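    // (ack_count was incremented once for each target thread running on another
    //  core; each target scheduler is expected to decrement it through the pointer
    //  registered by thread_set_req_ack, so the loop below polls until it reaches 0)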
    while( 1 )
    {
        // exit when all scheduler acknowledgements have been received
        if ( ack_count == 0 ) break;

        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

}  // end process_block_threads()

/////////////////////////////////////////////////
void process_delete_threads( process_t * process,
                             xptr_t      client_xp )
{
    thread_t * this;          // pointer on calling thread
    thread_t * target;        // local pointer on target thread
    xptr_t     target_xp;     // extended pointer on target thread
    cxy_t      owner_cxy;     // owner process cluster
    uint32_t   ltid;          // index in process th_tbl
    uint32_t   count;         // threads counter

    // get calling thread pointer
    this = CURRENT_THREAD;

    // get target process owner cluster
    owner_cxy = CXY_FROM_PID( process->pid );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
#endif

    // check target process is a user process
    assert( (LPID_FROM_PID( process->pid ) != 0),
    "process %x is not a user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_wr_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )    // valid thread
        {
            count++;
            target_xp = XPTR( local_cxy , target );

            // main thread and client thread should not be deleted
            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&    // not main thread
                (client_xp) != target_xp )                      // not client thread
            {
                // mark target thread for delete and block it
                thread_delete( target_xp , process->pid , false );   // not forced
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_wr_release( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end process_delete_threads()

///////////////////////////////////////////////////
void process_unblock_threads( process_t * process )
{
    thread_t * target;        // pointer on target thread
    thread_t * this;          // pointer on calling thread
    uint32_t   ltid;          // index in process th_tbl
    uint32_t   count;         // requests counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

    // check target process is a user process
    assert( ( LPID_FROM_PID( process->pid ) != 0 ),
    "process %x is not a user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on process threads to unblock all threads
    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )    // thread found
        {
count++; // reset the global blocked bit in target thread descriptor. thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL ); } } // release lock protecting process th_tbl[] rwlock_rd_release( &process->th_lock ); #if DEBUG_PROCESS_SIGACTION cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_SIGACTION < cycle ) printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle ); #endif } // end process_unblock_threads() /////////////////////////////////////////////// process_t * process_get_local_copy( pid_t pid ) { error_t error; process_t * process_ptr; // local pointer on process xptr_t process_xp; // extended pointer on process cluster_t * cluster = LOCAL_CLUSTER; #if DEBUG_PROCESS_GET_LOCAL_COPY thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle ); #endif // get lock protecting local list of processes remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) ); // scan the local list of process descriptors to find the process xptr_t iter; bool_t found = false; XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter ) { process_xp = XLIST_ELEMENT( iter , process_t , local_list ); process_ptr = GET_PTR( process_xp ); if( process_ptr->pid == pid ) { found = true; break; } } // release lock protecting local list of processes remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) ); // allocate memory for a new local process descriptor // and initialise it from reference cluster if not found if( !found ) { // get extended pointer on reference process descriptor xptr_t ref_xp = cluster_get_reference_process_from_pid( pid ); assert( (ref_xp != XPTR_NULL) , "illegal pid\n" ); // allocate memory for local process descriptor process_ptr = process_alloc(); if( process_ptr == NULL ) return NULL; // initialize local process descriptor copy error = process_copy_init( process_ptr , ref_xp ); if( error ) return NULL; } #if DEBUG_PROCESS_GET_LOCAL_COPY cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle ); #endif return process_ptr; } // end process_get_local_copy() //////////////////////////////////////////// pid_t process_get_ppid( xptr_t process_xp ) { cxy_t process_cxy; process_t * process_ptr; xptr_t parent_xp; cxy_t parent_cxy; process_t * parent_ptr; // get process cluster and local pointer process_cxy = GET_CXY( process_xp ); process_ptr = GET_PTR( process_xp ); // get pointers on parent process parent_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) ); parent_cxy = GET_CXY( parent_xp ); parent_ptr = GET_PTR( parent_xp ); return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) ); } ////////////////////////////////////////////////////////////////////////////////////////// // File descriptor array related functions ////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////// void process_fd_init( process_t * process ) { uint32_t fd; // initialize lock remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY ); // initialize number 
of open files process->fd_array.current = 0; // initialize array for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ ) { process->fd_array.array[fd] = XPTR_NULL; } } //////////////////////////////////////////////////// error_t process_fd_register( xptr_t process_xp, xptr_t file_xp, uint32_t * fdid ) { bool_t found; uint32_t id; xptr_t xp; // get reference process cluster and local pointer process_t * process_ptr = GET_PTR( process_xp ); cxy_t process_cxy = GET_CXY( process_xp ); // check client process is reference process assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ), "client process must be reference process\n" ); #if DEBUG_PROCESS_FD_REGISTER thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); pid_t pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) ); if( DEBUG_PROCESS_FD_REGISTER < cycle ) printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, pid, cycle ); #endif // build extended pointer on lock protecting reference fd_array xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock ); // take lock protecting reference fd_array remote_queuelock_acquire( lock_xp ); found = false; for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ ) { xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) ); if ( xp == XPTR_NULL ) { // update reference fd_array hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp ); hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 ); // exit *fdid = id; found = true; break; } } // release lock protecting fd_array remote_queuelock_release( lock_xp ); #if DEBUG_PROCESS_FD_REGISTER cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_FD_REGISTER < cycle ) printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, pid, id, cycle ); #endif if ( !found ) return -1; else return 0; } // end process_fd_register() //////////////////////////////////////////////// xptr_t process_fd_get_xptr( process_t * process, uint32_t fdid ) { xptr_t file_xp; xptr_t lock_xp; // access local copy of process descriptor file_xp = process->fd_array.array[fdid]; if( file_xp == XPTR_NULL ) { // get reference process cluster and local pointer xptr_t ref_xp = process->ref_xp; cxy_t ref_cxy = GET_CXY( ref_xp ); process_t * ref_ptr = GET_PTR( ref_xp ); // build extended pointer on lock protecting reference fd_array lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock ); // take lock protecting reference fd_array remote_queuelock_acquire( lock_xp ); // access reference process descriptor file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) ); // update local fd_array if found if( file_xp != XPTR_NULL ) process->fd_array.array[fdid] = file_xp; // release lock protecting reference fd_array remote_queuelock_release( lock_xp ); } return file_xp; } // end process_fd_get_xptr() /////////////////////////////////////////// void process_fd_remote_copy( xptr_t dst_xp, xptr_t src_xp ) { uint32_t fd; xptr_t entry; // get cluster and local pointer for src fd_array cxy_t src_cxy = GET_CXY( src_xp ); fd_array_t * src_ptr = GET_PTR( src_xp ); // get cluster and local pointer for dst fd_array cxy_t dst_cxy = GET_CXY( dst_xp ); fd_array_t * dst_ptr = GET_PTR( dst_xp ); // get the remote lock protecting the src fd_array remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) ); // loop on all fd_array entries for( fd = 
0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ ) { entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) ); if( entry != XPTR_NULL ) { // increment file descriptor refcount vfs_file_count_up( entry ); // copy entry in destination process fd_array hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry ); } } // release lock on source process fd_array remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) ); } // end process_fd_remote_copy() //////////////////////////////////// bool_t process_fd_array_full( void ) { // get extended pointer on reference process xptr_t ref_xp = CURRENT_THREAD->process->ref_xp; // get reference process cluster and local pointer process_t * ref_ptr = GET_PTR( ref_xp ); cxy_t ref_cxy = GET_CXY( ref_xp ); // get number of open file descriptors from reference fd_array uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) ); return ( current >= CONFIG_PROCESS_FILE_MAX_NR ); } //////////////////////////////////////////////////////////////////////////////////// // Thread related functions //////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////// error_t process_register_thread( process_t * process, thread_t * thread, trdid_t * trdid ) { ltid_t ltid; bool_t found = false; // check arguments assert( (process != NULL) , "process argument is NULL" ); assert( (thread != NULL) , "thread argument is NULL" ); // get the lock protecting th_tbl for all threads // but the idle thread executing kernel_init (cannot yield) if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock ); // scan th_tbl for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ ) { if( process->th_tbl[ltid] == NULL ) { found = true; break; } } if( found ) { // register thread in th_tbl[] process->th_tbl[ltid] = thread; process->th_nr++; // returns trdid *trdid = TRDID( local_cxy , ltid ); } // release the lock protecting th_tbl if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock ); return (found) ? 
0 : 0xFFFFFFFF; } // end process_register_thread() ///////////////////////////////////////////////// bool_t process_remove_thread( thread_t * thread ) { uint32_t count; // number of threads in local process descriptor process_t * process = thread->process; // get thread local index ltid_t ltid = LTID_FROM_TRDID( thread->trdid ); // get the lock protecting th_tbl[] rwlock_wr_acquire( &process->th_lock ); // get number of threads count = process->th_nr; // check thread assert( (thread != NULL) , "thread argument is NULL" ); // check th_nr value assert( (count > 0) , "process th_nr cannot be 0\n" ); // remove thread from th_tbl[] process->th_tbl[ltid] = NULL; process->th_nr = count-1; // release lock protecting th_tbl rwlock_wr_release( &process->th_lock ); return (count == 1); } // end process_remove_thread() ///////////////////////////////////////////////////////// error_t process_make_fork( xptr_t parent_process_xp, xptr_t parent_thread_xp, pid_t * child_pid, thread_t ** child_thread ) { process_t * process; // local pointer on child process descriptor thread_t * thread; // local pointer on child thread descriptor pid_t new_pid; // process identifier for child process pid_t parent_pid; // process identifier for parent process xptr_t ref_xp; // extended pointer on reference process xptr_t vfs_bin_xp; // extended pointer on .elf file error_t error; // get cluster and local pointer for parent process cxy_t parent_process_cxy = GET_CXY( parent_process_xp ); process_t * parent_process_ptr = GET_PTR( parent_process_xp ); // get parent process PID and extended pointer on .elf file parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid)); vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp)); // get extended pointer on reference process ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); // check parent process is the reference process assert( (parent_process_xp == ref_xp ) , "parent process must be the reference process\n" ); #if DEBUG_PROCESS_MAKE_FORK uint32_t cycle = (uint32_t)hal_get_cycles(); thread_t * this = CURRENT_THREAD; trdid_t trdid = this->trdid; pid_t pid = this->process->pid; if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n", __FUNCTION__, pid, trdid, local_cxy, cycle ); #endif // allocate a process descriptor process = process_alloc(); if( process == NULL ) { printk("\n[ERROR] in %s : cannot get process in cluster %x\n", __FUNCTION__, local_cxy ); return -1; } // allocate a child PID from local cluster error = cluster_pid_alloc( process , &new_pid ); if( error ) { printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", __FUNCTION__, local_cxy ); process_free( process ); return -1; } #if( DEBUG_PROCESS_MAKE_FORK & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n", __FUNCTION__, pid, trdid, new_pid, cycle ); #endif // initializes child process descriptor from parent process descriptor process_reference_init( process, new_pid, parent_process_xp ); #if( DEBUG_PROCESS_MAKE_FORK & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n", __FUNCTION__, pid, trdid, new_pid, cycle ); #endif // copy VMM from parent descriptor to child descriptor error = vmm_fork_copy( process, parent_process_xp ); if( error ) { printk("\n[ERROR] in %s : cannot copy VMM in cluster 
%x\n", __FUNCTION__, local_cxy ); process_free( process ); cluster_pid_release( new_pid ); return -1; } #if( DEBUG_PROCESS_MAKE_FORK & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n", __FUNCTION__, pid, trdid, cycle ); #endif // if parent_process is INIT, or if parent_process is the TXT owner, // the child_process becomes the owner of its TXT terminal if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) ) { process_txt_set_ownership( XPTR( local_cxy , process ) ); #if( DEBUG_PROCESS_MAKE_FORK & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n", __FUNCTION__ , pid, trdid, cycle ); #endif } // update extended pointer on .elf file process->vfs_bin_xp = vfs_bin_xp; // create child thread descriptor from parent thread descriptor error = thread_user_fork( parent_thread_xp, process, &thread ); if( error ) { printk("\n[ERROR] in %s : cannot create thread in cluster %x\n", __FUNCTION__, local_cxy ); process_free( process ); cluster_pid_release( new_pid ); return -1; } // check main thread LTID assert( (LTID_FROM_TRDID(thread->trdid) == 0) , "main thread must have LTID == 0\n" ); #if( DEBUG_PROCESS_MAKE_FORK & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", __FUNCTION__, pid, trdid, thread, cycle ); #endif // set Copy_On_Write flag in parent process GPT // this includes all replicated GPT copies if( parent_process_cxy == local_cxy ) // reference is local { vmm_set_cow( parent_process_ptr ); } else // reference is remote { rpc_vmm_set_cow_client( parent_process_cxy, parent_process_ptr ); } // set Copy_On_Write flag in child process GPT vmm_set_cow( process ); #if( DEBUG_PROCESS_MAKE_FORK & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n", __FUNCTION__, pid, trdid, cycle ); #endif // get extended pointers on parent children_root, children_lock and children_nr xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root ); xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock ); xptr_t children_nr_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_nr ); // register process in parent children list remote_queuelock_acquire( children_lock_xp ); xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) ); hal_remote_atomic_add( children_nr_xp , 1 ); remote_queuelock_release( children_lock_xp ); // return success *child_thread = thread; *child_pid = new_pid; #if DEBUG_PROCESS_MAKE_FORK cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_FORK < cycle ) printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n", __FUNCTION__, pid, trdid, new_pid, cycle ); #endif return 0; } // end process_make_fork() ///////////////////////////////////////////////////// error_t process_make_exec( exec_info_t * exec_info ) { thread_t * thread; // local pointer on this thread process_t * process; // local pointer on this process pid_t pid; // this process identifier xptr_t ref_xp; // reference process for this process error_t error; // value returned by called functions char * path; // path to .elf file xptr_t file_xp; // extended pointer on .elf file descriptor uint32_t file_id; // file index in fd_array 
uint32_t args_nr; // number of main thread arguments char ** args_pointers; // array of pointers on main thread arguments // get thread, process, pid and ref_xp thread = CURRENT_THREAD; process = thread->process; pid = process->pid; ref_xp = process->ref_xp; // get relevant infos from exec_info path = exec_info->path; args_nr = exec_info->args_nr; args_pointers = exec_info->args_pointers; #if DEBUG_PROCESS_MAKE_EXEC uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n", __FUNCTION__, pid, thread->trdid, path, cycle ); #endif // open the file identified by file_xp = XPTR_NULL; file_id = 0xFFFFFFFF; error = vfs_open( process->vfs_root_xp, path, ref_xp, O_RDONLY, 0, &file_xp, &file_id ); if( error ) { printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path ); return -1; } #if (DEBUG_PROCESS_MAKE_EXEC & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n", __FUNCTION__, pid, thread->trdid, path, cycle ); #endif // delete all threads other than this main thread in all clusters process_sigaction( pid , DELETE_ALL_THREADS ); #if (DEBUG_PROCESS_MAKE_EXEC & 1) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n", __FUNCTION__, pid, thread->trdid, cycle ); #endif // reset local process VMM vmm_destroy( process ); #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n", __FUNCTION__, pid, thread->trdid, cycle ); #endif // re-initialize the VMM (kentry/args/envs vsegs registration) error = vmm_init( process ); if( error ) { printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path ); vfs_close( file_xp , file_id ); // FIXME restore old process VMM return -1; } #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n", __FUNCTION__, pid, thread->trdid, cycle ); #endif // register code & data vsegs as well as entry-point in process VMM, // and register extended pointer on .elf file in process descriptor error = elf_load_process( file_xp , process ); if( error ) { printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path ); vfs_close( file_xp , file_id ); // FIXME restore old process VMM return -1; } #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_MAKE_EXEC < cycle ) printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n", __FUNCTION__, pid, thread->trdid, cycle ); #endif // update the existing main thread descriptor... 
and jump to user code error = thread_user_exec( (void *)process->vmm.entry_point, args_nr, args_pointers ); if( error ) { printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path ); vfs_close( file_xp , file_id ); // FIXME restore old process VMM return -1; } assert( false, "we should not execute this code"); return 0; } // end process_make_exec() /////////////////////////////////////////////// void process_zero_create( process_t * process ) { error_t error; pid_t pid; #if DEBUG_PROCESS_ZERO_CREATE uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_ZERO_CREATE < cycle ) printk("\n[%s] enter / cluster %x / cycle %d\n", __FUNCTION__, local_cxy, cycle ); #endif // get PID from local cluster manager for this kernel process error = cluster_pid_alloc( process , &pid ); if( error || (LPID_FROM_PID( pid ) != 0) ) { printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n", __FUNCTION__ , local_cxy, pid ); hal_core_sleep(); } // initialize PID, REF_XP, PARENT_XP, and STATE // the kernel process_zero is its own parent_process, // reference_process, and owner_process, and cannot be killed... process->pid = pid; process->ref_xp = XPTR( local_cxy , process ); process->owner_xp = XPTR( local_cxy , process ); process->parent_xp = XPTR( local_cxy , process ); process->term_state = 0; // reset th_tbl[] array and associated fields uint32_t i; for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ ) { process->th_tbl[i] = NULL; } process->th_nr = 0; rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL ); // reset children list as empty xlist_root_init( XPTR( local_cxy , &process->children_root ) ); process->children_nr = 0; remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN ); // register kernel process in cluster manager local_list cluster_process_local_link( process ); hal_fence(); #if DEBUG_PROCESS_ZERO_CREATE cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_ZERO_CREATE < cycle ) printk("\n[%s] exit / cluster %x / cycle %d\n", __FUNCTION__, local_cxy, cycle ); #endif } // end process_zero_create() //////////////////////////////// void process_init_create( void ) { process_t * process; // local pointer on process descriptor pid_t pid; // process_init identifier thread_t * thread; // local pointer on main thread pthread_attr_t attr; // main thread attributes lid_t lid; // selected core local index for main thread xptr_t file_xp; // extended pointer on .elf file descriptor uint32_t file_id; // file index in fd_array error_t error; #if DEBUG_PROCESS_INIT_CREATE thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] enter / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, cycle ); #endif // allocates memory for process descriptor from local cluster process = process_alloc(); // check memory allocator assert( (process != NULL), "no memory for process descriptor in cluster %x\n", local_cxy ); // set the CWD and VFS_ROOT fields in process descriptor process->cwd_xp = process_zero.vfs_root_xp; process->vfs_root_xp = process_zero.vfs_root_xp; // get PID from local cluster error = cluster_pid_alloc( process , &pid ); // check PID allocator assert( (error == 0), "cannot allocate PID in cluster %x\n", local_cxy ); // check PID value assert( (pid == 1) , "process INIT must be first process in cluster 0\n" ); // initialize process descriptor / parent is local process_zero process_reference_init( process, pid, XPTR( 
local_cxy , &process_zero ) ); #if(DEBUG_PROCESS_INIT_CREATE & 1) if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] initialized process descriptor\n", __FUNCTION__, this->process->pid, this->trdid ); #endif // open the file identified by CONFIG_PROCESS_INIT_PATH file_xp = XPTR_NULL; file_id = -1; error = vfs_open( process->vfs_root_xp, CONFIG_PROCESS_INIT_PATH, XPTR( local_cxy , process ), O_RDONLY, 0, &file_xp, &file_id ); assert( (error == 0), "failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH ); #if(DEBUG_PROCESS_INIT_CREATE & 1) if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] open .elf file decriptor\n", __FUNCTION__, this->process->pid, this->trdid ); #endif // register "code" and "data" vsegs as well as entry-point // in process VMM, using information contained in the elf file. error = elf_load_process( file_xp , process ); assert( (error == 0), "cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH ); #if(DEBUG_PROCESS_INIT_CREATE & 1) if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n", __FUNCTION__, this->process->pid, this->trdid ); #endif // get extended pointers on process_zero children_root, children_lock xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root ); xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock ); // take lock protecting kernel process children list remote_queuelock_acquire( children_lock_xp ); // register process INIT in parent local process_zero xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) ); hal_atomic_add( &process_zero.children_nr , 1 ); // release lock protecting kernel process children list remote_queuelock_release( children_lock_xp ); #if(DEBUG_PROCESS_INIT_CREATE & 1) if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] registered init process in parent\n", __FUNCTION__, this->process->pid, this->trdid ); #endif // select a core in local cluster to execute the main thread lid = cluster_select_local_core(); // initialize pthread attributes for main thread attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED; attr.cxy = local_cxy; attr.lid = lid; // create and initialize thread descriptor error = thread_user_create( pid, (void *)process->vmm.entry_point, NULL, &attr, &thread ); assert( (error == 0), "cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH ); assert( (thread->trdid == 0), "main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH ); #if(DEBUG_PROCESS_INIT_CREATE & 1) if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] created main thread\n", __FUNCTION__, this->process->pid, this->trdid ); #endif // activate thread thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL ); hal_fence(); #if DEBUG_PROCESS_INIT_CREATE cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_INIT_CREATE < cycle ) printk("\n[%s] thread[%x,%x] exit / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, cycle ); #endif } // end process_init_create() ///////////////////////////////////////// void process_display( xptr_t process_xp ) { process_t * process_ptr; cxy_t process_cxy; xptr_t parent_xp; // extended pointer on parent process process_t * parent_ptr; cxy_t parent_cxy; xptr_t owner_xp; // extended pointer on owner process process_t * owner_ptr; cxy_t owner_cxy; pid_t pid; pid_t ppid; lpid_t lpid; uint32_t state; uint32_t th_nr; xptr_t txt_file_xp; // extended pointer on TXT_RX file descriptor 
xptr_t txt_chdev_xp; // extended pointer on TXT_RX chdev chdev_t * txt_chdev_ptr; cxy_t txt_chdev_cxy; xptr_t txt_owner_xp; // extended pointer on TXT owner process xptr_t elf_file_xp; // extended pointer on .elf file cxy_t elf_file_cxy; vfs_file_t * elf_file_ptr; vfs_inode_t * elf_inode_ptr; // local pointer on .elf inode char txt_name[CONFIG_VFS_MAX_NAME_LENGTH]; char elf_name[CONFIG_VFS_MAX_NAME_LENGTH]; // get cluster and local pointer on process process_ptr = GET_PTR( process_xp ); process_cxy = GET_CXY( process_xp ); // get process PID, LPID, and state pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); lpid = LPID_FROM_PID( pid ); state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) ); // get process PPID parent_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) ); parent_cxy = GET_CXY( parent_xp ); parent_ptr = GET_PTR( parent_xp ); ppid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) ); // get number of threads th_nr = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) ); // get pointers on owner process descriptor owner_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ); owner_cxy = GET_CXY( owner_xp ); owner_ptr = GET_PTR( owner_xp ); // get process TXT name and .elf name if( lpid ) // user process { // get extended pointer on file descriptor associated to TXT_RX txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) ); assert( (txt_file_xp != XPTR_NULL) , "process must be attached to one TXT terminal\n" ); // get TXT_RX chdev pointers txt_chdev_xp = chdev_from_file( txt_file_xp ); txt_chdev_cxy = GET_CXY( txt_chdev_xp ); txt_chdev_ptr = GET_PTR( txt_chdev_xp ); // get TXT_RX name and ownership hal_remote_strcpy( XPTR( local_cxy , txt_name ) , XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) ); txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, &txt_chdev_ptr->ext.txt.owner_xp ) ); // get process .elf name elf_file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) ); elf_file_cxy = GET_CXY( elf_file_xp ); elf_file_ptr = GET_PTR( elf_file_xp ); elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) ); vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name ); } else // kernel process_zero { // TXT name and .elf name are not registered in kernel process_zero strcpy( txt_name , "txt0_rx" ); txt_owner_xp = process_xp; strcpy( elf_name , "kernel.elf" ); } // display process info if( txt_owner_xp == process_xp ) { nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", pid, txt_name, process_ptr, ppid, state, th_nr, elf_name ); } else { nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", pid, txt_name, process_ptr, ppid, state, th_nr, elf_name ); } } // end process_display() //////////////////////////////////////////////////////////////////////////////////////// // Terminals related functions //////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////// uint32_t process_txt_alloc( void ) { uint32_t index; // TXT terminal index xptr_t chdev_xp; // extended pointer on TXT_RX chdev chdev_t * chdev_ptr; // local pointer on TXT_RX chdev cxy_t chdev_cxy; // TXT_RX chdev cluster xptr_t root_xp; // extended pointer on owner field in chdev // scan the user TXT_RX chdevs (TXT0 is reserved for kernel) for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ ) { // get pointers on TXT_RX[index] chdev_xp = chdev_dir.txt_rx[index]; chdev_cxy 
= GET_CXY( chdev_xp ); chdev_ptr = GET_PTR( chdev_xp ); // get extended pointer on root of attached process root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root ); // return free TXT index if found if( xlist_is_empty( root_xp ) ) return index; } assert( false , "no free TXT terminal found" ); return -1; } // end process_txt_alloc() ///////////////////////////////////////////// void process_txt_attach( process_t * process, uint32_t txt_id ) { xptr_t chdev_xp; // extended pointer on TXT_RX chdev cxy_t chdev_cxy; // TXT_RX chdev cluster chdev_t * chdev_ptr; // local pointer on TXT_RX chdev xptr_t root_xp; // extended pointer on list root in chdev xptr_t lock_xp; // extended pointer on list lock in chdev // check process is in owner cluster assert( (CXY_FROM_PID( process->pid ) == local_cxy) , "process descriptor not in owner cluster" ); // check terminal index assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) , "illegal TXT terminal index" ); // get pointers on TXT_RX[txt_id] chdev chdev_xp = chdev_dir.txt_rx[txt_id]; chdev_cxy = GET_CXY( chdev_xp ); chdev_ptr = GET_PTR( chdev_xp ); // get extended pointer on root & lock of attached process list root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root ); lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock ); // get lock protecting list of processes attached to TXT remote_busylock_acquire( lock_xp ); // insert process in attached process list xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) ); // release lock protecting list of processes attached to TXT remote_busylock_release( lock_xp ); #if DEBUG_PROCESS_TXT thread_t * this = CURRENT_THREAD; uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_PROCESS_TXT < cycle ) printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n", __FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle ); #endif } // end process_txt_attach() ///////////////////////////////////////////// void process_txt_detach( xptr_t process_xp ) { process_t * process_ptr; // local pointer on process in owner cluster cxy_t process_cxy; // process owner cluster pid_t process_pid; // process identifier xptr_t file_xp; // extended pointer on stdin file xptr_t chdev_xp; // extended pointer on TXT_RX chdev cxy_t chdev_cxy; // TXT_RX chdev cluster chdev_t * chdev_ptr; // local pointer on TXT_RX chdev xptr_t lock_xp; // extended pointer on list lock in chdev // get process cluster, local pointer, and PID process_cxy = GET_CXY( process_xp ); process_ptr = GET_PTR( process_xp ); process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); // check process descriptor in owner cluster assert( (CXY_FROM_PID( process_pid ) == process_cxy ) , "process descriptor not in owner cluster" ); // release TXT ownership (does nothing if not TXT owner) process_txt_transfer_ownership( process_xp ); // get extended pointer on process stdin file file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) ); // get pointers on TXT_RX chdev chdev_xp = chdev_from_file( file_xp ); chdev_cxy = GET_CXY( chdev_xp ); chdev_ptr = (chdev_t *)GET_PTR( chdev_xp ); // get extended pointer on lock protecting attached process list lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock ); // get lock protecting list of processes attached to TXT remote_busylock_acquire( lock_xp ); // unlink process from attached process list xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) ); // release lock protecting list of processes attached to TXT remote_busylock_release( lock_xp ); #if 

///////////////////////////////////////////////////
void process_txt_set_ownership( xptr_t process_xp )
{
    process_t * process_ptr;
    cxy_t       process_cxy;
    pid_t       process_pid;
    xptr_t      file_xp;
    xptr_t      txt_xp;
    chdev_t   * txt_ptr;
    cxy_t       txt_cxy;

    // get pointers on process in owner cluster
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

    // check owner cluster
    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    "process descriptor not in owner cluster\n" );

    // get extended pointer on stdin pseudo file
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // set owner field in TXT chdev
    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );

#if DEBUG_PROCESS_TXT
thread_t * this   = CURRENT_THREAD;
uint32_t   cycle  = (uint32_t)hal_get_cycles();
uint32_t   txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
#endif

}  // end process_txt_set_ownership()

////////////////////////////////////////////////////////
void process_txt_transfer_ownership( xptr_t process_xp )
{
    process_t * process_ptr;   // local pointer on process releasing ownership
    cxy_t       process_cxy;   // process cluster
    pid_t       process_pid;   // process identifier
    xptr_t      file_xp;       // extended pointer on TXT_RX pseudo file
    xptr_t      txt_xp;        // extended pointer on TXT_RX chdev
    chdev_t   * txt_ptr;       // local pointer on TXT_RX chdev
    cxy_t       txt_cxy;       // cluster of TXT_RX chdev
    uint32_t    txt_id;        // TXT_RX channel
    xptr_t      owner_xp;      // extended pointer on current TXT_RX owner
    xptr_t      root_xp;       // extended pointer on root of attached process list
    xptr_t      lock_xp;       // extended pointer on lock protecting attached process list
    xptr_t      iter_xp;       // iterator for xlist
    xptr_t      current_xp;    // extended pointer on current process
    process_t * current_ptr;   // local pointer on current process
    cxy_t       current_cxy;   // cluster for current process

#if DEBUG_PROCESS_TXT
thread_t * this = CURRENT_THREAD;
uint32_t   cycle;
#endif

    // get pointers on process in owner cluster
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

    // check owner cluster
    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    "process descriptor not in owner cluster\n" );

    // get extended pointer on stdin pseudo file
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // get extended pointer on TXT_RX owner and TXT channel
    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );

    // transfer ownership only if process is the TXT owner
    if( (owner_xp == process_xp) && (txt_id > 0) )
    {
        // get extended pointers on root and lock of attached processes list
        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );

        // get lock
        remote_busylock_acquire( lock_xp );

        if( process_get_ppid( process_xp ) != 1 )        // process is not KSH
        {
            // scan attached process list to find KSH process
            XLIST_FOREACH( root_xp , iter_xp )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
                current_cxy = GET_CXY( current_xp );
                current_ptr = GET_PTR( current_xp );

                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
                {
                    // release lock
                    remote_busylock_release( lock_xp );

                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
process_txt_display( txt_id );
#endif
                    return;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

            // PANIC if KSH not found
            assert( false , "KSH process not found for TXT %d" );

            return;
        }
        else                                             // process is KSH
        {
            // scan attached process list to find another process
            XLIST_FOREACH( root_xp , iter_xp )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
                current_cxy = GET_CXY( current_xp );
                current_ptr = GET_PTR( current_xp );

                if( current_xp != process_xp )           // current is not KSH
                {
                    // release lock
                    remote_busylock_release( lock_xp );

                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
process_txt_display( txt_id );
#endif
                    return;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

            // no more owner for TXT if no other process found
            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
process_txt_display( txt_id );
#endif
            return;
        }
    }
    else
    {

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n",
__FUNCTION__, this->trdid, process_pid, cycle );
process_txt_display( txt_id );
#endif

    }
}  // end process_txt_transfer_ownership()
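
//////////////////////////////////////////////////////////////////////////////////////////
// Summary of the ownership transfer policy implemented above: the function only acts
// when the argument process currently owns a user terminal (txt_id > 0). If this owner
// is not a KSH process (its parent is not the INIT process), ownership goes to the KSH
// process attached to the same terminal, and the kernel panics if no KSH is found.
// If the owner is the KSH process itself, ownership goes to any other attached process,
// or to nobody (XPTR_NULL) when KSH is the only process attached to this terminal.
//////////////////////////////////////////////////////////////////////////////////////////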

////////////////////////////////////////////////
bool_t process_txt_is_owner( xptr_t process_xp )
{
    // get local pointer and cluster of process in owner cluster
    cxy_t       process_cxy = GET_CXY( process_xp );
    process_t * process_ptr = GET_PTR( process_xp );

    // check process descriptor is in owner cluster
    pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    "process descriptor not in owner cluster\n" );

    // get extended pointer on stdin pseudo file
    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    xptr_t    txt_xp  = chdev_from_file( file_xp );
    cxy_t     txt_cxy = GET_CXY( txt_xp );
    chdev_t * txt_ptr = GET_PTR( txt_xp );

    // get extended pointer on TXT_RX owner process
    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );

    return (process_xp == owner_xp);
}  // end process_txt_is_owner()
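
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the kernel build, hypothetical helper name): a process
// is "foreground" on its terminal exactly when it owns the TXT_RX chdev, so a foreground
// test reduces to process_txt_is_owner(), called with the extended pointer on the process
// descriptor registered in its owner cluster.
//////////////////////////////////////////////////////////////////////////////////////////
// static inline bool_t example_process_is_foreground( xptr_t process_xp )
// {
//     return process_txt_is_owner( process_xp );
// }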

////////////////////////////////////////////////
xptr_t process_txt_get_owner( uint32_t channel )
{
    xptr_t    txt_rx_xp  = chdev_dir.txt_rx[channel];
    cxy_t     txt_rx_cxy = GET_CXY( txt_rx_xp );
    chdev_t * txt_rx_ptr = GET_PTR( txt_rx_xp );

    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
}  // end process_txt_get_owner()

///////////////////////////////////////////
void process_txt_display( uint32_t txt_id )
{
    xptr_t    chdev_xp;
    cxy_t     chdev_cxy;
    chdev_t * chdev_ptr;
    xptr_t    root_xp;
    xptr_t    lock_xp;
    xptr_t    current_xp;
    xptr_t    iter_xp;
    cxy_t     txt0_cxy;
    chdev_t * txt0_ptr;
    xptr_t    txt0_xp;
    xptr_t    txt0_lock_xp;

    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
    "illegal TXT terminal index" );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get pointers on TXT_RX[txt_id] chdev
    chdev_xp  = chdev_dir.txt_rx[txt_id];
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root & lock of attached process list
    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // get lock on attached process list
    remote_busylock_acquire( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_busylock_acquire( txt0_lock_xp );

    // display header
    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
    txt_id , (uint32_t)hal_get_cycles() );

    // scan attached process list
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
        process_display( current_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_busylock_release( txt0_lock_xp );

    // release lock on attached process list
    remote_busylock_release( lock_xp );

}  // end process_txt_display()
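
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the kernel build, hypothetical helper name): a debug
// routine could list the processes attached to every user terminal by iterating
// process_txt_display() over the user channels (TXT0 being reserved for the kernel).
//////////////////////////////////////////////////////////////////////////////////////////
// static void example_txt_display_all( void )
// {
//     uint32_t txt_id;
//     for( txt_id = 1 ; txt_id < LOCAL_CLUSTER->nb_txt_channels ; txt_id++ )
//     {
//         process_txt_display( txt_id );
//     }
// }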