/*
 * process.c - process related management
 *
 * Authors  Ghassan Almaless       (2008,2009,2010,2011,2012)
 *          Mohamed Lamine Karaoui (2015)
 *          Alain Greiner          (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// NOTE : the header names of the original #include list were lost in this copy
// of the file; the list below is reconstructed from the identifiers used here
// and may not exactly match the original.
#include <kernel_config.h>
#include <hal_types.h>
#include <hal_remote.h>
#include <hal_uspace.h>
#include <hal_irqmask.h>
#include <errno.h>
#include <printk.h>
#include <memcpy.h>
#include <bits.h>
#include <kmem.h>
#include <page.h>
#include <vmm.h>
#include <vfs.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <list.h>
#include <string.h>
#include <scheduler.h>
#include <remote_spinlock.h>
#include <dqdt.h>
#include <cluster.h>
#include <ppm.h>
#include <boot_info.h>
#include <process.h>
#include <elf.h>
#include <syscalls.h>
#include <shared_syscalls.h>

//////////////////////////////////////////////////////////////////////////////////////////
//     Extern global variables
//////////////////////////////////////////////////////////////////////////////////////////

extern process_t          process_zero;    // allocated in kernel_init.c
extern chdev_directory_t  chdev_dir;       // allocated in kernel_init.c

//////////////////////////////////////////////////////////////////////////////////////////
//     Process initialisation related functions
//////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////
process_t * process_alloc()
{
    kmem_req_t req;

    req.type  = KMEM_PROCESS;
    req.size  = sizeof(process_t);
    req.flags = AF_KERNEL;

    return (process_t *)kmem_alloc( &req );
}

////////////////////////////////////////
void process_free( process_t * process )
{
    kmem_req_t req;

    req.type = KMEM_PROCESS;
    req.ptr  = process;
    kmem_free( &req );
}
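/*
 * Summary (informal, derived from the two initialisation functions below) :
 * each process is implemented by one "reference" descriptor, built by
 * process_reference_init() in the owner cluster, and possibly several
 * incomplete copies in other clusters, built by process_copy_init().
 * The reference descriptor attaches the process to a TXT terminal and opens
 * the stdin / stdout / stderr pseudo files; a copy only keeps an extended
 * pointer (ref_xp) on the reference descriptor.
 */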
/////////////////////////////////////////////////
void process_reference_init( process_t * process,
                             pid_t       pid,
                             xptr_t      parent_xp,
                             xptr_t      model_xp )
{
    cxy_t       parent_cxy;
    process_t * parent_ptr;
    cxy_t       model_cxy;
    process_t * model_ptr;
    xptr_t      stdin_xp;
    xptr_t      stdout_xp;
    xptr_t      stderr_xp;
    uint32_t    stdin_id;
    uint32_t    stdout_id;
    uint32_t    stderr_id;
    error_t     error;
    uint32_t    txt_id;
    char        rx_path[40];
    char        tx_path[40];
    xptr_t      file_xp;
    xptr_t      chdev_xp;
    chdev_t   * chdev_ptr;
    cxy_t       chdev_cxy;
    pid_t       model_pid;
    pid_t       parent_pid;

    // get model process cluster and local pointer
    model_cxy = GET_CXY( model_xp );
    model_ptr = GET_PTR( model_xp );

    // get parent process cluster and local pointer
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );

    // get model_pid and parent_pid
    parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
    model_pid  = hal_remote_lw( XPTR( model_cxy  , &model_ptr->pid ) );

#if DEBUG_PROCESS_REFERENCE_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle );
#endif

    // initialize PID, REF_XP, PARENT_XP, and STATE
    process->pid        = pid;
    process->ref_xp     = XPTR( local_cxy , process );
    process->parent_xp  = parent_xp;
    process->term_state = 0;

    // initialize vmm as empty
    error = vmm_init( process );
    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , cycle );
#endif

    // initialize fd_array as empty
    process_fd_init( process );

    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
    // - if INIT (pid == 1)         => link to kernel TXT[0]
    // - if KSH[i] (model_pid == 1) => allocate a free TXT[i]
    // - if USER process            => same terminal as model
    if( (pid == 1) || (model_pid == 1) )              // INIT or KSH process
    {
        if( pid == 1 ) txt_id = 0;                    // INIT
        else           txt_id = process_txt_alloc(); // KSH[i]

        // attach process to TXT[txt_id]
        process_txt_attach( process , txt_id );

        // build path to TXT_RX[i] and TXT_TX[i] chdevs
        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );

        // create stdin pseudo file
        error = vfs_open( process,
                          rx_path,
                          O_RDONLY,
                          0,                // FIXME chmod
                          &stdin_xp,
                          &stdin_id );

        assert( (error == 0) , __FUNCTION__ , "cannot open stdin pseudo file" );
        assert( (stdin_id == 0) , __FUNCTION__ , "stdin index must be 0" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x / stdin open for process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , cycle );
#endif

        // create stdout pseudo file
        error = vfs_open( process,
                          tx_path,
                          O_WRONLY,
                          0,                // FIXME chmod
                          &stdout_xp,
                          &stdout_id );

        assert( (error == 0) , __FUNCTION__ , "cannot open stdout pseudo file" );
        assert( (stdout_id == 1) , __FUNCTION__ , "stdout index must be 1" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x / stdout open for process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , cycle );
#endif

        // create stderr pseudo file
        error = vfs_open( process,
                          tx_path,
                          O_WRONLY,
                          0,                // FIXME chmod
                          &stderr_xp,
                          &stderr_id );

        assert( (error == 0) , __FUNCTION__ , "cannot open stderr pseudo file" );
        assert( (stderr_id == 2) , __FUNCTION__ , "stderr index must be 2" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x / stderr open for process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , cycle );
#endif

    }
    else                                              // normal user process
    {
        // get extended pointer on stdin pseudo file in model process
        file_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy , &model_ptr->fd_array.array[0] ) );

        // get extended pointer on model process TXT chdev
        chdev_xp = chdev_from_file( file_xp );

        // get cluster and local pointer on chdev
        chdev_cxy = GET_CXY( chdev_xp );
        chdev_ptr = GET_PTR( chdev_xp );

        // get TXT terminal index
        txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );

        // attach process to TXT[txt_id]
        process_txt_attach( process , txt_id );

        // copy all open files from model process fd_array to this process
        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
                                XPTR( model_cxy , &model_ptr->fd_array ) );
    }

    // initialize specific inodes root and cwd
    process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                         &model_ptr->vfs_root_xp ) );
    process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                         &model_ptr->vfs_cwd_xp ) );
    vfs_inode_remote_up( process->vfs_root_xp );
    vfs_inode_remote_up( process->vfs_cwd_xp );

    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , cycle );
#endif

    // reset children list root
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr = 0;
    remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );

    // reset semaphore / mutex / barrier / condvar list roots
    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );

    // register new process in the local cluster manager pref_tbl[]
    lpid_t lpid = LPID_FROM_PID( pid );
    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( process );

    // register new process descriptor in local cluster manager copies_list
    cluster_process_copies_link( process );

    // reset th_tbl[] array as empty in process descriptor
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
    {
        process->th_tbl[i] = NULL;
    }
    process->th_nr = 0;
    spinlock_init( &process->th_lock );

    hal_fence();

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT )
printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , pid , cycle );
#endif

}  // process_reference_init()

/////////////////////////////////////////////////////
error_t process_copy_init( process_t * local_process,
                           xptr_t      reference_process_xp )
{
    error_t error;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
    process_t * ref_ptr = GET_PTR( reference_process_xp );

    // initialize PID, REF_XP, PARENT_XP, and STATE
    local_process->pid        = hal_remote_lw(  XPTR( ref_cxy , &ref_ptr->pid ) );
    local_process->parent_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
    local_process->ref_xp     = reference_process_xp;
    local_process->term_state = 0;

#if DEBUG_PROCESS_COPY_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_COPY_INIT )
printk("\n[DBG] %s : thread %x enter for process %x\n",
__FUNCTION__ , CURRENT_THREAD , local_process->pid );
#endif

    // reset local process vmm
    error = vmm_init( local_process );
    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n");

    // reset process file descriptors array
    process_fd_init( local_process );

    // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields
    local_process->vfs_root_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    local_process->vfs_cwd_xp  = XPTR_NULL;

    // reset children list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
    local_process->children_nr = 0;
    remote_spinlock_init( XPTR( local_cxy , &local_process->children_lock ) );

    // reset children_list (not used in a process descriptor copy)
    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );

    // reset semaphores list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );

    // reset th_tbl[] array as empty
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
    {
        local_process->th_tbl[i] = NULL;
    }
    local_process->th_nr = 0;
    spinlock_init( &local_process->th_lock );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( local_process );

    // register new process descriptor in owner cluster manager copies_list
    cluster_process_copies_link( local_process );

    hal_fence();

#if DEBUG_PROCESS_COPY_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_COPY_INIT )
printk("\n[DBG] %s : thread %x exit for process %x\n",
__FUNCTION__ , CURRENT_THREAD , local_process->pid );
#endif

    return 0;

}  // end process_copy_init()

///////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;
    process_t * parent_ptr;
    cxy_t       parent_cxy;
    xptr_t      children_lock_xp;

    pid_t pid = process->pid;

    assert( (process->th_nr == 0) , __FUNCTION__ ,
    "process %x in cluster %x has still active threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY )
printk("\n[DBG] %s : thread %x enter in cluster %x / pid %x / process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , process , cycle );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

    // remove process from children_list if process is in owner cluster
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );

        // remove process from children_list
        remote_spinlock_lock( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
        remote_spinlock_unlock( children_lock_xp );
    }

    // release the process PID to cluster manager if owner cluster
    if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );

    // FIXME close all open files and update dirty [AG]

    // decrease refcount for bin file, root file and cwd file
    if( process->vfs_bin_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp );
    if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp );
    if( process->vfs_cwd_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_cwd_xp );

    // Destroy VMM
    vmm_destroy( process );

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY )
printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , process, pid, cycle );
#endif

}  // end process_destroy()

/////////////////////////////////////////////////
char * process_action_str( uint32_t action_type )
{
    if     ( action_type == BLOCK_ALL_THREADS   ) return "BLOCK";
    else if( action_type == UNBLOCK_ALL_THREADS ) return "UNBLOCK";
    else if( action_type == DELETE_ALL_THREADS  ) return "DELETE";
    else                                          return "undefined";
}
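/*
 * The process_sigaction() function below fans a BLOCK / UNBLOCK / DELETE
 * request out to all clusters containing a copy of the target process,
 * using one shared RPC descriptor and a response counter. The three
 * functions that follow (process_block_threads(), process_delete_threads(),
 * process_unblock_threads()) implement the local part of the action on the
 * threads registered in the local th_tbl[]. A typical caller (illustrative
 * only, not taken from this file) would simply do :
 *
 *     process_sigaction( pid , DELETE_ALL_THREADS );
 */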
////////////////////////////////////////
void process_sigaction( pid_t    pid,
                        uint32_t action_type )
{
    cxy_t        owner_cxy;      // owner cluster identifier
    lpid_t       lpid;           // process index in owner cluster
    cluster_t  * cluster;        // pointer on cluster manager
    xptr_t       root_xp;        // extended pointer on root of copies
    xptr_t       lock_xp;        // extended pointer on lock protecting copies
    xptr_t       iter_xp;        // iterator on copies list
    xptr_t       process_xp;     // extended pointer on process copy
    cxy_t        process_cxy;    // process copy cluster identifier
    reg_t        save_sr;        // for critical section
    rpc_desc_t   rpc;            // shared RPC descriptor

    thread_t * client = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x enter to %s process %x / cycle %d\n",
__FUNCTION__ , client, process_action_str( action_type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies, lock, and number of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // check action type
    assert( ((action_type == DELETE_ALL_THREADS ) ||
             (action_type == BLOCK_ALL_THREADS )  ||
             (action_type == UNBLOCK_ALL_THREADS )),
             __FUNCTION__ , "illegal action type" );

    // allocate a - shared - RPC descriptor in client thread stack
    // it can be shared because all parallel, non-blocking, server threads
    // use the same input arguments, and use the shared RPC response field

    // the client thread makes the following sequence:
    // 1. mask interrupts
    // 2. block itself
    // 3. send RPC requests to all copies
    // 4. unmask interrupts
    // 5. deschedule

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread registers blocking condition for itself
    thread_block( XPTR( local_cxy , client ) , THREAD_BLOCKED_RPC );

    // take the lock protecting the copies
    remote_spinlock_lock( lock_xp );

    // initialize shared RPC descriptor
    rpc.responses = 0;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = action_type;
    rpc.args[1]   = pid;

    // send RPCs to all clusters containing process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // atomically increment responses counter
        hal_atomic_add( (void *)&rpc.responses , 1 );

        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n",
__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
#endif

        // call RPC in target cluster
        rpc_process_sigaction_client( process_cxy , &rpc );
    }

    // release the lock protecting process copies
    remote_spinlock_unlock( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // client thread deschedule : will be unblocked by the last RPC server thread
    sched_yield("blocked on rpc_process_sigaction");

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
__FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle );
#endif

}  // end process_sigaction()
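/*
 * The process_block_threads() function below blocks all local threads of the
 * target process, except the main thread in the owner cluster and the client
 * thread itself. When a target thread runs on another core, it also requests
 * an acknowledgement from the target scheduler and polls the ack_count
 * counter, to make sure the blocked thread is actually descheduled before
 * returning.
 */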
/////////////////////////////////////////////////
void process_block_threads( process_t * process,
                            xptr_t      client_xp )
{
    thread_t          * target;       // pointer on target thread
    thread_t          * this;         // pointer on calling thread
    uint32_t            ltid;         // index in process th_tbl
    cxy_t               owner_cxy;    // target process owner cluster
    uint32_t            count;        // requests counter
    volatile uint32_t   ack_count;    // scheduler acknowledge counter

    // get calling thread pointer
    this = CURRENT_THREAD;

    // get target process owner cluster
    owner_cxy = CXY_FROM_PID( process->pid );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this , process->pid , local_cxy , cycle );
#endif

    // get lock protecting process th_tbl[]
    spinlock_lock( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // thread exists
        {
            count++;

            // main thread and client thread should not be blocked
            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&    // not main thread
                (client_xp) != XPTR( local_cxy , target ) )     // not client thread
            {
                // set the global blocked bit in target thread descriptor.
                thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );

                // - if the calling thread and the target thread are on the same core,
                //   we don't need confirmation from scheduler,
                // - if the calling thread and the target thread are not running on the same
                //   core, we ask the target scheduler to acknowledge the blocking
                //   to be sure that the target thread is not running.
                if( this->core->lid != target->core->lid )
                {
                    // increment responses counter
                    hal_atomic_add( (void*)&ack_count , 1 );

                    // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                    thread_set_req_ack( target , (uint32_t *)&ack_count );

                    // force scheduling on target thread
                    dev_pic_send_ipi( local_cxy , target->core->lid );
                }
            }
        }
    }

    // release lock protecting process th_tbl[]
    spinlock_unlock( &process->th_lock );

    // wait acknowledges
    while( 1 )
    {
        // exit when all scheduler acknowledges received
        if ( ack_count == 0 ) break;

        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this , process->pid , local_cxy , cycle );
#endif

}  // end process_block_threads()

/////////////////////////////////////////////////
void process_delete_threads( process_t * process,
                             xptr_t      client_xp )
{
    thread_t          * this;         // pointer on calling thread
    thread_t          * target;       // local pointer on target thread
    xptr_t              target_xp;    // extended pointer on target thread
    cxy_t               owner_cxy;    // owner process cluster
    uint32_t            ltid;         // index in process th_tbl
    uint32_t            count;        // threads counter

    // get calling thread pointer
    this = CURRENT_THREAD;

    // get target process owner cluster
    owner_cxy = CXY_FROM_PID( process->pid );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this , process->pid , local_cxy , cycle );
#endif

    // get lock protecting process th_tbl[]
    spinlock_lock( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // valid thread
        {
            count++;
            target_xp = XPTR( local_cxy , target );

            // main thread and client thread should not be deleted
            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&    // not main thread
                (client_xp) != target_xp )                      // not client thread
            {
                // mark target thread for delete and block it
                thread_delete( target_xp , process->pid , false );   // not forced
            }
        }
    }

    // release lock protecting process th_tbl[]
    spinlock_unlock( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this , process->pid , local_cxy , cycle );
#endif

}  // end process_delete_threads()

///////////////////////////////////////////////////
void process_unblock_threads( process_t * process )
{
    thread_t          * target;       // pointer on target thread
    thread_t          * this;         // pointer on calling thread
    uint32_t            ltid;         // index in process th_tbl
    uint32_t            count;        // requests counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this , process->pid , local_cxy , cycle );
#endif

    // get lock protecting process th_tbl[]
    spinlock_lock( &process->th_lock );

    // loop on process threads to unblock all threads
    // we use both "ltid" and "count" because there can be "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )             // thread found
        {
            count++;

            // reset the global blocked bit in target thread descriptor.
            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
        }
    }

    // release lock protecting process th_tbl[]
    spinlock_unlock( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this , process->pid , local_cxy , cycle );
#endif

}  // end process_unblock_threads()

///////////////////////////////////////////////
process_t * process_get_local_copy( pid_t pid )
{
    error_t       error;
    process_t   * process_ptr;   // local pointer on process
    xptr_t        process_xp;    // extended pointer on process

    cluster_t * cluster = LOCAL_CLUSTER;

    // get lock protecting local list of processes
    remote_spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );

    // scan the local list of process descriptors to find the process
    xptr_t iter;
    bool_t found = false;
    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
    {
        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
        process_ptr = GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    // release lock protecting local list of processes
    remote_spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );

    // allocate memory for a new local process descriptor
    // and initialise it from reference cluster if not found
    if( !found )
    {
        // get extended pointer on reference process descriptor
        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );

        assert( (ref_xp != XPTR_NULL) , __FUNCTION__ , "illegal pid\n" );

        // allocate memory for local process descriptor
        process_ptr = process_alloc();

        if( process_ptr == NULL )  return NULL;

        // initialize local process descriptor copy
        error = process_copy_init( process_ptr , ref_xp );

        if( error ) return NULL;
    }

#if DEBUG_PROCESS_GET_LOCAL_COPY
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , pid , process_ptr , cycle );
#endif

    return process_ptr;

}  // end process_get_local_copy()
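/*
 * Usage sketch for process_get_local_copy() (illustrative only, the real
 * callers live in other files) : a thread running in a cluster that does
 * not yet hold a copy of a process descriptor can simply call
 *
 *     process_t * process = process_get_local_copy( pid );
 *     if( process == NULL ) { ... allocation or copy initialisation failed ... }
 *
 * and receive either the copy already registered in the local cluster
 * manager, or a fresh copy initialised from the reference cluster by
 * process_copy_init().
 */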
////////////////////////////////////////////
pid_t process_get_ppid( xptr_t process_xp )
{
    cxy_t       process_cxy;
    process_t * process_ptr;
    xptr_t      parent_xp;
    cxy_t       parent_cxy;
    process_t * parent_ptr;

    // get process cluster and local pointer
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );

    // get pointers on parent process
    parent_xp  = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );

    return hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
}

//////////////////////////////////////////////////////////////////////////////////////////
//     File descriptor array related functions
//////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////
void process_fd_init( process_t * process )
{
    uint32_t fd;

    remote_spinlock_init( XPTR( local_cxy , &process->fd_array.lock ) );

    process->fd_array.current = 0;

    // initialize array
    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
    {
        process->fd_array.array[fd] = XPTR_NULL;
    }
}

//////////////////////////////
bool_t process_fd_array_full()
{
    // get extended pointer on reference process
    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;

    // get reference process cluster and local pointer
    process_t * ref_ptr = GET_PTR( ref_xp );
    cxy_t       ref_cxy = GET_CXY( ref_xp );

    // get number of open file descriptors from reference fd_array
    uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );

    return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
}

/////////////////////////////////////////////////
error_t process_fd_register( process_t * process,
                             xptr_t      file_xp,
                             uint32_t  * fdid )
{
    bool_t   found;
    uint32_t id;
    xptr_t   xp;

    // get reference process cluster and local pointer
    xptr_t      ref_xp  = process->ref_xp;
    process_t * ref_ptr = GET_PTR( ref_xp );
    cxy_t       ref_cxy = GET_CXY( ref_xp );

    // take lock protecting reference fd_array
    remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );

    found = false;

    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
    {
        xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
        if ( xp == XPTR_NULL )
        {
            found = true;
            hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
            hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
            *fdid = id;
            break;
        }
    }

    // release lock protecting reference fd_array
    remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );

    if ( !found ) return -1;
    else          return 0;
}

////////////////////////////////////////////////
xptr_t process_fd_get_xptr( process_t * process,
                            uint32_t    fdid )
{
    xptr_t file_xp;

    // access local copy of process descriptor
    file_xp = process->fd_array.array[fdid];

    if( file_xp == XPTR_NULL )
    {
        // get reference process cluster and local pointer
        xptr_t      ref_xp  = process->ref_xp;
        cxy_t       ref_cxy = GET_CXY( ref_xp );
        process_t * ref_ptr = GET_PTR( ref_xp );

        // access reference process descriptor
        file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );

        // update local fd_array if found
        if( file_xp != XPTR_NULL )
        {
            process->fd_array.array[fdid] = file_xp;
        }
    }

    return file_xp;

}  // end process_fd_get_xptr()
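/*
 * fd_array usage sketch (illustrative only, the caller names are not taken
 * from this file) : a new open file is always registered in the reference
 * process fd_array, and later accessed through the local copy :
 *
 *     uint32_t fdid;
 *     if( process_fd_register( process , file_xp , &fdid ) ) return -1;   // fd_array full
 *     ...
 *     xptr_t xp = process_fd_get_xptr( process , fdid );   // lazily caches the entry locally
 */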
///////////////////////////////////////////
void process_fd_remote_copy( xptr_t dst_xp,
                             xptr_t src_xp )
{
    uint32_t fd;
    xptr_t   entry;

    // get cluster and local pointer for src fd_array
    cxy_t        src_cxy = GET_CXY( src_xp );
    fd_array_t * src_ptr = GET_PTR( src_xp );

    // get cluster and local pointer for dst fd_array
    cxy_t        dst_cxy = GET_CXY( dst_xp );
    fd_array_t * dst_ptr = GET_PTR( dst_xp );

    // get the remote lock protecting the src fd_array
    remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );

    // loop on all fd_array entries
    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
    {
        entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );

        if( entry != XPTR_NULL )
        {
            // increment file descriptor ref count
            vfs_file_count_up( entry );

            // copy entry in destination process fd_array
            hal_remote_swd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
        }
    }

    // release lock on source process fd_array
    remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );

}  // end process_fd_remote_copy()

////////////////////////////////////////////////////////////////////////////////////
//  Thread related functions
////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////
error_t process_register_thread( process_t * process,
                                 thread_t  * thread,
                                 trdid_t   * trdid )
{
    ltid_t ltid;
    bool_t found = false;

    assert( (process != NULL) , __FUNCTION__ , "process argument is NULL" );

    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );

    // take lock protecting th_tbl
    spinlock_lock( &process->th_lock );

    // search a free slot in th_tbl[]
    for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
    {
        if( process->th_tbl[ltid] == NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register thread in th_tbl[]
        process->th_tbl[ltid] = thread;
        process->th_nr++;

        // returns trdid
        *trdid = TRDID( local_cxy , ltid );
    }

    // release lock protecting th_tbl
    hal_fence();
    spinlock_unlock( &process->th_lock );

    return (found) ? 0 : ENOMEM;

}  // end process_register_thread()

///////////////////////////////////////////////
void process_remove_thread( thread_t * thread )
{
    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );

    process_t * process = thread->process;

    // get thread local index
    ltid_t ltid = LTID_FROM_TRDID( thread->trdid );

    // take lock protecting th_tbl
    spinlock_lock( &process->th_lock );

    assert( (process->th_nr) , __FUNCTION__ , "process th_nr cannot be 0\n" );

    // remove thread from th_tbl[]
    process->th_tbl[ltid] = NULL;
    process->th_nr--;

    hal_fence();

    // release lock protecting th_tbl
    spinlock_unlock( &process->th_lock );

}  // process_remove_thread()

/////////////////////////////////////////////////////////
error_t process_make_fork( xptr_t      parent_process_xp,
                           xptr_t      parent_thread_xp,
                           pid_t     * child_pid,
                           thread_t ** child_thread )
{
    process_t * process;       // local pointer on child process descriptor
    thread_t  * thread;        // local pointer on child thread descriptor
    pid_t       new_pid;       // process identifier for child process
    pid_t       parent_pid;    // process identifier for parent process
    xptr_t      ref_xp;        // extended pointer on reference process
    xptr_t      vfs_bin_xp;    // extended pointer on .elf file
    error_t     error;

    // get cluster and local pointer for parent process
    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
    process_t * parent_process_ptr = GET_PTR( parent_process_xp );

    // get parent process PID and extended pointer on .elf file
    parent_pid = hal_remote_lw (XPTR( parent_process_cxy , &parent_process_ptr->pid));
    vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));

    // check parent process is the reference process
    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );

    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
    "parent process must be the reference process\n" );
#if DEBUG_PROCESS_MAKE_FORK
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[DBG] %s : thread %x enter for process %x / cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, parent_pid, local_cxy, cycle );
#endif

    // allocate a process descriptor
    process = process_alloc();
    if( process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get process in cluster %x\n",
        __FUNCTION__, local_cxy );
        return -1;
    }

    // allocate a child PID from local cluster
    error = cluster_pid_alloc( process , &new_pid );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n",
        __FUNCTION__, local_cxy );
        process_free( process );
        return -1;
    }

    // initializes child process descriptor from parent process descriptor
    process_reference_init( process,
                            new_pid,
                            parent_process_xp,
                            parent_process_xp );

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process, new_pid, cycle );
#endif

    // copy VMM from parent descriptor to child descriptor
    error = vmm_fork_copy( process,
                           parent_process_xp );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n",
        __FUNCTION__, local_cxy );
        process_free( process );
        cluster_pid_release( new_pid );
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle );
#endif

    // update extended pointer on .elf file
    process->vfs_bin_xp = vfs_bin_xp;

    // create child thread descriptor from parent thread descriptor
    error = thread_user_fork( parent_thread_xp,
                              process,
                              &thread );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
        __FUNCTION__, local_cxy );
        process_free( process );
        cluster_pid_release( new_pid );
        return -1;
    }

    // check main thread LTID
    assert( (LTID_FROM_TRDID(thread->trdid) == 0) , __FUNCTION__ ,
    "main thread must have LTID == 0\n" );

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[DBG] %s : thread %x created child thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD, thread, local_cxy, thread->core->lid, cycle );
#endif

    // set Copy_On_Write flag in parent process GPT
    // this includes all replicated GPT copies
    if( parent_process_cxy == local_cxy )   // reference is local
    {
        vmm_set_cow( parent_process_ptr );
    }
    else                                    // reference is remote
    {
        rpc_vmm_set_cow_client( parent_process_cxy,
                                parent_process_ptr );
    }

    // set Copy_On_Write flag in child process GPT
    vmm_set_cow( process );

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD, cycle );
#endif

    // get extended pointers on parent children_root, children_lock and children_nr
    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );

    // register process in parent children list
    remote_spinlock_lock( children_lock_xp );
    xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
    hal_remote_atomic_add( children_nr_xp , 1 );
    remote_spinlock_unlock( children_lock_xp );

    // return success
    *child_thread = thread;
    *child_pid    = new_pid;

#if DEBUG_PROCESS_MAKE_FORK
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[DBG] %s : thread %x exit / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, cycle );
#endif

    return 0;

}  // end process_make_fork()
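/*
 * The process_make_exec() function below replaces the calling process by a
 * new process running the new .elf image, while keeping the original PID :
 * the old process receives a temporary PID, the new process is built with
 * the original PID, inherits the parent and the TXT terminal, and the old
 * main thread is finally blocked and marked for deletion.
 */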
/////////////////////////////////////////////////////
error_t process_make_exec( exec_info_t  * exec_info )
{
    char           * path;           // pathname to .elf file
    pid_t            pid;            // old_process PID, given to new_process
    pid_t            temp_pid;       // temporary PID / given to old_process
    process_t      * old_process;    // local pointer on old process
    thread_t       * old_thread;     // local pointer on old thread
    process_t      * new_process;    // local pointer on new process
    thread_t       * new_thread;     // local pointer on new thread
    xptr_t           parent_xp;      // extended pointer on parent process
    pthread_attr_t   attr;           // new thread attributes
    lid_t            lid;            // selected core local index
    error_t          error;          // value returned by called functions

    // get old_thread / old_process / PID / parent_xp
    old_thread  = CURRENT_THREAD;
    old_process = old_thread->process;
    pid         = old_process->pid;
    parent_xp   = old_process->parent_xp;

    // get .elf pathname from exec_info
    path = exec_info->path;

    // this function must be executed by a thread running in owner cluster
    assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
    "local_cluster must be owner_cluster\n" );

    assert( (LTID_FROM_TRDID( old_thread->trdid ) == 0) , __FUNCTION__,
    "must be called by the main thread\n" );

#if DEBUG_PROCESS_MAKE_EXEC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n",
__FUNCTION__, old_thread, pid, path, cycle );
#endif

    // allocate memory for new_process descriptor
    new_process = process_alloc();

    if( new_process == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate process for %s\n",
        __FUNCTION__ , path );
        return -1;
    }

    // get a temporary PID for old_process
    error = cluster_pid_alloc( old_process , &temp_pid );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n",
        __FUNCTION__ , local_cxy );
        process_free( new_process );
        return -1;
    }

    // set temporary PID to old_process
    old_process->pid = temp_pid;

    // initialize new process descriptor
    process_reference_init( new_process,
                            pid,
                            parent_xp,                          // parent_process_xp
                            XPTR(local_cxy , old_process) );    // model_process

    // give TXT ownership to new_process
    process_txt_set_ownership( XPTR( local_cxy , new_process) );

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n",
__FUNCTION__ , old_thread , new_process , cycle );
#endif

    // register code & data vsegs as well as entry-point in new process VMM,
    // and register extended pointer on .elf file in process descriptor
    error = elf_load_process( path , new_process );

    if( error )
    {
        printk("\n[ERROR] in %s : failed to access <%s>\n",
        __FUNCTION__ , path );
        process_txt_set_ownership( XPTR( local_cxy , old_process) );
        process_txt_detach( XPTR( local_cxy , new_process) );
        process_destroy( new_process );
        old_process->pid = pid;
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n",
__FUNCTION__, old_thread , new_process->pid , cycle );
#endif
    // select a core in local cluster to execute the main thread
    lid = cluster_select_local_core();

    // initialize pthread attributes for main thread
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize main thread in local cluster
    error = thread_user_create( pid,
                                (void *)new_process->vmm.entry_point,
                                exec_info->args_pointers,
                                &attr,
                                &new_thread );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot create thread for %s\n",
        __FUNCTION__ , path );
        process_txt_set_ownership( XPTR( local_cxy , old_process) );
        process_txt_detach( XPTR( local_cxy , new_process) );
        process_destroy( new_process );
        old_process->pid = pid;
        return -1;
    }

    // check main thread LTID
    assert( (LTID_FROM_TRDID(new_thread->trdid) == 0) , __FUNCTION__ ,
    "main thread must have LTID == 0\n" );

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n",
__FUNCTION__ , old_thread , new_thread , cycle );
#endif

    // get cluster and local pointer on parent process
    process_t * parent_ptr = GET_PTR( parent_xp );
    cxy_t       parent_cxy = GET_CXY( parent_xp );

    // get extended pointers on parent children_root, children_lock and children_nr
    xptr_t root_xp = XPTR( parent_cxy , &parent_ptr->children_root );
    xptr_t lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
    xptr_t nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr   );

    // register new_process in parent children list
    remote_spinlock_lock( lock_xp );
    xlist_add_last( root_xp , XPTR( local_cxy , &new_process->children_list ) );
    hal_remote_atomic_add( nr_xp , 1 );
    remote_spinlock_unlock( lock_xp );

    // activate new thread
    thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );

    // detach old_process from TXT
    process_txt_detach( XPTR( local_cxy , old_process ) );

    // request old_thread destruction => old_process destruction
    thread_block( XPTR( local_cxy , old_thread ) , THREAD_BLOCKED_GLOBAL );
    hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE );

    hal_fence();

#if DEBUG_PROCESS_MAKE_EXEC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[DBG] %s : old thread %x blocked for delete / new thread %x activated / cycle %d\n",
__FUNCTION__ , old_thread , new_thread , cycle );
#endif

    return 0;

}  // end process_make_exec()

///////////////////////////////////////////////
void process_zero_create( process_t * process )
{

#if DEBUG_PROCESS_ZERO_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_ZERO_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, cycle );
#endif

    // initialize PID, REF_XP, PARENT_XP, and STATE
    process->pid        = 0;
    process->ref_xp     = XPTR( local_cxy , process );
    process->parent_xp  = XPTR_NULL;
    process->term_state = 0;

    // reset th_tbl[] array as empty
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
    {
        process->th_tbl[i] = NULL;
    }
    process->th_nr = 0;
    spinlock_init( &process->th_lock );

    // reset children list as empty
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
    process->children_nr = 0;

    hal_fence();

#if DEBUG_PROCESS_ZERO_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_ZERO_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, cycle );
#endif

}  // end process_zero_create()
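/*
 * The process_init_create() function below builds the first user process
 * (the "init" process, pid 1) as a child of the kernel process_zero, loads
 * its image from CONFIG_PROCESS_INIT_PATH, and launches its main thread on
 * a core selected in the local cluster.
 */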
//////////////////////////
void process_init_create()
{
    process_t      * process;       // local pointer on process descriptor
    pid_t            pid;           // process_init identifier
    thread_t       * thread;        // local pointer on main thread
    pthread_attr_t   attr;          // main thread attributes
    lid_t            lid;           // selected core local index for main thread
    error_t          error;

#if DEBUG_PROCESS_INIT_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, cycle );
#endif

    // allocates memory for process descriptor from local cluster
    process = process_alloc();

    if( process == NULL )
    {
        printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n",
        __FUNCTION__, local_cxy );
    }

    // get PID from local cluster
    error = cluster_pid_alloc( process , &pid );

    if( error )
    {
        printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n",
        __FUNCTION__, local_cxy );
        process_free( process );
    }

    // check allocated PID
    assert( (pid == 1) , __FUNCTION__ , "process INIT must be first process in cluster 0\n" );

    // initialize process descriptor / parent is local process_zero
    process_reference_init( process,
                            pid,
                            XPTR( local_cxy , &process_zero ),     // parent
                            XPTR( local_cxy , &process_zero ) );   // model

    // register "code" and "data" vsegs as well as entry-point
    // in process VMM, using information contained in the elf file.
    if( elf_load_process( CONFIG_PROCESS_INIT_PATH , process ) )
    {
        printk("\n[PANIC] in %s : cannot access .elf file / path = %s\n",
        __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
        process_destroy( process );
    }

    // get extended pointers on process_zero children_root, children_lock
    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );

    // register process INIT in parent local process_zero
    remote_spinlock_lock( children_lock_xp );
    xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
    hal_atomic_add( &process_zero.children_nr , 1 );
    remote_spinlock_unlock( children_lock_xp );

    // select a core in local cluster to execute the main thread
    lid = cluster_select_local_core();

    // initialize pthread attributes for main thread
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize thread descriptor
    error = thread_user_create( pid,
                                (void *)process->vmm.entry_point,
                                NULL,
                                &attr,
                                &thread );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot create main thread / path = %s\n",
        __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
        process_destroy( process );
    }

    // check main thread index
    assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );

    // activate thread
    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );

    hal_fence();

#if DEBUG_PROCESS_INIT_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, cycle );
#endif

}  // end process_init_create()

/////////////////////////////////////////
void process_display( xptr_t process_xp )
{
    process_t   * process_ptr;
    cxy_t         process_cxy;

    xptr_t        parent_xp;       // extended pointer on parent process
    process_t   * parent_ptr;
    cxy_t         parent_cxy;

    pid_t         pid;
    pid_t         ppid;
    uint32_t      state;
    xptr_t        ref_xp;
    uint32_t      th_nr;

    xptr_t        txt_file_xp;     // extended pointer on TXT_RX pseudo file
    xptr_t        chdev_xp;        // extended pointer on TXT_RX chdev
    chdev_t     * chdev_ptr;
    cxy_t         chdev_cxy;
    xptr_t        owner_xp;        // extended pointer on TXT owner process
    xptr_t        elf_file_xp;     // extended pointer on .elf file
    cxy_t         elf_file_cxy;
    vfs_file_t  * elf_file_ptr;
    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode

    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];

    // get cluster and local pointer on process
    process_ptr = GET_PTR( process_xp );
    process_cxy = GET_CXY( process_xp );

    // check reference process
    ref_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->ref_xp ) );
    assert( (process_xp == ref_xp) , __FUNCTION__ , "process is not the reference\n");

    // get PID and state
    pid   = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) );

    // get PPID
    parent_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );
    ppid       = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );

    // get number of threads
    th_nr = hal_remote_lw( XPTR( process_cxy , &process_ptr->th_nr ) );

    // get TXT name and process owner
    txt_file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    assert( (txt_file_xp != XPTR_NULL) , __FUNCTION__ ,
    "process must be attached to one TXT terminal\n" );

    chdev_xp  = chdev_from_file( txt_file_xp );
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );

    hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
                       XPTR( chdev_cxy , chdev_ptr->name ) );

    owner_xp = (xptr_t)hal_remote_lwd( XPTR( chdev_cxy , &chdev_ptr->ext.txt.owner_xp ) );

    // get process .elf name
    elf_file_xp   = hal_remote_lwd( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
    elf_file_cxy  = GET_CXY( elf_file_xp );
    elf_file_ptr  = (vfs_file_t *)GET_PTR( elf_file_xp );
    elf_inode_ptr = (vfs_inode_t *)hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
    vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );

    // display process info
    if( owner_xp == process_xp )
    {
        printk("PID %X | PPID %X | STS %X | %s (FG) | %X | %d | %s\n",
        pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
    }
    else
    {
        printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n",
        pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
    }
}  // end process_display()

////////////////////////////////////////////////////////////////////////////////////////
//     Terminals related functions
////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////
uint32_t process_txt_alloc()
{
    uint32_t  index;       // TXT terminal index
    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
    xptr_t    root_xp;     // extended pointer on owner field in chdev

    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
    {
        // get pointers on TXT_RX[index]
        chdev_xp  = chdev_dir.txt_rx[index];
        chdev_cxy = GET_CXY( chdev_xp );
        chdev_ptr = GET_PTR( chdev_xp );

        // get extended pointer on root of attached process
        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );

        // return free TXT index if found
        if( xlist_is_empty( root_xp ) ) return index;
    }

    assert( false , __FUNCTION__ , "no free TXT terminal found" );
    return -1;

}  // end process_txt_alloc()

/////////////////////////////////////////////
void process_txt_attach( process_t * process,
                         uint32_t    txt_id )
{
    xptr_t    chdev_xp;      // extended pointer on TXT_RX chdev
    cxy_t     chdev_cxy;     // TXT_RX chdev cluster
    chdev_t * chdev_ptr;     // local pointer on TXT_RX chdev
    xptr_t    root_xp;       // extended pointer on list root in chdev
    xptr_t    lock_xp;       // extended pointer on list lock in chdev

#if DEBUG_PROCESS_TXT_ATTACH
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
#endif

    // check process is in owner cluster
    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
    "process descriptor not in owner cluster" );

    // check terminal index
    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) , __FUNCTION__ ,
    "illegal TXT terminal index" );

    // get pointers on TXT_RX[txt_id] chdev
    chdev_xp  = chdev_dir.txt_rx[txt_id];
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root & lock of attached process list
    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // insert process in attached process list
    remote_spinlock_lock( lock_xp );
    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
    remote_spinlock_unlock( lock_xp );

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
#endif

}  // end process_txt_attach()

/////////////////////////////////////////////
void process_txt_detach( xptr_t process_xp )
{
    process_t * process_ptr;   // local pointer on process in owner cluster
    cxy_t       process_cxy;   // process owner cluster
    pid_t       process_pid;   // process identifier
    xptr_t      file_xp;       // extended pointer on stdin file
    xptr_t      chdev_xp;      // extended pointer on TXT_RX chdev
    cxy_t       chdev_cxy;     // TXT_RX chdev cluster
    chdev_t *   chdev_ptr;     // local pointer on TXT_RX chdev
    xptr_t      lock_xp;       // extended pointer on list lock in chdev

    // get process cluster, local pointer, and PID
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );

    // check process descriptor in owner cluster
    assert( (CXY_FROM_PID( process_pid ) == process_cxy ) , __FUNCTION__ ,
    "process descriptor not in owner cluster" );

#if DEBUG_PROCESS_TXT_ATTACH
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
#endif

    // release TXT ownership (does nothing if not TXT owner)
    process_txt_transfer_ownership( process_xp );

    // get extended pointer on process stdin file
    file_xp = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT_RX chdev
    chdev_xp  = chdev_from_file( file_xp );
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );

    // get extended pointer on lock protecting attached process list
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // unlink process from attached process list
    remote_spinlock_lock( lock_xp );
    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
    remote_spinlock_unlock( lock_xp );

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
uint32_t txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit / process %x detached from TXT %d / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, txt_id, cycle );
#endif

}  // end process_txt_detach()

///////////////////////////////////////////////////
void process_txt_set_ownership( xptr_t process_xp )
{
    process_t * process_ptr;
    cxy_t       process_cxy;
    pid_t       process_pid;
    xptr_t      file_xp;
    xptr_t      txt_xp;
    chdev_t   * txt_ptr;
    cxy_t       txt_cxy;

    // get pointers on process in owner cluster
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );

    // get process PID
    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );

    // check owner cluster
    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
    "process descriptor not in owner cluster\n" );

#if DEBUG_PROCESS_TXT_ATTACH
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
#endif

    // get extended pointer on stdin pseudo file
    file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // set owner field in TXT chdev
    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
#endif

}  // end process_txt_set_ownership()

////////////////////////////////////////////////////////
void process_txt_transfer_ownership( xptr_t process_xp )
{
    process_t * process_ptr;   // local pointer on process releasing ownership
    cxy_t       process_cxy;   // process cluster
    pid_t       process_pid;   // process identifier
    xptr_t      file_xp;       // extended pointer on TXT_RX pseudo file
    xptr_t      txt_xp;        // extended pointer on TXT_RX chdev
    chdev_t   * txt_ptr;       // local pointer on TXT_RX chdev
    cxy_t       txt_cxy;       // cluster of TXT_RX chdev
    uint32_t    txt_id;        // TXT_RX channel
    xptr_t      owner_xp;      // extended pointer on current TXT_RX owner
    xptr_t      root_xp;       // extended pointer on root of attached process list
    xptr_t      lock_xp;       // extended pointer on lock protecting attached process list
    xptr_t      iter_xp;       // iterator for xlist
    xptr_t      current_xp;    // extended pointer on current process
    process_t * current_ptr;   // local pointer on current process
    cxy_t       current_cxy;   // cluster for current process

    // get pointers on process in owner cluster
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );

    // get process PID
    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );

    // check owner cluster
    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
    "process descriptor not in owner cluster\n" );

#if DEBUG_PROCESS_TXT_ATTACH
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
#endif

    // get extended pointer on stdin pseudo file
    file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // get extended pointer on TXT_RX owner and TXT channel
    owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n",
__FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) );
#endif

    // transfer ownership only if process is the TXT owner
    if( (owner_xp == process_xp) && (txt_id > 0) )
    {
        // get extended pointers on root and lock of attached processes list
        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );

        // get lock
        remote_spinlock_lock( lock_xp );

        if( process_get_ppid( process_xp ) != 1 )       // process is not KSH
        {

#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ );
#endif

            // scan attached process list to find KSH process
            XLIST_FOREACH( root_xp , iter_xp )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
                current_cxy = GET_CXY( current_xp );
                current_ptr = GET_PTR( current_xp );

                if( process_get_ppid( current_xp ) == 1 )    // current is KSH
                {
                    // release lock
                    remote_spinlock_unlock( lock_xp );

                    // set owner field in TXT chdev
                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid,
hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
#endif

                    return;
                }
            }

            // release lock
            remote_spinlock_unlock( lock_xp );

            // PANIC if KSH not found
            assert( false , __FUNCTION__ , "KSH process not found for TXT %d" );

            return;
        }
        else                                            // process is KSH
        {

#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ );
#endif

            // scan attached process list to find another process
            XLIST_FOREACH( root_xp , iter_xp )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
                current_cxy = GET_CXY( current_xp );
                current_ptr = GET_PTR( current_xp );

                if( current_xp != process_xp )               // current is not KSH
                {
                    // release lock
                    remote_spinlock_unlock( lock_xp );

                    // set owner field in TXT chdev
                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid,
hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
#endif

                    return;
                }
            }

            // release lock
            remote_spinlock_unlock( lock_xp );

            // no more owner for TXT if no other process found
            hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
#endif

            return;
        }
    }
    else
    {

#if DEBUG_PROCESS_TXT_ATTACH
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT_ATTACH < cycle )
printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
#endif

    }

}  // end process_txt_transfer_ownership()
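/*
 * The process_txt_get_owner() and process_txt_display() functions below are
 * read-only helpers : the first one returns the current foreground (owner)
 * process attached to a given TXT channel, the second one displays all
 * processes attached to a TXT terminal, using process_display().
 */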
////////////////////////////////////////////////
xptr_t process_txt_get_owner( uint32_t channel )
{
    xptr_t    txt_rx_xp  = chdev_dir.txt_rx[channel];
    cxy_t     txt_rx_cxy = GET_CXY( txt_rx_xp );
    chdev_t * txt_rx_ptr = GET_PTR( txt_rx_xp );

    return (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
}

///////////////////////////////////////////
void process_txt_display( uint32_t txt_id )
{
    xptr_t    chdev_xp;
    cxy_t     chdev_cxy;
    chdev_t * chdev_ptr;
    xptr_t    root_xp;
    xptr_t    lock_xp;
    xptr_t    current_xp;
    xptr_t    iter_xp;

    // check terminal index
    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) , __FUNCTION__ ,
    "illegal TXT terminal index" );

    // get pointers on TXT_RX[txt_id] chdev
    chdev_xp  = chdev_dir.txt_rx[txt_id];
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root & lock of attached process list
    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // display header
    printk("\n***** processes attached to TXT_%d\n", txt_id );

    // get lock
    remote_spinlock_lock( lock_xp );

    // scan attached process list
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
        process_display( current_xp );
    }

    // release lock
    remote_spinlock_unlock( lock_xp );

}  // end process_txt_display