/*
 * remote_mutex.c - POSIX mutex implementation.
 *
 * Authors    Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_remote.h>
#include <thread.h>
#include <xlist.h>
#include <scheduler.h>
#include <printk.h>
#include <remote_mutex.h>

/////////////////////////////////////////////////
xptr_t remote_mutex_from_ident( intptr_t  ident )
{
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t ref_xp = process->ref_xp;

    // get cluster and local pointer on reference process
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // get extended pointers on mutexes list
    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );

    // get lock protecting synchro lists
    remote_queuelock_acquire( lock_xp );

    // scan reference process mutex list
    xptr_t           iter_xp;
    xptr_t           mutex_xp;
    cxy_t            mutex_cxy;
    remote_mutex_t * mutex_ptr;
    intptr_t         current;
    bool_t           found = false;

    XLIST_FOREACH( root_xp , iter_xp )
    {
        mutex_xp  = XLIST_ELEMENT( iter_xp , remote_mutex_t , list );
        mutex_cxy = GET_CXY( mutex_xp );
        mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
        current   = (intptr_t)hal_remote_lpt( XPTR( mutex_cxy , &mutex_ptr->ident ) );

        if( ident == current )
        {
            found = true;
            break;
        }
    }

    // release lock protecting synchro lists
    remote_queuelock_release( lock_xp );

    if( found == false ) return XPTR_NULL;
    else                 return mutex_xp;

}  // end remote_mutex_from_ident()
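
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : it shows how a
// caller, such as a mutex related syscall handler, could use remote_mutex_from_ident()
// to map the user-space mutex identifier (the virtual address of the user mutex
// object) to the kernel mutex descriptor. The example_check_ident() name is
// hypothetical.
/////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_check_ident( intptr_t ident )
{
    // search the kernel mutex associated to the user-space identifier
    xptr_t mutex_xp = remote_mutex_from_ident( ident );

    // report an error if no mutex has been registered for this identifier
    if( mutex_xp == XPTR_NULL ) return 0xFFFFFFFF;

    return 0;
}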

/////////////////////////////////////////////
error_t remote_mutex_create( intptr_t ident )
{
    xptr_t           mutex_xp;
    remote_mutex_t * mutex_ptr;

    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t ref_xp = process->ref_xp;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // allocate memory for mutex descriptor
    if( ref_cxy == local_cxy )                  // local cluster is the reference
    {
        kmem_req_t req;
        req.type  = KMEM_MUTEX;
        req.flags = AF_ZERO;
        mutex_ptr = kmem_alloc( &req );
        mutex_xp  = XPTR( local_cxy , mutex_ptr );
    }
    else                                        // reference cluster is remote
    {
        rpc_kcm_alloc_client( ref_cxy , KMEM_MUTEX , &mutex_xp );
        mutex_ptr = GET_PTR( mutex_xp );
    }

    if( mutex_ptr == NULL ) return 0xFFFFFFFF;

    // initialise mutex
    hal_remote_s32 ( XPTR( ref_cxy , &mutex_ptr->taken ) , 0 );
    hal_remote_spt ( XPTR( ref_cxy , &mutex_ptr->ident ) , (void *)ident );
    xlist_entry_init( XPTR( ref_cxy , &mutex_ptr->list ) );
    xlist_root_init ( XPTR( ref_cxy , &mutex_ptr->root ) );
    hal_remote_s64 ( XPTR( ref_cxy , &mutex_ptr->owner ) , XPTR_NULL );
    remote_busylock_init( XPTR( ref_cxy , &mutex_ptr->lock ) , LOCK_MUTEX_STATE );

    // get root of mutexes list in process, and list_entry in mutex
    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
    xptr_t xp_list = XPTR( ref_cxy , &mutex_ptr->list );

    // get lock protecting user synchro lists
    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    // register mutex in process descriptor
    xlist_add_first( root_xp , xp_list );

    // release lock protecting user synchro lists
    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[DBG] %s : thread %x in process %x / mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, local_cxy, mutex_ptr );
#endif

    return 0;

}  // end remote_mutex_create()

////////////////////////////////////////////
void remote_mutex_destroy( xptr_t mutex_xp )
{
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t ref_xp = process->ref_xp;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // get mutex cluster and local pointer
    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
    remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );

    // get lock protecting user synchro lists
    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    // remove mutex from reference process xlist
    xlist_unlink( XPTR( mutex_cxy , &mutex_ptr->list ) );

    // release lock protecting user synchro lists
    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    // release memory allocated for mutex descriptor
    if( mutex_cxy == local_cxy )                // mutex is local
    {
        kmem_req_t req;
        req.type = KMEM_MUTEX;
        req.ptr  = mutex_ptr;
        kmem_free( &req );
    }
    else                                        // mutex is remote
    {
        rpc_kcm_free_client( mutex_cxy , mutex_ptr , KMEM_MUTEX );
    }

}  // end remote_mutex_destroy()
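
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : a possible
// "create on first use" / "destroy" dispatch, as a mutex related syscall handler
// might implement it on top of the functions above. The example_mutex_init() and
// example_mutex_destroy() names are hypothetical.
/////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_mutex_init( intptr_t ident )
{
    // do nothing if a mutex with this identifier is already registered
    if( remote_mutex_from_ident( ident ) != XPTR_NULL ) return 0;

    // otherwise, allocate and register a new mutex in the reference process
    return remote_mutex_create( ident );
}

static inline error_t example_mutex_destroy( intptr_t ident )
{
    // search the kernel mutex associated to the user-space identifier
    xptr_t mutex_xp = remote_mutex_from_ident( ident );

    // report an error for an unknown identifier
    if( mutex_xp == XPTR_NULL ) return 0xFFFFFFFF;

    // unregister and release the mutex descriptor
    remote_mutex_destroy( mutex_xp );

    return 0;
}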

/////////////////////////////////////////
void remote_mutex_lock( xptr_t mutex_xp )
{
    // get cluster and local pointer on mutex
    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
    cxy_t            mutex_cxy = GET_CXY( mutex_xp );

    // get extended pointers on mutex fields
    xptr_t taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
    xptr_t owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    xptr_t root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
    xptr_t lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );

    // get cluster and pointers on calling thread
    cxy_t      caller_cxy = local_cxy;
    thread_t * caller_ptr = CURRENT_THREAD;
    xptr_t     caller_xp  = XPTR( caller_cxy , caller_ptr );

    // check calling thread can yield
    assert( (caller_ptr->busylocks == 0),
    "cannot yield : busylocks = %d\n", caller_ptr->busylocks );

    while( 1 )
    {
        // get busylock protecting mutex state
        remote_busylock_acquire( lock_xp );

        // test mutex state
        if( hal_remote_l32( taken_xp ) == 0 )        // success
        {
            // register calling thread as mutex owner
            hal_remote_s64( owner_xp , caller_xp );

            // update mutex state
            hal_remote_s32( taken_xp , 1 );

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[DBG] %s : thread %x in process %x SUCCESS on mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
#endif

            // release busylock protecting mutex state
            remote_busylock_release( lock_xp );

            return;
        }
        else                                         // already taken
        {
            // block the calling thread
            thread_block( caller_xp , THREAD_BLOCKED_USERSYNC );

            // register calling thread in mutex waiting queue
            xptr_t entry_xp = XPTR( caller_cxy , &caller_ptr->wait_xlist );
            xlist_add_last( root_xp , entry_xp );

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[DBG] %s : thread %x in process %x BLOCKED on mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
#endif

            // release busylock protecting mutex state
            remote_busylock_release( lock_xp );

            // deschedule calling thread
            sched_yield("blocked on mutex");
        }
    }
}  // end remote_mutex_lock()

//////////////////////////////////////////////
error_t remote_mutex_unlock( xptr_t mutex_xp )
{
    // memory barrier before mutex release
    hal_fence();

    // get cluster and local pointer on mutex
    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
    cxy_t            mutex_cxy = GET_CXY( mutex_xp );

    // get cluster and pointers on calling thread
    cxy_t      caller_cxy = local_cxy;
    thread_t * caller_ptr = CURRENT_THREAD;
    xptr_t     caller_xp  = XPTR( caller_cxy , caller_ptr );

    // get extended pointers on mutex fields
    xptr_t taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
    xptr_t owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    xptr_t root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
    xptr_t lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );

    // get busylock protecting mutex state
    remote_busylock_acquire( lock_xp );

    // check calling thread is mutex owner
    if( hal_remote_l64( owner_xp ) != caller_xp )
    {
        // release busylock protecting mutex state
        remote_busylock_release( lock_xp );
        return 0xFFFFFFFF;
    }

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[DBG] %s : thread %x in process %x EXIT / mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
#endif

    // update owner field
    hal_remote_s64( owner_xp , XPTR_NULL );

    // update taken field
    hal_remote_s32( taken_xp , 0 );

    // unblock first waiting thread if waiting list non empty
    if( xlist_is_empty( root_xp ) == false )
    {
        // get extended pointer on first waiting thread
        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
        thread_t * thread_ptr = GET_PTR( thread_xp );
        cxy_t      thread_cxy = GET_CXY( thread_xp );

#if DEBUG_MUTEX
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
{
trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %x / mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, trdid, pid, mutex_cxy, mutex_ptr );
}
#endif

        // remove this thread from waiting queue
        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );

        // unblock first waiting thread
        thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    }

    // release busylock protecting mutex state
    remote_busylock_release( lock_xp );

    return 0;

}  // end remote_mutex_unlock()
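
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : a typical
// lock / unlock sequence protecting a critical section. remote_mutex_lock() blocks
// and deschedules the caller until the mutex is free, and remote_mutex_unlock()
// returns an error if the caller does not own the mutex. The example_critical_section()
// and example_update() names are hypothetical.
/////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_critical_section( intptr_t ident,
                                                void (*example_update)( void ) )
{
    // search the kernel mutex associated to the user-space identifier
    xptr_t mutex_xp = remote_mutex_from_ident( ident );

    if( mutex_xp == XPTR_NULL ) return 0xFFFFFFFF;

    // take the mutex, blocking the calling thread if it is already taken
    remote_mutex_lock( mutex_xp );

    // execute the protected operation while owning the mutex
    example_update();

    // release the mutex ; the ownership check cannot fail here, because the
    // calling thread registered itself as owner in remote_mutex_lock()
    return remote_mutex_unlock( mutex_xp );
}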

///////////////////////////////////////////////
error_t remote_mutex_trylock( xptr_t mutex_xp )
{
    // get cluster and local pointer on mutex
    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
    cxy_t            mutex_cxy = GET_CXY( mutex_xp );

    // get cluster and pointers on calling thread
    cxy_t      caller_cxy = local_cxy;
    thread_t * caller_ptr = CURRENT_THREAD;
    xptr_t     caller_xp  = XPTR( caller_cxy , caller_ptr );

    // get extended pointers on mutex fields
    xptr_t taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
    xptr_t owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    xptr_t lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );

    // get busylock protecting mutex state
    remote_busylock_acquire( lock_xp );

    // test mutex state
    if( hal_remote_l32( taken_xp ) == 0 )        // success
    {
        // register calling thread as mutex owner
        hal_remote_s64( owner_xp , caller_xp );

        // update mutex state
        hal_remote_s32( taken_xp , 1 );

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[DBG] %s : SUCCESS for thread %x in process %x / mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
#endif

        // release busylock protecting mutex state
        remote_busylock_release( lock_xp );

        return 0;
    }
    else                                         // already taken
    {

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[DBG] %s : FAILURE for thread %x in process %x / mutex(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, mutex_cxy, mutex_ptr );
#endif

        // release busylock protecting mutex state
        remote_busylock_release( lock_xp );

        return 0xFFFFFFFF;
    }
}  // end remote_mutex_trylock()
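
/////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : non-blocking
// acquisition using remote_mutex_trylock(). A pthread_mutex_trylock() style wrapper
// only has to translate the 0xFFFFFFFF failure code into a "busy" status for user
// space. The example_try_then_work() name and the EXAMPLE_BUSY value are hypothetical.
/////////////////////////////////////////////////////////////////////////////////////
#define EXAMPLE_BUSY 1

static inline uint32_t example_try_then_work( intptr_t ident,
                                              void (*example_work)( void ) )
{
    // search the kernel mutex associated to the user-space identifier
    xptr_t mutex_xp = remote_mutex_from_ident( ident );

    if( mutex_xp == XPTR_NULL ) return 0xFFFFFFFF;

    // attempt to take the mutex without blocking
    if( remote_mutex_trylock( mutex_xp ) ) return EXAMPLE_BUSY;

    // the mutex was free : do the work, then release the mutex
    example_work();
    remote_mutex_unlock( mutex_xp );

    return 0;
}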