/*
 * remote_barrier.c - Access a POSIX barrier.
 *
 * Author  Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// NOTE: the header names between angle brackets were lost; the list below is
// reconstructed from the identifiers used in this file.
#include <hal_types.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <thread.h>
#include <process.h>
#include <kmem.h>
#include <rpc.h>
#include <xlist.h>
#include <remote_spinlock.h>
#include <remote_barrier.h>

/////////////////////////////////////////////////
inline void remote_barrier( xptr_t   barrier_xp,
                            uint32_t count )
{
    uint32_t  expected;

    remote_barrier_t * ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
    cxy_t              cxy = GET_CXY( barrier_xp );

    // get barrier sense value
    uint32_t sense = hal_remote_lw( XPTR( cxy , &ptr->sense ) );

    // compute expected value
    if ( sense == 0 ) expected = 1;
    else              expected = 0;

    // atomically increment current
    uint32_t current = hal_remote_atomic_add( XPTR( cxy , &ptr->current ) , 1 );

    // last task resets current and toggles sense
    if( current == (count-1) )
    {
        hal_remote_sw( XPTR( cxy , &ptr->current ) , 0 );
        hal_remote_sw( XPTR( cxy , &ptr->sense   ) , expected );
    }
    else   // other tasks poll the sense
    {
        while( hal_remote_lw( XPTR( cxy , &ptr->sense ) ) != expected ) asm volatile ("nop");
    }
}

///////////////////////////////////////////////////
xptr_t remote_barrier_from_ident( intptr_t  ident )
{
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t ref_xp = process->ref_xp;

    // get cluster and local pointer on reference process
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // get extended pointer on root of barriers list
    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->barrier_root );

    // scan reference process barriers list
    xptr_t             iter_xp;
    xptr_t             barrier_xp;
    cxy_t              barrier_cxy;
    remote_barrier_t * barrier_ptr;
    intptr_t           current;
    bool_t             found = false;

    XLIST_FOREACH( root_xp , iter_xp )
    {
        barrier_xp  = XLIST_ELEMENT( iter_xp , remote_barrier_t , list );
        barrier_cxy = GET_CXY( barrier_xp );
        barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
        current     = (intptr_t)hal_remote_lpt( XPTR( barrier_cxy , &barrier_ptr->ident ) );

        if( ident == current )
        {
            found = true;
            break;
        }
    }

    if( found == false ) return XPTR_NULL;
    else                 return barrier_xp;
}

//////////////////////////////////////////////
error_t remote_barrier_create( intptr_t ident,
                               uint32_t count )
{
    xptr_t             barrier_xp;
    remote_barrier_t * barrier_ptr;

    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t ref_xp = process->ref_xp;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // allocate memory for barrier descriptor
    if( ref_cxy == local_cxy )                   // local cluster is the reference
    {
        kmem_req_t req;
        req.type    = KMEM_BARRIER;
        req.flags   = AF_ZERO;
        barrier_ptr = kmem_alloc( &req );
        barrier_xp  = XPTR( local_cxy , barrier_ptr );
    }
    else                                         // reference is remote
    {
        rpc_kcm_alloc_client( ref_cxy , KMEM_BARRIER , &barrier_xp );
        barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
    }

    if( barrier_ptr == NULL ) return ENOMEM;

    // initialise barrier
    hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->nb_threads ) , count );
    hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->current    ) , 0 );
    hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->sense      ) , 0 );
    hal_remote_spt( XPTR( ref_cxy , &barrier_ptr->ident      ) , (void *)ident );
    xlist_entry_init( XPTR( ref_cxy , &barrier_ptr->list ) );

    // register barrier in reference process xlist
    xptr_t root_xp  = XPTR( ref_cxy , &ref_ptr->barrier_root );
    xptr_t entry_xp = XPTR( ref_cxy , &barrier_ptr->list );

    remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    xlist_add_first( root_xp , entry_xp );
    remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    return 0;
}

////////////////////////////////////////////////
void remote_barrier_destroy( xptr_t barrier_xp )
{
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t ref_xp = process->ref_xp;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // get barrier cluster and local pointer
    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
    remote_barrier_t * barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp );

    // remove barrier from reference process xlist
    remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    xlist_unlink( XPTR( barrier_cxy , &barrier_ptr->list ) );
    remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    // release memory allocated for barrier descriptor
    if( barrier_cxy == local_cxy )               // reference is local
    {
        kmem_req_t req;
        req.type = KMEM_BARRIER;
        req.ptr  = barrier_ptr;
        kmem_free( &req );
    }
    else                                         // reference is remote
    {
        rpc_kcm_free_client( barrier_cxy , barrier_ptr , KMEM_BARRIER );
    }
}

/////////////////////////////////////////////
void remote_barrier_wait( xptr_t barrier_xp )
{
    uint32_t  expected;
    uint32_t  current;
    uint32_t  count;
    uint32_t  sense;
    reg_t     irq_state;
    xptr_t    root_xp;

    // get cluster and local pointer on calling thread
    cxy_t      thread_cxy = local_cxy;
    thread_t * thread_ptr = CURRENT_THREAD;

    // get cluster and local pointer on remote barrier
    remote_barrier_t * barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp );
    cxy_t              barrier_cxy = GET_CXY( barrier_xp );

    // get count and root fields from barrier descriptor
    count   = hal_remote_lw ( XPTR( barrier_cxy , &barrier_ptr->nb_threads ) );
    root_xp = hal_remote_lwd( XPTR( barrier_cxy , &barrier_ptr->root ) );

    // get barrier sense value
    sense = hal_remote_lw( XPTR( barrier_cxy , &barrier_ptr->sense ) );

    // compute expected value
    if ( sense == 0 ) expected = 1;
    else              expected = 0;

    // atomically increment current
    current = hal_remote_atomic_add( XPTR( barrier_cxy , &barrier_ptr->current ) , 1 );

    // last thread resets current, toggles sense, and activates all waiting threads
    // other threads block, register in queue, and deschedule
    if( current == (count-1) )                   // last thread
    {
        hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->current ) , 0 );
        hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->sense   ) , expected );

        // activate waiting threads if required
        if( xlist_is_empty( root_xp ) == false )
        {
            // disable interrupts
            hal_disable_irq( &irq_state );

            xptr_t  iter_xp;
            xptr_t  thread_xp;

            XLIST_FOREACH( root_xp , iter_xp )
            {
                // get extended pointer on waiting thread
                thread_xp = XLIST_ELEMENT( iter_xp , thread_t , wait_list );

                // remove waiting thread from queue
                cxy_t      wthread_cxy = GET_CXY( thread_xp );
                thread_t * wthread_ptr = (thread_t *)GET_PTR( thread_xp );

                remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) );
                xlist_unlink( XPTR( wthread_cxy , &wthread_ptr->wait_list ) );
                remote_spinlock_unlock( XPTR( barrier_cxy , &barrier_ptr->lock ) );

                // unblock waiting thread
                thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
            }

            // restore interrupts
            hal_restore_irq( irq_state );
        }
    }
    else                                         // not the last thread
    {
        // disable interrupts
        hal_disable_irq( &irq_state );

        // register calling thread in barrier waiting queue
        xptr_t entry_xp = XPTR( thread_cxy , &thread_ptr->wait_list );

        remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) );
        xlist_add_last( root_xp , entry_xp );
        remote_spinlock_unlock( XPTR( barrier_cxy , &barrier_ptr->lock ) );

        // block & deschedule the calling thread
        thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC );
        sched_yield();

        // restore interrupts
        hal_restore_irq( irq_state );
    }
}
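
/***************************************************************************************
 * Illustrative usage sketch (not part of the original file): a minimal example of how
 * a syscall-level layer could combine the primitives defined above, using the
 * user-space virtual address of the barrier as its identifier. The function names and
 * the EINVAL error code are assumptions for illustration, not ALMOS-MKH definitions.
 **************************************************************************************/
#if 0   /* example only */

static error_t example_barrier_init( intptr_t ident,
                                     uint32_t count )
{
    // refuse to initialise the same user barrier twice
    if( remote_barrier_from_ident( ident ) != XPTR_NULL ) return EINVAL;

    // create the barrier and register it in the reference process
    return remote_barrier_create( ident , count );
}

static error_t example_barrier_wait( intptr_t ident )
{
    // retrieve the barrier from its user-space identifier
    xptr_t barrier_xp = remote_barrier_from_ident( ident );

    if( barrier_xp == XPTR_NULL ) return EINVAL;

    // block until all expected threads have reached the barrier
    remote_barrier_wait( barrier_xp );

    return 0;
}

#endif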