/*
 * remote_spinlock.c - kernel remote spinlock implementation.
 *
 * Authors  Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <thread.h>
#include <scheduler.h>
#include <remote_spinlock.h>

///////////////////////////////////////////
void remote_spinlock_init( xptr_t lock_xp )
{
    remote_spinlock_t * ptr = GET_PTR( lock_xp );
    cxy_t               cxy = GET_CXY( lock_xp );

    hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );

#if DEBUG_REMOTE_SPINLOCKS
    hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    xlist_entry_init( XPTR( cxy , &ptr->list ) );
#endif

}

/////////////////////////////////////////////////
error_t remote_spinlock_trylock( xptr_t lock_xp )
{
    reg_t  mode;
    bool_t isAtomic = false;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on local thread
    thread_t * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    if( hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) ) == 0 )
    {
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    if( isAtomic == false )    // failure
    {
        hal_restore_irq( mode );
        return 1;
    }
    else                       // success : register lock in local thread
    {
        thread_ptr->remote_locks++;

#if DEBUG_REMOTE_SPINLOCKS
        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                        XPTR( local_cxy , thread_ptr ) );
        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                         XPTR( lock_cxy , &lock_ptr->list ) );
#endif

        hal_restore_irq( mode );
        return 0;
    }
}

///////////////////////////////////////////////////
void remote_spinlock_lock_busy( xptr_t     lock_xp,
                                uint32_t * irq_state )
{
    bool_t            isAtomic = false;
    reg_t             mode;
    volatile uint32_t taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on local thread
    thread_t * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }
    }

    // register lock in thread
    thread_ptr->remote_locks++;

#if DEBUG_REMOTE_SPINLOCKS
    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                    XPTR( local_cxy , thread_ptr ) );
    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy , &lock_ptr->list ) );
#endif

    // irq_state must be restored when lock is released
    *irq_state = mode;

}  // end remote_spinlock_lock_busy()
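/////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : a typical pairing
// of remote_spinlock_lock_busy() / remote_spinlock_unlock_busy() around a
// short critical section on a remotely shared object. The <remote_counter_t>
// type and the remote_counter_increment() function are hypothetical names
// introduced here for illustration only; the saved IRQ state returned by
// lock_busy() must be passed back unchanged to unlock_busy().
/////////////////////////////////////////////////////////////////////////////

typedef struct remote_counter_s
{
    remote_spinlock_t   lock;     // protects the <value> field
    uint32_t            value;    // shared counter
}
remote_counter_t;

void remote_counter_increment( xptr_t counter_xp )
{
    uint32_t   irq_state;
    uint32_t   value;

    // get cluster and local pointer on remote counter
    remote_counter_t * ptr = GET_PTR( counter_xp );
    cxy_t              cxy = GET_CXY( counter_xp );

    // take the lock : IRQs stay disabled until release,
    // and the saved IRQ state is returned in <irq_state>
    remote_spinlock_lock_busy( XPTR( cxy , &ptr->lock ) , &irq_state );

    // critical section : read / modify / write the remote value
    value = hal_remote_lw( XPTR( cxy , &ptr->value ) );
    hal_remote_sw( XPTR( cxy , &ptr->value ) , value + 1 );

    // release the lock, passing back the saved IRQ state
    remote_spinlock_unlock_busy( XPTR( cxy , &ptr->lock ) , irq_state );
}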
////////////////////////////////////////////////////
void remote_spinlock_unlock_busy( xptr_t   lock_xp,
                                  uint32_t irq_state )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t * thread_ptr = CURRENT_THREAD;

#if DEBUG_REMOTE_SPINLOCKS
    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
#endif

    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    thread_ptr->remote_locks--;

    // deschedule if pending request
    thread_check_sched();

    // restore IRQs
    hal_restore_irq( irq_state );
}

///////////////////////////////////////////
void remote_spinlock_lock( xptr_t lock_xp )
{
    bool_t            isAtomic = false;
    reg_t             mode;
    volatile uint32_t taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on calling thread
    thread_t * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // deschedule if possible when lock already taken
        if( taken != 0 )
        {
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield("waiting spinlock");
            hal_disable_irq( &mode );
            continue;
        }

        // try to take the lock if not already taken
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    // register lock in thread
    thread_ptr->remote_locks++;

#if DEBUG_REMOTE_SPINLOCKS
    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                    XPTR( local_cxy , thread_ptr ) );
    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy , &lock_ptr->list ) );
#endif

    // enable interrupts
    hal_restore_irq( mode );
}

/////////////////////////////////////////////
void remote_spinlock_unlock( xptr_t lock_xp )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t * thread_ptr = CURRENT_THREAD;

#if DEBUG_REMOTE_SPINLOCKS
    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
#endif

    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    thread_ptr->remote_locks--;

    // deschedule if pending request
    thread_check_sched();
}
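/////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file) : using the
// descheduling remote_spinlock_lock() variant, with remote_spinlock_trylock()
// as an opportunistic fast path (trylock returns 0 on success / 1 on
// failure, as defined above). The <remote_queue_t> type and the
// remote_queue_add() function are hypothetical names introduced here
// for illustration only.
/////////////////////////////////////////////////////////////////////////////

typedef struct remote_queue_s
{
    remote_spinlock_t   lock;     // protects the <count> field
    uint32_t            count;    // number of registered items
}
remote_queue_t;

void remote_queue_add( xptr_t queue_xp )
{
    uint32_t   count;

    // get cluster and local pointer on remote queue
    remote_queue_t * ptr = GET_PTR( queue_xp );
    cxy_t            cxy = GET_CXY( queue_xp );

    // build extended pointer on the embedded lock
    xptr_t lock_xp = XPTR( cxy , &ptr->lock );

    // opportunistic attempt : trylock() returns 0 on success
    if( remote_spinlock_trylock( lock_xp ) != 0 )
    {
        // fall back on the blocking variant, which can deschedule
        // the calling thread while the lock is busy
        remote_spinlock_lock( lock_xp );
    }

    // critical section : update the remote counter
    count = hal_remote_lw( XPTR( cxy , &ptr->count ) );
    hal_remote_sw( XPTR( cxy , &ptr->count ) , count + 1 );

    // release the lock
    remote_spinlock_unlock( lock_xp );
}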