/*
 * spinlock.c - kernel spinlock synchronization
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// Note: the original header names were lost; the list below is inferred
// from the identifiers used in this file (assumed ALMOS-MKH header paths).
#include <kernel_config.h>     // DEBUG_SPINLOCKS
#include <hal_kernel_types.h>  // reg_t, bool_t, error_t, uint32_t
#include <hal_atomic.h>        // hal_atomic_cas()
#include <hal_irqmask.h>       // hal_disable_irq() / hal_restore_irq()
#include <list.h>              // list_entry_init(), list_add_first(), list_unlink()
#include <thread.h>            // thread_t, CURRENT_THREAD, thread_can_yield(), thread_check_sched()
#include <scheduler.h>         // sched_yield()
#include <spinlock.h>          // spinlock_t and function prototypes

//////////////////////////////////////////////
inline void spinlock_init( spinlock_t * lock )
{
    lock->taken = 0;

#if DEBUG_SPINLOCKS
    lock->owner = NULL;
    list_entry_init( &lock->list );
#endif

}

///////////////////////////////////////////
void spinlock_lock_busy( spinlock_t * lock,
                         reg_t      * irq_state )
{
    reg_t               mode;
    volatile uint32_t   taken;
    thread_t          * this     = CURRENT_THREAD;
    bool_t              isAtomic = false;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = lock->taken;

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
        }
    }

    this->local_locks++;

#if DEBUG_SPINLOCKS
    lock->owner = this;
    list_add_first( &this->locks_root , &lock->list );
#endif

    // irq_state must be restored when lock is released
    *irq_state = mode;
}

//////////////////////////////////////////////
void spinlock_unlock_busy( spinlock_t * lock,
                           reg_t        irq_state )
{
    thread_t * this = CURRENT_THREAD;

#if DEBUG_SPINLOCKS
    lock->owner = NULL;
    list_unlink( &lock->list );
#endif

    lock->taken = 0;
    this->local_locks--;

    // deschedule if pending request
    thread_check_sched();

    // restore IRQs
    hal_restore_irq( irq_state );
}

///////////////////////////////////////
void spinlock_lock( spinlock_t * lock )
{
    reg_t               mode;
    thread_t          * this     = CURRENT_THREAD;
    bool_t              isAtomic = false;
    volatile uint32_t   taken;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = lock->taken;

        // deschedule without blocking when lock already taken
        if( taken != 0 )
        {
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield( "waiting spinlock" );
            hal_disable_irq( &mode );
            continue;
        }

        // try to atomically take the lock if not already taken
        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
    }

    this->local_locks++;

#if DEBUG_SPINLOCKS
    lock->owner = this;
    list_add_first( &this->locks_root , &lock->list );
#endif

    // restore IRQs
    hal_restore_irq( mode );
}

/////////////////////////////////////////////
error_t spinlock_trylock( spinlock_t * lock )
{
    reg_t      mode;
    bool_t     isAtomic = false;
    thread_t * this     = CURRENT_THREAD;

    hal_disable_irq( &mode );

    if( lock->taken == 0 ) isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );

    if( isAtomic == false )
    {
        hal_restore_irq( mode );
        return 1;
    }
    else
    {
        this->local_locks++;

#if DEBUG_SPINLOCKS
        lock->owner = this;
        list_add_first( &this->locks_root , &lock->list );
#endif

        hal_restore_irq( mode );
        return 0;
    }
}

/////////////////////////////////////////
void spinlock_unlock( spinlock_t * lock )
{
    thread_t * this = CURRENT_THREAD;

#if DEBUG_SPINLOCKS
    lock->owner = NULL;
    list_unlink( &lock->list );
#endif

    lock->taken = 0;
    this->local_locks--;

    // deschedule if pending request
    thread_check_sched();
}
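
///////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only): how a caller is expected to pair the
// "busy" variants, which keep IRQs masked for the whole critical section and
// hand the saved IRQ state back through <irq_state>. The type <my_counter_t>,
// the global <counter>, and the function <counter_increment> are hypothetical
// examples for this sketch, not part of ALMOS-MKH.
//
//     typedef struct my_counter_s
//     {
//         spinlock_t lock;      // protects <value>
//         uint32_t   value;
//     }
//     my_counter_t;
//
//     my_counter_t counter;
//
//     void counter_increment( void )
//     {
//         reg_t irq_state;
//
//         // take the lock; previous IRQ state is saved in <irq_state>
//         spinlock_lock_busy( &counter.lock , &irq_state );
//
//         counter.value++;
//
//         // release the lock and restore the saved IRQ state
//         spinlock_unlock_busy( &counter.lock , irq_state );
//     }
//
// The non-busy spinlock_lock() / spinlock_unlock() pair follows the same
// pattern but takes no <irq_state> argument, because it restores IRQs itself
// and may yield the processor while waiting.
///////////////////////////////////////////////////////////////////////////////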