source: trunk/kernel/libk/spinlock.c @ 531

Last change on this file since 531 was 473, checked in by alain, 6 years ago

Fix several GCC warnings related to the -Wextra compilation option.

File size: 4.3 KB
/*
 * spinlock.c - kernel spinlock synchronization
 *
 * Authors   Ghassan Almaless  (2008,2009,2010,2011,2012)
 *           Alain Greiner     (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_irqmask.h>
#include <thread.h>
#include <scheduler.h>
#include <printk.h>
#include <spinlock.h>

//////////////////////////////////////////////
inline void spinlock_init( spinlock_t * lock )
{
    lock->taken = 0;

#if DEBUG_SPINLOCKS
lock->owner = NULL;
list_entry_init( &lock->list );
#endif

}
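/*
 * Illustrative usage sketch (hypothetical caller code; <my_lock> is an
 * assumed caller-side variable): a spinlock must be initialized before
 * its first use.
 *
 *     spinlock_t   my_lock;
 *
 *     spinlock_init( &my_lock );
 */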

///////////////////////////////////////////
void spinlock_lock_busy( spinlock_t * lock,
                         reg_t      * irq_state )
{
    reg_t               mode;
    volatile uint32_t   taken;
    thread_t          * this     = CURRENT_THREAD;
    bool_t              isAtomic = false;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = lock->taken;

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
        }
    }

    this->local_locks++;

#if DEBUG_SPINLOCKS
lock->owner = this;
list_add_first( &this->locks_root , &lock->list );
#endif

    // irq_state must be restored when lock is released
    *irq_state = mode;
}

/////////////////////////////////////////////
void spinlock_unlock_busy( spinlock_t * lock,
                           reg_t        irq_state )
{
    thread_t * this = CURRENT_THREAD;

#if DEBUG_SPINLOCKS
lock->owner = NULL;
list_unlink( &lock->list );
#endif

    lock->taken = 0;
    this->local_locks--;

    // deschedule if pending request
    thread_check_sched();

    // restore IRQs
    hal_restore_irq( irq_state );
}
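/*
 * Illustrative usage sketch for the busy-waiting variant (hypothetical
 * caller code; <my_lock> and <irq_state> are assumed caller-side
 * variables): the IRQ state saved by spinlock_lock_busy() must be passed
 * back to spinlock_unlock_busy(), so that the caller's IRQ mask is only
 * restored when the lock is released.
 *
 *     reg_t   irq_state;
 *
 *     spinlock_lock_busy( &my_lock , &irq_state );
 *     // ... short critical section, executed with IRQs disabled ...
 *     spinlock_unlock_busy( &my_lock , irq_state );
 */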

///////////////////////////////////////
void spinlock_lock( spinlock_t * lock )
{
    reg_t             mode;
    thread_t        * this     = CURRENT_THREAD;
    bool_t            isAtomic = false;
    volatile uint32_t taken;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = lock->taken;

        // deschedule without blocking when lock already taken
        if( taken != 0 )
        {
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield("waiting spinlock");
            hal_disable_irq( &mode );
            continue;
        }

        // try to atomically take the lock if not already taken
        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
    }

    this->local_locks++;

#if DEBUG_SPINLOCKS
lock->owner = this;
list_add_first( &this->locks_root , &lock->list );
#endif

    // restore IRQs
    hal_restore_irq( mode );
}

/////////////////////////////////////////////
error_t spinlock_trylock( spinlock_t * lock )
{
    reg_t      mode;
    bool_t     isAtomic = false;
    thread_t * this     = CURRENT_THREAD;

    hal_disable_irq( &mode );

    if( lock->taken == 0)
        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1);

    if(isAtomic == false)
    {
        hal_restore_irq(mode);
        return 1;
    }
    else
    {
        this->local_locks++;

#if DEBUG_SPINLOCKS
lock->owner = this;
list_add_first( &this->locks_root , &lock->list );
#endif

        hal_restore_irq(mode);
        return 0;
    }
}
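/*
 * Illustrative usage sketch (hypothetical caller code; <my_lock> is an
 * assumed caller-side variable): spinlock_trylock() returns 0 when the
 * lock has been acquired and 1 when it is already taken, without ever
 * descheduling the calling thread.
 *
 *     if( spinlock_trylock( &my_lock ) == 0 )   // lock acquired
 *     {
 *         // ... critical section ...
 *         spinlock_unlock( &my_lock );
 *     }
 */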

/////////////////////////////////////////
void spinlock_unlock( spinlock_t * lock )
{
    thread_t * this = CURRENT_THREAD;

#if DEBUG_SPINLOCKS
lock->owner = NULL;
list_unlink( &lock->list );
#endif

    lock->taken = 0;
    this->local_locks--;

    // deschedule if pending request
    thread_check_sched();
}
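/*
 * Illustrative usage sketch for the yielding variant (hypothetical caller
 * code; <my_lock> is an assumed caller-side variable): spinlock_lock()
 * may yield the processor while the lock is taken by another thread, and
 * the caller's IRQ state is restored before it returns.
 *
 *     spinlock_lock( &my_lock );
 *     // ... critical section ...
 *     spinlock_unlock( &my_lock );
 */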