source: trunk/kernel/libk/spinlock.c @ 394

Last change on this file since 394 was 337, checked in by alain, 7 years ago

Introduce the delayed context switch if current thread has a lock.

File size: 4.2 KB
Line 
1/*
2 * spinlock.c - kernel spinlock synchronization
3 *
4 * Authors   Ghassan Almaless  (2008,2009,2010,2011,2012)
 *           Alain Greiner     (2016)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_types.h>
27#include <hal_atomic.h>
28#include <hal_special.h>
29#include <hal_irqmask.h>
30#include <thread.h>
31#include <scheduler.h>
32#include <printk.h>
33#include <spinlock.h>
34
35//////////////////////////////////////////////
36inline void spinlock_init( spinlock_t * lock )
37{
38    lock->taken = 0;
39    lock->owner = NULL;
40    list_entry_init( &lock->list );
41}
42
43///////////////////////////////////////////
44void spinlock_lock_busy( spinlock_t * lock,
45                         uint32_t   * irq_state )
46{
47    reg_t               mode;
48    volatile uint32_t   taken;
49    thread_t          * this     = CURRENT_THREAD;
50    bool_t              isAtomic = false;
51
52    // disable interrupts
53    hal_disable_irq( &mode );
54
55    // loop until success
56    while( isAtomic == false )
57    {
58        taken = lock->taken;
59
60        // try to take the lock if not already taken
61        if( taken == 0 )
62        {
63            isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
64        }
65    }
66
67    this->local_locks++;
68    lock->owner = this;
69    list_add_first( &this->locks_root , &lock->list );
70
71    // irq_state must be restored when lock is released
72    *irq_state = mode;
73}
74
75//////////////////////////////////////////////
76void spinlock_unlock_busy( spinlock_t * lock,
77                           uint32_t     irq_state )
78{
79    thread_t * this = CURRENT_THREAD;;
80
81    lock->owner = NULL;
82    lock->taken = 0;
83    this->local_locks--;
84    list_unlink( &lock->list );
85
86    // deschedule if pending request
87    thread_check_sched();
88 
89    // restore IRQs
90        hal_restore_irq( irq_state );
91}
92
93///////////////////////////////////////
94void spinlock_lock( spinlock_t * lock )
95{
96    reg_t             mode;
97    thread_t        * this     = CURRENT_THREAD;
98    bool_t            isAtomic = false;
99    volatile uint32_t taken;
100
101    // disable interrupts
102    hal_disable_irq( &mode );
103
104    // loop until success
105    while( isAtomic == false )
106    {
107        taken = lock->taken;
108
109        // deschedule without blocking when lock already taken
110        if( taken != 0 )
111        {
112            hal_restore_irq( mode );
113            if( thread_can_yield() ) sched_yield( NULL );
114            hal_disable_irq( &mode );
115            continue;
116        }
117
118        // try to atomically take the lock if not already taken
119        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
120    }
121
122    this->local_locks++;
123    lock->owner = this;
124    list_add_first( &this->locks_root , &lock->list );
125
126    // restore IRQs
127    hal_restore_irq( mode );
128}
129
130/////////////////////////////////////////////
131error_t spinlock_trylock( spinlock_t * lock )
132{
133    reg_t      mode;
134    bool_t     isAtomic = false;
135    thread_t * this     = CURRENT_THREAD;
136
137    hal_disable_irq( &mode );
138
139    if( lock->taken == 0)
140        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1);
141
142    if(isAtomic == false)
143    {
144        hal_restore_irq(mode);
145        return 1;
146    }
147    else
148    {
149        this->local_locks++;
150        lock->owner = this;
151        list_add_first( &this->locks_root , &lock->list );
152        hal_restore_irq(mode);
153        return 0;
154    }
155}
156
157/////////////////////////////////////////
158void spinlock_unlock( spinlock_t * lock )
159{
160    thread_t * this = CURRENT_THREAD;
161
162    lock->owner = NULL;
163    lock->taken = 0;
164    this->local_locks--;
165    list_unlink( &lock->list );
166
167    // deschedule if pending request
168    thread_check_sched();
169}
170
Note: See TracBrowser for help on using the repository browser.