source: trunk/kernel/libk/remote_spinlock.c @ 479

Last change on this file since 479 was 457, checked in by alain, 6 years ago

This version modifies the exec syscall and fixes a large number of small bugs.
The version number has been updated (0.1).

File size: 6.4 KB
/*
 * remote_spinlock.c - kernel remote spinlock implementation.
 *
 * Authors   Alain   Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
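// A remote spinlock protects a shared structure that can be accessed from any
// cluster: the lock is always identified by an extended pointer (xptr_t).
// Illustrative usage sketch (not part of the original file: <target_cxy>,
// <target_ptr>, and the embedded "lock" field are assumptions made for this
// example):
//
//     xptr_t lock_xp = XPTR( target_cxy , &target_ptr->lock );
//
//     remote_spinlock_lock( lock_xp );     // take the lock (may deschedule)
//     ... access the remote structure ...
//     remote_spinlock_unlock( lock_xp );   // release the lock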

#include <hal_kernel_types.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <thread.h>
#include <cluster.h>
#include <scheduler.h>
#include <remote_spinlock.h>

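// This function initializes a remote spinlock in the released state (taken == 0).
// When DEBUG_REMOTE_SPINLOCKS is set, it also resets the owner field and the
// embedded xlist entry used to register the lock in the owner thread.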
///////////////////////////////////////////
void remote_spinlock_init( xptr_t lock_xp )
{
        remote_spinlock_t * ptr = GET_PTR( lock_xp );
        cxy_t               cxy = GET_CXY( lock_xp );

        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );

#if DEBUG_REMOTE_SPINLOCKS
hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
xlist_entry_init( XPTR( cxy , &ptr->list ) );
#endif

}

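// This function makes a single attempt to take a remote spinlock, without
// blocking. IRQs are disabled during the attempt and restored before returning.
// On success, the lock is registered in the calling thread (remote_locks
// counter) and the function returns 0. On failure, it returns 1.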
/////////////////////////////////////////////////
error_t remote_spinlock_trylock( xptr_t lock_xp )
{
        reg_t               mode;
        bool_t              isAtomic = false;

        // get cluster and local pointer on remote_spinlock
        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
        cxy_t               lock_cxy = GET_CXY( lock_xp );

        // get local pointer on local thread
        thread_t          * thread_ptr = CURRENT_THREAD;

        // disable interrupts
        hal_disable_irq( &mode );

        if( hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) ) == 0 )
        {
                isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }

        if( isAtomic == false )    // failure
        {
                hal_restore_irq( mode );
                return 1;
        }
        else                      // success : register lock in local thread
        {
                thread_ptr->remote_locks++;

#if DEBUG_REMOTE_SPINLOCKS
hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                XPTR( local_cxy , thread_ptr) );
xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                 XPTR( lock_cxy , &lock_ptr->list ) );
#endif

                hal_restore_irq( mode );
                return 0;
        }
}

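// This blocking function takes a remote spinlock, polling the lock until it is
// acquired. IRQs are disabled and left disabled on return: the saved IRQ state
// is returned through the <irq_state> argument and must be passed back to
// remote_spinlock_unlock_busy() when the lock is released.
// Illustrative pairing (sketch only, <lock_xp> as in the example above):
//
//     uint32_t irq_state;
//     remote_spinlock_lock_busy( lock_xp , &irq_state );
//     ... short critical section ...
//     remote_spinlock_unlock_busy( lock_xp , irq_state );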
///////////////////////////////////////////////////
void remote_spinlock_lock_busy( xptr_t     lock_xp,
                                uint32_t * irq_state )
{
        bool_t              isAtomic = false;
        reg_t               mode;
        volatile uint32_t   taken;

        // get cluster and local pointer on remote_spinlock
        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
        cxy_t               lock_cxy = GET_CXY( lock_xp );

        // get local pointer on local thread
        thread_t          * thread_ptr = CURRENT_THREAD;

        // disable interrupts
        hal_disable_irq( &mode );

        // loop until success
        while( isAtomic == false )
        {
                taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

                // try to take the lock if not already taken
                if( taken == 0 )
                {
                        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
                }
        }

        // register lock in thread
        thread_ptr->remote_locks++;

#if DEBUG_REMOTE_SPINLOCKS
hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
                XPTR( local_cxy , thread_ptr) );
xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                 XPTR( lock_cxy  , &lock_ptr->list ) );
#endif

        // irq_state must be restored when lock is released
        *irq_state = mode;

}  // end remote_spinlock_lock_busy()

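// This function releases a remote spinlock taken by remote_spinlock_lock_busy():
// it clears the taken field, decrements the calling thread remote_locks counter,
// honours a possible pending descheduling request, and restores the IRQ state
// saved by the lock function.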
////////////////////////////////////////////////////
void remote_spinlock_unlock_busy( xptr_t    lock_xp,
                                  uint32_t  irq_state )
{
        // get cluster and local pointer on remote_spinlock
        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
        cxy_t               lock_cxy = GET_CXY( lock_xp );

        // get pointer on local thread
        thread_t          * thread_ptr = CURRENT_THREAD;

#if DEBUG_REMOTE_SPINLOCKS
hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
#endif

        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
        thread_ptr->remote_locks--;

        // deschedule if pending request
        thread_check_sched();

        // restore IRQs
        hal_restore_irq( irq_state );
}

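// This blocking function takes a remote spinlock. When the lock is already
// taken, the calling thread releases the CPU (sched_yield) if descheduling is
// allowed, instead of busy waiting. IRQs are disabled during each acquisition
// attempt and restored before returning.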
///////////////////////////////////////////
void remote_spinlock_lock( xptr_t lock_xp )
{
        bool_t              isAtomic = false;
        reg_t               mode;
        volatile uint32_t   taken;

        // get cluster and local pointer on remote_spinlock
        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
        cxy_t               lock_cxy = GET_CXY( lock_xp );

        // get local pointer on calling thread
        thread_t          * thread_ptr = CURRENT_THREAD;

        // disable interrupts
        hal_disable_irq( &mode );

        // loop until success
        while( isAtomic == false )
        {
                taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

                // deschedule if possible when lock already taken
                if( taken != 0 )
                {
                        hal_restore_irq( mode );
                        if( thread_can_yield() ) sched_yield("waiting spinlock");
                        hal_disable_irq( &mode );
                        continue;
                }

                // try to take the lock if not already taken
                isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }

        // register lock in thread
        thread_ptr->remote_locks++;

#if DEBUG_REMOTE_SPINLOCKS
hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ),
                XPTR( local_cxy , thread_ptr) );
xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ),
                 XPTR( lock_cxy  , &lock_ptr->list ) );
#endif

        // enable interrupts
        hal_restore_irq( mode );
}

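// This function releases a remote spinlock taken by remote_spinlock_lock():
// it clears the taken field, decrements the calling thread remote_locks
// counter, and honours a possible pending descheduling request. It does not
// modify the IRQ state.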
/////////////////////////////////////////////
void remote_spinlock_unlock( xptr_t lock_xp )
{
        // get cluster and local pointer on remote_spinlock
        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
        cxy_t               lock_cxy = GET_CXY( lock_xp );

        // get pointer on local thread
        thread_t          * thread_ptr = CURRENT_THREAD;

#if DEBUG_REMOTE_SPINLOCKS
hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
#endif

        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
        thread_ptr->remote_locks--;

        // deschedule if pending request
        thread_check_sched();
}