source: trunk/kernel/libk/remote_spinlock.c @ 433

Last change on this file since 433 was 433, checked in by alain, 4 years ago

blip

File size: 6.4 KB
Line 
1/*
2 * remote_spinlock.c - kernel remote spinlock implementation.
3 *
4 * Authors  Mohamed Karaoui (2015)
5 *          Alain   Greiner (2016)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <hal_types.h>
26#include <hal_remote.h>
27#include <hal_irqmask.h>
28#include <thread.h>
29#include <cluster.h>
30#include <scheduler.h>
31#include <remote_spinlock.h>
32
33///////////////////////////////////////////
34void remote_spinlock_init( xptr_t lock_xp )
35{
36        remote_spinlock_t * ptr = GET_PTR( lock_xp );
37        cxy_t               cxy = GET_CXY( lock_xp );
38
39        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
40
41#if CONFIG_DEBUG_LOCKS
42        hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
43        xlist_entry_init( XPTR( cxy , &ptr->list ) );
44#endif
45
46}
47
48/////////////////////////////////////////////////
49error_t remote_spinlock_trylock( xptr_t lock_xp )
50{
51        reg_t               mode;
52        bool_t              isAtomic = false;
53
54        // get cluster and local pointer on remote_spinlock
55        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
56        cxy_t               lock_cxy = GET_CXY( lock_xp );
57
58        // get local pointer on local thread
59        thread_t          * thread_ptr = CURRENT_THREAD;
60
61        // disable interrupts
62        hal_disable_irq( &mode );
63
64        if( hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) ) == 0 )
65        {
66                isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
67        }
68
69        if( isAtomic == false )    // failure
70        {
71                hal_restore_irq( mode );
72                return 1;
73        }
74        else                      // success : register lock in local thread
75        {
76                thread_ptr->remote_locks++;
77
78#if CONFIG_DEBUG_LOCKS
79                hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
80                                XPTR( local_cxy , thread_ptr) );
81                xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
82                                 XPTR( lock_cxy , &lock_ptr->list ) );
83#endif
84
85                hal_restore_irq(mode);
86                return 0;
87        }
88}
89
90///////////////////////////////////////////////////
91void remote_spinlock_lock_busy( xptr_t     lock_xp,
92                                uint32_t * irq_state )
93{
94        bool_t              isAtomic = false;
95        reg_t               mode;
96        volatile uint32_t   taken;
97
98        // get cluster and local pointer on remote_spinlock
99        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
100        cxy_t               lock_cxy = GET_CXY( lock_xp );
101
102        // get local pointer on local thread
103        thread_t          * thread_ptr = CURRENT_THREAD;
104
105        // disable interrupts
106        hal_disable_irq( &mode );
107
108        // loop until success
109        while( isAtomic == false )
110        {
111                taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );
112
113                // try to take the lock if not already taken
114                if( taken == 0 )
115                {
116                        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
117                }
118        }
119
120        // register lock in thread
121        thread_ptr->remote_locks++;
122
123#if CONFIG_DEBUG_LOCKS
124        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
125                        XPTR( local_cxy , thread_ptr) );
126        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
127                         XPTR( lock_cxy  , &lock_ptr->list ) );
128#endif
129
130        // irq_state must be restored when lock is released
131        *irq_state = mode;
132
133}  // end remote_spinlock_lock_busy()
134
135////////////////////////////////////////////////////
136void remote_spinlock_unlock_busy( xptr_t    lock_xp,
137                                  uint32_t  irq_state )
138{
139        // get cluster and local pointer on remote_spinlock
140        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
141        cxy_t               lock_cxy = GET_CXY( lock_xp );
142
143        // get pointer on local thread
144        thread_t          * thread_ptr = CURRENT_THREAD;
145
146#if CONFIG_DEBUG_LOCKS
147        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
148        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
149#endif
150
151        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
152        thread_ptr->remote_locks--;
153
154    // deschedule if pending request
155    thread_check_sched();
156 
157    // restore IRQs
158        hal_restore_irq( irq_state );
159}
160
161///////////////////////////////////////////
// Take a remote spinlock, yielding the processor (when allowed) instead of
// pure busy-waiting while the lock is held by another thread.
// On success the lock is registered in the calling thread and interrupts
// are re-enabled before returning (the saved IRQ state is NOT exported,
// unlike remote_spinlock_lock_busy()).
void remote_spinlock_lock( xptr_t lock_xp )
{
        bool_t              isAtomic = false;
        reg_t               mode;
        volatile uint32_t   taken;

        // get cluster and local pointer on remote_spinlock
        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
        cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on calling thread
    thread_t          * thread_ptr = CURRENT_THREAD;

        // disable interrupts
        hal_disable_irq( &mode );

        // loop until success
        while( isAtomic == false )
        {
                taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

                // deschedule if possible when lock already taken
                if( taken != 0 )
                {
                        // IRQs must be restored around the yield, then
                        // disabled again before re-testing the lock
                        hal_restore_irq( mode );
                        if( thread_can_yield() ) sched_yield("waiting spinlock");
                        hal_disable_irq( &mode );
                        continue;
                }

                // try to take the lock if not already taken
                // (CAS can still fail if another thread won the race)
                isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }

        // register lock in thread
        thread_ptr->remote_locks++;

#if CONFIG_DEBUG_LOCKS
        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
                        XPTR( local_cxy , thread_ptr) );
        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                         XPTR( lock_cxy  , &lock_ptr->list ) );
#endif

        // enable interrupts
        hal_restore_irq( mode );
}
209
210/////////////////////////////////////////////
211void remote_spinlock_unlock( xptr_t lock_xp )
212{
213        // get cluster and local pointer on remote_spinlock
214        remote_spinlock_t * lock_ptr = GET_PTR( lock_xp );
215        cxy_t               lock_cxy = GET_CXY( lock_xp );
216
217        // get pointer on local thread
218        thread_t          * thread_ptr = CURRENT_THREAD;
219
220#if CONFIG_DEBUG_LOCKS
221        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
222        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
223#endif
224
225        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
226        thread_ptr->remote_locks--;
227
228    // deschedule if pending request
229    thread_check_sched();
230}
231
Note: See TracBrowser for help on using the repository browser.