source: trunk/kernel/libk/remote_mutex.c @ 635

Last change on this file since 635 was 635, checked in by alain, 15 months ago

This version is a major evolution: The physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This has been done to cure a dead-lock in case of concurrent page-faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File size: 12.8 KB
Line 
1/*
2 * remote_mutex.c - POSIX mutex implementation.
3 *
4 * Authors   Alain   Greiner (2016,2017,2018,2019)
5 *
6 * Copyright (c) UPMC Sorbonne Universites
7 *
8 * This file is part of ALMOS-MKH.
9 *
10 * ALMOS-MKH is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2.0 of the License.
13 *
14 * ALMOS-MKH is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
21 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include <kernel_config.h>
25#include <hal_kernel_types.h>
26#include <hal_remote.h>
27#include <thread.h>
28#include <xlist.h>
29#include <scheduler.h>
30#include <remote_busylock.h>
31#include <remote_mutex.h>
32
33
34/////////////////////////////////////////////////
35xptr_t remote_mutex_from_ident( intptr_t  ident )
36{
37    // get pointer on local process_descriptor
38    process_t * process = CURRENT_THREAD->process;
39
40    // get extended pointer on reference process
41    xptr_t      ref_xp = process->ref_xp;
42
43    // get cluster and local pointer on reference process
44    cxy_t          ref_cxy = GET_CXY( ref_xp );
45    process_t    * ref_ptr = (process_t *)GET_PTR( ref_xp );
46
47    // get extended pointers on mutexes list 
48    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
49    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );
50
51    // get lock protecting synchro lists
52    remote_queuelock_acquire( lock_xp );
53 
54    // scan reference process mutex list
55    xptr_t           iter_xp;
56    xptr_t           mutex_xp;
57    cxy_t            mutex_cxy;
58    remote_mutex_t * mutex_ptr;
59    intptr_t         current;
60    bool_t           found = false;
61           
62    XLIST_FOREACH( root_xp , iter_xp )
63    {
64        mutex_xp  = XLIST_ELEMENT( iter_xp , remote_mutex_t , list );
65        mutex_cxy = GET_CXY( mutex_xp );
66        mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
67        current     = (intptr_t)hal_remote_lpt( XPTR( mutex_cxy , &mutex_ptr->ident ) );   
68        if( ident == current )
69        {
70            found = true;
71            break;
72        }
73    }
74
75    // relese lock protecting synchros lists
76    remote_queuelock_release( lock_xp );
77 
78    if( found == false )  return XPTR_NULL;
79    else                  return mutex_xp;
80
81}  // end remote_mutex_from_ident()
82
83/////////////////////////////////////////////
84error_t remote_mutex_create( intptr_t ident )
85{ 
86    remote_mutex_t * mutex_ptr;
87    kmem_req_t       req;   
88
89    // get pointer on local process descriptor
90    process_t * process = CURRENT_THREAD->process;
91
92    // get extended pointer on reference process
93    xptr_t      ref_xp = process->ref_xp;
94
95    // get reference process cluster and local pointer
96    cxy_t       ref_cxy = GET_CXY( ref_xp );
97    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
98
99    // allocate memory for mutex descriptor in reference cluster
100    req.type    = KMEM_KCM;
101    req.order   = bits_log2( sizeof(remote_mutex_t) );
102    req.flags   = AF_ZERO | AF_KERNEL;
103    mutex_ptr   = kmem_remote_alloc( ref_cxy , &req );
104
105    if( mutex_ptr == NULL )
106    {
107       printk("\n[ERROR] in %s : cannot create mutex\n", __FUNCTION__);
108       return -1;
109    }
110
111    // initialise mutex
112    hal_remote_s32 ( XPTR( ref_cxy , &mutex_ptr->taken )   , 0 );
113    hal_remote_spt( XPTR( ref_cxy , &mutex_ptr->ident )   , (void *)ident );
114    xlist_entry_init( XPTR( ref_cxy , &mutex_ptr->list ) );
115    xlist_root_init( XPTR( ref_cxy , &mutex_ptr->root ) );
116    hal_remote_s64( XPTR( ref_cxy , &mutex_ptr->owner ) , XPTR_NULL );
117    remote_busylock_init( XPTR( ref_cxy , &mutex_ptr->lock ), LOCK_MUTEX_STATE );
118
119    // get root of mutexes list in process, and list_entry in mutex
120    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->mutex_root );
121    xptr_t xp_list = XPTR( ref_cxy , &mutex_ptr->list );
122
123    // get lock protecting user synchros lists
124    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
125
126    // register mutex in process descriptor
127    xlist_add_first( root_xp , xp_list );
128
129    // release lock protecting user synchros lists
130    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
131
132#if DEBUG_MUTEX
133thread_t * this = CURRENT_THREAD;
134if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
135printk("\n[%s] : thread[%x,%x] created mutex(%x,%x)\n",
136__FUNCTION__, this->process->pid, this->trdid, local_cxy, mutex_ptr );
137#endif
138
139
140    return 0;
141
142}  // end remote_mutex_create()
143
144////////////////////////////////////////////
145void remote_mutex_destroy( xptr_t mutex_xp )
146{
147    kmem_req_t  req;
148
149    // get pointer on local process descriptor
150    process_t * process = CURRENT_THREAD->process;
151
152    // get extended pointer on reference process
153    xptr_t      ref_xp = process->ref_xp;
154
155    // get reference process cluster and local pointer
156    cxy_t       ref_cxy = GET_CXY( ref_xp );
157    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
158
159    // get mutex cluster and local pointer
160    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
161    remote_mutex_t * mutex_ptr = (remote_mutex_t *)GET_PTR( mutex_xp );
162
163    // get lock protecting user synchros lists
164    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
165
166    // remove mutex from reference process xlist
167    xlist_unlink( XPTR( mutex_cxy , &mutex_ptr->list ) );
168
169    // release lock protecting user synchros lists
170    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
171
172    // release memory allocated for mutex descriptor
173    req.type = KMEM_KCM;
174    req.ptr  = mutex_ptr;
175    kmem_remote_free( mutex_cxy , &req );
176
177}  // end remote_mutex_destroy()
178
///////////////////////////////////////////////////////////////////////////////
// This blocking function takes the mutex identified by <mutex_xp>.
// If the mutex is free, the calling thread atomically takes it and registers
// itself as owner. If it is already taken, the calling thread registers in
// the mutex waiting queue, blocks, and deschedules; it retries the whole
// sequence when it is unblocked by the current owner in remote_mutex_unlock().
// All accesses to the mutex state are protected by the mutex busylock.
///////////////////////////////////////////////////////////////////////////////
void remote_mutex_lock( xptr_t mutex_xp )
{ 
    // get cluster and pointers on calling thread
    cxy_t            caller_cxy = local_cxy;
    thread_t       * caller_ptr = CURRENT_THREAD;
    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );

    // check calling thread can yield (it may deschedule below)
    thread_assert_can_yield( caller_ptr , __FUNCTION__ );

    // get cluster and local pointer on mutex
    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
    cxy_t            mutex_cxy = GET_CXY( mutex_xp );

    // get extended pointers on mutex fields
    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );

    // retry loop : one iteration per unsuccessful wakeup
    while( 1 )
    {
        // get busylock protecting mutex state
        remote_busylock_acquire( lock_xp );

        // test mutex state
        if( hal_remote_l32( taken_xp ) == 0 )                 // success
        {
            // register calling thread as mutex owner
            hal_remote_s64( owner_xp , caller_xp );

            // update mutex state
            hal_remote_s32( taken_xp , 1 );

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[%s] thread[%x,%x] SUCCESS on mutex(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, mutex_cxy, mutex_ptr );
#endif

            // release busylock protecting mutex state
            remote_busylock_release( lock_xp ); 

             return;
        }
        else                                                 //  already taken
        {
            // block the calling thread
            // (done while holding the busylock, so the owner cannot release
            //  the mutex and miss this thread between block and enqueue)
            thread_block( caller_xp , THREAD_BLOCKED_USERSYNC );

            // register calling thread in mutex waiting queue
            xptr_t entry_xp = XPTR( caller_cxy , &caller_ptr->wait_xlist );
            xlist_add_last( root_xp , entry_xp );

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[%s] thread[%x,%x] BLOCKED on mutex(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, mutex_cxy, mutex_ptr );
#endif

            // release busylock protecting mutex state
            remote_busylock_release( lock_xp ); 

            // deschedule calling thread ; execution resumes here after
            // thread_unblock() in remote_mutex_unlock(), and the loop retries
            sched_yield("blocked on mutex");
        }
    } 
}  // end remote_mutex_lock()
250
///////////////////////////////////////////////////////////////////////////////
// This function releases the mutex identified by <mutex_xp>.
// It returns an error (0xFFFFFFFF) if the calling thread is not the current
// owner. On success it clears the owner and taken fields, and unblocks the
// first thread registered in the mutex waiting queue (if any), which will
// retry the acquisition in remote_mutex_lock().
// All accesses to the mutex state are protected by the mutex busylock.
///////////////////////////////////////////////////////////////////////////////
error_t remote_mutex_unlock( xptr_t mutex_xp )
{
    // memory barrier before mutex release : makes the critical-section
    // writes visible before another thread can take the mutex
    hal_fence();

    // get cluster and local pointer on mutex
    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
    cxy_t            mutex_cxy = GET_CXY( mutex_xp );

    // get cluster and pointers on calling thread
    cxy_t            caller_cxy = local_cxy;
    thread_t       * caller_ptr = CURRENT_THREAD;
    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );

    // get extended pointers on mutex fields
    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
    xptr_t           root_xp  = XPTR( mutex_cxy , &mutex_ptr->root );
    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );

    // get busylock protecting mutex state
    remote_busylock_acquire( lock_xp );
   
    // check calling thread is mutex owner
    if( hal_remote_l64( owner_xp ) != caller_xp )
    {
        // release busylock protecting mutex state
        remote_busylock_release( lock_xp );

        return 0xFFFFFFFF;
    }

#if DEBUG_MUTEX
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
printk("\n[%s] thread[%x,%x] EXIT / mutex(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, mutex_cxy, mutex_ptr );
#endif

    // update owner field,
    hal_remote_s64( owner_xp , XPTR_NULL );

    // update taken field
    hal_remote_s32( taken_xp , 0 );

    // unblock first waiting thread if waiting list non empty
    if( xlist_is_empty( root_xp ) == false )
    {
        // get extended pointer on first waiting thread
        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
        thread_t * thread_ptr = GET_PTR( thread_xp );
        cxy_t      thread_cxy = GET_CXY( thread_xp );

#if DEBUG_MUTEX
// NOTE : <this> is declared in the previous DEBUG_MUTEX section above,
// which is at function scope, so it is visible here
if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX )
{
trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
printk("\n[%s] thread[%x,%x] UNBLOCK thread %x in process %d / mutex(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, trdid, pid, mutex_cxy, mutex_ptr );
}
#endif

        // remove this thread from waiting queue
        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );

        // unblock first waiting thread : it will retry the acquisition
        // loop in remote_mutex_lock() when rescheduled
        thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC ); 
    }
   
    // release busylock protecting mutex state
    remote_busylock_release( lock_xp );

    return 0;

}  // end remote_mutex_unlock()
329
330///////////////////////////////////////////////
331error_t remote_mutex_trylock( xptr_t mutex_xp )
332{
333    // get cluster and local pointer on mutex
334    remote_mutex_t * mutex_ptr = GET_PTR( mutex_xp );
335    cxy_t            mutex_cxy = GET_CXY( mutex_xp );
336
337    // get cluster and pointers on calling thread
338    cxy_t            caller_cxy = local_cxy;
339    thread_t       * caller_ptr = CURRENT_THREAD;
340    xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );
341
342    // get extended pointers on mutex fields
343    xptr_t           taken_xp = XPTR( mutex_cxy , &mutex_ptr->taken );
344    xptr_t           owner_xp = XPTR( mutex_cxy , &mutex_ptr->owner );
345    xptr_t           lock_xp  = XPTR( mutex_cxy , &mutex_ptr->lock );
346
347    // get busylock protecting mutex state
348    remote_busylock_acquire( lock_xp );
349
350    // test mutex state
351    if( hal_remote_l32( taken_xp ) == 0 )                 // success
352    {
353        // register calling thread as mutex owner
354        hal_remote_s64( owner_xp , caller_xp );
355
356        // update mutex state
357        hal_remote_s32( taken_xp , 1 );
358
359#if DEBUG_MUTEX
360thread_t * this = CURRENT_THREAD;
361if( (uint32_t)hal_get_cycles() > DEBUG_QUEUELOCK )
362printk("\n[%s] SUCCESS for thread[%x,%x] / mutex(%x,%x)\n",
363__FUNCTION__, this->process->pid, this->trdid, mutex_cxy, mutex_ptr );
364#endif
365        // release busylock protecting mutex state
366        remote_busylock_release( lock_xp ); 
367
368        return 0;
369    }
370    else                                                 //  already taken
371    {
372
373#if DEBUG_MUTEX
374thread_t * this = CURRENT_THREAD;
375if( (uint32_t)hal_get_cycles() > DEBUG_QUEUELOCK )
376printk("\n[%s] FAILURE for thread[%x,%x] / mutex(%x,%x)\n",
377__FUNCTION__, this->process->pid, this->trdid, mutex_cxy, mutex_ptr );
378#endif
379        // release busylock protecting mutex state
380        remote_busylock_release( lock_xp ); 
381
382        return 0xFFFFFFFF;
383    }
384}  // end remote_mutex_trylock()
Note: See TracBrowser for help on using the repository browser.