source: trunk/kernel/kern/cluster.c @ 14

Last change on this file since 14 was 14, checked in by alain, 5 years ago

Bug fixes.

File size: 15.9 KB
Line 
1/*
2 * cluster.c - Cluster-Manager related operations
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
6 *         Alain Greiner (2016)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_types.h>
28#include <hal_atomic.h>
29#include <hal_special.h>
30#include <printk.h>
31#include <errno.h>
32#include <spinlock.h>
33#include <core.h>
34#include <scheduler.h>
35#include <list.h>
36#include <cluster.h>
37#include <boot_info.h>
38#include <bits.h>
39#include <ppm.h>
40#include <thread.h>
41#include <kmem.h>
42#include <process.h>
43#include <dqdt.h>
44
45// TODO #include <sysfs.h>
46
///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

process_t process_zero;     // kernel "process zero" descriptor, allocated in kernel_init.c file
52
53
54
//////////////////////////////////
// Registers the cluster manager in the sysfs pseudo file-system.
// Not implemented yet : the body is intentionally empty (TODO).
//////////////////////////////////
void cluster_sysfs_register(void)
{
}
60
61/////////////////////////////////////////////////
62error_t cluster_init( struct boot_info_s * info )
63{
64    lpid_t      lpid;     // local process_index
65    lid_t       lid;      // local core index
66
67        cluster_t * cluster = LOCAL_CLUSTER;
68
69    // initialize cluster global parameters
70        cluster->paddr_width     = info->paddr_width; 
71        cluster->x_width         = info->x_width;
72        cluster->y_width         = info->y_width;
73        cluster->x_size          = info->x_size;
74        cluster->y_size          = info->y_size;
75        cluster->io_cxy          = info->io_cxy;
76
77    // initialize cluster local parameters
78        cluster->cores_nr        = info->cores_nr;
79    cluster->cores_in_kernel = info->cores_nr;   // all cpus start in kernel mode
80
81    // initialize the lock protectig the embedded kcm allocator
82        spinlock_init( &cluster->kcm_lock );
83
84    // initialises DQDT
85    cluster->dqdt_root_level = dqdt_init( info->x_size, 
86                                          info->y_size, 
87                                          info->y_width );
88    cluster->threads_var = 0;
89    cluster->pages_var   = 0;
90
91    // initialises embedded PPM
92        ppm_init( &cluster->ppm,
93              info->pages_nr,
94              info->pages_offset );
95
96    // initialises embedded KHM
97        khm_init( &cluster->khm );
98 
99    // initialises embedded KCM
100        kcm_init( &cluster->kcm , KMEM_KCM );
101
102    // initialises all cores descriptors
103        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
104        {
105                core_init( &cluster->core_tbl[lid],    // target core descriptor
106                       lid,                        // local core index
107                       info->core[lid].gid );      // gid from boot_info_t
108        }
109       
110    // initialises RPC fifo
111        rpc_fifo_init( &cluster->rpc_fifo );
112
113    // initialise pref_tbl[] in process manager
114        spinlock_init( &cluster->pmgr.pref_lock );
115    cluster->pmgr.pref_nr = 0;
116    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );   
117    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
118    {
119        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
120    }
121
122    // initialise local_list in process manager
123        spinlock_init( &cluster->pmgr.local_lock );
124    list_root_init( &cluster->pmgr.local_root );
125    cluster->pmgr.local_nr = 0;
126
127    // initialise copies_lists in process manager
128    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
129    {
130            remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
131        cluster->pmgr.copies_nr[lpid] = 0;
132        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
133    }   
134
135    hal_wbflush();
136
137        return 0;
138} // end cluster_init()
139
140////////////////////////////////////////
141bool_t cluster_is_undefined( cxy_t cxy )
142{
143    cluster_t * cluster = LOCAL_CLUSTER;
144
145    uint32_t y_width = cluster->y_width;
146
147    uint32_t x = cxy >> y_width;
148    uint32_t y = cxy & ((1<<y_width)-1);
149
150    if( x >= cluster->x_size ) return true; 
151    if( y >= cluster->y_size ) return true; 
152
153    return false;
154}
155
156////////////////////////////////////////////////////////////////////////////////////
157//  Cores related functions
158////////////////////////////////////////////////////////////////////////////////////
159
160////////////////////////////////
161void cluster_core_kernel_enter()
162{
163    cluster_t * cluster = LOCAL_CLUSTER;
164        hal_atomic_inc( &cluster->cores_in_kernel );
165}
166
167///////////////////////////////
168void cluster_core_kernel_exit()
169{
170    cluster_t * cluster = LOCAL_CLUSTER;
171        hal_atomic_dec( &cluster->cores_in_kernel );
172}
173
174/////////////////////////////////
175lid_t cluster_select_local_core()
176{
177    uint32_t min = 100;
178    lid_t    sel = 0;
179    lid_t    lid;
180
181    cluster_t * cluster = LOCAL_CLUSTER;
182
183    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
184    {
185        if( cluster->core_tbl[lid].usage < min )
186        {
187            min = cluster->core_tbl[lid].usage;
188            sel = lid;
189        }
190    } 
191    return sel;
192}
193
194////////////////////////////////////////////////////////////////////////////////////
195//  Process management related functions
196////////////////////////////////////////////////////////////////////////////////////
197
198//////////////////////////////////////////////////////////
199xptr_t cluster_get_reference_process_from_pid( pid_t pid )
200{ 
201    xptr_t xp;   // extended pointer on process descriptor
202
203    cluster_t * cluster = LOCAL_CLUSTER;
204
205    // get owner cluster and lpid
206    cxy_t  owner_cxy = CXY_FROM_PID( pid );
207    lpid_t lpid      = LPID_FROM_PID( pid );
208
209    // Check valid PID
210    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )
211    {
212        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
213        hal_core_sleep();
214    }
215
216    if( local_cxy == owner_cxy )   // local cluster is owner cluster
217    { 
218        xp = cluster->pmgr.pref_tbl[lpid];
219    }
220    else                              // use a remote_lwd to access owner cluster
221    {
222        xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
223    }
224
225    return xp;
226}
227
228////////////////////////////////////////////////
229error_t cluster_pid_alloc( xptr_t    process_xp,
230                           pid_t   * pid )
231{
232    error_t     error;
233    lpid_t      lpid;
234    bool_t      found;
235
236    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
237
238    // get the process manager lock
239    spinlock_lock( &pm->pref_lock );
240
241    // search an empty slot
242    found = false;
243    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
244    {
245        if( pm->pref_tbl[lpid] == XPTR_NULL )
246        {
247            found = true;
248            break;
249        }
250    }
251
252    if( found )
253    {
254        // register process in pref_tbl[]
255        pm->pref_tbl[lpid] = process_xp;
256        pm->pref_nr++;
257
258        // returns pid
259        *pid = PID( local_cxy , lpid );
260
261        error = 0;
262    }
263    else
264    {
265        error = EAGAIN;
266    }   
267
268    // release the processs_manager lock
269    spinlock_unlock( &pm->pref_lock );
270
271    return error;
272
273} // end cluster_pid_alloc()
274
275/////////////////////////////////////
276void cluster_pid_release( pid_t pid )
277{
278    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
279    lpid_t lpid       = LPID_FROM_PID( pid );
280
281    // check pid argument
282    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
283    {
284        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
285        hal_core_sleep();
286    }
287
288    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
289
290    // get the process manager lock
291    spinlock_lock( &pm->pref_lock );
292
293    // remove process from pref_tbl[]
294    pm->pref_tbl[lpid] = XPTR_NULL;
295    pm->pref_nr--;
296
297    // release the processs_manager lock
298    spinlock_unlock( &pm->pref_lock );
299
300} // end cluster_pid_release()
301
302///////////////////////////////////////////////////////////
303process_t * cluster_get_local_process_from_pid( pid_t pid )
304{
305    process_t    * ret     = NULL;
306    list_entry_t * root    = &LOCAL_CLUSTER->pmgr.local_root;
307    list_entry_t * iter;
308    process_t    * process;
309   
310    LIST_FOREACH( root , iter )
311    {
312        process = LIST_ELEMENT( iter , process_t , local_list );
313        if( process->pid == pid )
314        {
315            ret = process;
316            break;
317        }
318    }
319    return ret;
320
321}  // end cluster_get_local_process_from_pid()
322
323//////////////////////////////////////////////////////
324void cluster_process_local_link( process_t * process )
325{
326    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
327
328    // get lock protecting the process manager local list
329    spinlock_lock( &pm->local_lock );
330
331    list_add_first( &pm->local_root , &process->local_list );
332    pm->local_nr++;
333
334    // release lock protecting the process manager local list
335    spinlock_unlock( &pm->local_lock );
336}
337
338////////////////////////////////////////////////////////
339void cluster_process_local_unlink( process_t * process )
340{
341    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
342
343    // get lock protecting the process manager local list
344    spinlock_lock( &pm->local_lock );
345
346    list_unlink( &process->local_list );
347    pm->local_nr--;
348
349    // release lock protecting the process manager local list
350    spinlock_unlock( &pm->local_lock );
351}
352
353///////////////////////////////////////////////////////
354void cluster_process_copies_link( process_t * process )
355{
356    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
357
358    // get owner cluster identifier CXY and process LPID
359    pid_t    pid        = process->pid;
360    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
361    lpid_t   lpid       = LPID_FROM_PID( pid );
362
363    // get extended pointer on lock protecting copies_list[lpid]
364    xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );
365
366    // get extended pointer on the copies_list[lpid] root
367    xptr_t copies_root  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_root[lpid] ) );
368
369    // get extended pointer on the local copies_list entry
370    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
371
372    // get lock protecting copies_list[lpid]
373    remote_spinlock_lock( copies_lock );
374
375    xlist_add_first( copies_root , copies_entry );
376    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
377
378    // release lock protecting copies_list[lpid]
379    remote_spinlock_unlock( copies_lock );
380}
381
382/////////////////////////////////////////////////////////
383void cluster_process_copies_unlink( process_t * process )
384{
385    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
386
387    // get owner cluster identifier CXY and process LPID
388    pid_t    pid        = process->pid;
389    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
390    lpid_t   lpid       = LPID_FROM_PID( pid );
391
392    // get extended pointer on lock protecting copies_list[lpid]
393    xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );
394
395    // get extended pointer on the local copies_list entry
396    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );
397
398    // get lock protecting copies_list[lpid]
399    remote_spinlock_lock( copies_lock );
400
401    xlist_unlink( copies_entry );
402    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
403
404    // release lock protecting copies_list[lpid]
405    remote_spinlock_unlock( copies_lock );
406}
407
////////////////////////////////////////////////////////////////////////////////////////
// TODO It seems that the only thing this kernel thread does on each wake-up
// is to update the DQDT, then go back to sleep... Do we really need a thread? [AG]
//////////////////////////////////////////////////////////////////////////////////////////

// NOTE(review): everything until the matching #endif is compiled out ; it is legacy
// ALMOS v1 code (event / alarm / ckey API) kept only for reference.
#if 0
void * cluster_manager_thread( void * arg )
{
	register struct dqdt_cluster_s * root;
	register struct cluster_s      * root_home;

	register uint32_t                tm_start;
	register uint32_t                tm_end;
	register uint32_t                cpu_id;
	struct cluster_s               * cluster;
	struct thread_s                * this;
	struct event_s                   event;
	struct alarm_info_s              info;
	register uint32_t                cntr;
	register bool_t                  isRootMgr;
	register uint32_t                period;

	cpu_enable_all_irq( NULL );

	cluster   = arg;
	this      = CURRENT_THREAD;
	cpu_id    = cpu_get_id();
	root      = dqdt_root;
	root_home = dqdt_root->home;
	isRootMgr = (cluster == root_home) ? true : false;
	cntr      = 0;
	period    = (isRootMgr) ?
		CONFIG_DQDT_ROOTMGR_PERIOD * MSEC_PER_TICK :
		CONFIG_DQDT_MGR_PERIOD * MSEC_PER_TICK;

	event_set_senderId(&event, this);
	event_set_priority(&event, E_CHR);
	event_set_handler(&event, &manager_alarm_event_handler);

	info.event = &event;
	thread_preempt_disable(CURRENT_THREAD);

    // infinite loop
	while(1)
	{
		tm_start = cpu_time_stamp();
		dqdt_update();
		tm_end   = cpu_time_stamp();

		if(isRootMgr)
		{
			if((cntr % 10) == 0)
			{
				printk(INFO, "INFO: cpu %d, DQDT update ended [ %u - %u ]\n",
				       cpu_id,
				       tm_end,
				       tm_end - tm_start);

				dqdt_print_summary(root);
			}
		}

		alarm_wait( &info , period );
		sched_sleep(this);
		cntr ++;
	}

	return NULL;
} // end cluster_manager_thread()

//////////////////////////////////////////
EVENT_HANDLER(manager_alarm_event_handler)
{
	struct thread_s *manager;
 
	manager = event_get_senderId(event);
 
	thread_preempt_disable(CURRENT_THREAD);

	//printk(INFO, "%s: cpu %d [%u]\n", __FUNCTION__, cpu_get_id(), cpu_time_stamp());

	sched_wakeup(manager);
 
	thread_preempt_enable(CURRENT_THREAD);

	return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_create_event_handler)
{
	struct cluster_s *cluster;
	struct thread_s *sender;
	ckey_t *ckey;
	uint32_t key;

	sender  = event_get_senderId(event);
	ckey    = event_get_argument(event);
	cluster = current_cluster;
	key     = cluster->next_key;

	while((key < CLUSTER_TOTAL_KEYS_NR) && (cluster->keys_tbl[key] != NULL))
		key ++;

	if(key < CLUSTER_TOTAL_KEYS_NR)
	{
		ckey->val = key;
		cluster->keys_tbl[key] = (void *) 0x1; // Reserved
		cluster->next_key = key;
		event_set_error(event, 0);
	}
	else
		event_set_error(event, ENOSPC);

	sched_wakeup(sender);
	return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_delete_event_handler)
{
	struct cluster_s *cluster;
	struct thread_s *sender;
	ckey_t *ckey;
	uint32_t key;

	sender  = event_get_senderId(event);
	ckey    = event_get_argument(event);
	cluster = current_cluster;
	key     = ckey->val;

	if(key < cluster->next_key)
		cluster->next_key = key;

	cluster->keys_tbl[key] = NULL;
	event_set_error(event, 0);

	sched_wakeup(sender);
	return 0;
}

#define _CKEY_CREATE  0x0
#define _CKEY_DELETE  0x1

error_t cluster_do_key_op(ckey_t *key, uint32_t op)
{
	struct event_s event;
	struct thread_s *this;
	struct cluster_s *cluster;
	struct cpu_s *cpu;

	this = CURRENT_THREAD;

	event_set_priority(&event, E_FUNC);
	event_set_senderId(&event, this);
	event_set_argument(&event, key);

	if(op == _CKEY_CREATE)
		event_set_handler(&event, cluster_key_create_event_handler);
	else
		event_set_handler(&event, cluster_key_delete_event_handler);

	cluster = current_cluster;
	cpu     = cluster->bscluster->bscpu;
	event_send(&event, &cpu->re_listner);

	sched_sleep(this);

	return event_get_error(&event);
}

error_t cluster_key_create(ckey_t *key)
{
	return cluster_do_key_op(key, _CKEY_CREATE);
}

error_t cluster_key_delete(ckey_t *key)
{
	return cluster_do_key_op(key, _CKEY_DELETE);
}

void* cluster_getspecific(ckey_t *key)
{
	struct cluster_s *cluster;

	cluster = current_cluster;
	return cluster->keys_tbl[key->val];
}

void  cluster_setspecific(ckey_t *key, void *val)
{
	struct cluster_s *cluster;

	cluster = current_cluster;
	cluster->keys_tbl[key->val] = val;
}
#endif
Note: See TracBrowser for help on using the repository browser.