source: trunk/kernel/kern/cluster.c @ 409

Last change on this file since 409 was 409, checked in by alain, 6 years ago

Fix bugs in exec

File size: 17.0 KB
/*
 * cluster.c - Cluster-Manager related operations
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Mohamed Lamine Karaoui (2015)
 *          Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <remote_fifo.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

/////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
/////////////////////////////////////////////////////////////////////////////////////

extern process_t process_zero;     // allocated in kernel_init.c file


/////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t     error;
    lpid_t      lpid;     // local process index
    lid_t       lid;      // local core index

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->io_cxy          = info->io_cxy;

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded KCM allocator
    spinlock_init( &cluster->kcm_lock );

    cluster_dmsg("\n[DBG] %s : enters for cluster %x\n",
                 __FUNCTION__ , local_cxy );

    // initialize DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width );
    cluster->threads_var = 0;
    cluster->pages_var   = 0;

    // initialize embedded PPM
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

    cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize embedded KHM
    khm_init( &cluster->khm );

    cluster_dmsg("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

    cluster_dmsg("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize all core descriptors
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

    cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize RPC FIFO
    local_fifo_init( &cluster->rpc_fifo );
    cluster->rpc_threads = 0;

    cluster_dmsg("\n[DBG] %s : RPC fifo initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize pref_tbl[] in process manager
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize local_list in process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialize copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

    cluster_dmsg("\n[DBG] %s : Process Manager initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    hal_fence();

    return 0;
} // end cluster_init()
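
// The sketch below is illustrative only and kept out of compilation with #if 0:
// it shows how cluster_init() is presumably called once per cluster, early in the
// boot sequence, with the boot_info_t structure built by the bootloader. The helper
// name, the calling context and the error handling are assumptions, not taken from
// kernel_init.c.
#if 0
static void cluster_init_usage_example( struct boot_info_s * info )
{
    // initialize the local cluster manager from the boot information
    if( cluster_init( info ) )
    {
        // a failure here is fatal for the local cluster
        printk("\n[ERROR] in %s : cannot initialize cluster %x\n",
               __FUNCTION__ , local_cxy );
    }
}
#endif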

////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1<<y_width)-1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}
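
// Illustrative sketch, kept out of compilation with #if 0: a cluster identifier
// packs the mesh coordinates as cxy = (x << y_width) | y, which is what
// cluster_is_undefined() decodes above. The cxy value 0x21 and the y_width == 4
// used in the comments are arbitrary assumptions made for this example.
#if 0
static void cluster_cxy_decode_example( void )
{
    cxy_t    cxy     = 0x21;                       // example identifier
    uint32_t y_width = LOCAL_CLUSTER->y_width;     // assumed to be 4 here

    uint32_t x = cxy >> y_width;                   // 0x21 >> 4  == 2
    uint32_t y = cxy & ((1 << y_width) - 1);       // 0x21 & 0xF == 1

    if( cluster_is_undefined( cxy ) )
        printk("\n[DBG] cluster %x (x=%d,y=%d) is outside the mesh\n", cxy , x , y );
}
#endif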

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////
lid_t cluster_select_local_core()
{
    uint32_t min = 100;
    lid_t    sel = 0;
    lid_t    lid;

    cluster_t * cluster = LOCAL_CLUSTER;

    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        if( cluster->core_tbl[lid].usage < min )
        {
            min = cluster->core_tbl[lid].usage;
            sel = lid;
        }
    }
    return sel;
}
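
// Illustrative sketch, kept out of compilation with #if 0: a thread placement
// function could use cluster_select_local_core() to pick the least loaded local
// core. The core_tbl[].usage field is the one scanned above; the helper name and
// the surrounding placement logic are assumptions.
#if 0
static lid_t cluster_place_thread_example( void )
{
    lid_t lid = cluster_select_local_core();

    printk("\n[DBG] selected core %d (usage %d) in cluster %x\n",
           lid , LOCAL_CLUSTER->core_tbl[lid].usage , local_cxy );

    return lid;
}
#endif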

////////////////////////////////////////////////////////////////////////////////////
//  Process management related functions
////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check PID validity
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is the owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote read to access the owner cluster
    {
        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}
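
// Illustrative sketch, kept out of compilation with #if 0: a caller is expected to
// check the returned extended pointer against XPTR_NULL before using it, and then
// to access the (possibly remote) reference process descriptor with the
// hal_remote_xxx() primitives. The helper name and the printed message are assumptions.
#if 0
static bool_t cluster_pid_is_registered_example( pid_t pid )
{
    xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );

    if( ref_xp == XPTR_NULL )
    {
        printk("\n[DBG] no reference process found for PID %x\n", pid );
        return false;
    }

    return true;
}
#endif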

////////////////////////////////////////////////
error_t cluster_pid_alloc( xptr_t    process_xp,
                           pid_t   * pid )
{
    error_t     error;
    lpid_t      lpid;
    bool_t      found;

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = process_xp;
        pm->pref_nr++;

        // return the PID built from local_cxy and lpid
        *pid = PID( local_cxy , lpid );

        error = 0;
    }
    else
    {
        error = EAGAIN;
    }

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

    return error;

} // end cluster_pid_alloc()

/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // check pid argument
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) ,
    __FUNCTION__ , "illegal PID" );

    // check number of copies
    assert( (pm->copies_nr[lpid] == 0) ,
    __FUNCTION__ , "number of copies must be 0" );

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

} // end cluster_pid_release()
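
// Illustrative sketch, kept out of compilation with #if 0: cluster_pid_alloc() and
// cluster_pid_release() are meant to be called in the owner cluster, around the
// lifetime of a process descriptor. The helper name, the "process" argument and the
// calling context are assumptions; in the real kernel this pairing is done by the
// process creation / destruction code.
#if 0
static error_t cluster_pid_example( process_t * process )
{
    pid_t   pid;
    error_t error;

    // register the local process descriptor in pref_tbl[] and build its PID
    error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid );

    if( error ) return error;       // EAGAIN when pref_tbl[] is full

    // ... the process runs with the PID built by the PID() macro ...

    // release the PID when the process descriptor is destroyed
    cluster_pid_release( pid );

    return 0;
}
#endif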

///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , &irq_state );

    xlist_add_first( XPTR( local_cxy , &pm->local_root ),
                     XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , &irq_state );

    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
}

///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );
}

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );
}
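
// Illustrative sketch, kept out of compilation with #if 0: a process copy is
// normally linked both in the local_list of its cluster and in the copies_list of
// its owner cluster when it is created, and unlinked from both lists when it is
// destroyed. The helper names below are assumptions; the real call sites are in
// the process management code.
#if 0
static void cluster_process_register_example( process_t * process )
{
    cluster_process_local_link( process );      // local list of this cluster
    cluster_process_copies_link( process );     // copies list in the owner cluster
}

static void cluster_process_unregister_example( process_t * process )
{
    cluster_process_copies_unlink( process );
    cluster_process_local_unlink( process );
}
#endif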

////////////////////////////////////////////////////////////////////////////////////////
// TODO It seems that the only thing this kernel thread does at each wake-up is to
// update the DQDT and go back to sleep... Do we really need a dedicated thread? [AG]
//////////////////////////////////////////////////////////////////////////////////////////

#if 0
void * cluster_manager_thread( void * arg )
{
        register struct dqdt_cluster_s * root;
        register struct cluster_s      * root_home;

        register uint32_t                tm_start;
        register uint32_t                tm_end;
        register uint32_t                cpu_id;
        struct cluster_s               * cluster;
        struct thread_s                * this;
        struct event_s                   event;
        struct alarm_info_s              info;
        register uint32_t                cntr;
        register bool_t                  isRootMgr;
        register uint32_t                period;

        cpu_enable_all_irq( NULL );

        cluster   = arg;
        this      = CURRENT_THREAD;
        cpu_id    = cpu_get_id();
        root      = dqdt_root;
        root_home = dqdt_root->home;
        isRootMgr = (cluster == root_home) ? true : false;
        cntr      = 0;
        period    = (isRootMgr) ?
                CONFIG_DQDT_ROOTMGR_PERIOD * MSEC_PER_TICK :
                CONFIG_DQDT_MGR_PERIOD * MSEC_PER_TICK;

        event_set_senderId(&event, this);
        event_set_priority(&event, E_CHR);
        event_set_handler(&event, &manager_alarm_event_handler);

        info.event = &event;
        thread_preempt_disable(CURRENT_THREAD);

        // infinite loop
        while(1)
        {
                tm_start = cpu_time_stamp();
                dqdt_update();
                tm_end   = cpu_time_stamp();

                if(isRootMgr)
                {
                        if((cntr % 10) == 0)
                        {
                                printk(INFO, "INFO: cpu %d, DQDT update ended [ %u - %u ]\n",
                                       cpu_id,
                                       tm_end,
                                       tm_end - tm_start);

                                dqdt_print_summary(root);
                        }
                }

                alarm_wait( &info , period );
                sched_sleep(this);
                cntr ++;
        }

        return NULL;
} // end cluster_manager_thread()

//////////////////////////////////////////
EVENT_HANDLER(manager_alarm_event_handler)
{
        struct thread_s *manager;

        manager = event_get_senderId(event);

        thread_preempt_disable(CURRENT_THREAD);

        //printk(INFO, "%s: cpu %d [%u]\n", __FUNCTION__, cpu_get_id(), cpu_time_stamp());

        sched_wakeup(manager);

        thread_preempt_enable(CURRENT_THREAD);

        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_create_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = cluster->next_key;

        while((key < CLUSTER_TOTAL_KEYS_NR) && (cluster->keys_tbl[key] != NULL))
                key ++;

        if(key < CLUSTER_TOTAL_KEYS_NR)
        {
                ckey->val = key;
                cluster->keys_tbl[key] = (void *) 0x1; // Reserved
                cluster->next_key = key;
                event_set_error(event, 0);
        }
        else
                event_set_error(event, ENOSPC);

        sched_wakeup(sender);
        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_delete_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = ckey->val;

        if(key < cluster->next_key)
                cluster->next_key = key;

        cluster->keys_tbl[key] = NULL;
        event_set_error(event, 0);

        sched_wakeup(sender);
        return 0;
}

#define _CKEY_CREATE  0x0
#define _CKEY_DELETE  0x1

error_t cluster_do_key_op(ckey_t *key, uint32_t op)
{
        struct event_s event;
        struct thread_s *this;
        struct cluster_s *cluster;
        struct cpu_s *cpu;

        this = CURRENT_THREAD;

        event_set_priority(&event, E_FUNC);
        event_set_senderId(&event, this);
        event_set_argument(&event, key);

        if(op == _CKEY_CREATE)
                event_set_handler(&event, cluster_key_create_event_handler);
        else
                event_set_handler(&event, cluster_key_delete_event_handler);

        cluster = current_cluster;
        cpu     = cluster->bscluster->bscpu;
        event_send(&event, &cpu->re_listner);

        sched_sleep(this);

        return event_get_error(&event);
}

error_t cluster_key_create(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_CREATE);
}

error_t cluster_key_delete(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_DELETE);
}

void* cluster_getspecific(ckey_t *key)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        return cluster->keys_tbl[key->val];
}

void  cluster_setspecific(ckey_t *key, void *val)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        cluster->keys_tbl[key->val] = val;
}
#endif