source: trunk/kernel/kern/cluster.c @ 355

Last change on this file since 355 was 296, checked in by alain, 7 years ago

Several modifications in the generic scheduler and in the hal_context to
fix the context switch mechanism.

File size: 17.3 KB
/*
 * cluster.c - Cluster-Manager related operations
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Mohamed Lamine Karaoui (2015)
 *          Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern process_t process_zero;     // allocated in kernel_init.c file



//////////////////////////////////
void cluster_sysfs_register(void)
{
        // TODO
}

/////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t     error;
    lpid_t      lpid;     // local process index
    lid_t       lid;      // local core index

        cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
        cluster->paddr_width     = info->paddr_width;
        cluster->x_width         = info->x_width;
        cluster->y_width         = info->y_width;
        cluster->x_size          = info->x_size;
        cluster->y_size          = info->y_size;
        cluster->io_cxy          = info->io_cxy;

    // initialize cluster local parameters
        cluster->cores_nr        = info->cores_nr;
    cluster->cores_in_kernel = 0;

    // initialize the lock protecting the embedded kcm allocator
        spinlock_init( &cluster->kcm_lock );

    cluster_dmsg("\n[INFO] %s : enters in cluster %x\n",
                 __FUNCTION__ , local_cxy );

    // initialize the DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width );
    cluster->threads_var = 0;
    cluster->pages_var   = 0;

    // initialize the embedded PPM
        error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

    cluster_dmsg("\n[INFO] %s : PPM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize the embedded KHM
        khm_init( &cluster->khm );

    cluster_dmsg("\n[INFO] %s : KHM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize the embedded KCM
        kcm_init( &cluster->kcm , KMEM_KCM );

    cluster_dmsg("\n[INFO] %s : KCM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize all core descriptors
        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
        {
                core_init( &cluster->core_tbl[lid],    // target core descriptor
                       lid,                        // local core index
                       info->core[lid].gid );      // gid from boot_info_t
        }

    cluster_dmsg("\n[INFO] %s : cores initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize the RPC fifo
        rpc_fifo_init( &cluster->rpc_fifo );
    cluster->rpc_threads = 0;

    cluster_dmsg("\n[INFO] %s : RPC fifo initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    // initialize pref_tbl[] in process manager
        spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize the local_list in process manager
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialize the copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
            remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

    cluster_dmsg("\n[INFO] %s : Process Manager initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_get_cycles() );

    hal_fence();

        return 0;
} // end cluster_init()

////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1<<y_width)-1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}
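
// Example (illustrative sketch): the decoding above implies that a cluster
// identifier is built as cxy = (x << y_width) | y. For instance, with
// y_width == 4, cxy == 0x25 corresponds to x == 0x2 and y == 0x5.
// A hypothetical helper (not defined in this file) doing the inverse
// operation could look like:
//
//     static inline cxy_t cluster_make_cxy( uint32_t x , uint32_t y )
//     {
//         return (cxy_t)( (x << LOCAL_CLUSTER->y_width) | y );
//     }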

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void cluster_core_kernel_enter()
{
    cluster_t * cluster = LOCAL_CLUSTER;
        hal_atomic_add( &cluster->cores_in_kernel , 1 );
}

///////////////////////////////
void cluster_core_kernel_exit()
{
    cluster_t * cluster = LOCAL_CLUSTER;
        hal_atomic_add( &cluster->cores_in_kernel , -1 );
}

/////////////////////////////////
lid_t cluster_select_local_core()
{
    uint32_t min = 100;
    lid_t    sel = 0;
    lid_t    lid;

    cluster_t * cluster = LOCAL_CLUSTER;

    // scan the local cores and select the one with the lowest usage
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        if( cluster->core_tbl[lid].usage < min )
        {
            min = cluster->core_tbl[lid].usage;
            sel = lid;
        }
    }
    return sel;
}
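
// Usage sketch (illustrative only): a caller in a local thread-placement path
// can use the selected lid to reach the corresponding local core descriptor:
//
//     lid_t    lid  = cluster_select_local_core();
//     core_t * core = &LOCAL_CLUSTER->core_tbl[lid];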

////////////////////////////////////////////////////////////////////////////////////
//  Process management related functions
////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check PID validity
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is the owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote read to access the owner cluster
    {
        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}
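
// Usage sketch (illustrative): a caller can split the returned extended pointer
// with the GET_PTR() macro used elsewhere in this file and its cluster
// counterpart (assumed here to be GET_CXY()), then read remote fields with the
// hal_remote_* accessors:
//
//     xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
//     if( ref_xp != XPTR_NULL )
//     {
//         cxy_t       ref_cxy = GET_CXY( ref_xp );
//         process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
//         pid_t       ref_pid = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) );
//     }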

////////////////////////////////////////////////
error_t cluster_pid_alloc( xptr_t    process_xp,
                           pid_t   * pid )
{
    error_t     error;
    lpid_t      lpid;
    bool_t      found;

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search a free slot in pref_tbl[]
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = process_xp;
        pm->pref_nr++;

        // return the global PID to the caller
        *pid = PID( local_cxy , lpid );

        error = 0;
    }
    else
    {
        error = EAGAIN;
    }

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

    return error;

} // end cluster_pid_alloc()
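
// Usage sketch (illustrative, error handling elided): in the owner cluster, a
// process creation path would typically allocate the PID, register it in the
// process descriptor, and release it at process destruction:
//
//     pid_t pid;
//     if( cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ) == 0 )
//     {
//         process->pid = pid;
//         // ... later, at process destruction:
//         cluster_pid_release( pid );
//     }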

/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    // check pid argument
    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
    {
        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
        hal_core_sleep();
    }

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

} // end cluster_pid_release()

///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    // scan the list of process descriptors attached to the local cluster
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );

    xlist_add_first( XPTR( local_cxy , &pm->local_root ),
                     XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );

    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
}

///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}
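
// Iteration sketch (illustrative): the copies_list[lpid] rooted in the owner
// cluster can be scanned from any cluster with the same XLIST_FOREACH pattern
// used by cluster_get_local_process_from_pid() above:
//
//     xptr_t root_xp = XPTR( owner_cxy , &pm->copies_root[lpid] );
//     xptr_t iter_xp;
//     XLIST_FOREACH( root_xp , iter_xp )
//     {
//         xptr_t      copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
//         process_t * copy_ptr = (process_t *)GET_PTR( copy_xp );
//         // ... act on one process copy ...
//     }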

////////////////////////////////////////////////////////////////////////////////////////
// TODO It seems that the only thing this kernel thread does at each wake-up is
// to update the DQDT and go back to sleep... Do we really need a thread for that? [AG]
////////////////////////////////////////////////////////////////////////////////////////

#if 0
void * cluster_manager_thread( void * arg )
{
        register struct dqdt_cluster_s * root;
        register struct cluster_s      * root_home;

        register uint32_t                tm_start;
        register uint32_t                tm_end;
        register uint32_t                cpu_id;
        struct cluster_s               * cluster;
        struct thread_s                * this;
        struct event_s                   event;
        struct alarm_info_s              info;
        register uint32_t                cntr;
        register bool_t                  isRootMgr;
        register uint32_t                period;

        cpu_enable_all_irq( NULL );

        cluster   = arg;
        this      = CURRENT_THREAD;
        cpu_id    = cpu_get_id();
        root      = dqdt_root;
        root_home = dqdt_root->home;
        isRootMgr = (cluster == root_home) ? true : false;
        cntr      = 0;
        period    = (isRootMgr) ?
                CONFIG_DQDT_ROOTMGR_PERIOD * MSEC_PER_TICK :
                CONFIG_DQDT_MGR_PERIOD * MSEC_PER_TICK;

        event_set_senderId(&event, this);
        event_set_priority(&event, E_CHR);
        event_set_handler(&event, &manager_alarm_event_handler);

        info.event = &event;
        thread_preempt_disable(CURRENT_THREAD);

    // infinite loop
        while(1)
        {
                tm_start = cpu_time_stamp();
                dqdt_update();
                tm_end   = cpu_time_stamp();

                if(isRootMgr)
                {
                        if((cntr % 10) == 0)
                        {
                                printk(INFO, "INFO: cpu %d, DQDT update ended [ %u - %u ]\n",
                                       cpu_id,
                                       tm_end,
                                       tm_end - tm_start);

                                dqdt_print_summary(root);
                        }
                }

                alarm_wait( &info , period );
                sched_sleep(this);
                cntr ++;
        }

        return NULL;
} // end cluster_manager_thread()

//////////////////////////////////////////
EVENT_HANDLER(manager_alarm_event_handler)
{
        struct thread_s *manager;

        manager = event_get_senderId(event);

        thread_preempt_disable(CURRENT_THREAD);

        //printk(INFO, "%s: cpu %d [%u]\n", __FUNCTION__, cpu_get_id(), cpu_time_stamp());

        sched_wakeup(manager);

        thread_preempt_enable(CURRENT_THREAD);

        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_create_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = cluster->next_key;

        while((key < CLUSTER_TOTAL_KEYS_NR) && (cluster->keys_tbl[key] != NULL))
                key ++;

        if(key < CLUSTER_TOTAL_KEYS_NR)
        {
                ckey->val = key;
                cluster->keys_tbl[key] = (void *) 0x1; // Reserved
                cluster->next_key = key;
                event_set_error(event, 0);
        }
        else
                event_set_error(event, ENOSPC);

        sched_wakeup(sender);
        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_delete_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = ckey->val;

        if(key < cluster->next_key)
                cluster->next_key = key;

        cluster->keys_tbl[key] = NULL;
        event_set_error(event, 0);

        sched_wakeup(sender);
        return 0;
}

#define _CKEY_CREATE  0x0
#define _CKEY_DELETE  0x1

error_t cluster_do_key_op(ckey_t *key, uint32_t op)
{
        struct event_s event;
        struct thread_s *this;
        struct cluster_s *cluster;
        struct cpu_s *cpu;

        this = CURRENT_THREAD;

        event_set_priority(&event, E_FUNC);
        event_set_senderId(&event, this);
        event_set_argument(&event, key);

        if(op == _CKEY_CREATE)
                event_set_handler(&event, cluster_key_create_event_handler);
        else
                event_set_handler(&event, cluster_key_delete_event_handler);

        cluster = current_cluster;
        cpu     = cluster->bscluster->bscpu;
        event_send(&event, &cpu->re_listner);

        sched_sleep(this);

        return event_get_error(&event);
}

error_t cluster_key_create(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_CREATE);
}

error_t cluster_key_delete(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_DELETE);
}

void* cluster_getspecific(ckey_t *key)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        return cluster->keys_tbl[key->val];
}

void  cluster_setspecific(ckey_t *key, void *val)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        cluster->keys_tbl[key->val] = val;
}
#endif