Changeset 527


Ignore:
Timestamp:
Mar 27, 2015, 11:33:53 AM (9 years ago)
Author:
alain
Message:

1) Introducing dynamic routing of external IRQs when the platform
contains an IOPIC component.
2) Improving parallel boot for elf files: The FAT access is done by
P[0,0,0] only, but the code replication in all clusters is done
in parallel by all P[x,y,0] processors.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • soft/giet_vm/giet_boot/boot.c

    r524 r527  
    1 ///////////////////////////////////////////////////////////////////////////////////////
     1///////////////////////////////////////////////////////////////////////////////////
    22// File     : boot.c
    33// Date     : 01/11/2013
    44// Author   : alain greiner
    55// Copyright (c) UPMC-LIP6
    6 ///////////////////////////////////////////////////////////////////////////////////////
     6///////////////////////////////////////////////////////////////////////////////////
    77// The boot.c file contains the bootloader for the GIET-VM static OS. 
    88//
    99// This code has been written for the MIPS32 processor.
    1010// The virtual adresses are on 32 bits and use the (unsigned int) type. The
    11 // physicals addresses can have up to 40 bits, and use the  (unsigned long long) type.
     11// physicals addresses can have up to 40 bits, and use type (unsigned long long).
    1212// It natively supports clusterised shared memory multi-processors architectures,
    1313// where each processor is identified by a composite index [x,y,p],
     
    4343//      of the software objects (vsegs) on the physical memory banks (psegs).
    4444//
    45 //    The max number of vspaces (GIET_NB_VSPACE_MAX) is a configuration parameter,
    46 //    and - for each application - the tasks are statically allocateded on procesors.
     45//    The max number of vspaces (GIET_NB_VSPACE_MAX) is a configuration parameter.
     46//    For each application, the tasks are statically allocateded on processors.
    4747//    The page table are statically build in the boot phase, and they do not
    4848//    change during execution.
    4949//    The GIET_VM uses both small pages (4 Kbytes), and big pages (2 Mbytes).
    5050//
    51 //    Each page table (one page table per virtual space) is monolithic, and contains
    52 //    one PT1 (8 Kbytes) and a variable number of PT2s (4 Kbytes each). For each vspace,
    53 //    the number of PT2s is defined by the size of the PTAB vseg in the mapping.
    54 //    The PT1 is indexed by the ix1 field (11 bits) of the VPN. Each entry is 32 bits.
    55 //    A PT2 is indexed the ix2 field (9 bits) of the VPN. Each entry is a double word.
     51//    Each page table (one page table per virtual space) is monolithic, and
     52//    contains one PT1 (8 Kbytes) and a variable number of PT2s (4 Kbytes each).
     53//    For each vspace, the number of PT2s is defined by the size of the PTAB vseg
     54//    in the mapping.
     55//    The PT1 is indexed by the ix1 field (11 bits) of the VPN. An entry is 32 bits.
     56//    A PT2 is indexed the ix2 field (9 bits) of the VPN. An entry is 64 bits.
    5657//    The first word contains the flags, the second word contains the PPN.
    5758//    The page tables are distributed/replicated in all clusters.
    58 ///////////////////////////////////////////////////////////////////////////////////////
     59///////////////////////////////////////////////////////////////////////////////////
    5960// Implementation Notes:
    6061//
    61 // 1) The cluster_id variable is a linear index in the mapping_info array of clusters.
     62// 1) The cluster_id variable is a linear index in the mapping_info array.
    6263//    The cluster_xy variable is the tological index = x << Y_WIDTH + y
    6364//
    6465// 2) We set the _tty0_boot_mode variable to force the _printf() function to use
    6566//    the tty0_spin_lock for exclusive access to TTY0.
    66 ///////////////////////////////////////////////////////////////////////////////////////
     67///////////////////////////////////////////////////////////////////////////////////
    6768
    6869#include <giet_config.h>
     
    7576#include <bdv_driver.h>
    7677#include <hba_driver.h>
    77 #include <dma_driver.h>
     78#include <sdc_driver.h>
    7879#include <cma_driver.h>
    7980#include <nic_driver.h>
    80 #include <ioc_driver.h>
    8181#include <iob_driver.h>
    8282#include <pic_driver.h>
    8383#include <mwr_driver.h>
     84#include <dma_driver.h>
    8485#include <ctx_handler.h>
    8586#include <irq_handler.h>
     
    131132////////////////////////////////////////////////////////////////////////////
    132133
    133 extern void boot_entry();
    134 
    135134// FAT internal representation for boot code 
    136135__attribute__((section(".kdata")))
    137 fat32_fs_t          fat   __attribute__((aligned(512)));
     136fat32_fs_t  _fat   __attribute__((aligned(512)));
    138137
    139138// Temporaty buffer used to load one complete .elf file 
    140139__attribute__((section(".kdata")))
    141 char                boot_elf_buffer[GIET_ELF_BUFFER_SIZE] __attribute__((aligned(512)));
     140char  _boot_elf_buffer[GIET_ELF_BUFFER_SIZE] __attribute__((aligned(512)));
    142141
    143142// Physical memory allocators array (one per cluster)
    144143__attribute__((section(".kdata")))
    145 pmem_alloc_t        boot_pmem_alloc[X_SIZE][Y_SIZE];
     144pmem_alloc_t  boot_pmem_alloc[X_SIZE][Y_SIZE];
    146145
    147146// Distributed kernel heap (one per cluster)
     
    153152static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];
    154153
    155 // Page tables virtual base addresses array (one per vspace)
     154// Page tables virtual base addresses (one per vspace and per cluster)
    156155__attribute__((section(".kdata")))
    157156unsigned int        _ptabs_vaddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
     
    169168unsigned int        _ptabs_max_pt2;
    170169
    171 // WTI channel allocator (one per cluster)
    172 __attribute__((section(".kdata")))
    173 unsigned int        _wti_channel_alloc[X_SIZE][Y_SIZE];
    174 
    175170// boot code uses a spin lock to protect TTY0
    176171__attribute__((section(".kdata")))
     
    184179simple_barrier_t    _barrier_all_clusters;
    185180
     181//////////////////////////////////////////////////////////////////////////////
     182//        Extern variables
     183//////////////////////////////////////////////////////////////////////////////
     184
    186185// this variable is defined in the tty0.c file
    187186extern spin_lock_t  _tty0_spin_lock;
     187
     188extern void boot_entry();
    188189
    189190//////////////////////////////////////////////////////////////////////////////
     
    713714
    714715///////////////////////////////////////////////////////////////////////////////
    715 // Processor P[x][y][0] computes physical base address for all globals vsegs,
    716 // using the local Page Table, to check page tables initialisation.
    717 ///////////////////////////////////////////////////////////////////////////////
    718 void boot_ptab_check( unsigned int x,
    719                       unsigned int y )
    720 {
    721     mapping_header_t*   header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    722     mapping_vseg_t*     vseg   = _get_vseg_base(header);
    723     page_table_t*       ptab   = (page_table_t*)_ptabs_vaddr[0][x][y];
    724 
    725     unsigned int vseg_id;
    726     for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
    727     {
    728         unsigned int  vpn   = vseg[vseg_id].vbase >> 12;
    729         unsigned int  ppn   = 0; 
    730         unsigned int  flags = 0;
    731         _v2p_translate( ptab , vpn , &ppn , &flags );
    732         _printf("@@@ P[%d,%d,0] access vseg %s : vpn = %x / ppn = %x\n",
    733                 x , y , vseg[vseg_id].name , vpn , ppn ); 
    734     }
    735 }
    736 
    737 ///////////////////////////////////////////////////////////////////////////////
    738716// This function is executed by  processor[x][y][0] in each cluster
    739717// containing at least one processor.
     
    936914} // end boot_get_sched_vaddr()
    937915
     916#if BOOT_DEBUG_SCHED
     917/////////////////////////////////////////////////////////////////////////////
     918// This debug function should be executed by only one procesor.
     919// It loops on all processors in all clusters to display
     920// the HWI / PTI / WTI interrupt vectors for each processor.
     921/////////////////////////////////////////////////////////////////////////////
     922void boot_sched_irq_display()
     923{
     924    unsigned int         cx;
     925    unsigned int         cy;
     926    unsigned int         lpid;
     927    unsigned int         slot;
     928    unsigned int         entry;
     929
     930    mapping_header_t*    header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     931    mapping_cluster_t*   cluster = _get_cluster_base(header);
     932
     933    static_scheduler_t*  psched;
     934
     935    for ( cx = 0 ; cx < X_SIZE ; cx++ )
     936    {
     937        for ( cy = 0 ; cy < Y_SIZE ; cy++ )
     938        {
     939            unsigned int cluster_id = (cx * Y_SIZE) + cy;
     940            unsigned int nprocs = cluster[cluster_id].procs;
     941
     942            for ( lpid = 0 ; lpid < nprocs ; lpid++ )
     943            {
     944                psched = _schedulers[cx][cy][lpid];
     945       
     946                _printf("\n[BOOT] scheduler for proc[%d,%d,%d]\n",
     947                        cx , cy , lpid );
     948
     949                for ( slot = 0 ; slot < 32 ; slot++ )
     950                {
     951                    entry = psched->hwi_vector[slot];
     952                    if ( (entry & 0xFFFF) != 0 )     
     953                    _printf(" - HWI %d / isrtype = %d / channel = %d\n",
     954                            slot , (entry & 0xFFFF) , ((entry >> 16) & 0x7FFF) );
     955                }
     956                for ( slot = 0 ; slot < 32 ; slot++ )
     957                {
     958                    entry = psched->wti_vector[slot];
     959                    if ( (entry & 0xFFFF) != 0 )     
     960                    _printf(" - WTI %d / isrtype = %d / channel = %d\n",
     961                            slot , (entry & 0xFFFF) , ((entry >> 16) & 0x7FFF) );
     962                }
     963                for ( slot = 0 ; slot < 32 ; slot++ )
     964                {
     965                    entry = psched->pti_vector[slot];
     966                    if ( (entry & 0xFFFF) != 0 )     
     967                    _printf(" - PTI %d / isrtype = %d / channel = %d\n",
     968                            slot , (entry & 0xFFFF) , ((entry >> 16) & 0x7FFF) );
     969                }
     970            }
     971        }
     972    }
     973}  // end boot_sched_irq_display()
     974#endif
     975
     976
    938977////////////////////////////////////////////////////////////////////////////////////
    939978// This function is executed in parallel by all processors P[x][y][0].
    940979// It initialises all schedulers in cluster [x][y]. The MMU must be activated.
    941980// It is split in two phases separated by a synchronisation barrier.
    942 // - In Step 1, it initialises the _schedulers[x][y][l] pointers array,
    943 //              the idle_task context and the HWI / PTI vectors.
     981// - In Step 1, it initialises the _schedulers[x][y][l] pointers array, the
     982//              idle_task context, the  HWI / PTI / WTI interrupt vectors,
     983//              and the CU HWI / PTI / WTI masks.
    944984// - In Step 2, it scan all tasks in all vspaces to complete the tasks contexts,
    945985//              initialisation as specified in the mapping_info data structure,
     
    9671007    static_scheduler_t*  psched;               // pointer on processor scheduler
    9681008
    969     unsigned int cluster_id = x * Y_SIZE + y; 
     1009    unsigned int cluster_id = (x * Y_SIZE) + y;
     1010    unsigned int cluster_xy = (x << Y_WIDTH) + y; 
    9701011    unsigned int nprocs = cluster[cluster_id].procs;
    9711012    unsigned int lpid;                       
    9721013   
    973     /////////////////////////////////////////////////////////////////////////
    974     // Step 1 : initialize the schedulers[] array of pointers,
    975     //          the idle task context and the HWI and PTI interrupt vectors.
    976     //          The WTI interrupt vector entries corresponding to interrupts
    977     //          generated by the PIC component are handled later.
     1014    if ( nprocs > 8 )
     1015    {
     1016        _printf("\n[BOOT ERROR] cluster[%d,%d] contains more than 8 procs\n", x, y );
     1017        _exit();
     1018    }
     1019
     1020    ////////////////////////////////////////////////////////////////////////////////
     1021    // Step 1 : - initialize the schedulers[] array of pointers,
     1022    //          - initialize the "tasks" and "current variables.
     1023    //          - initialise the idle task context.
     1024    //          - initialize the HWI, PTI and WTI interrupt vectors.
     1025    //          - initialize the XCU masks for HWI / WTI / PTI interrupts.
     1026    //
     1027    // The general policy for interrupts routing is the following:         
     1028    //          - the local HWI are statically allocatedted to local processors.
     1029    //          - the nprocs first PTI are allocated for TICK (one per processor).
     1030    //          - we allocate 4 WTI per processor: the first one is for WAKUP,
     1031    //            the 3 others WTI are used for external interrupts (from PIC),
     1032    //            and are dynamically allocated by kernel on demand.
     1033    ///////////////////////////////////////////////////////////////////////////////
    9781034
    9791035    // get scheduler array virtual base address in cluster[x,y]
     
    9821038    if ( sched_length < (nprocs<<13) ) // 8 Kbytes per scheduler
    9831039    {
    984         _printf("\n[BOOT ERROR] Sched segment too small in cluster[%d,%d]\n", x, y );
     1040        _printf("\n[BOOT ERROR] Sched segment too small in cluster[%d,%d]\n",
     1041                x, y );
    9851042        _exit();
    9861043    }
     
    9971054        psched->current = IDLE_TASK_INDEX;
    9981055
    999         // default values for HWI / PTI / SWI vectors (valid bit = 0)
     1056        // set default values for HWI / PTI / SWI vectors (valid bit = 0)
    10001057        unsigned int slot;
    10011058        for (slot = 0; slot < 32; slot++)
     
    10051062            psched->wti_vector[slot] = 0;
    10061063        }
    1007 
    1008         // WTI[lpid] <= ISR_WAKUP / PTI[lpid] <= ISR_TICK
    1009         psched->wti_vector[lpid] = ISR_WAKUP | 0x80000000;
    1010         psched->pti_vector[lpid] = ISR_TICK  | 0x80000000;
    10111064
    10121065        // initializes the idle_task context:
     
    10261079    }
    10271080
    1028     // scan local peripherals to get local XCU
     1081    // HWI / PTI / WTI masks (up to 8 local processors)
     1082    unsigned int hwi_mask[8] = {0,0,0,0,0,0,0,0};
     1083    unsigned int pti_mask[8] = {0,0,0,0,0,0,0,0};
     1084    unsigned int wti_mask[8] = {0,0,0,0,0,0,0,0};
     1085
     1086    // scan local peripherals to get and check local XCU
    10291087    mapping_periph_t*  xcu = NULL;
    1030 
    1031     for ( periph_id = cluster[cluster_id].periph_offset ;
    1032           periph_id < cluster[cluster_id].periph_offset + cluster[cluster_id].periphs;
    1033           periph_id++ )
     1088    unsigned int       min = cluster[cluster_id].periph_offset ;
     1089    unsigned int       max = min + cluster[cluster_id].periphs ;
     1090
     1091    for ( periph_id = min ; periph_id < max ; periph_id++ )
    10341092    {
    10351093        if( periph[periph_id].type == PERIPH_TYPE_XCU )
     
    10371095            xcu = &periph[periph_id];
    10381096
    1039             if ( xcu->arg0 < (nprocs * header->irq_per_proc) )
    1040             {
    1041                 _printf("\n[BOOT ERROR] Not enough inputs for XCU[%d,%d]\n", x, y );
     1097            // check nb_hwi_in
     1098            if ( xcu->arg0 < xcu->irqs )
     1099            {
     1100                _printf("\n[BOOT ERROR] Not enough HWI inputs for XCU[%d,%d]\n",
     1101                         x, y );
     1102                _exit();
     1103            }
     1104            // check nb_pti_in
     1105            if ( xcu->arg2 < nprocs )
     1106            {
     1107                _printf("\n[BOOT ERROR] Not enough PTI inputs for XCU[%d,%d]\n",
     1108                         x, y );
     1109                _exit();
     1110            }
     1111            // check nb_wti_in
     1112            if ( xcu->arg1 < (4 * nprocs) )
     1113            {
     1114                _printf("\n[BOOT ERROR] Not enough WTI inputs for XCU[%d,%d]\n",
     1115                        x, y );
     1116                _exit();
     1117            }
     1118            // check nb_irq_out
     1119            if ( xcu->arg3 < (nprocs * header->irq_per_proc) )
     1120            {
     1121                _printf("\n[BOOT ERROR] Not enough outputs for XCU[%d,%d]\n",
     1122                        x, y );
    10421123                _exit();
    10431124            }
     
    10511132    }
    10521133
    1053     // scan HWIs connected to local XCU
     1134    // HWI interrupt vector definition
     1135    // scan HWI connected to local XCU
    10541136    // for round-robin allocation to local processors
    10551137    lpid = 0;
     
    10691151        }
    10701152
    1071         _schedulers[x][y][lpid]->hwi_vector[srcid] = isr | channel | 0x80000000;
     1153        // register entry in HWI interrupt vector
     1154        _schedulers[x][y][lpid]->hwi_vector[srcid] = isr | channel;
     1155
     1156        // update XCU HWI mask for P[x,y,lpid]
     1157        hwi_mask[lpid] = hwi_mask[lpid] | (1<<srcid);
    10721158
    10731159        lpid = (lpid + 1) % nprocs;
    10741160    } // end for irqs
     1161
     1162    // PTI interrupt vector definition
     1163    // one PTI for TICK per processor
     1164    for ( lpid = 0 ; lpid < nprocs ; lpid++ )
     1165    {
     1166        // register entry in PTI interrupt vector
     1167        _schedulers[x][y][lpid]->pti_vector[lpid] = ISR_TICK;
     1168
     1169        // update XCU PTI mask for P[x,y,lpid]
     1170        pti_mask[lpid] = pti_mask[lpid] | (1<<lpid);
     1171    }
     1172
     1173    // WTI interrupt vector definition
     1174    // 4 WTI per processor, first for WAKUP
     1175    for ( lpid = 0 ; lpid < nprocs ; lpid++ )
     1176    {
     1177        // register WAKUP ISR in WTI interrupt vector
     1178        _schedulers[x][y][lpid]->wti_vector[4*lpid] = ISR_WAKUP;
     1179
     1180        // update XCU WTI mask for P[x,y,lpid] (4 entries per proc)
     1181        wti_mask[lpid] = wti_mask[lpid] | (0x1<<(lpid                 ));
     1182        wti_mask[lpid] = wti_mask[lpid] | (0x1<<(lpid + NB_PROCS_MAX  ));
     1183        wti_mask[lpid] = wti_mask[lpid] | (0x1<<(lpid + 2*NB_PROCS_MAX));
     1184        wti_mask[lpid] = wti_mask[lpid] | (0x1<<(lpid + 3*NB_PROCS_MAX));
     1185    }
     1186
     1187    // set the XCU masks for HWI / WTI / PTI interrupts
     1188    for ( lpid = 0 ; lpid < nprocs ; lpid++ )
     1189    {
     1190        unsigned int channel = lpid * IRQ_PER_PROCESSOR;
     1191
     1192        _xcu_set_mask( cluster_xy, channel, hwi_mask[lpid], IRQ_TYPE_HWI );
     1193        _xcu_set_mask( cluster_xy, channel, wti_mask[lpid], IRQ_TYPE_WTI );
     1194        _xcu_set_mask( cluster_xy, channel, pti_mask[lpid], IRQ_TYPE_PTI );
     1195    }
    10751196
    10761197    //////////////////////////////////////////////
     
    10781199    //////////////////////////////////////////////
    10791200
    1080     ////////////////////////////////////////////////////////////////////////
     1201#if BOOT_DEBUG_SCHED
     1202if ( cluster_xy == 0 ) boot_sched_irq_display();
     1203_simple_barrier_wait( &_barrier_all_clusters );
     1204#endif
     1205
     1206    ///////////////////////////////////////////////////////////////////////////////
    10811207    // Step 2 : Initialise the tasks context. The context of task placed
    10821208    //          on  processor P must be stored in the scheduler of P.
     
    10841210    //          on the local processors. We complete the scheduler when the
    10851211    //          required placement fit one local processor.
     1212    ///////////////////////////////////////////////////////////////////////////////
    10861213
    10871214    for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
     
    11111238
    11121239            // ctx_epc : Get the virtual address of the memory location containing
    1113             // the task entry point : the start_vector is stored by GCC in the seg_data
    1114             // segment and we must wait the .elf loading to get the entry point value...
     1240            // the task entry point : the start_vector is stored by GCC in the
     1241            // seg_data segment, and we must wait the .elf loading to get
     1242            // the entry point value...
    11151243            vseg_id = vspace[vspace_id].start_vseg_id;     
    11161244            unsigned int ctx_epc = vseg[vseg_id].vbase + (task[task_id].startid)*4;
     
    11891317
    11901318
    1191 /////////////////////////////////////////////////////////////////////////////
    1192 // This function loops on all processors in all clusters to display
    1193 // the interrupt vectors for each processor.
    1194 /////////////////////////////////////////////////////////////////////////////
    1195 void boot_sched_irq_display()
    1196 {
    1197     unsigned int         cx;
    1198     unsigned int         cy;
    1199     unsigned int         lpid;
    1200     unsigned int         slot;
    1201     unsigned int         entry;
    1202 
    1203     mapping_header_t*    header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    1204     mapping_cluster_t*   cluster = _get_cluster_base(header);
    1205 
    1206     static_scheduler_t*  psched;
    1207 
    1208     for ( cx = 0 ; cx < X_SIZE ; cx++ )
    1209     {
    1210         for ( cy = 0 ; cy < Y_SIZE ; cy++ )
    1211         {
    1212             unsigned int cluster_id = (cx * Y_SIZE) + cy;
    1213             unsigned int nprocs = cluster[cluster_id].procs;
    1214 
    1215             for ( lpid = 0 ; lpid < nprocs ; lpid++ )
    1216             {
    1217                 psched = _schedulers[cx][cy][lpid];
    1218        
    1219                 _printf("\n[BOOT] scheduler for proc[%d,%d,%d] : ntasks = %d\n",
    1220                         cx , cy , lpid , psched->tasks );
    1221 
    1222                 for ( slot = 0 ; slot < 32 ; slot++ )
    1223                 {
    1224                     entry = psched->hwi_vector[slot];
    1225                     if ( entry & 0x80000000 )
    1226                     _printf(" - HWI %d / isrtype = %d / channel = %d\n",
    1227                             slot , (entry & 0xFFFF) , ((entry >> 16) & 0x7FFF) );
    1228                 }
    1229                 for ( slot = 0 ; slot < 32 ; slot++ )
    1230                 {
    1231                     entry = psched->wti_vector[slot];
    1232                     if ( entry & 0x80000000 )
    1233                     _printf(" - WTI %d / isrtype = %d / channel = %d\n",
    1234                             slot , (entry & 0xFFFF) , ((entry >> 16) & 0x7FFF) );
    1235                 }
    1236                 for ( slot = 0 ; slot < 32 ; slot++ )
    1237                 {
    1238                     entry = psched->pti_vector[slot];
    1239                     if ( entry & 0x80000000 )
    1240                     _printf(" - PTI %d / isrtype = %d / channel = %d\n",
    1241                             slot , (entry & 0xFFFF) , ((entry >> 16) & 0x7FFF) );
    1242                 }
    1243             }
    1244         }
    1245     }
    1246 }  // end boot_sched_display()
    1247 
    1248 
    1249 /////////////////////////////////////////////////////////////////////////////
    1250 // This function complete the schedulers initialisation when the platform
    1251 // contains a PIC component in the IO cluster.
    1252 // It is executed by P[0][0][0] only.
    1253 // It scan HWIs connected to PIC for Round Robin allocation to processors,
    1254 // as WTI. It allocates one WTI per processor, starting from P[0,0,0],
    1255 // and increments (cluster_id, lpid) as required.
    1256 /////////////////////////////////////////////////////////////////////////////
    1257 void boot_pic_wti_init()
    1258 {
    1259     mapping_header_t*    header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    1260     mapping_cluster_t*   cluster = _get_cluster_base(header);
    1261     mapping_periph_t*    periph  = _get_periph_base(header);
    1262     mapping_irq_t*       irq     = _get_irq_base(header);
    1263 
    1264     unsigned int         periph_id;   // peripheral index in mapping_info
    1265     unsigned int         irq_id;      // irq index in mapping_info
    1266 
    1267     // get cluster_io index in mapping
    1268     unsigned int         x_io       = header->x_io; 
    1269     unsigned int         y_io       = header->y_io; 
    1270     unsigned int         cluster_io = (x_io * Y_SIZE) + y_io;
    1271    
    1272     // scan peripherals in cluster_io to find PIC
    1273     mapping_periph_t*    pic = NULL;
    1274 
    1275     for ( periph_id = cluster[cluster_io].periph_offset ;
    1276           periph_id < cluster[cluster_io].periph_offset + cluster[cluster_io].periphs;
    1277           periph_id++ )
    1278     {
    1279         if ( periph[periph_id].type == PERIPH_TYPE_PIC )
    1280         {
    1281             pic = &periph[periph_id];
    1282             break;
    1283         }
    1284     }
    1285 
    1286     if ( pic == NULL )  return;
    1287 
    1288     // initialize WTI channel allocators in all clusters
    1289     unsigned int x;
    1290     unsigned int y;
    1291     for ( x = 0 ; x < X_SIZE ; x++ )
    1292     {
    1293         for ( y = 0 ; y < Y_SIZE ; y++ )
    1294         {
    1295             _wti_channel_alloc[x][y] = NB_PROCS_MAX;
    1296         }
    1297     }
    1298 
    1299     // scan IRQS defined in PIC
    1300     unsigned int  cluster_id = 0;
    1301     unsigned int  lpid       = 0;
    1302     unsigned int  cx         = cluster[cluster_id].x;
    1303     unsigned int  cy         = cluster[cluster_id].y;
    1304 
    1305     for ( irq_id = pic->irq_offset ;
    1306           irq_id < pic->irq_offset + pic->irqs ;
    1307           irq_id++ )
    1308     {
    1309         // compute next values for cluster_id, lpid, cx, cy
    1310         // if no more WTI allocatable in current cluster
    1311         unsigned int overflow = 0;
    1312 
    1313         while ( (lpid >= cluster[cluster_id].procs) ||
    1314                 (_wti_channel_alloc[cx][cy] >= 32) )
    1315         {
    1316             cluster_id = (cluster_id + 1) % (X_SIZE*Y_SIZE);
    1317             cx         = cluster[cluster_id].x;
    1318             cy         = cluster[cluster_id].y;
    1319             lpid       = 0;
    1320 
    1321             overflow++;
    1322 
    1323             if ( overflow > 1024 )
    1324             {
    1325                 _printf("\n[BOOT ERROR] Not enough processors for external IRQs\n");
    1326                 _exit();
    1327             }
    1328         }
    1329         // allocate a WTI to processor defined by (cluster_id,lpid)
    1330         unsigned int type    = irq[irq_id].srctype;
    1331         unsigned int srcid   = irq[irq_id].srcid;
    1332         unsigned int isr     = irq[irq_id].isr & 0xFFFF;
    1333         unsigned int channel = irq[irq_id].channel << 16;
    1334 
    1335         if ( (type != IRQ_TYPE_HWI) || (srcid > 31) )
    1336         {
    1337             _printf("\n[BOOT ERROR] in boot_pic_wti_init() Bad IRQ type\n");
    1338             _exit();
    1339         }
    1340 
    1341         // get scheduler address for selected processor
    1342         static_scheduler_t* psched = _schedulers[cx][cy][lpid];
    1343 
    1344         // update WTI vector for selected processor
    1345         unsigned int index            = _wti_channel_alloc[cx][cy];
    1346         psched->wti_vector[index]     = isr | channel | 0x80000000;
    1347 
    1348         // update IRQ fields in mapping for PIC initialisation
    1349         irq[irq_id].dest_id = index;
    1350         irq[irq_id].dest_xy = (cx << Y_WIDTH) + cy;
    1351 
    1352         // update pointers
    1353         _wti_channel_alloc[cx][cy] = index + 1;
    1354         lpid                       = lpid + 1;
    1355 
    1356     }  // end for IRQs
    1357 
    1358 #if BOOT_DEBUG_SCHED
    1359 boot_sched_irq_display();
    1360 #endif
    1361 
    1362 } // end boot_pic_wti_init()
    1363                
     1319
    13641320//////////////////////////////////////////////////////////////////////////////////
    13651321// This function loads the map.bin file from block device.
     
    13681324{
    13691325    // open file "map.bin"
    1370     int fd_id = _fat_open( IOC_BOOT_MODE,
    1371                            "map.bin",
    1372                            0 );         // no creation
     1326    int fd_id = _fat_open( 0, "map.bin", 0 );    // no IRQ / no creation
     1327
    13731328    if ( fd_id == -1 )
    13741329    {
     
    13781333
    13791334#if BOOT_DEBUG_MAPPING
    1380 _printf("\n[BOOT] map.bin file successfully open at cycle %d\n", _get_proctime() );
     1335_printf("\n[BOOT] map.bin file successfully open at cycle %d\n",
     1336        _get_proctime() );
    13811337#endif
    13821338
    13831339    // get "map.bin" file size (from fat) and check it
    1384     unsigned int size    = fat.fd[fd_id].file_size;
     1340    unsigned int size    = _fat.fd[fd_id].file_size;
    13851341
    13861342    if ( size > SEG_BOOT_MAPPING_SIZE )
    13871343    {
    1388         _printf("\n[BOOT ERROR] : allocated segment too small for map.bin file\n");
     1344        _printf("\n[BOOT ERROR] : segment too small for map.bin file\n");
    13891345        _exit();
    13901346    }
    13911347
    13921348#if BOOT_DEBUG_MAPPING
    1393 _printf("\n[BOOT] map.bin buffer pbase = %x / buffer size = %x / file_size = %x\n",
     1349_printf("\n[BOOT] map.bin buf_pbase = %x / buf_size = %x / file_size = %x\n",
    13941350        SEG_BOOT_MAPPING_BASE , SEG_BOOT_MAPPING_SIZE , size );
    13951351#endif
     
    14001356    if ( offset ) nblocks++;
    14011357
    1402     unsigned int ok = _fat_read( IOC_BOOT_MODE,
     1358    unsigned int ok = _fat_read( 0,        // No IRQ
    14031359                                 fd_id,
    14041360                                 (unsigned int*)SEG_BOOT_MAPPING_BASE,
     
    14561412
    14571413
    1458 /////////////////////////////////////////////////////////////////////////////////////
    1459 // This function load all loadable segments for one .elf file, identified
     1414//////////////////////////////////////////////////////////////////////////////////
     1415// This function load all loadable segments contained in the .elf file identified
    14601416// by the "pathname" argument. Some loadable segments can be copied in several
    14611417// clusters: same virtual address but different physical addresses. 
    14621418// - It open the file.
    1463 // - It loads the complete file in the dedicated boot_elf_buffer.
     1419// - It loads the complete file in the dedicated _boot_elf_buffer.
    14641420// - It copies each loadable segments  at the virtual address defined in
    14651421//   the .elf file, making several copies if the target vseg is not local.
    14661422// - It closes the file.
    1467 // This function is supposed to be executed by processor[0,0,0].
    1468 // Note:
    1469 //   We must use physical addresses to reach the destination buffers that
    1470 //   can be located in remote clusters. We use either a _physical_memcpy(),
    1471 //   or a _dma_physical_copy() if DMA is available.
    1472 //////////////////////////////////////////////////////////////////////////////////////
     1423// This function is supposed to be executed by all processors[x,y,0].
     1424//
     1425// Note: We must use physical addresses to reach the destination buffers that
     1426// can be located in remote clusters. We use either a _physical_memcpy(),
     1427// or a _dma_physical_copy() if DMA is available.
     1428//////////////////////////////////////////////////////////////////////////////////
    14731429void load_one_elf_file( unsigned int is_kernel,     // kernel file if non zero
    14741430                        char*        pathname,
     
    14791435    mapping_vseg_t    * vseg    = _get_vseg_base(header);
    14801436
    1481     unsigned int seg_id;
     1437    unsigned int procid = _get_procid();
     1438    unsigned int cxy    = procid >> P_WIDTH;
     1439    unsigned int x      = cxy >> Y_WIDTH;
     1440    unsigned int y      = cxy & ((1<<Y_WIDTH)-1);
     1441    unsigned int p      = procid & ((1<<P_WIDTH)-1);
    14821442
    14831443#if BOOT_DEBUG_ELF
    1484 _printf("\n[BOOT] Start searching file %s at cycle %d\n",
    1485         pathname, _get_proctime() );
    1486 #endif
    1487 
    1488     // open .elf file
    1489     int fd_id = _fat_open( IOC_BOOT_MODE,
    1490                            pathname,
    1491                            0 );      // no creation
    1492     if ( fd_id < 0 )
    1493     {
    1494         _printf("\n[BOOT ERROR] load_one_elf_file() : %s not found\n", pathname );
    1495         _exit();
    1496     }
    1497 
    1498     // check buffer size versus file size
    1499     if ( fat.fd[fd_id].file_size > GIET_ELF_BUFFER_SIZE )
    1500     {
    1501         _printf("\n[BOOT ERROR] in load_one_elf_file() : %s / size = %x "
    1502                 "larger than GIET_ELF_BUFFER_SIZE = %x\n",
    1503                 pathname , fat.fd[fd_id].file_size , GIET_ELF_BUFFER_SIZE );
    1504         _exit();
    1505     }
    1506 
    1507     // compute number of sectors
    1508     unsigned int nbytes   = fat.fd[fd_id].file_size;
    1509     unsigned int nsectors = nbytes>>9;
    1510     if( nbytes & 0x1FF) nsectors++;
    1511 
    1512     // load file in elf buffer
    1513     if( _fat_read( IOC_BOOT_MODE,
    1514                    fd_id,
    1515                    boot_elf_buffer,
    1516                    nsectors,
    1517                    0 ) != nsectors )
    1518     {
    1519         _printf("\n[BOOT ERROR] load_one_elf_file() : unexpected EOF for file %s\n",
    1520                 pathname );
    1521         _exit();
    1522     }
    1523 
    1524     // Check ELF Magic Number in ELF header
    1525     Elf32_Ehdr* elf_header_ptr = (Elf32_Ehdr*)boot_elf_buffer;
    1526 
    1527     if ( (elf_header_ptr->e_ident[EI_MAG0] != ELFMAG0) ||
    1528          (elf_header_ptr->e_ident[EI_MAG1] != ELFMAG1) ||
    1529          (elf_header_ptr->e_ident[EI_MAG2] != ELFMAG2) ||
    1530          (elf_header_ptr->e_ident[EI_MAG3] != ELFMAG3) )
    1531     {
    1532         _printf("\n[BOOT ERROR] load_elf() : file %s does not use ELF format\n",
    1533                 pathname );
    1534         _exit();
    1535     }
     1444_printf("\n[DEBUG BOOT_ELF] P[%d,%d,%d] enters load_one_elf_file() : %s\n",
     1445        x , y , p , pathname );
     1446#endif
     1447
     1448    Elf32_Ehdr* elf_header_ptr = NULL;  //  avoid a warning
     1449
     1450    int fd_id = 0;                      //  avoid a warning
     1451
     1452    // only P[0,0,0] load file from FAT
     1453    if ( (cxy == 0) && (p == 0) )
     1454    {
     1455        // open .elf file
     1456        fd_id = _fat_open( 0 , pathname , 0 );  // no IRQ / no creation
     1457
     1458        if ( fd_id < 0 )
     1459        {
     1460            _printf("\n[BOOT ERROR] load_one_elf_file() : %s not found\n",
     1461                    pathname );
     1462            _exit();
     1463        }
     1464
     1465        // check buffer size versus file size
     1466        if ( _fat.fd[fd_id].file_size > GIET_ELF_BUFFER_SIZE )
     1467        {
     1468            _printf("\n[BOOT ERROR] in load_one_elf_file() : %s / size = %x "
     1469                    "larger than GIET_ELF_BUFFER_SIZE = %x\n",
     1470                    pathname , _fat.fd[fd_id].file_size , GIET_ELF_BUFFER_SIZE );
     1471            _exit();
     1472        }
     1473
     1474        // compute number of sectors
     1475        unsigned int nbytes   = _fat.fd[fd_id].file_size;
     1476        unsigned int nsectors = nbytes>>9;
     1477        if( nbytes & 0x1FF) nsectors++;
     1478
     1479        // load file to elf buffer
     1480        if( _fat_read( 0,                    // no IRQ
     1481                       fd_id,
     1482                       _boot_elf_buffer,
     1483                       nsectors,
     1484                       0 ) != nsectors )     // offset
     1485        {
     1486            _printf("\n[BOOT ERROR] load_one_elf_file() : unexpected EOF for %s\n",
     1487                    pathname );
     1488            _exit();
     1489        }
     1490
     1491        // Check ELF Magic Number in ELF header
     1492        Elf32_Ehdr* ptr = (Elf32_Ehdr*)_boot_elf_buffer;
     1493
     1494        if ( (ptr->e_ident[EI_MAG0] != ELFMAG0) ||
     1495             (ptr->e_ident[EI_MAG1] != ELFMAG1) ||
     1496             (ptr->e_ident[EI_MAG2] != ELFMAG2) ||
     1497             (ptr->e_ident[EI_MAG3] != ELFMAG3) )
     1498        {
     1499            _printf("\n[BOOT ERROR] load_elf() : file %s does not use ELF format\n",
     1500                    pathname );
     1501            _exit();
     1502        }
     1503
     1504#if BOOT_DEBUG_ELF
     1505_printf("\n[DEBUG BOOT_ELF] P[%d,%d,%d] load file %s\n",
     1506        x , y , p , pathname );
     1507#endif
     1508
     1509    } // end if P[0,0,0]
     1510
     1511    //////////////////////////////////////////////
     1512    _simple_barrier_wait( &_barrier_all_clusters );
     1513    //////////////////////////////////////////////
     1514
     1515    // Each processor P[x,y,0] copy replicated segments in cluster[x,y]
     1516    elf_header_ptr = (Elf32_Ehdr*)_boot_elf_buffer;
    15361517
    15371518    // get program header table pointer
    1538     unsigned int pht_index = elf_header_ptr->e_phoff;
    1539     if( pht_index == 0 )
     1519    unsigned int offset = elf_header_ptr->e_phoff;
     1520    if( offset == 0 )
    15401521    {
    15411522        _printf("\n[BOOT ERROR] load_one_elf_file() : file %s "
     
    15431524        _exit();
    15441525    }
    1545     Elf32_Phdr* elf_pht_ptr = (Elf32_Phdr*)(boot_elf_buffer + pht_index);
     1526
     1527    Elf32_Phdr* elf_pht_ptr = (Elf32_Phdr*)(_boot_elf_buffer + offset);
    15461528
    15471529    // get number of segments
    15481530    unsigned int nsegments   = elf_header_ptr->e_phnum;
    15491531
    1550     // Loop on loadable segments in the .elf file
     1532    // First loop on loadable segments in the .elf file
     1533    unsigned int seg_id;
    15511534    for (seg_id = 0 ; seg_id < nsegments ; seg_id++)
    15521535    {
     
    15591542            unsigned int seg_memsz  = elf_pht_ptr[seg_id].p_memsz;
    15601543
    1561 #if BOOT_DEBUG_ELF
    1562 _printf("\n[BOOT] Segment %d : vaddr = %x / size = %x\n",
    1563         seg_id , seg_vaddr , seg_filesz );
    1564 #endif
    1565 
    1566             if( seg_memsz < seg_filesz )
    1567             {
    1568                 _printf("\n[BOOT ERROR] load_one_elf_file() : segment at vaddr = %x"
    1569                         " in file %s has memsize < filesize \n", seg_vaddr, pathname );
     1544            if( seg_memsz != seg_filesz )
     1545            {
     1546                _printf("\n[BOOT ERROR] load_one_elf_file() : segment at vaddr = %x\n"
     1547                        " in file %s has memsize = %x / filesize = %x \n"
     1548                        " check that all global variables are in data segment\n",
     1549                        seg_vaddr, pathname , seg_memsz , seg_filesz );
    15701550                _exit();
    15711551            }
    15721552
    1573             // fill empty space with 0 as required
    1574             if( seg_memsz > seg_filesz )
    1575             {
    1576                 unsigned int i;
    1577                 for( i = seg_filesz ; i < seg_memsz ; i++ )
    1578                    boot_elf_buffer[i+seg_offset] = 0;
    1579             }
    1580 
    1581             unsigned int src_vaddr = (unsigned int)boot_elf_buffer + seg_offset;
     1553            unsigned int src_vaddr = (unsigned int)_boot_elf_buffer + seg_offset;
    15821554
    15831555            // search all vsegs matching the virtual address
     
    15971569            }
    15981570
     1571            // Second loop on vsegs in the mapping
    15991572            for ( vseg_id = vseg_first ; vseg_id < vseg_last ; vseg_id++ )
    16001573            {
     
    16031576                    found = 1;
    16041577
    1605                     // get destination buffer physical address and size
     1578                    // get destination buffer physical address, size, coordinates
    16061579                    paddr_t      seg_paddr  = vseg[vseg_id].pbase;
    16071580                    unsigned int seg_size   = vseg[vseg_id].length;
    1608                    
    1609 #if BOOT_DEBUG_ELF
    1610 _printf("   loaded into vseg %s at paddr = %l / buffer size = %x\n",
    1611         vseg[vseg_id].name , seg_paddr , seg_size );
    1612 #endif
     1581                    unsigned int extend     = (unsigned int)(seg_paddr>>32);
     1582                    unsigned int cx         = extend >> Y_WIDTH;
     1583                    unsigned int cy         = extend & ((1<<Y_WIDTH)-1);
     1584
    16131585                    // check vseg size
    16141586                    if ( seg_size < seg_filesz )
     
    16201592                    }
    16211593
    1622                     // copy the segment from boot buffer to destination buffer
    1623                     // using DMA channel[0,0,0] if it is available.
    1624                     if( NB_DMA_CHANNELS > 0 )
     1594                    // P[x,y,0] copy the segment from boot buffer in cluster[0,0]
     1595                    // to destination buffer in cluster[x,y],
     1596                    if ( (cx == x) && (cy == y) )
    16251597                    {
    1626                         _dma_physical_copy( 0,                  // DMA in cluster[0,0]
    1627                                             0,                  // DMA channel 0
    1628                                             (paddr_t)seg_paddr, // destination paddr
    1629                                             (paddr_t)src_vaddr, // source paddr
    1630                                             seg_filesz );       // size
    1631                     }
    1632                     else
    1633                     {
    1634                         _physical_memcpy( (paddr_t)seg_paddr,   // destination paddr
    1635                                           (paddr_t)src_vaddr,   // source paddr
    1636                                           seg_filesz );         // size
     1598                        if( NB_DMA_CHANNELS > 0 )
     1599                        {
     1600                            _dma_physical_copy( extend,    // DMA in cluster[x,y]       
     1601                                                0,         // DMA channel 0
     1602                                                seg_paddr,
     1603                                                (paddr_t)src_vaddr,
     1604                                                seg_filesz );   
     1605                        }
     1606                        else
     1607                        {
     1608                            _physical_memcpy( seg_paddr,            // dest paddr
     1609                                              (paddr_t)src_vaddr,   // source paddr
     1610                                              seg_filesz );         // size
     1611                        }
     1612#if BOOT_DEBUG_ELF
     1613_printf("\n[DEBUG BOOT_ELF] P[%d,%d,%d] copy segment %d :\n"
     1614        "  vaddr = %x / size = %x / paddr = %l\n",
     1615        x , y , p , seg_id , seg_vaddr , seg_memsz , seg_paddr );
     1616#endif
    16371617                    }
    16381618                }
    1639             }  // end for vsegs in vspace
     1619            }  // end for vsegs
    16401620
    16411621            // check at least one matching vseg
     
    16511631    }  // end for loadable segments
    16521632
    1653     // close .elf file
    1654     _fat_close( fd_id );
    1655 
    1656     _printf("\n[BOOT] File %s loaded at cycle %d\n",
    1657             pathname , _get_proctime() );
     1633    //////////////////////////////////////////////
     1634    _simple_barrier_wait( &_barrier_all_clusters );
     1635    //////////////////////////////////////////////
     1636
     1637    // only P[0,0,0] close the file
     1638    if ( (cxy == 0) && (p == 0) )
     1639    {
     1640        // close .elf file
     1641        _fat_close( fd_id );
     1642
     1643        _printf("\n[BOOT] File %s loaded at cycle %d\n",
     1644                pathname , _get_proctime() );
     1645    }
    16581646
    16591647} // end load_one_elf_file()
     
    17441732    mapping_cluster_t * cluster = _get_cluster_base(header);
    17451733    mapping_periph_t * periph   = _get_periph_base(header);
    1746     mapping_irq_t * irq         = _get_irq_base(header);
    17471734
    17481735    unsigned int periph_id;
     
    17701757            case PERIPH_TYPE_IOC:    // vci_block_device component
    17711758            {
    1772                 if ( subtype == IOC_SUBTYPE_BDV )
    1773                 {
    1774                     _bdv_init();
    1775                 }
    1776                 else if ( subtype == IOC_SUBTYPE_HBA )
    1777                 {
    1778                     for (channel_id = 0; channel_id < channels; channel_id++)
    1779                         _hba_init( channel_id );
    1780                 }
    1781                 else if ( subtype == IOC_SUBTYPE_SPI )
    1782                 {
    1783                     //TODO
    1784                 }
     1759                if      ( subtype == IOC_SUBTYPE_BDV ) _bdv_init();
     1760                else if ( subtype == IOC_SUBTYPE_HBA ) _hba_init();
     1761                else if ( subtype == IOC_SUBTYPE_SPI ) _sdc_init();
    17851762                break;
    17861763            }
     
    18141791                break;
    18151792            }
    1816             case PERIPH_TYPE_PIC:    // vci_iopic component
    1817             {
    1818                 // scan all IRQs defined in mapping for PIC component,
    1819                 // and initialises addresses for WTI IRQs
    1820                 for ( channel_id = periph[periph_id].irq_offset ;
    1821                       channel_id < periph[periph_id].irq_offset +
    1822                       periph[periph_id].irqs ; channel_id++ )
    1823                 {
    1824                     unsigned int hwi_id     = irq[channel_id].srcid;   // HWI in PIC
    1825                     unsigned int wti_id     = irq[channel_id].dest_id; // WTI in XCU
    1826                     unsigned int cluster_xy = irq[channel_id].dest_xy; // XCU coordinates
    1827                     unsigned int vaddr;
    1828 
    1829                     _xcu_get_wti_address( wti_id, &vaddr );
    1830                     _pic_init( hwi_id, vaddr, cluster_xy );
    1831 
    1832 #if BOOT_DEBUG_PERI
    1833 _printf("[BOOT] PIC : hwi_index = %d => wti_index = %d for XCU[%d,%d]\n",
    1834         hwi_id , wti_id , cluster_xy >> Y_WIDTH , cluster_xy & ((1<<Y_WIDTH)-1) );
    1835 #endif
    1836                 }
    1837                 break;
    1838             }
    18391793        }  // end switch periph type
    18401794    } // end loop on peripherals
    18411795} // end boot_peripherals_init()
    18421796
    1843 ///////////////////////////////////////////////////////////////////////////////////////
     1797/////////////////////////////////////////////////////////////////////////////////
    18441798// This function is executed in parallel by all processors[x][y][0].
    1845 // It initialises the physical memory allocator in each cluster containing a RAM pseg.
    1846 ///////////////////////////////////////////////////////////////////////////////////////
     1799// It initialises the physical memory allocator in each cluster containing
     1800// a RAM pseg.
     1801/////////////////////////////////////////////////////////////////////////////////
    18471802void boot_pmem_init( unsigned int cx,
    18481803                     unsigned int cy )
     
    18881843    {
    18891844        _printf("\n[BOOT ERROR] boot_pmem_init() : no RAM in cluster[%d][%d]\n",
    1890               cx , cy );
     1845                cx , cy );
    18911846        _exit();
    18921847    }   
     
    19041859    unsigned int       lpid       = gpid & ((1 << P_WIDTH) -1);
    19051860
     1861    //////////////////////////////////////////////////////////
    19061862    // Phase ONE : only P[0][0][0] execute it
     1863    //////////////////////////////////////////////////////////
    19071864    if ( gpid == 0 )   
    19081865    {
     
    19151872
    19161873        // initialises the FAT
    1917         _fat_init( IOC_BOOT_MODE );
     1874        _fat_init( 0 );   // no IRQ
    19181875
    19191876        _printf("\n[BOOT] FAT initialised at cycle %d\n", _get_proctime() );
     
    19471904            {
    19481905                unsigned long long paddr = (((unsigned long long)cluster_xy)<<32) +
    1949                                            SEG_XCU_BASE + XCU_REG( XCU_WTI_REG , 0 );
     1906                                           SEG_XCU_BASE+XCU_REG( XCU_WTI_REG , 0 );
    19501907
    19511908                _physical_write( paddr , (unsigned int)boot_entry );
     
    19531910        }
    19541911
    1955         _printf("\n[BOOT] Processors P[x,y,0] start at cycle %d\n", _get_proctime() );
    1956     }
    1957 
     1912        _printf("\n[BOOT] Processors P[x,y,0] start at cycle %d\n",
     1913                _get_proctime() );
     1914    }
     1915
     1916    /////////////////////////////////////////////////////////////////
    19581917    // Phase TWO : All processors P[x][y][0] execute it in parallel
     1918    /////////////////////////////////////////////////////////////////
    19591919    if( lpid == 0 )
    19601920    {
     
    19971957        _simple_barrier_wait( &_barrier_all_clusters );
    19981958        //////////////////////////////////////////////
     1959
     1960        if ( gpid == 0 )
     1961        {
     1962            _printf("\n[BOOT] Schedulers initialised at cycle %d\n",
     1963                    _get_proctime() );
     1964        }
     1965
    19991966       
    2000         // Processor P[0,0,0] completes schedulers with PIC-WTI
    2001         // initialises external peripherals and load .elf files.
    2002         if ( gpid == 0 )
    2003         {
    2004             // complete schedulers initialisation
    2005             boot_pic_wti_init();
    2006 
    2007             _printf("\n[BOOT] Schedulers initialised at cycle %d\n", _get_proctime() );
    2008 
    2009             // initialize non replicated peripherals
    2010             boot_peripherals_init();
    2011 
    2012             _printf("\n[BOOT] Peripherals initialised at cycle %d\n", _get_proctime() );
    2013 
    2014             // Loading all .elf files
    2015             boot_elf_load();
    2016         }
     1967        // Each processor P[x,y,0] contributes to load .elf files.
     1968        boot_elf_load();
    20171969
    20181970        //////////////////////////////////////////////
     
    20201972        //////////////////////////////////////////////
    20211973       
     1974        // Processor P[0,0,0] initialises external peripherals
     1975        if ( gpid == 0 )
     1976        {
     1977            // initialize external peripherals
     1978            boot_peripherals_init();
     1979       
     1980            _printf("\n[BOOT] Peripherals initialised at cycle %d\n",
     1981                    _get_proctime() );
     1982        }
     1983
     1984        //////////////////////////////////////////////
     1985        _simple_barrier_wait( &_barrier_all_clusters );
     1986        //////////////////////////////////////////////
     1987
    20221988        // each processor P[x][y][0] wake up other processors in same cluster
    20231989        mapping_header_t*  header     = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     
    20311997        }
    20321998
    2033         if ( gpid == 0 )    // only P[0][0][0] makes display
    2034         _printf("\n[BOOT] All processors start at cycle %d\n", _get_proctime() );
    2035     }
    2036 
     1999        // only P[0][0][0] makes display
     2000        if ( gpid == 0 )
     2001        {   
     2002            _printf("\n[BOOT] All processors start at cycle %d\n",
     2003                    _get_proctime() );
     2004        }
     2005    }
    20372006    // Other processors than P[x][y][0] activate MMU (using local PTAB)
    20382007    if ( lpid != 0 )
     
    20482017    _set_sr( 0 );
    20492018
     2019    // Each proocessor get kernel entry virtual address
     2020    unsigned int kernel_entry = (unsigned int)&kernel_init_vbase;
     2021
     2022#if BOOT_DEBUG_ELF
     2023_printf("\n@@@ P[%d,%d,%d] exit boot / jumping to %x at cycle %d\n",
     2024        cx, cy, lpid, kernel_entry , _get_proctime() );
     2025#endif
     2026
    20502027    // All processors jump to kernel_init
    2051     unsigned int kernel_entry = (unsigned int)&kernel_init_vbase;
    20522028    asm volatile( "jr   %0" ::"r"(kernel_entry) );
    20532029
Note: See TracChangeset for help on using the changeset viewer.