/* * boot.c - TSAR bootloader implementation. * * Authors : Vu Son (2016) * Alain Greiner (2016,2017,2018,2019) * * Copyright (c) UPMC Sorbonne Universites * * This file is part of ALMOS-MKH. * * ALMOS-MKH is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2.0 of the License. * * ALMOS-MKH is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with ALMOS-MKH; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /**************************************************************************** * This file contains the ALMOS-MKH. boot-loader for the TSAR architecture. * * * * It supports a clusterised, shared memory, multi-processor architecture, * * where each processor core is identified by a composite index [cxy,lid] * * with one physical memory bank per cluster. * * * * The 'boot.elf' file (containing the boot-loader binary code) is stored * * on disk (not in the FAT file system), and must be loaded into memory by * * the preloader running on the core[0][0] (cxy = 0 / lid = 0). * * * * The main task of the boot-loader is to load in the first physical page * * of each cluster a copy of the kernel code (segments "kcode" and "kdata") * * and to build - in each cluster - a cluster specific description of the * * hardware archtecture, stored in the "kdata" segment as the boot_info_t * * structure. The "kernel.elf" and "arch_info.bin" files are supposed to be * * stored on disk in a FAT32 file system. 
* * * * All cores contribute to the boot procedure, but all cores are not * * simultaneously active: * * - in a first phase, only core[0][0] is running (core 0 in cluster 0). * * - in a second phase, only core[cxy][0] is running in each cluster. * * - in last phase, all core[cxy][lid] are running. * * * * Finally, all cores jump to the kernel_init() function that makes the * * actual kernel initialisation. * * * * Implementation note: * * * * To allows each core to use the local copy of both the boot code and the * * kernel code, the boot-loader builds a minimal and temporary BPT (Boot * * Page Table) containing only two big pages: page[0] maps the kernel code, * * and page 1 maps the boot code. * ****************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include /***************************************************************************** * Macros. ****************************************************************************/ #define PAGE_ROUND_DOWN(x) ((x) & (~PPM_PAGE_SIZE -1)) #define PAGE_ROUND_UP(x) (((x) + PPM_PAGE_SIZE-1) & \ (~(PPM_PAGE_SIZE-1))) /***************************************************************************** * Global variables. ****************************************************************************/ // the Boot Page Table contains two PTE1, and should be aligned on 8 Kbytes uint32_t boot_pt[2] __attribute__((aligned(2048))); // synchronization variables. 
// synchronization variables (accessed by all clusters with remote primitives)
volatile boot_remote_spinlock_t  tty0_lock;        // protect TTY0 access
volatile boot_remote_barrier_t   global_barrier;   // synchronize CP0 cores
volatile boot_remote_barrier_t   local_barrier;    // synchronize cores in one cluster
uint32_t                         active_cores_nr;  // number of expected CP0s

// kernel segments layout variables (set by boot_kernel_load())
uint32_t  seg_kcode_base;    // kcode  segment base address
uint32_t  seg_kcode_size;    // kcode  segment size (bytes)
uint32_t  seg_kdata_base;    // kdata  segment base address
uint32_t  seg_kdata_size;    // kdata  segment size (bytes)
uint32_t  seg_kentry_base;   // kentry segment base address
uint32_t  seg_kentry_size;   // kentry segment size (bytes)
uint32_t  kernel_entry;      // kernel entry point

// Functions
extern void boot_entry( void );    // boot_loader entry point
extern void boot_loader( lid_t lid, cxy_t cxy );

#if DEBUG_BOOT_INFO
/*********************************************************************************
 * This debug function returns the printable string for each device type.
 ********************************************************************************/
static char * device_type_str( enum device_types_e dev_type )
{
    switch ( dev_type )
    {
        case DEV_TYPE_RAM_SCL: return "RAM_SCL";
        case DEV_TYPE_ROM_SCL: return "ROM_SCL";
        case DEV_TYPE_FBF_SCL: return "FBF_SCL";
        case DEV_TYPE_IOB_TSR: return "IOB_TSR";
        case DEV_TYPE_IOC_BDV: return "IOC_BDV";
        case DEV_TYPE_IOC_HBA: return "IOC_HBA";
        case DEV_TYPE_IOC_SDC: return "IOC_SDC";
        case DEV_TYPE_IOC_SPI: return "IOC_SPI";
        case DEV_TYPE_IOC_RDK: return "IOC_RDK";
        case DEV_TYPE_MMC_TSR: return "MMC_TSR";
        case DEV_TYPE_DMA_SCL: return "DMA_SCL";
        case DEV_TYPE_NIC_CBF: return "NIC_CBF";
        case DEV_TYPE_TIM_SCL: return "TIM_SCL";
        case DEV_TYPE_TXT_TTY: return "TXT_TTY";
        case DEV_TYPE_TXT_MTY: return "TXT_MTY";
        case DEV_TYPE_ICU_XCU: return "ICU_XCU";
        case DEV_TYPE_PIC_TSR: return "PIC_TSR";
        default:               return "undefined";
    }
}
#endif

/************************************************************************************
 * This function loads the
arch_info.bin file into the boot cluster memory.
 ***********************************************************************************/
static void boot_archinfo_load( void )
{
    // the file is loaded at a fixed physical address in the boot cluster
    archinfo_header_t* header = (archinfo_header_t*)ARCHINFO_BASE;

    // Load file into memory
    if (boot_fat32_load(ARCHINFO_PATHNAME, ARCHINFO_BASE, ARCHINFO_MAX_SIZE))
    {
        boot_printf("\n[BOOT ERROR]: boot_archinfo_load(): "
                    "<%s> file not found\n", ARCHINFO_PATHNAME);
        boot_exit();
    }

    // check the signature to detect a corrupted or wrong file
    if (header->signature != ARCHINFO_SIGNATURE)
    {
        boot_printf("\n[BOOT_ERROR]: boot_archinfo_load(): "
                    "<%s> file signature should be %x\n",
                    ARCHINFO_PATHNAME, ARCHINFO_SIGNATURE);
        boot_exit();
    }

#if DEBUG_BOOT_INFO
boot_printf("\n[BOOT INFO] in %s : file %s loaded at address = %x\n",
__FUNCTION__ , ARCHINFO_PATHNAME , ARCHINFO_BASE );
#endif

} // boot_archinfo_load()

/**************************************************************************************
 * This function loads the 'kernel.elf' file into the boot cluster memory buffer,
 * analyzes it, and places the three kcode, kentry, kdata segments at their final
 * physical adresses (defined in the .elf file).
 * It sets the global variables defining the kernel layout.
 *************************************************************************************/
static void boot_kernel_load( void )
{
    Elf32_Ehdr  * elf_header;      // pointer on kernel.elf header.
    Elf32_Phdr  * program_header;  // pointer on kernel.elf program header.
    uint32_t      phdr_offset;     // program header offset in kernel.elf file.
    uint32_t      segments_nb;     // number of segments in kernel.elf file.
    uint32_t      seg_src_addr;    // segment address in kernel.elf file (source).
    uint32_t      seg_paddr;       // segment local physical address of segment
    uint32_t      seg_offset;      // segment offset in kernel.elf file
    uint32_t      seg_filesz;      // segment size (bytes) in kernel.elf file
    uint32_t      seg_memsz;       // segment size (bytes) in memory image.
    bool_t        kcode_found;     // kcode segment found.
    bool_t        kdata_found;     // kdata segment found.
bool_t kentry_found; // kentry segment found. uint32_t seg_id; // iterator for segments loop. #if DEBUG_BOOT_ELF boot_printf("\n[BOOT INFO] %s enters for file %s at cycle %d\n", __FUNCTION__ , KERNEL_PATHNAME , boot_get_proctime() ); #endif // Load kernel.elf file into memory buffer if ( boot_fat32_load(KERNEL_PATHNAME, KERN_BASE, KERN_MAX_SIZE) ) { boot_printf("\n[BOOT ERROR] in %s : <%s> file not found\n", KERNEL_PATHNAME); boot_exit(); } // get pointer to kernel.elf header elf_header = (Elf32_Ehdr*)KERN_BASE; // check signature if ((elf_header->e_ident[EI_MAG0] != ELFMAG0) || (elf_header->e_ident[EI_MAG1] != ELFMAG1) || (elf_header->e_ident[EI_MAG2] != ELFMAG2) || (elf_header->e_ident[EI_MAG3] != ELFMAG3)) { boot_printf("\n[BOOT_ERROR]: boot_kernel_load(): " "<%s> is not an ELF file\n", KERNEL_PATHNAME); boot_exit(); } // Get program header table offset and number of segments phdr_offset = elf_header->e_phoff; segments_nb = elf_header->e_phnum; // Get program header table pointer program_header = (Elf32_Phdr*)(KERN_BASE + phdr_offset); // loop on segments kcode_found = false; kdata_found = false; kentry_found = false; for (seg_id = 0; seg_id < segments_nb; seg_id++) { if (program_header[seg_id].p_type == PT_LOAD) // Found one loadable segment { // Get segment attributes. seg_paddr = program_header[seg_id].p_paddr; seg_offset = program_header[seg_id].p_offset; seg_filesz = program_header[seg_id].p_filesz; seg_memsz = program_header[seg_id].p_memsz; // get segment base address in buffer seg_src_addr = (uint32_t)KERN_BASE + seg_offset; // Load segment to its final physical memory address boot_memcpy( (void*)seg_paddr, (void*)seg_src_addr, seg_filesz ); #if DEBUG_BOOT_ELF boot_printf("\n[BOOT INFO] in %s for file %s : found loadable segment\n" " base = %x / size = %x\n", __FUNCTION__ , KERNEL_PATHNAME , seg_paddr , seg_memsz ); #endif // Fill remaining memory with zero if (filesz < memsz). 
if( seg_memsz < seg_filesz ) { boot_memset( (void*)(seg_paddr + seg_filesz), 0, seg_memsz - seg_filesz); } // Note: we suppose that the 'kernel.elf' file contains exactly // three loadable segments ktext, kentry, & kdata: // - the kcode segment is read-only and base == KCODE_BASE // - the kentry segment is read-only and base == KENTRY_BASE if( ((program_header[seg_id].p_flags & PF_W) == 0) && (program_header[seg_id].p_paddr == KCODE_BASE) ) // kcode segment { if( kcode_found ) { boot_printf("\n[BOOT_ERROR] in %s for file %s :\n" " two kcode segments found\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } kcode_found = true; seg_kcode_base = seg_paddr; seg_kcode_size = seg_memsz; } else if( program_header[seg_id].p_paddr == KENTRY_BASE ) // kentry segment { if( kentry_found ) { boot_printf("\n[BOOT_ERROR] in %s for file %s :\n" " two kentry segments found\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } kentry_found = true; seg_kentry_base = seg_paddr; seg_kentry_size = seg_memsz; } else // kdata segment { if( kdata_found ) { boot_printf("\n[BOOT_ERROR] in %s for file %s :\n" " two kdata segments found\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } kdata_found = true; seg_kdata_base = seg_paddr; seg_kdata_size = seg_memsz; } } } // check kcode & kdata segments found if( kcode_found == false ) { boot_printf("\n[BOOT_ERROR] in %s for file %s : seg_kcode not found\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } if( kentry_found == false ) { boot_printf("\n[BOOT_ERROR] in %s for file %s : seg_kentry not found\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } if( kdata_found == false ) { boot_printf("\n[BOOT_ERROR] in %s for file %s : seg_kdata not found\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } // check segments sizes if( seg_kentry_size > KENTRY_MAX_SIZE ) { boot_printf("\n[BOOT_ERROR] in %s for file %s : seg_kentry too large\n", __FUNCTION__ , KERNEL_PATHNAME ); boot_exit(); } if( (seg_kcode_size + seg_kdata_size) > KCODE_MAX_SIZE ) { 
boot_printf("\n[BOOT_ERROR] in %s for file %s : seg_kcode + seg_kdata too large\n", __FUNCTION__ , KERNEL_PATHNAME ); } // set entry point kernel_entry = (uint32_t)elf_header->e_entry; #if DEBUG_BOOT_ELF boot_printf("\n[BOOT INFO] %s completed for file %s at cycle %d\n", __FUNCTION__ , KERNEL_PATHNAME , boot_get_proctime() ); #endif } // boot_kernel_load() /************************************************************************************* * This function initializes the boot_info_t structure for a given cluster. * @ boot_info : pointer to local boot_info_t structure * @ cxy : cluster identifier ************************************************************************************/ static void boot_info_init( boot_info_t * boot_info, cxy_t cxy ) { archinfo_header_t * header; archinfo_core_t * core_base; archinfo_cluster_t * cluster_base; archinfo_device_t * device_base; archinfo_irq_t * irq_base; archinfo_cluster_t * cluster; archinfo_cluster_t * my_cluster = NULL; // target cluster archinfo_cluster_t * io_cluster = NULL; // external peripherals cluster archinfo_core_t * core; uint32_t core_id; archinfo_device_t * device; uint32_t device_id; archinfo_irq_t * irq; uint32_t irq_id; uint32_t end; boot_device_t * boot_dev; // get pointer on ARCHINFO header and on the four arch_info arrays header = (archinfo_header_t*)ARCHINFO_BASE; core_base = archinfo_get_core_base (header); cluster_base = archinfo_get_cluster_base(header); device_base = archinfo_get_device_base (header); irq_base = archinfo_get_irq_base (header); // Initialize global platform parameters boot_info->x_size = header->x_size; boot_info->y_size = header->y_size; boot_info->x_width = header->x_width; boot_info->y_width = header->y_width; boot_info->paddr_width = header->paddr_width; boot_info->io_cxy = header->io_cxy; // Initialize kernel segments from global variables boot_info->kcode_base = seg_kcode_base; boot_info->kcode_size = seg_kcode_size; boot_info->kdata_base = seg_kdata_base; 
    boot_info->kdata_size   = seg_kdata_size;
    boot_info->kentry_base  = seg_kentry_base;
    boot_info->kentry_size  = seg_kentry_size;

    // loop on arch_info clusters to build cluster_info[][] array
    // and get io_cluster and my_cluster pointers
    for (cluster = cluster_base;
         cluster < &cluster_base[header->x_size * header->y_size];
         cluster++)
    {
        // decompose the cluster identifier into (x,y) coordinates
        int x = cluster->cxy >> Y_WIDTH;
        int y = cluster->cxy & ((1 << Y_WIDTH) - 1);

        boot_info->cluster_info[x][y] = (uint8_t)cluster->cores;

        if( cluster->cxy == cxy )            my_cluster = cluster;
        if( cluster->cxy == header->io_cxy ) io_cluster = cluster;
    }

    if( my_cluster == NULL )
    {
        boot_printf("\n[ERROR] in %s : cannot found cluster %x in arch_info\n",
        __FUNCTION__ , cxy );
        boot_exit();
    }

    if( io_cluster == NULL )
    {
        boot_printf("\n[ERROR] in %s : cannot found io_cluster %x in arch_info\n",
        __FUNCTION__ , cxy );
        boot_exit();
    }

    //////////////////////////////////////////////////////////
    // initialize the boot_info array of external peripherals

#if DEBUG_BOOT_INFO
boot_printf("\n[BOOT INFO] %s : external peripherals at cycle %d\n",
__FUNCTION__ , boot_get_proctime() );
#endif

    device_id = 0;
    for (device = &device_base[io_cluster->device_offset];
         device < &device_base[io_cluster->device_offset + io_cluster->devices];
         device++ )
    {
        // overflow check on the ext_dev[] array
        if( device_id >= CONFIG_MAX_EXT_DEV )
        {
            boot_printf("\n[ERROR] in %s : too much external devices in arch_info\n",
            __FUNCTION__ );
            boot_exit();
        }

        // keep only external devices
        if( (device->type != DEV_TYPE_RAM_SCL) &&
            (device->type != DEV_TYPE_ICU_XCU) &&
            (device->type != DEV_TYPE_MMC_TSR) &&
            (device->type != DEV_TYPE_DMA_SCL) &&
            (device->type != DEV_TYPE_TXT_MTY) &&
            (device->type != DEV_TYPE_IOC_SPI) )
        {
            boot_dev = &boot_info->ext_dev[device_id];
            boot_dev->type     = device->type;
            boot_dev->base     = device->base;
            boot_dev->channels = device->channels;
            boot_dev->param0   = device->arg0;
            boot_dev->param1   = device->arg1;
            boot_dev->param2   = device->arg2;
            boot_dev->param3   = device->arg3;
            boot_dev->irqs     = device->irqs;
            device_id++;

#if DEBUG_BOOT_INFO
boot_printf(" - %s : base = %l / size = %l / channels = %d / irqs = %d\n",
device_type_str(device->type), device->base, device->size,
device->channels, device->irqs );
#endif
        }

        // handle IRQs for PIC
        // NOTE: boot_dev still points to the entry registered just above;
        // a PIC is not in the exclusion list, so it always passed the filter.
        if (device->type == DEV_TYPE_PIC_TSR)
        {
            // invalidate all IRQ slots before filling the declared ones
            for (irq_id = 0; irq_id < CONFIG_MAX_EXTERNAL_IRQS ; irq_id++)
            {
                boot_dev->irq[irq_id].valid = 0;
            }

            for (irq = &irq_base[device->irq_offset];
                 irq < &irq_base[device->irq_offset + device->irqs];
                 irq++)
            {
                boot_dev->irq[irq->port].valid    = 1;
                boot_dev->irq[irq->port].dev_type = irq->dev_type;
                boot_dev->irq[irq->port].channel  = irq->channel;
                boot_dev->irq[irq->port].is_rx    = irq->is_rx;

#if DEBUG_BOOT_INFO
boot_printf(" . irq_port = %d / source = %s / channel = %d / is_rx = %d\n",
irq->port , device_type_str( irq->dev_type ) , irq->channel , irq->is_rx );
#endif
            }
        }
    } // end loop on io_cluster peripherals

    // initialize number of external peripherals
    boot_info->ext_dev_nr = device_id;

    // Initialize cluster specific resources
    boot_info->cxy = my_cluster->cxy;

#if DEBUG_BOOT_INFO
boot_printf("\n[BOOT INFO] %s : cores in cluster %x\n",
__FUNCTION__ , cxy );
#endif

    ////////////////////////////////////////
    // Initialize array of core descriptors
    core_id = 0;
    for (core = &core_base[my_cluster->core_offset];
         core < &core_base[my_cluster->core_offset + my_cluster->cores];
         core++ )
    {
        boot_info->core[core_id].gid = (gid_t)core->gid;
        boot_info->core[core_id].lid = (lid_t)core->lid;
        boot_info->core[core_id].cxy = (cxy_t)core->cxy;

#if DEBUG_BOOT_INFO
boot_printf(" - core_gid = %x : cxy = %x / lid = %d\n",
core->gid , core->cxy , core->lid );
#endif

        core_id++;
    }

    // Initialize number of cores in my_cluster
    boot_info->cores_nr = core_id;

    //////////////////////////////////////////////////////////////////////
    // initialise boot_info array of internal devices (RAM, ICU, MMC, DMA)

#if DEBUG_BOOT_INFO
boot_printf("\n[BOOT INFO] %s : internal peripherals in cluster %x\n",
__FUNCTION__ , cxy );
#endif

    device_id = 0;
    for (device = &device_base[my_cluster->device_offset];
         device < &device_base[my_cluster->device_offset + my_cluster->devices];
         device++ )
    {
        // keep only internal devices
        if( (device->type == DEV_TYPE_RAM_SCL) ||
            (device->type == DEV_TYPE_ICU_XCU) ||
            (device->type == DEV_TYPE_MMC_TSR) ||
            (device->type == DEV_TYPE_DMA_SCL) ||
            (device->type == DEV_TYPE_TXT_MTY) ||
            (device->type == DEV_TYPE_IOC_SPI) )
        {
            if (device->type == DEV_TYPE_RAM_SCL)     // RAM
            {
                // set number of physical memory pages
                boot_info->pages_nr = device->size >> CONFIG_PPM_PAGE_SHIFT;

#if DEBUG_BOOT_INFO
boot_printf(" - RAM : %x pages\n", boot_info->pages_nr );
#endif
            }
            else                                      // ICU / MMC / DMA / MTY
            {
                // overflow check on the int_dev[] array
                if( device_id >= CONFIG_MAX_INT_DEV )
                {
                    boot_printf("\n[ERROR] in %s : too much internal devices in cluster %x\n",
                    __FUNCTION__ , cxy );
                    boot_exit();
                }

                boot_dev = &boot_info->int_dev[device_id];
                boot_dev->type     = device->type;
                boot_dev->base     = device->base;
                boot_dev->channels = device->channels;
                boot_dev->param0   = device->arg0;
                boot_dev->param1   = device->arg1;
                boot_dev->param2   = device->arg2;
                boot_dev->param3   = device->arg3;
                boot_dev->irqs     = device->irqs;
                device_id++;

#if DEBUG_BOOT_INFO
boot_printf(" - %s : base = %l / size = %l / channels = %d / irqs = %d\n",
device_type_str( device->type ) , device->base , device->size ,
device->channels , device->irqs );
#endif

                // handle IRQs for ICU
                if (device->type == DEV_TYPE_ICU_XCU)
                {
                    // invalidate all IRQ slots before filling the declared ones
                    for (irq_id = 0; irq_id < CONFIG_MAX_INTERNAL_IRQS ; irq_id++)
                    {
                        boot_dev->irq[irq_id].valid = 0;
                    }

                    for (irq = &irq_base[device->irq_offset];
                         irq < &irq_base[device->irq_offset + device->irqs] ;
                         irq++)
                    {
                        boot_dev->irq[irq->port].valid    = 1;
                        boot_dev->irq[irq->port].dev_type = irq->dev_type;
                        boot_dev->irq[irq->port].channel  = irq->channel;
                        boot_dev->irq[irq->port].is_rx    = irq->is_rx;

#if DEBUG_BOOT_INFO
boot_printf(" . irq_port = %d / source = %s / channel = %d / is_rx = %d\n",
irq->port , device_type_str( irq->dev_type ) , irq->channel , irq->is_rx );
#endif
                    }
                }
            }
        }
    } // end loop on local peripherals

    // initialize number of internal peripherals
    boot_info->int_dev_nr = device_id;

    // Get the top address of the kernel segments
    end = boot_info->kdata_base + boot_info->kdata_size;

    // compute number of physical pages occupied by the kernel code
    boot_info->pages_offset = ( (end & CONFIG_PPM_PAGE_MASK) == 0 ) ?
                              (end >> CONFIG_PPM_PAGE_SHIFT) :
                              (end >> CONFIG_PPM_PAGE_SHIFT) + 1;

    // no reserved zones for TSAR architecture
    boot_info->rsvd_nr = 0;

    // set boot_info signature
    boot_info->signature = BOOT_INFO_SIGNATURE;

} // boot_info_init()

/***********************************************************************************
 * This function check the local boot_info_t structure for a given core.
 * @ boot_info  : pointer to local 'boot_info_t' structure to be checked.
 * @ lid        : core local identifier, index the core descriptor table.
 **********************************************************************************/
static void boot_check_core( boot_info_t * boot_info,
                             lid_t         lid)
{
    gid_t         gid;     // global hardware identifier of this core
    boot_core_t * this;    // BOOT_INFO core descriptor of this core.

    // Get core hardware identifier
    gid = (gid_t)boot_get_procid();

    // get pointer on core descriptor
    this = &boot_info->core[lid];

    // the descriptor built from arch_info must match the actual hardware id
    if ( (this->gid != gid) || (this->cxy != boot_info->cxy) )
    {
        boot_printf("\n[BOOT ERROR] in boot_check_core() :\n"
                    " - boot_info cxy = %x\n"
                    " - boot_info lid = %d\n"
                    " - boot_info gid = %x\n"
                    " - actual gid = %x\n",
                    this->cxy , this->lid , this->gid , gid );
        boot_exit();
    }
} // boot_check_core()

/*********************************************************************************
 * This function is called by CP0 in cluster(0,0) to activate all other CP0s.
 * It returns the number of CP0s actually activated.
********************************************************************************/ static uint32_t boot_wake_all_cp0s( void ) { archinfo_header_t* header; // Pointer on ARCHINFO header archinfo_cluster_t* cluster_base; // Pointer on ARCHINFO clusters base archinfo_cluster_t* cluster; // Iterator for loop on clusters archinfo_device_t* device_base; // Pointer on ARCHINFO devices base archinfo_device_t* device; // Iterator for loop on devices uint32_t cp0_nb = 0; // CP0s counter header = (archinfo_header_t*)ARCHINFO_BASE; cluster_base = archinfo_get_cluster_base(header); device_base = archinfo_get_device_base (header); // loop on all clusters for (cluster = cluster_base; cluster < &cluster_base[header->x_size * header->y_size]; cluster++) { // Skip boot cluster. if (cluster->cxy == BOOT_CORE_CXY) continue; // Skip clusters without core (thus without CP0). if (cluster->cores == 0) continue; // Skip clusters without device (thus without XICU). if (cluster->devices == 0) continue; // search XICU device associated to CP0, and send a WTI to activate it for (device = &device_base[cluster->device_offset]; device < &device_base[cluster->device_offset + cluster->devices]; device++) { if (device->type == DEV_TYPE_ICU_XCU) { #if DEBUG_BOOT_WAKUP boot_printf("\n[BOOT] core[%x,0] activated at cycle %d\n", cluster->cxy , boot_get_proctime ); #endif boot_remote_sw((xptr_t)device->base, (uint32_t)boot_entry); cp0_nb++; } } } return cp0_nb; } // boot_wake_cp0() /********************************************************************************* * This function is called by all CP0s to activate the other CPi cores. * @ boot_info : pointer to local 'boot_info_t' structure. 
 *********************************************************************************/
static void boot_wake_local_cores(boot_info_t * boot_info)
{
    unsigned int core_id;

    // get pointer on XCU device descriptor in boot_info
    // NOTE(review): assumes the local XCU is always int_dev[0] — verify
    // against the registration order used by boot_info_init().
    boot_device_t * xcu = &boot_info->int_dev[0];

    // loop on cores (core 0 is this CP0 itself, so start at 1)
    for (core_id = 1; core_id < boot_info->cores_nr; core_id++)
    {

#if DEBUG_BOOT_WAKUP
boot_printf("\n[BOOT] core[%x,%d] activated at cycle %d\n",
boot_info->cxy , core_id , boot_get_proctime() );
#endif
        // send an IPI : one 4-byte mailbox per core (core_id << 2 offset)
        boot_remote_sw( (xptr_t)(xcu->base + (core_id << 2)) , (uint32_t)boot_entry );
    }
} // boot_wake_local_cores()

/*********************************************************************************
 * This function is called by all core[cxy][0] to initialize the Boot Page Table:
 * map two local big pages for the boot code and kernel code.
 * @ cxy : local cluster identifier.
 *********************************************************************************/
void boot_page_table_init( cxy_t cxy )
{
    // set PTE1 in slot[0] for kernel code
    uint32_t kernel_attr = 0x8A800000;                // flags : V,C,X,G
    uint32_t kernel_ppn1 = (cxy << 20) >> 9;          // big physical page index == 0
    boot_pt[0] = kernel_attr | kernel_ppn1;

    // set PTE1 in slot[1] for boot code (no global flag)
    uint32_t boot_attr   = 0x8A000000;                // flags : V,C,X
    uint32_t boot_ppn1   = ((cxy << 20) + 512) >> 9;  // big physical page index == 1
    boot_pt[1] = boot_attr | boot_ppn1;
}

/*********************************************************************************
 * This function is called by all cores to activate the instruction MMU,
 * and use the local copy of boot code.
*********************************************************************************/ void boot_activate_ins_mmu( cxy_t cxy ) { // set mmu_ptpr register uint32_t ptpr = ((uint32_t)boot_pt >> 13) | (cxy << 19); asm volatile ( "mtc2 %0, $0 \n" : : "r" (ptpr) ); // set ITLB bit in mmu_mode asm volatile ( "mfc2 $26, $1 \n" "ori $26, $26, 0x8 \n" "mtc2 $26, $1 \n" ); } /********************************************************************************* * This main function of the boot-loader is called by the boot_entry() * function, and executed by all cores. * The arguments values are computed by the boot_entry code. * @ lid : core local identifier, * @ cxy : cluster identifier, *********************************************************************************/ void boot_loader( lid_t lid, cxy_t cxy ) { boot_info_t * boot_info; // pointer on local boot_info_t structure if (lid == 0) { /************************************i********************** * PHASE Sequencial : only core[0][0] executes it **********************************************************/ if (cxy == 0) { boot_printf("\n[BOOT] core[%x,%d] enters at cycle %d\n", cxy , lid , boot_get_proctime() ); // Initialize IOC driver if (USE_IOC_BDV) boot_bdv_init(); else if (USE_IOC_HBA) boot_hba_init(); // else if (USE_IOC_SDC) boot_sdc_init(); // else if (USE_IOC_SPI) boot_spi_init(); else if (!USE_IOC_RDK) { boot_printf("\n[BOOT ERROR] in %s : no IOC driver\n"); boot_exit(); } // Initialize FAT32. boot_fat32_init(); // Load the 'kernel.elf' file into memory from IOC, and set // the global variables defining the kernel layout boot_kernel_load(); boot_printf("\n[BOOT] core[%x,%d] loaded kernel at cycle %d\n", cxy , lid , boot_get_proctime() ); // Load the arch_info.bin file into memory. boot_archinfo_load(); boot_printf("\n[BOOT] core[%x,%d] loaded arch_info at cycle %d\n", cxy , lid , boot_get_proctime() ); // Get local boot_info_t structure base address. // It is the first structure in the .kdata segment. 
            boot_info = (boot_info_t *)seg_kdata_base;

            // Initialize local boot_info_t structure.
            boot_info_init( boot_info , cxy );

            boot_printf("\n[BOOT] core[%x,%d] initialised boot_info at cycle %d\n",
            cxy , lid , boot_get_proctime() );

            // check boot_info signature
            if (boot_info->signature != BOOT_INFO_SIGNATURE)
            {
                boot_printf("\n[BOOT ERROR] in %s reported by core[%x,%d]\n"
                " illegal boot_info signature / should be %x\n",
                __FUNCTION__ , cxy , lid , BOOT_INFO_SIGNATURE );
                boot_exit();
            }

            // Check core information.
            boot_check_core(boot_info, lid);

            // TO BE DONE
            // core[0][0] identity maps two big pages for the boot and kernel code,
            // boot_page_table_init( 0 );

            // TO BE DONE
            // core[0][0] activates the instruction MMU to use the local copy of boot code
            // boot_activate_ins_mmu( 0 );

            // Activate other core[cxy][0] / get number of activated cores
            active_cores_nr = boot_wake_all_cp0s() + 1;

            // Wait until all clusters (i.e all CP0s) ready to enter kernel.
            boot_remote_barrier( XPTR( BOOT_CORE_CXY , &global_barrier ) ,
                                 active_cores_nr );

            // activate other local cores
            boot_wake_local_cores( boot_info );

            // Wait until all local cores in cluster ready
            boot_remote_barrier( XPTR( cxy , &local_barrier ) ,
                                 boot_info->cores_nr );
        }

        /**************************************************************************
         * PHASE partially parallel : all core[cxy][0] with (cxy != 0) execute it
         **************************************************************************/
        else
        {
            // at this point, the DATA extension registers point
            // already on the local cluster cxy to use the local stack,
            // but all cores must access the code stored in cluster 0

            // Each CP0 copies the boot code (data and instructions)
            // from the cluster 0 to the local cluster.
            boot_remote_memcpy( XPTR( cxy , BOOT_BASE ),
                                XPTR( BOOT_CORE_CXY , BOOT_BASE ),
                                BOOT_MAX_SIZE );

            // from now, it is safe to refer to the boot global variables
            boot_printf("\n[BOOT] core[%x,%d] replicated boot code at cycle %d\n",
            cxy , lid , boot_get_proctime() );

            // TO BE DONE
            // Each core identity maps two big pages for the boot and kernel code,
            // boot_page_table_init( cxy );

            // Each core activates the instruction MMU to use the local copy of boot code
            // boot_activate_ins_mmu( cxy );

            // Each CP0 copies the arch_info.bin into the local memory.
            boot_remote_memcpy(XPTR(cxy, ARCHINFO_BASE),
                               XPTR(BOOT_CORE_CXY, ARCHINFO_BASE),
                               ARCHINFO_MAX_SIZE );

            boot_printf("\n[BOOT] core[%x,%d] replicated arch_info at cycle %d\n",
            cxy , lid , boot_get_proctime() );

            // Each CP0 copies the kcode segment into local memory
            boot_remote_memcpy( XPTR( cxy , seg_kcode_base ),
                                XPTR( BOOT_CORE_CXY , seg_kcode_base ),
                                seg_kcode_size );

            // Each CP0 copies the kdata segment into local memory
            boot_remote_memcpy( XPTR( cxy , seg_kdata_base ),
                                XPTR( BOOT_CORE_CXY , seg_kdata_base ),
                                seg_kdata_size );

            // TO BE REMOVED (when the kentry segment disappears)
            // Each CP0 copies the kentry segment into local memory
            boot_remote_memcpy( XPTR( cxy , seg_kentry_base ),
                                XPTR( BOOT_CORE_CXY , seg_kentry_base ),
                                seg_kentry_size );

            boot_printf("\n[BOOT] core[%x,%d] replicated kernel code at cycle %d\n",
            cxy , lid , boot_get_proctime() );

            // Each CP0 get local boot_info_t structure base address.
            boot_info = (boot_info_t*)seg_kdata_base;

            // Each CP0 initializes local boot_info_t structure.
            boot_info_init( boot_info , cxy );

            boot_printf("\n[BOOT] core[%x,%d] initialised boot_info at cycle %d\n",
            cxy , lid , boot_get_proctime() );

            // Each CP0 checks core information.
boot_check_core( boot_info , lid ); // Each CP0 get number of active clusters from BOOT_CORE cluster uint32_t count = boot_remote_lw( XPTR( 0 , &active_cores_nr ) ); // Wait until all clusters (i.e all CP0s) ready boot_remote_barrier( XPTR( BOOT_CORE_CXY , &global_barrier ) , count ); // activate other local cores boot_wake_local_cores( boot_info ); // Wait until all local cores in cluster ready boot_remote_barrier( XPTR( cxy , &local_barrier ) , boot_info->cores_nr ); } } else { /*********************************************************************** * PHASE fully parallel : all cores[cxy][lid] with (lid! = 0) execute it **********************************************************************/ // TO BE DONE // each core activate the instruction MMU to use the local copy of the boot code // boot_activate_ins_mmu( cxy ); // Get local boot_info_t structure base address. boot_info = (boot_info_t *)seg_kdata_base; // Check core information boot_check_core(boot_info, lid); // Wait until all local cores in cluster ready boot_remote_barrier( XPTR( cxy , &local_barrier ) , boot_info->cores_nr ); } // the "kernel_entry" global variable, set by boot_kernel_load() define // the adress of the kernel_init() function. // Each core initialise the following registers before jumping to kernel: // - gr_29 : stack pointer / kernel stack allocated in idle thread descriptor, // - c0_sr : status register / reset BEV bit // - gr_04 : kernel_init() argument / pointer on boot_info structure // - c0_ebase : kentry_base // compute "sp" from base address of idle thread descriptors array and lid. // The array of idle-thread descriptors is allocated in the kdata segment, // just after the boot_info structure. 
    uint32_t base;
    uint32_t offset = sizeof( boot_info_t );
    uint32_t pmask  = CONFIG_PPM_PAGE_MASK;
    uint32_t psize  = CONFIG_PPM_PAGE_SIZE;

    // align the idle-thread descriptors array on the next page boundary
    // when boot_info does not end exactly on one
    if( offset & pmask ) base = seg_kdata_base + (offset & ~pmask) + psize;
    else                 base = seg_kdata_base + offset;

    // each core gets its own descriptor; its stack grows down from the top
    // (the 16-byte offset presumably reserves the MIPS o32 argument save
    // area — TODO confirm against the kernel idle-thread layout)
    uint32_t sp = base + ((lid + 1) * CONFIG_THREAD_DESC_SIZE) - 16;

    // get "ebase" from boot_info
    uint32_t ebase = boot_info->kentry_base;

    // TO BE DONE
    // The cp0_ebase will not be set by the assembly code below
    // when the kentry segment will be removed => done in kernel init

    // - read c0_sr ($12), clear its BEV bit with the 0xFFBFFFFF mask, write it back
    // - $4  (a0) <- boot_info pointer (kernel_init() argument)
    // - $29 (sp) <- kernel stack pointer computed above
    // - c0_ebase ($15, select 1) <- kentry base
    // - jump to kernel_entry (no return)
    asm volatile( "mfc0 $27, $12 \n"
                  "lui $26, 0xFFBF \n"
                  "ori $26, $26, 0xFFFF \n"
                  "and $27, $27, $26 \n"
                  "mtc0 $27, $12 \n"
                  "move $4, %0 \n"
                  "move $29, %1 \n"
                  "mtc0 %2, $15, 1 \n"
                  "jr %3 \n"
                  :
                  : "r"(boot_info) , "r"(sp) , "r"(ebase) , "r"(kernel_entry)
                  : "$26" , "$27" , "$29" , "$4" );

} // boot_loader()