/* * hal_devices_init.c - Devices intialization operations for the TSAR architecture * * Authors Ghassan Almaless (2008,2009,2010,2011,2012) * Mohamed Lamine Karaoui (2015) * Alain Greiner (2016) * Copyright (c) UPMC Sorbonne Universites * * This file is part of ALMOS-MKH. * * ALMOS-MKH.is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2.0 of the License. * * ALMOS-MKH.is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with ALMOS-MKH.; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define _ARCH_BIB_SIGNATURE_ #include #define die(args...) do {boot_dmsg(args); while(1);} while(0) #define ICU_MASK 0xFFFFFFFF // interrupt enabled for first 32 devices #define KATTR (PMM_HUGE | PMM_READ | PMM_WRITE | PMM_EXECUTE | PMM_CACHED | PMM_GLOBAL | PMM_DIRTY | PMM_ACCESSED) #define KDEV_ATTR (PMM_READ | PMM_WRITE | PMM_GLOBAL | PMM_DIRTY | PMM_ACCESSED) /********************************************************************************************* * This function set the various masks for the XCU device in the local cluster. * It should be executed by core[0] after the local cluster manager initialisation, * and after devices initialisation. The général policy is the following: * - PTI : it routes one PTI per core (with index = lid) for TICK. * - WTI : it routes one WTI per core (with index = lid) for wakup. * - HWI : it routes all local HWI to core[0]. 
 * NOTE(review): this file appears damaged by an export step - the #include
 * directives above lost their header names, and "<" characters swallowed the
 * text that followed them. In the function below, the body is truncated at
 * "hwi_mask = hwi_mask |= (1<" : everything between "(1<" and "op.drvid" is
 * missing (the rest of the HWI/PTI/WTI mask programming announced above, plus
 * the header of a hal_dev_locate()-style lookup helper whose visible tail is
 * "...op.drvid == drvid) return dev; } return NULL; }"). Also note the
 * suspicious "hwi_mask = hwi_mask |=" double assignment at the truncation
 * point. Recover this function from the original repository before building.
********************************************************************************************/ void hal_xcu_set_mask() { uint32_t lid; cluster_t * cluster; // pointer on local cluster manager uint32_t cores; // number of cores in cluster device_t * xcu; // pointer on local XCU device descriptor uint32_t hwi_mask; // HWI mask for XCU uint32_t hwi_index; // HWI index in XCU list_entry_t * iter; // iterator to scan the device list device_t * dev; // pointer on local device descriptor dev_type_t type; // device type cluster = LOCAL_CLUSTER; cores = cluster->cores_nr; xcu = cluster->xcu; // loop on all local devices to compute the hwi_mask hwi_mask = 0; hwi_index = 0; LIST_FOREACH( cluster->devlist , iter ) { dev = LIST_ELEMENT( iter , device_t , list ); type = dev->type; // only replicated devices MMC and MWR generate HWI if( (type == DEV_TYPE_MMC) || (type == DEV_TYPE_MWR) ) { hwi_mask = hwi_mask |= (1<op.drvid == drvid) return dev; } return NULL; } /********************************************************************************************* * This function register all detected devices in given cluster.
********************************************************************************************/ void hal_dev_register( cluster_t * cluster, cluster_entry_s * entry ) { device_t * xicu; list_entry_t * iter; device_t * dev; cpu_t * cpu_ptr; uint32_t cpu; error_t err; xicu = hal_dev_locate(&entry->devlist, SOCLIB_XICU_ID); dev = hal_dev_locate(&entry->devlist, SOCLIB_DMA_ID); hal_entrys[entry->cxy].xicu = xicu; hal_entrys[entry->cxy].dma = dev; if((cluster->id == entry->cxy) && (xicu == NULL)) die("[ERROR]\tNo XICU is found for cluster %d\n", entry->cxy); //only local cluster get to set the irq if(cluster->id != entry->cxy) return; list_foreach((&entry->devlist), iter) { dev = list_element(iter, struct device_s, list); if((dev != xicu) && (dev->irq != -1)) { err = xicu->op.icu.bind(xicu, dev); if(err) die("[ERROR]\tFailed to bind device %s, irq %d, on xicu %s @%x [ err %d ]\n", dev->name, dev->irq, xicu->name, xicu, err); } } if(cluster->id != entry->cxy) return; hal_xicu_set_mask(cluster, xicu); for(cpu = 0; cpu < cluster->cpu_nr; cpu++) { cpu_ptr = &cluster->cpu_tbl[cpu]; hal_cpu_set_irq_entry(cpu_ptr, 0, &xicu->action); } } // end hal_dev_register() ////////////////////////////////////////////////// void hal_dev_iopic_register( cluster_t * cluster, struct cluster_entry_s *entry); /** HAL-ARCH CPUs & Clusters counters */ static global_t hal_onln_cpus_nr; static global_t hal_cpus_per_cluster; static global_t hal_onln_clusters_nr; static global_t hal_boot_cxy_; /**/ struct hal_entry_s hal_entrys[CLUSTER_NR]; global_t __current_cxy; /** Return Platform CPUs Online Number */ inline uint32_t hal_onln_cpu_nr(void) { return hal_onln_cpus_nr.value; } /** Return Platform Clsuters Online Number */ inline uint32_t hal_onln_cluster_nr(void) { return hal_onln_clusters_nr.value; } inline uint32_t hal_cpu_per_cluster(void) { return hal_cpus_per_cluster.value; } inline cxy_t hal_boot_cxy(void) { return hal_boot_cxy_.value; } extern uint32_t cpu_gid_tbl[CPU_PER_CLUSTER] CACHELINE; 
/*********************************************************************************************
 * cpu_gid_tbl_init() : fills cpu_gid_tbl[] with the hardware global id of each
 * online CPU of the local cluster, as returned by hal_cpu_gid().
 * Hangs in while(1) when the cluster has more CPUs than CPU_PER_CLUSTER; the
 * die() call is commented out - presumably because no TTY is available this
 * early in the boot (NOTE(review): confirm).
 *
 * fill_hal_cxy_to_almos_array() : builds the hardware-cxy to almos-cxy
 * translation table Arch_cxy_To_Almos_cxy[] from the cluster_info_t array
 * located immediately after the BIB header; entries whose hal_cxy is out of
 * the CLUSTER_NR range are skipped.
 ********************************************************************************************/
inline void cpu_gid_tbl_init(struct boot_info_s *info) { uint32_t i; uint32_t cpu_nr; cpu_nr = info->local_onln_cpu_nr; if(CPU_PER_CLUSTER < cpu_nr) while(1);//die("ERROR: This Kernel Is Compiled For only %d cpus per cluster\n", CPU_PER_CLUSTER); for(i=0; i < cpu_nr; i++) cpu_gid_tbl[i] = hal_cpu_gid(info->local_cluster_id, i); } cxy_t Arch_cxy_To_Almos_cxy[CLUSTER_NR]; //address of info->hal_cxy_to_cxy array static void fill_hal_cxy_to_almos_array(struct hal_bib_header_s *header) { int i; cluster_info_t *cluster_tbl; cluster_tbl = (cluster_info_t*)((uint32_t)header + sizeof(header_info_t)); assert(CLUSTER_NR >= header->onln_clstr_nr); for(i = 0; i < header->onln_clstr_nr; i++) { if(cluster_tbl[i].hal_cxy >= CLUSTER_NR) continue; Arch_cxy_To_Almos_cxy[cluster_tbl[i].hal_cxy] = cluster_tbl[i].cxy; } } /********************************************************************************************* * This function makes the kernel virtual mapping of all hardware memory banks. * Initialize the boot table entries. ********************************************************************************************/ void hal_state_init( boot_info_t * info ) { struct process_s *process; struct thread_s *this; header_info_t *header; cluster_info_t *clusters; cluster_info_t *cluster_ptr; dev_info_t *dev_tbl; uint32_t vaddr_start; uint32_t vaddr_limit; uint32_t size; uint32_t cxy; uint32_t i; header = (header_info_t*) info->hal_info; clusters = (cluster_info_t*) ((uint32_t)header + sizeof(header_info_t)); cluster_ptr = &clusters[info->local_cluster_id]; dev_tbl = (dev_info_t*)(cluster_ptr->offset + (uint32_t)header); cxy = cluster_ptr->cxy; /* Global variables init */ hal_onln_clusters_nr.value = header->onln_clstr_nr; hal_onln_cpus_nr.value = header->onln_cpu_nr; hal_cpus_per_cluster.value = header->cpu_nr; hal_boot_cxy_.value = info->boot_cluster_id; __current_cxy.value = cxy; fill_hal_cxy_to_almos_array(header); //used by remote_sb->hal_cxy_...
/* Second half of hal_state_init(): record each online cluster's hal_cxy in
 * hal_entrys[], init the per-CPU gid table and the boot TTY, then for each
 * online cluster locate its RAM segment in the boot device table, reject
 * CPU-only clusters, set the kernel virtual limit for the local cluster, and
 * call cluster_entry_init().
 * NOTE(review): dev_tbl[0] is assumed to describe the RAM bank of a cluster;
 * the SOCLIB_RAM_ID test below suggests this is not always guaranteed -
 * confirm against the BIB layout. */
for(i = 0; i < info->onln_clstr_nr; i++) { cluster_ptr = &clusters[i]; cxy = cluster_ptr->cxy; hal_entrys[cxy].hal_cxy = cluster_ptr->hal_cxy; } cpu_gid_tbl_init(info); kboot_tty_init(info); size = dev_tbl[0].size; if(size == 0) die("[ERROR]\t%s: Unexpected memory size for cluster %u\n", \ __FUNCTION__, cxy); this = CURRENT_THREAD; process = this->process; /* TODO: deal with offline clusters as well */ for(i = 0; i < info->onln_clstr_nr; i++) { cluster_ptr = &clusters[i]; cxy = cluster_ptr->cxy; if(cxy >= CLUSTER_NR) { die("\n[ERROR]\t%s: This kernel version support up to %d clusters, found %d\n", \ __FUNCTION__, CLUSTER_NR, cxy); } dev_tbl = (dev_info_t*)(cluster_ptr->offset + (uint32_t)header); if(dev_tbl[0].id == SOCLIB_RAM_ID) { vaddr_start = dev_tbl[0].base; size = dev_tbl[0].size; }else { vaddr_start = 0; size = 0; } if((cluster_ptr->cpu_nr != 0) && (size == 0)) die("\n[ERROR]\t%s: This Kernel Version Do Not Support CPU-Only Clusters, cxy %d\n", \ __FUNCTION__, cxy); vaddr_limit = vaddr_start + size; if(cxy == info->local_cluster_id) { process->vmm.limit_addr = vaddr_limit; } boot_dmsg("[INFO]\tHardware initialization of cluster %u\t\t\t\t[ %d ]\n", \ cxy, cpu_time_stamp()); cluster_entry_init(cxy, vaddr_start, size); } } /********************************************************************************************* * This function makes the architecture specific initialisation. * It makes the local cluster initialisation, dynamically detects the cluster devices, * and associates the appropriate drivers.
 * NOTE(review): hal_init() below reads `header` before any assignment: the
 * statement "header = (header_info_t *)info->hal_info;" sits inside the
 * commented-out signature-check block, so `header` is uninitialised when the
 * clusters array is computed. It also tests "if(err)" and sets/tests "iopic",
 * but the declared locals are named `error` and `iopic_found` - these
 * identifiers do not match and cannot compile as written. To be reconciled
 * with the original sources.
********************************************************************************************/ void hal_init( boot_info_t * info ) { cluster_t * cluster; header_info_t * header; cluster_info_t * clusters; cluster_info_t * cluster_ptr; dev_info_t * dev_tbl; cxy_t iopic_cxy; // should be the IO_cluster bool_t iopic_found; error_t error; uint32_t rcxy; cxy_t cxy; uint32_t i; /* Local variables init header = (header_info_t *)info->hal_info; if( strncmp( header->signature , hal_bib_signature , 16 ) ) { while(1); // TODO no TTY yet ??? [AG] } if( strncmp( header->arch , "TSAR" , 16)) { while(1); // TODO no TTY yet ??? [AG] } */ iopic_found = false; iopic_cxy = 0; clusters = (cluster_info_t*)((uint32_t)header + sizeof(header_info_t)); cluster_ptr = &clusters[info->local_cluster_id]; dev_tbl = (dev_info_t*)(cluster_ptr->offset + (uint32_t)header); cxy = cluster_ptr->cxy; hal_state_init(info); /* init cluster: also init current_{cxy, cluster} macros */ error = cluster_init(info, dev_tbl[0].base, dev_tbl[0].base + dev_tbl[0].size, clusters_tbl[cxy].vmem_start); if(err) die("ERROR: Failed To Initialize Cluster %d, Err %d\n", cxy, err); cluster = current_cluster; //TODO: move it to cluster.c //!!! THis value include the io_cluster !
// NOTE(review): second half of hal_init(): per-cluster device init; the entry
// that hosts the IOPIC is remembered and registered last.
cluster->clstr_nr = info->onln_clstr_nr; // TODO: headr->x_max * header->y_max; for(i = 0; i < info->onln_clstr_nr; i++) { cluster_ptr = &clusters[i]; dev_tbl = (dev_info_t*)(cluster_ptr->offset + (uint32_t)header); rcxy = cluster_ptr->cxy; boot_dmsg("\n[INFO]\tDevices initialization for cluster %d\t\t\t\t[ %d ]\n", \ rcxy, cpu_time_stamp()); if(hal_dev_init(cluster, &clusters_tbl[rcxy], dev_tbl, cluster_ptr)) { iopic = true; iopic_cxy = rcxy; } } if(iopic) //&& (cluster->io_clstr == cluster->id)) // Now it's dynamique hal_dev_iopic_register(cluster, &clusters_tbl[iopic_cxy]); } // end hal_init() /********************************************************************************************* * This function initialises all devices descriptors associated to peripherals * contained in the local cluster, as specified by the boot_info structure. ********************************************************************************************* * @ info : pointer on the local boot-info structure. * @ returns 0 if success / returns ENOMEM or EINVAL if error ********************************************************************************************/ error_t hal_devices_init( boot_info_t * info ) { drvdb_entry_t * entry; list_entry_t * devlist; boot_device_t * dev_tbl; // pointer on array of devices in boot_info uint32_t dev_nr; // actual number of devices in this cluster cluster_t * cluster; // pointer on local cluster manager cxy_t local_cxy; // local cluster identifier cxy-t target_cxy; // target cluster identifier device_t * device; // pointer on current device descriptor xptr_t base; // remote pointer on segment base uint32_t size; // segment size (bytes) dev_type_t type; // device type uint32_t i; bool_t pic_found; driver_t * driver; error_t error; // get local cluster pointer and identifier cluster = LOCAL_CLUSTER; cxy = cluster->cxy; // get number of peripherals dev_nr = info->devices_nr; dev_tbl = info->dev; // loop on all peripherals in cluster for( i = 0 ; i < dev_nr ;
/* NOTE(review): hal_devices_init() is an unfinished merge of two device-init
   schemes (boot_info-based vs drvdb-based). Visible problems: "cxy-t" is a
   typo for cxy_t; `entry`, `devlist`, `rcxy`, `dev`, `dev_base`, `centry`,
   `cxy`, `local` and `iopic_clstr` are used but never declared or assigned;
   `pic_found` is tested nowhere and never initialised; the else branch uses
   `device` fields but calls driver->init( dev ); and the closing brace of
   the peripheral loop is missing before hal_dev_register() is called.
   The loop header ends on the previous physical line; the increment and body
   follow here. This function must be restored from the original sources. */
i++ ) { type = dev_tbl[i].type; // skip RAM and ROM if( (type == DEV_TYPE_RAM) || (type == DEV_TYPE_ROM) ) continue; // test PIC if( dev_tbl[i].type == DEV_TYPE_PIC ) pic_found = true; // replicated if( (type == DEV_TYPE_XCU ) || (type == DEV_TYPE_MMC) || (type == DEV_TYPE_MWR) ) { device = device_alloc( local); if( device == NULL ) { printk(PANIC,"%s failed to allocate device descriptor in cluster %x\n", __FUNCTION__ , cxy ); return ENOMEM; } } else { driver = drvdb_entry_get_driver(entry); // initialise device descriptor from boot_info spinlock_init( &device->lock ); device->type = dev_tbl[i].type; device->base = dev_tbl[i].base; device->size = dev_tbl[i].size; device->isr = dev_tbl[i].irq; error = driver->init( dev ); if( error ) { die("[ERROR]\tFailed To Initialize Device %s [Cluster %d, Dev %d, Err %d]\n", drvdb_entry_get_name(entry), rcxy, i, err); } devfs_register( dev ); list_add_last( devlist , &dev->list); boot_dmsg("[INFO]\tFound Device: %s\t\t\t\t\t\t[ %d ]\n[INFO]\t\tBase <0x%x> cxy %d Size <0x%x> Irq <%d>\t\t[ %d ]\n", \ drvdb_entry_get_name(entry), cpu_time_stamp(), (uint32_t)dev_base, \ rcxy, dev_tbl[i].size, dev_tbl[i].irq, cpu_time_stamp()); } // end loop on peripherals // register device descriptor in global list ??? hal_dev_register( cluster , centry ); // TODO ???
// NOTE(review): "return iopic_clstr;" does not match the documented
// "0 / ENOMEM / EINVAL" contract of hal_devices_init(), and iopic_clstr is
// undeclared. hal_xicu_set_wti() below programs one WTI mailbox of the XICU
// (mask, bind) for a device or for an IPI, and records the mailbox address
// in the device descriptor when dev is non-NULL.
return iopic_clstr; } // end hal_devices_init() /********************************************************************************************* ********************************************************************************************/ void hal_xicu_set_wti(struct device_s *xicu, uint32_t cpu_inter, uint32_t wti_index, struct irq_action_s *action, struct device_s *dev) { uint64_t mailbox; uint32_t irq_out; error_t err; mailbox = xicu->base_paddr + (wti_index << 2); irq_out = cpu_inter*OUTPUT_IRQ_PER_PROC; xicu->op.icu.set_mask(xicu, (1 << wti_index), XICU_MSK_WTI_ENABLE, irq_out); boot_dmsg("[INFO]\t\t WTI %d <-> CPU %d (mailbox 0x%x%x)\t\t\t[ %d ]\n", \ wti_index, cpu_inter, MSB(mailbox), LSB(mailbox), \ cpu_time_stamp()); err = xicu->op.icu.bind_wti(xicu, action, wti_index); if(err != 0) die("[ERROR]\tFailed to bind %s via wti_mailbox 0x%x:%x , on xicu %s @%x [ err %d ]\n", \ dev ? dev->name : "IPI", MSB(mailbox),LSB(mailbox), xicu->name, xicu, err); if(dev) dev->mailbox = mailbox; //The binding is (to be) done dynamically in the peripheral //iopic->op.iopic.bind_wti_and_irq(iopic, mailbox, dev->irq); } //TODO: distibute iopic devices on multiple cluster ? //For now all the devices of the IOPIC_Cluster (cluster //without cpus) and there IRQs are handled by the IO_Cluster.
void hal_dev_iopic_register(struct cluster_s *cluster, struct cluster_entry_s *entry) { struct list_entry *iter; struct device_s *iopic; struct device_s *xicu; struct device_s *dev; uint32_t device_index; uint32_t wti_index; uint32_t cpu_inter; uint32_t cpu_nr; //error_t err; cpu_inter = 0; device_index = 0; cpu_nr = cluster->cpu_nr; wti_index = cpu_nr;//Leave one WTI per proc for IPI xicu = hal_entrys[cluster->id].xicu; iopic = hal_dev_locate(&entry->devlist, SOCLIB_IOPIC_ID); if(xicu == NULL) die("[ERROR]\tNo XICU Is Found for IOPIC cluster %d\n", \ entry->cxy); boot_dmsg("\n[INFO]\tIOPIC cluster configuration: cxy %d <0x%x,0x%x>\t[ %d ]\n", \ entry->cxy, iopic->base, (iopic->base+iopic->size), cpu_time_stamp()); list_foreach(&entry->devlist, iter) { dev = list_element(iter, struct device_s, list); if(wti_index > (IOPIC_WTI_PER_CLSTR - cpu_nr)) die("[ERROR]\tNo enough WTI in cluster %d\n", cluster->id); if((dev != iopic) && (dev->irq != -1)) { boot_dmsg("[INFO]\tLinking device %s to XICU on cluster %d through IOPIC cluster\t[ %d ]\n", \ dev->name, xicu->cxy, cpu_time_stamp()); hal_xicu_set_wti(xicu, device_index%cpu_nr, wti_index, &dev->action, dev); dev->iopic = iopic; wti_index++; device_index++; } devfs_register(dev); } } // end hal_xcu_set_wti()