/*
 * hal_gpt.c - implementation of the Generic Page Table API for x86_64
 *
 * Copyright (c) 2017 Maxime Villard
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include
#include /* XXX */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

extern vaddr_t __kernel_end;

size_t kimg_size __in_kdata = 0;

paddr_t pa_avail __in_kdata = 0;
vaddr_t va_avail __in_kdata = 0;

/* Scratch VA just above the kernel image, used to temporarily map pages. */
vaddr_t tmpva __in_kdata = (KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);

/*
 * Bump allocator for physical pages during bootstrap. Returns the current
 * watermark and advances it; nothing is ever freed.
 */
paddr_t hal_gpt_bootstrap_palloc(size_t npages)
{
	paddr_t pa = pa_avail;
	pa_avail += npages * PAGE_SIZE;
	return pa;
}

/*
 * Bump allocator for virtual pages during bootstrap, symmetric to
 * hal_gpt_bootstrap_palloc().
 */
vaddr_t hal_gpt_bootstrap_valloc(size_t npages)
{
	vaddr_t va = va_avail;
	va_avail += npages * PAGE_SIZE;
	return va;
}

/*
 * Reset the bootstrap VA we've used in cluster0 so far. After this
 * function, cluster0's heap is empty.
 */
void hal_gpt_bootstrap_reset()
{
	/*
	 * Re-enter cluster0's space, because we altered it when mapping the
	 * ACPI tables.
	 */
	hal_gpt_enter_range(CLUSTER_MIN_VA(0), 0, CLUSTER_PA_SIZE / PAGE_SIZE);
	va_avail = CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE;
}

/*
 * Align the PA and VA bootstrap offsets on each other, and return the common
 * offset. After this function we are guaranteed to have [VA = PA + constant
 * offset], so callers only need hal_gpt_bootstrap_valloc(), without pairing
 * each VA with a separately allocated PA.
 */
size_t hal_gpt_bootstrap_uniformize()
{
	size_t pa_offset = pa_avail - 0; /* cluster0's physical base is 0 */
	size_t va_offset = va_avail - CLUSTER_MIN_VA(0);

	if (pa_offset < va_offset)
		pa_avail += (va_offset - pa_offset);
	else if (pa_offset > va_offset)
		va_avail += (pa_offset - va_offset);

	return MAX(pa_offset, va_offset);
}

void hal_gpt_enter(vaddr_t va, paddr_t pa, pt_entry_t flags)
{
	XASSERT(va % PAGE_SIZE == 0);
	XASSERT(pa % PAGE_SIZE == 0);
	//XASSERT(va == tmpva || PTE_BASE[pl1_i(va)] == 0);
	PTE_BASE[pl1_i(va)] = (pa & PG_FRAME) | flags;
	invlpg(va);
}

void hal_gpt_enter_range(vaddr_t va, paddr_t pa, size_t n)
{
	pt_entry_t flags = PG_V | PG_KW | PG_NX;
	size_t i;
	for (i = 0; i < n; i++) {
		hal_gpt_enter(va + i * PAGE_SIZE, pa + i * PAGE_SIZE, flags);
	}
}

void hal_gpt_leave(vaddr_t va)
{
	XASSERT(va % PAGE_SIZE == 0);
	XASSERT(PTE_BASE[pl1_i(va)] != 0);
	PTE_BASE[pl1_i(va)] = 0;
	invlpg(va);
}

void hal_gpt_leave_range(vaddr_t va, size_t n)
{
	size_t i;
	for (i = 0; i < n; i++) {
		hal_gpt_leave(va + i * PAGE_SIZE);
	}
}
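/*
 * Usage sketch (illustrative only, not code taken from a caller of this
 * file): before hal_gpt_bootstrap_uniformize(), obtaining a fresh zeroed
 * page means pairing the two bump allocators and mapping the result with
 * hal_gpt_enter(), with the same flags used by hal_gpt_enter_range():
 *
 *	vaddr_t va = hal_gpt_bootstrap_valloc(1);
 *	paddr_t pa = hal_gpt_bootstrap_palloc(1);
 *	hal_gpt_enter(va, pa, PG_V | PG_KW | PG_NX);
 *	memset((void *)va, 0, PAGE_SIZE);
 *
 * After uniformize, the VA = PA + offset invariant makes the palloc call
 * unnecessary.
 */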
/*
 * Create a page tree that can map va_start->va_end. The caller can then
 * enter these addresses to physical locations.
 *
 * This function is a bit complicated, and may need to be revisited. As a
 * rough cost estimate, it allocates one L1 page per NBPD_L2 (2MB) of
 * mapped VA, plus a few pages for the L2 and L3 levels.
 */
void hal_gpt_maptree_area(vaddr_t va_start, vaddr_t va_end)
{
	pt_entry_t flags = PG_V | PG_KW | PG_NX;
	size_t L4start, L4end, nL4e;
	size_t L3start, L3end, nL3e;
	size_t L2start, L2end, nL2e;
	paddr_t L3page, L2page, L1page;
	paddr_t pa;
	size_t i, npa;
	pt_entry_t *pde;

	/* Allocate L3 */
	L4start = pl4_i(va_start);
	L4end = pl4_i(va_end);
	nL4e = (L4end - L4start + 1);
	L3page = hal_gpt_bootstrap_palloc(nL4e);

	/* Allocate L2 */
	L3start = pl3_i(va_start);
	L3end = pl3_i(va_end);
	nL3e = (L3end - L3start + 1);
	L2page = hal_gpt_bootstrap_palloc(nL3e);

	/* Allocate L1 */
	L2start = pl2_i(va_start);
	L2end = pl2_i(va_end);
	nL2e = (L2end - L2start + 1);
	L1page = hal_gpt_bootstrap_palloc(nL2e);

	/* Zero out L1 (not mapped yet, so go through tmpva) */
	for (i = 0; i < nL2e; i++) {
		pa = L1page + i * PAGE_SIZE;
		hal_gpt_enter(tmpva, pa, flags);
		memset((void *)tmpva, 0, PAGE_SIZE);
	}

	/* Zero out L2 */
	for (i = 0; i < nL3e; i++) {
		pa = L2page + i * PAGE_SIZE;
		hal_gpt_enter(tmpva, pa, flags);
		memset((void *)tmpva, 0, PAGE_SIZE);
	}

	/* Zero out L3 */
	for (i = 0; i < nL4e; i++) {
		pa = L3page + i * PAGE_SIZE;
		hal_gpt_enter(tmpva, pa, flags);
		memset((void *)tmpva, 0, PAGE_SIZE);
	}

	/* Create L2, linked to L1 */
	npa = (L2start / NPDPG) * PAGE_SIZE;
	for (i = L2start; i <= L2end; i++) {
		pa = (paddr_t)&(((pt_entry_t *)L2page)[i]);
		pa -= npa;	/* shift back to the first allocated page */
		pa &= PG_FRAME;	/* round down to a page boundary */
		hal_gpt_enter(tmpva, pa, flags);
		pde = (pt_entry_t *)tmpva;

		pa = L1page + (i - L2start) * PAGE_SIZE;
		pde[i % NPDPG] = (pa & PG_FRAME) | PG_V | PG_KW;
	}

	/* Create L3, linked to L2 */
	npa = (L3start / NPDPG) * PAGE_SIZE;
	for (i = L3start; i <= L3end; i++) {
		pa = (paddr_t)&(((pt_entry_t *)L3page)[i]);
		pa -= npa;	/* shift back to the first allocated page */
		pa &= PG_FRAME;	/* round down to a page boundary */
		hal_gpt_enter(tmpva, pa, flags);
		pde = (pt_entry_t *)tmpva;

		pa = L2page + (i - L3start) * PAGE_SIZE;
		pde[i % NPDPG] = (pa & PG_FRAME) | PG_V | PG_KW;
	}

	/* Link L3 into L4 */
	for (i = 0; i < nL4e; i++) {
		pa = L3page + i * PAGE_SIZE;
		L4_BASE[L4start + i] = (pa & PG_FRAME) | PG_V | PG_KW;
	}
}

void hal_gpt_init(paddr_t firstpa)
{
	/* Initialize global values */
	pa_avail = firstpa;
	va_avail = CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE;
	kimg_size = ((uint64_t)&__kernel_end - KERNBASE);
	XASSERT(kimg_size % PAGE_SIZE == 0);

	/*
	 * Create cluster0's page tree, enter the space, and unmap the area
	 * below the kernel.
	 */
	hal_gpt_maptree_area(CLUSTER_MIN_VA(0),
	    CLUSTER_MIN_VA(0) + CLUSTER_PA_SIZE);
	hal_gpt_enter_range(CLUSTER_MIN_VA(0), 0, CLUSTER_PA_SIZE / PAGE_SIZE);
	hal_gpt_leave_range(CLUSTER_MIN_VA(0),
	    (KERNTEXTOFF - KERNBASE) / PAGE_SIZE);

	/*
	 * Do the same, but now in the local cluster map.
	 */
	hal_gpt_maptree_area(LOCAL_CLUSTER_MIN_VA,
	    LOCAL_CLUSTER_MIN_VA + CLUSTER_PA_SIZE);
	hal_gpt_enter_range(LOCAL_CLUSTER_MIN_VA, 0,
	    CLUSTER_PA_SIZE / PAGE_SIZE);
	hal_gpt_leave_range(LOCAL_CLUSTER_MIN_VA,
	    (KERNTEXTOFF - KERNBASE) / PAGE_SIZE);
}
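/*
 * Expected boot flow, pieced together from the functions above (illustrative;
 * the real call sites live elsewhere in the HAL, and the exact ordering of
 * the last two steps is an assumption): hal_gpt_init() builds the cluster0
 * and local trees, the bump allocators then serve early mappings such as the
 * ACPI tables, hal_gpt_bootstrap_uniformize() locks in the VA = PA + offset
 * invariant, and hal_gpt_bootstrap_reset() finally restores cluster0's
 * mapping and empties the bootstrap heap:
 *
 *	hal_gpt_init(firstpa);
 *	... hal_gpt_bootstrap_{palloc,valloc}() + hal_gpt_enter() ...
 *	hal_gpt_bootstrap_uniformize();
 *	... valloc-only allocations ...
 *	hal_gpt_bootstrap_reset();
 */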
/* -------------------------------------------------------------------------- */

/****************************************************************************************
 * These global variables define the masks for the Generic Page Table Entry attributes;
 * they must be defined by every GPT implementation.
 ***************************************************************************************/

uint32_t GPT_MAPPED;
uint32_t GPT_SMALL;
uint32_t GPT_READABLE;
uint32_t GPT_WRITABLE;
uint32_t GPT_EXECUTABLE;
uint32_t GPT_CACHABLE;
uint32_t GPT_USER;
uint32_t GPT_DIRTY;
uint32_t GPT_ACCESSED;
uint32_t GPT_GLOBAL;
uint32_t GPT_COW;
uint32_t GPT_SWAP;
uint32_t GPT_LOCKED;

/*
 * The generic GPT entry points below are not implemented yet on x86_64;
 * each one panics if called.
 */

error_t hal_gpt_create( gpt_t * gpt )
{
	x86_panic((char *)__func__);
	return 0;
}

void hal_gpt_destroy( gpt_t * gpt )
{
	x86_panic((char *)__func__);
}

void hal_gpt_print( gpt_t * gpt )
{
	x86_panic((char *)__func__);
}

error_t hal_gpt_set_pte( gpt_t * gpt, vpn_t vpn, ppn_t ppn, uint32_t attr )
{
	x86_panic((char *)__func__);
	return 0;
}

void hal_gpt_get_pte( gpt_t * gpt, vpn_t vpn, uint32_t * attr, ppn_t * ppn )
{
	x86_panic((char *)__func__);
}

void hal_gpt_reset_pte( gpt_t * gpt, vpn_t vpn )
{
	x86_panic((char *)__func__);
}

error_t hal_gpt_lock_pte( gpt_t * gpt, vpn_t vpn )
{
	x86_panic((char *)__func__);
	return 0;
}

error_t hal_gpt_unlock_pte( gpt_t * gpt, vpn_t vpn )
{
	x86_panic((char *)__func__);
	return 0;
}

error_t hal_gpt_copy( gpt_t * dst_gpt, gpt_t * src_gpt, bool_t cow )
{
	x86_panic((char *)__func__);
	return 0;
}
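/*
 * Note that nothing in this file initializes the GPT_* masks yet. A minimal
 * sketch of what that setup could look like, assuming the generic layer only
 * requires the masks to be distinct bit values that hal_gpt_set_pte() later
 * translates into real PTE bits such as PG_V/PG_KW/PG_NX (the concrete
 * values below are hypothetical, not part of the current code):
 *
 *	GPT_MAPPED   = 0x0001;
 *	GPT_SMALL    = 0x0002;
 *	GPT_READABLE = 0x0004;
 *	GPT_WRITABLE = 0x0008;
 *	... and so on for the remaining attributes ...
 */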