/*
 * hal_kentry.S - exception/interrupt/syscall kernel entry point for MIPS32
 *
 * Authors   Ghassan Almaless (2007,2008,2009,2010,2011,2012)
 *           Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_kentry.h>

#---------------------------------------------------------------------------------
# This code is the unique kernel entry point in case of exception, interrupt,
# or syscall for the TSAR_MIPS32 architecture.
#
# - If the core is in user mode:
#     . we deactivate the MMU.
#     . we save the context in the uzone of the calling thread descriptor.
#     . we increment the cores_in_kernel variable.
#     . we call the relevant exception/interrupt/syscall handler.
#
# - If the core is already in kernel mode:
#     . we save the context in the kernel stack.
#     . we call the relevant exception/interrupt/syscall handler.
#
# - In both cases, when the handler returns:
#     . we restore the context.
#     . we reactivate the MMU ??? TODO
#---------------------------------------------------------------------------------

    .section  .kentry , "ax" , @progbits

    .extern   cpu_do_interrupt
    .extern   cpu_do_exception
    .extern   cpu_do_syscall
    .extern   cpu_kentry
    .extern   cpu_kexit

    .org      0x180

    .ent      kentry
    .global   kentry
    .global   kentry_load

    .set      noat
    .set      noreorder

#define  SAVE_SIZE       CPU_REGS_NR*4
#define  LID_WIDTH       2
#define  CXY_WIDTH       8
#define  CXY_MASK        0xFF
#define  MMU_MODE_MASK   0xF
#define  GID_MASK        0x3FF
#define  LID_MASK        0x3

#---------------------------------------------------------------------------------
# Kernel entry point
#---------------------------------------------------------------------------------
kentry:
    mfc0    $26,   $12                   # read SR to test user/kernel mode
    andi    $26,   $26,    0x10          # User Mode bitmask
    beq     $26,   $0,     kernel_mode
    ori     $26,   $0,     0x3           # $26 <= MMU OFF value

#---------------------------------------------------------------------------------------
# This code is executed when the core is in user mode:
# - we use the uzone defined in the user thread descriptor.
# - we set the MMU off, and save the CP2_MODE register to the uzone.
# - we save the user stack pointer to the uzone and load the kernel stack pointer.
# - we store the uzone pointer in $27.

user_mode:
    mtc2    $26,   $1                    # set MMU OFF
    nop
    mfc0    $26,   $4,     2             # $26 <= thread pointer
    lw      $26,   0($26)                # $26 <= uzone pointer
    sw      $29,   (UZ_SP*4)($26)        # save user stack pointer to uzone
    lw      $29,   (UZ_KSP*4)($26)       # load kernel stack pointer from uzone
    ori     $27,   $0,     0xF           # MMU old value: assumed ON
    sw      $27,   (UZ_MODE*4)($26)      # save MMU MODE to uzone
    j       unified_mode
    or      $27,   $0,     $26           # $27 <= uzone
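#---------------------------------------------------------------------------------------
# Note on the user_mode path above (conventions assumed here, not defined in this file):
# - the CP0 UserLocal register ($4, select 2) is expected to hold the pointer to the
#   current thread descriptor,
# - the first word (offset 0) of the thread descriptor is expected to be the uzone
#   pointer,
# - the UZ_* word indexes and CPU_REGS_NR are expected to be provided by the included
#   kernel header.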
#---------------------------------------------------------------------------------------
# This code is executed when the core is in kernel mode:
# - we use an uzone allocated in the kernel stack.
# - we set the MMU off, set the MMU data_paddr extension to local_cxy,
#   and save the CP2_MODE and CP2_DEXT registers to the uzone.
# - we save the kernel stack pointer to the uzone and load the new kernel stack pointer.
# - we store the uzone pointer in $27.

kernel_mode:
    mfc2    $26,   $24
    andi    $26,   $26,    CXY_MASK      # $26 <= CP2_DEXT
    mfc0    $27,   $15,    1
    andi    $27,   $27,    GID_MASK      # $27 <= core_gid (4/4/2 format)
    srl     $27,   $27,    LID_WIDTH     # $27 <= local_cxy
    mtc2    $27,   $24                   # set local_cxy to CP2_DEXT

    # use $26 to save both CP2_MODE (4 bits) and CP2_DEXT (8 bits) values
    mfc2    $27,   $1
    andi    $27,   $27,    MMU_MODE_MASK # $27 <= CP2_MODE
    sll     $27,   $27,    CXY_WIDTH     # $27 <= 0x00000M00
    or      $26,   $26,    $27           # $26 <= 0x00000MXY
    ori     $27,   $0,     0x3
    mtc2    $27,   $1                    # set MMU OFF

    # save old SP, CP2_MODE and CP2_DEXT in the uzone allocated in the kernel stack
    addiu   $27,   $29,    -(SAVE_SIZE)  # allocate an uzone in the stack (use $27 as KSP)
    sw      $29,   (UZ_SP*4)($27)        # save old KSP in this uzone
    srl     $29,   $26,    CXY_WIDTH
    sw      $29,   (UZ_MODE*4)($27)      # save CP2_MODE in this uzone
    andi    $26,   $26,    CXY_MASK
    sw      $26,   (UZ_DEXT*4)($27)      # save CP2_DEXT in this uzone
    or      $29,   $27,    $0            # load new kernel stack pointer

#--------------------------------------------------------------------------------------
# This code is executed in both modes, with the two following assumptions:
# - $27 contains the pointer to the uzone used to save the CPU registers,
# - $29 contains the kernel stack pointer.

unified_mode:
    # save the general purpose registers ($29 was saved above, $26/$27 are kernel temporaries)
    sw      $1,    (UZ_AT*4)($27)
    sw      $2,    (UZ_V0*4)($27)
    sw      $3,    (UZ_V1*4)($27)
    sw      $4,    (UZ_A0*4)($27)
    sw      $5,    (UZ_A1*4)($27)
    sw      $6,    (UZ_A2*4)($27)
    sw      $7,    (UZ_A3*4)($27)
    sw      $8,    (UZ_T0*4)($27)
    sw      $9,    (UZ_T1*4)($27)
    sw      $10,   (UZ_T2*4)($27)
    sw      $11,   (UZ_T3*4)($27)
    sw      $12,   (UZ_T4*4)($27)
    sw      $13,   (UZ_T5*4)($27)
    sw      $14,   (UZ_T6*4)($27)
    sw      $15,   (UZ_T7*4)($27)
    sw      $16,   (UZ_S0*4)($27)
    sw      $17,   (UZ_S1*4)($27)
    sw      $18,   (UZ_S2*4)($27)
    sw      $19,   (UZ_S3*4)($27)
    sw      $20,   (UZ_S4*4)($27)
    sw      $21,   (UZ_S5*4)($27)
    sw      $22,   (UZ_S6*4)($27)
    sw      $23,   (UZ_S7*4)($27)
    sw      $24,   (UZ_T8*4)($27)
    sw      $25,   (UZ_T9*4)($27)
    sw      $28,   (UZ_GP*4)($27)
    sw      $30,   (UZ_S8*4)($27)
    sw      $31,   (UZ_RA*4)($27)

    mfc0    $16,   $14
    sw      $16,   (UZ_EPC*4)($27)       # save EPC
    mflo    $14
    sw      $14,   (UZ_LO*4)($27)        # save LO
    mfhi    $15
    sw      $15,   (UZ_HI*4)($27)        # save HI
    mfc0    $18,   $12
    sw      $18,   (UZ_SR*4)($27)        # save SR
    mfc0    $17,   $13
    sw      $17,   (UZ_CR*4)($27)        # save CR

    srl     $3,    $18,    5             # put SR in kernel mode, IRQs disabled, EXL cleared
    sll     $3,    $3,     5
    mtc0    $3,    $12                   # set new SR

    andi    $1,    $17,    0x3F          # $1 <= XCODE (from CR)

    # signal that the core enters the kernel
    jal     cluster_core_kernel_enter
    nop
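#---------------------------------------------------------------------------------------
# For reference: on the MIPS32, the ExcCode field occupies bits [6:2] of the Cause
# register and bits [1:0] are zero, so the XCODE value computed above (CR & 0x3F) is
# ExcCode shifted left by two:
#   . 0x00 <=> ExcCode 0 (interrupt)  -> cause_int
#   . 0x20 <=> ExcCode 8 (syscall)    -> cause_sys
#   . any other value                 -> cause_excp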
#---------------------------------------------------------------------------------------
# Depending on XCODE (in $1), call the appropriate handler. The three called
# functions take the same two arguments: thread pointer and uzone pointer.

    mfc0    $4,    $4,     2             # $4 <= thread pointer (first arg)
    or      $5,    $0,     $27           # $5 <= uzone pointer (second arg)
    or      $19,   $0,     $27           # $19 <= &uzone (for kentry_exit)

    ori     $8,    $0,     0x20          # $8 <= cause syscall
    beq     $8,    $1,     cause_sys
    nop
    beq     $1,    $0,     cause_int
    nop

cause_excp:
    la      $1,    hal_do_exception
    jalr    $1                           # call exception handler
    addiu   $29,   $29,    -8            # hal_do_exception has 2 args
    addiu   $29,   $29,    8
    j       kentry_exit                  # jump to kentry_exit
    nop

cause_sys:
    la      $1,    hal_do_syscall
    jalr    $1                           # call syscall handler
    addiu   $29,   $29,    -8            # hal_do_syscall has 2 args
    addiu   $29,   $29,    8
    j       kentry_exit                  # jump to kentry_exit
    or      $19,   $0,     $2            # $19 <= uzone pointer returned by hal_do_syscall

cause_int:
    la      $1,    hal_do_interrupt
    jalr    $1                           # call interrupt handler
    addiu   $29,   $29,    -8            # hal_do_interrupt has 2 args
    addiu   $29,   $29,    8

#---------------------------------------------------------------------------------------
# Kentry exit
#---------------------------------------------------------------------------------------
kentry_exit:

    # signal that the core exits the kernel
    jal     cluster_core_kernel_exit

    # restore context from uzone
    or      $27,   $0,     $19           # $27 <= &uzone
    lw      $29,   (UZ_SP*4)($27)        # restore SP from uzone
    lw      $16,   (UZ_EPC*4)($27)
    mtc0    $16,   $14                   # restore EPC from uzone
    lw      $16,   (UZ_HI*4)($27)
    mthi    $16                          # restore HI from uzone
    lw      $16,   (UZ_LO*4)($27)
    mtlo    $16                          # restore LO from uzone

    lw      $17,   (UZ_SR*4)($27)        # get saved SR value from uzone
    andi    $17,   $17,    0x1F          # keep only the 5 LSBs
    mfc0    $26,   $12                   # get current SR value from CP0
    or      $26,   $26,    $17           # merge the two values
    mtc0    $26,   $12                   # setup new SR to CP0

    lw      $1,    (UZ_AT*4)($27)
    lw      $2,    (UZ_V0*4)($27)
    lw      $3,    (UZ_V1*4)($27)
    lw      $4,    (UZ_A0*4)($27)
    lw      $5,    (UZ_A1*4)($27)
    lw      $6,    (UZ_A2*4)($27)
    lw      $7,    (UZ_A3*4)($27)
    lw      $8,    (UZ_T0*4)($27)
    lw      $9,    (UZ_T1*4)($27)
    lw      $10,   (UZ_T2*4)($27)
    lw      $11,   (UZ_T3*4)($27)
    lw      $12,   (UZ_T4*4)($27)
    lw      $13,   (UZ_T5*4)($27)
    lw      $14,   (UZ_T6*4)($27)
    lw      $15,   (UZ_T7*4)($27)
    lw      $16,   (UZ_S0*4)($27)
    lw      $17,   (UZ_S1*4)($27)
    lw      $18,   (UZ_S2*4)($27)
    lw      $19,   (UZ_S3*4)($27)
    lw      $20,   (UZ_S4*4)($27)
    lw      $21,   (UZ_S5*4)($27)
    lw      $22,   (UZ_S6*4)($27)
    lw      $23,   (UZ_S7*4)($27)
    lw      $24,   (UZ_T8*4)($27)
    lw      $25,   (UZ_T9*4)($27)
    lw      $28,   (UZ_GP*4)($27)
    lw      $30,   (UZ_S8*4)($27)
    lw      $31,   (UZ_RA*4)($27)

    lw      $26,   (UZ_DEXT*4)($27)
    mtc2    $26,   $24                   # restore CP2_DEXT from uzone

    #TODO: optimize
    lw      $26,   (UZ_MODE*4)($27)      # get saved CP2_MODE from uzone
    andi    $26,   $26,    0xc           # keep only the TLB controlling bits
    beq     $26,   $0,     out_mmu_3     # bits 3 and 2 are 0 (the two LSBs are always set)
    andi    $26,   $26,    0x8
    beq     $26,   $0,     out_mmu_7     # bit 3 is 0 and bit 2 is set

# Possible values for MMU_MODE:
# - in kernel mode: 0x7 or 0x3
# - in user mode  : 0xF
# CP2_DEXT can be either local or remote.
# Once these registers are set, we can no longer access global data.

out_mmu_F:
    ori     $26,   $0,     0xF
    mtc2    $26,   $1                    # CP2_MODE <= 0xF
    j       out_kentry
    nop

out_mmu_7:
    ori     $26,   $0,     0x7
    mtc2    $26,   $1                    # CP2_MODE <= 0x7
    j       out_kentry
    nop

out_mmu_3:
    ori     $26,   $0,     0x3
    mtc2    $26,   $1                    # CP2_MODE <= 0x3

out_kentry:
    nop
    eret

    .end kentry

    .set reorder
    .set at

    .ent kentry_load
kentry_load:
    # these nops are apparently required so that the eret instruction is fetched
    # while the core is still in virtual mode (processor pipeline)
    mtc2    $26,   $1                    # set MMU MODE
    nop
    nop
    eret

    .end kentry_load

#-------------------------------------------------------------------------------
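# For reference: kentry_load appears to expect the target CP2_MODE value in $26 and
# the return address already set in CP0 EPC: the mtc2 switches the MMU mode, and the
# eret then resumes execution at EPC with that mode.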