// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "soc/soc.h"
#include "soc/interrupt_reg.h"
#include "riscv/rvruntime-frames.h"

    /* Number of GPRs covered by save_regs/restore_regs (full x1..x31 frame
     * layout from rvruntime-frames.h; gp and sp are handled separately). */
    .equ SAVE_REGS, 32
    .equ CONTEXT_SIZE, (SAVE_REGS * 4)

/* Allocate CONTEXT_SIZE bytes on the stack and save every caller- and
 * callee-saved GPR except gp (global pointer is never reloaded by the
 * interrupt path) and sp itself. Offsets come from rvruntime-frames.h. */
.macro save_regs
    addi    sp, sp, -CONTEXT_SIZE
    sw      ra,  RV_STK_RA(sp)
    sw      tp,  RV_STK_TP(sp)
    sw      t0,  RV_STK_T0(sp)
    sw      t1,  RV_STK_T1(sp)
    sw      t2,  RV_STK_T2(sp)
    sw      s0,  RV_STK_S0(sp)
    sw      s1,  RV_STK_S1(sp)
    sw      a0,  RV_STK_A0(sp)
    sw      a1,  RV_STK_A1(sp)
    sw      a2,  RV_STK_A2(sp)
    sw      a3,  RV_STK_A3(sp)
    sw      a4,  RV_STK_A4(sp)
    sw      a5,  RV_STK_A5(sp)
    sw      a6,  RV_STK_A6(sp)
    sw      a7,  RV_STK_A7(sp)
    sw      s2,  RV_STK_S2(sp)
    sw      s3,  RV_STK_S3(sp)
    sw      s4,  RV_STK_S4(sp)
    sw      s5,  RV_STK_S5(sp)
    sw      s6,  RV_STK_S6(sp)
    sw      s7,  RV_STK_S7(sp)
    sw      s8,  RV_STK_S8(sp)
    sw      s9,  RV_STK_S9(sp)
    sw      s10, RV_STK_S10(sp)
    sw      s11, RV_STK_S11(sp)
    sw      t3,  RV_STK_T3(sp)
    sw      t4,  RV_STK_T4(sp)
    sw      t5,  RV_STK_T5(sp)
    sw      t6,  RV_STK_T6(sp)
.endm

/* Save the faulting/interrupted PC into the frame. Clobbers t0, so it must
 * run after save_regs. */
.macro save_mepc
    csrr    t0, mepc
    sw      t0, RV_STK_MEPC(sp)
.endm

/* Mirror of save_regs: reload every saved GPR and release the frame. */
.macro restore_regs
    lw      ra,  RV_STK_RA(sp)
    lw      tp,  RV_STK_TP(sp)
    lw      t0,  RV_STK_T0(sp)
    lw      t1,  RV_STK_T1(sp)
    lw      t2,  RV_STK_T2(sp)
    lw      s0,  RV_STK_S0(sp)
    lw      s1,  RV_STK_S1(sp)
    lw      a0,  RV_STK_A0(sp)
    lw      a1,  RV_STK_A1(sp)
    lw      a2,  RV_STK_A2(sp)
    lw      a3,  RV_STK_A3(sp)
    lw      a4,  RV_STK_A4(sp)
    lw      a5,  RV_STK_A5(sp)
    lw      a6,  RV_STK_A6(sp)
    lw      a7,  RV_STK_A7(sp)
    lw      s2,  RV_STK_S2(sp)
    lw      s3,  RV_STK_S3(sp)
    lw      s4,  RV_STK_S4(sp)
    lw      s5,  RV_STK_S5(sp)
    lw      s6,  RV_STK_S6(sp)
    lw      s7,  RV_STK_S7(sp)
    lw      s8,  RV_STK_S8(sp)
    lw      s9,  RV_STK_S9(sp)
    lw      s10, RV_STK_S10(sp)
    lw      s11, RV_STK_S11(sp)
    lw      t3,  RV_STK_T3(sp)
    lw      t4,  RV_STK_T4(sp)
    lw      t5,  RV_STK_T5(sp)
    lw      t6,  RV_STK_T6(sp)
    addi    sp, sp, CONTEXT_SIZE
.endm

/* Reload mepc from the frame so mret returns to the interrupted code.
 * Clobbers t0, so it must run before restore_regs. */
.macro restore_mepc
    lw      t0, RV_STK_MEPC(sp)
    csrw    mepc, t0
.endm

    .global vPortYieldFromISR
    .global uxInterruptNesting
    .global uxSchedulerRunning
    .global xIsrStackTop
    .global pxCurrentTCB
    .global _global_interrupt_handler

    .section .exception_vectors.text
/* This is the vector table. MTVEC points here.
 *
 * Use 4-byte instructions here. 1 instruction = 1 entry of the table.
 * The CPU jumps to MTVEC (i.e. the first entry) in case of an exception,
 * and (MTVEC & 0xfffffffc) + (mcause & 0x7fffffff) * 4, in case of an interrupt.
 *
 * Note: for our CPU, we need to place this on a 256-byte boundary, as CPU
 * only uses the 24 MSBs of the MTVEC, i.e. (MTVEC & 0xffffff00).
 */
    .balign 0x100
    .global _vector_table
    .type _vector_table, @function
_vector_table:
    .option push
    .option norvc           /* force 4-byte encodings: one slot per entry */
    j _panic_handler        /* exception handler, entry 0 */
    .rept 31
    j _interrupt_handler    /* 31 identical entries, all pointing to the interrupt handler */
    .endr
    .option pop
    .size _vector_table, .-_vector_table

/* Exception handler.
 * Saves the full CPU state (GPRs + trap CSRs) into an RvExcFrame on the
 * current stack and tail-calls xt_unhandled_exception(frame, mcause),
 * which never returns. */
    .global xt_unhandled_exception
    .type _panic_handler, @function
_panic_handler:
    addi    sp, sp, -RV_STK_FRMSZ   /* allocate space on stack to store necessary registers */
    /* save general registers */
    sw      ra,  RV_STK_RA(sp)
    sw      gp,  RV_STK_GP(sp)
    sw      tp,  RV_STK_TP(sp)
    sw      t0,  RV_STK_T0(sp)
    sw      t1,  RV_STK_T1(sp)
    sw      t2,  RV_STK_T2(sp)
    sw      s0,  RV_STK_S0(sp)
    sw      s1,  RV_STK_S1(sp)
    sw      a0,  RV_STK_A0(sp)
    sw      a1,  RV_STK_A1(sp)
    sw      a2,  RV_STK_A2(sp)
    sw      a3,  RV_STK_A3(sp)
    sw      a4,  RV_STK_A4(sp)
    sw      a5,  RV_STK_A5(sp)
    sw      a6,  RV_STK_A6(sp)
    sw      a7,  RV_STK_A7(sp)
    sw      s2,  RV_STK_S2(sp)
    sw      s3,  RV_STK_S3(sp)
    sw      s4,  RV_STK_S4(sp)
    sw      s5,  RV_STK_S5(sp)
    sw      s6,  RV_STK_S6(sp)
    sw      s7,  RV_STK_S7(sp)
    sw      s8,  RV_STK_S8(sp)
    sw      s9,  RV_STK_S9(sp)
    sw      s10, RV_STK_S10(sp)
    sw      s11, RV_STK_S11(sp)
    sw      t3,  RV_STK_T3(sp)
    sw      t4,  RV_STK_T4(sp)
    sw      t5,  RV_STK_T5(sp)
    sw      t6,  RV_STK_T6(sp)
    addi    t0, sp, RV_STK_FRMSZ    /* restore sp with the value when trap happened */
    sw      t0, RV_STK_SP(sp)
    /* record the trap CSRs so the panic dump can show them */
    csrr    t0, mepc
    sw      t0, RV_STK_MEPC(sp)
    csrr    t0, mstatus
    sw      t0, RV_STK_MSTATUS(sp)
    csrr    t0, mtvec
    sw      t0, RV_STK_MTVEC(sp)
    csrr    t0, mcause
    sw      t0, RV_STK_MCAUSE(sp)
    csrr    t0, mtval
    sw      t0, RV_STK_MTVAL(sp)
    csrr    t0, mhartid
    sw      t0, RV_STK_MHARTID(sp)
    /* call xt_unhandled_exception(sp, cause) */
    mv      a0, sp
    csrr    a1, mcause
    jal     zero, xt_unhandled_exception    /* panicHandler never returns */
    .size _panic_handler, .-_panic_handler

/* This is the interrupt handler.
 * It saves the registers on the stack,
 * prepares for interrupt nesting (switching to the ISR stack on first
 * entry and raising the interrupt threshold),
 * re-enables the interrupts,
 * then jumps to the C dispatcher in interrupt.c.
 *
 * Register roles across the C call (all callee-saved, so they survive it):
 *   s1 = mcause at entry
 *   s2 = mstatus at entry
 *   s3 = interrupt threshold level saved at entry
 */
    .global _interrupt_handler
    .type _interrupt_handler, @function
_interrupt_handler:
    /* entry */
    save_regs
    save_mepc

    /* scheduler not enabled, jump directly to ISR handler */
    lw      t0, uxSchedulerRunning
    beq     t0, zero, already_on_handler

    /* increments the ISR nesting count */
    la      t0, uxInterruptNesting
    lw      t1, 0x0(t0)
    addi    t2, t1, 1
    sw      t2, 0x0(t0)

    /* If reached here from another low priority ISR, skip stack pushing to TCB */
    bne     t1, zero, already_on_handler

    /* Otherwise, save current sp, and use the isr stack from here */
    lw      t0, pxCurrentTCB
    sw      sp, 0x0(t0)
    lw      sp, xIsrStackTop

already_on_handler:
    /* Before dispatch c handler, restore interrupt to enable nested intr */
    csrr    s1, mcause
    csrr    s2, mstatus

    /* Save the interrupt threshold level */
    la      t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
    lw      s3, 0(t0)

    /* Increase interrupt threshold level to this interrupt's priority + 1,
     * so only strictly higher-priority interrupts can nest */
    li      t2, 0x7fffffff
    and     t1, s1, t2              /* t1 = mcause & mask */
    slli    t1, t1, 2               /* t1 = mcause * 4 */
    la      t2, INTC_INT_PRIO_REG(0)
    add     t1, t2, t1              /* t1 = INTC_INT_PRIO_REG + 4 * mcause */
    lw      t2, 0(t1)               /* t2 = INTC_INT_PRIO_REG[mcause] */
    addi    t2, t2, 1               /* t2 = t2 + 1 */
    sw      t2, 0(t0)               /* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
    fence                           /* make the threshold write visible before re-enabling */
    li      t0, 0x8                 /* MSTATUS_MIE */
    csrrs   t0, mstatus, t0         /* global interrupt enable: nesting now possible */

    /* call the C dispatcher */
    mv      a0, sp                  /* argument 1, stack pointer */
    csrr    a1, mcause              /* argument 2, interrupt number */
    /* mask off the interrupt flag of mcause */
    li      t0, 0x7fffffff
    and     a1, a1, t0
    jal     _global_interrupt_handler

    /* After dispatch c handler, disable interrupt to make freertos make context switch.
     * Note: must be `li` (load immediate), not `la` — 0x8 is the MSTATUS_MIE
     * bit value, not an address. */
    li      t0, 0x8
    csrrc   t0, mstatus, t0

    /* restore the interrupt threshold level */
    la      t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
    sw      s3, 0(t0)
    fence

    /* may skip RTOS aware interrupt since scheduler was not started */
    lw      t1, uxSchedulerRunning
    beq     t1, zero, isr_exit

    /* update nesting interrupts counter */
    la      t0, uxInterruptNesting
    lw      t1, 0x0(t0)

    /* Already zero, protect against underflow */
    beq     t1, zero, isr_skip_decrement
    addi    t1, t1, -1
    sw      t1, 0x0(t0)

isr_skip_decrement:
    /* may still have interrupts pending, skip section below and exit */
    bne     t1, zero, isr_exit

    /* handled all the ISRs and scheduled the next task, take its stack */
    /* load on sp, then exit. */
    lw      sp, pxCurrentTCB
    lw      sp, 0x0(sp)

isr_exit:
    /* restore the rest of the registers */
    csrw    mcause, s1
    csrw    mstatus, s2
    restore_mepc
    restore_regs
    /* exit, this will also re-enable the interrupts */
    mret
    .size _interrupt_handler, .-_interrupt_handler