2 * FreeRTOS Kernel V10.4.6
3 * Copyright (C) 2006-2015 Cadence Design Systems, Inc.
4 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6 * SPDX-License-Identifier: MIT
8 * Permission is hereby granted, free of charge, to any person obtaining a copy of
9 * this software and associated documentation files (the "Software"), to deal in
10 * the Software without restriction, including without limitation the rights to
11 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
12 * the Software, and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in all
16 * copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
20 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
21 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
22 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * https://www.FreeRTOS.org
26 * https://github.com/FreeRTOS
30 /*******************************************************************************
31 --------------------------------------------------------------------------------
33 XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS
35 Xtensa low level exception and interrupt vectors and handlers for an RTOS.
37 Interrupt handlers and user exception handlers support interaction with
38 the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and
39 after user's specific interrupt handlers. These macros are defined in
40 xtensa_<rtos>.h to call suitable functions in a specific RTOS.
42 Users can install application-specific interrupt handlers for low and
43 medium level interrupts, by calling xt_set_interrupt_handler(). These
44 handlers can be written in C, and must obey C calling convention. The
45 handler table is indexed by the interrupt number. Each handler may be
46 provided with an argument.
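    For example, a handler for a hypothetical interrupt number 17 could be
    installed as follows (a sketch, not part of this port; it assumes the
    xt_set_interrupt_handler()/xt_ints_on() prototypes from xtensa_api.h,
    and the register address is made up):

        #include <stdint.h>
        #include <xtensa/xtensa_api.h>

        // Runs with interrupts at its level masked; obeys the C calling convention.
        static void my_isr(void *arg)
        {
            volatile uint32_t *status_reg = (volatile uint32_t *) arg;
            (void) *status_reg;              // ... acknowledge and service the device ...
        }

        void install_my_isr(void)
        {
            xt_set_interrupt_handler(17, my_isr, (void *) 0x3FF00038);
            xt_ints_on(1u << 17);            // unmask interrupt 17
        }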
48 Note that the system timer interrupt is handled specially, and is
49 dispatched to the RTOS-specific handler. This timer cannot be hooked by application code.
52 Optional hooks are also provided to install a handler per level at
53 run-time, made available by compiling this source file with
54 '-DXT_INTEXC_HOOKS' (useful for automated testing).
56 !! This file is a template that usually needs to be modified to handle !!
57 !! application specific interrupts. Search USER_EDIT for helpful comments !!
58 !! on where to insert handlers and how to write them. !!
60 Users can also install application-specific exception handlers in the
61 same way, by calling xt_set_exception_handler(). One handler slot is
62 provided for each exception type. Note that some exceptions are handled
63 by the porting layer itself, and cannot be taken over by application
64 code in this manner. These are the alloca, syscall, and coprocessor exceptions.
67 The exception handlers can be written in C, and must follow C calling
68 convention. Each handler is passed a pointer to an exception frame as
69 its single argument. The exception frame is created on the stack, and
70 holds the saved context of the thread that took the exception. If the
71 handler returns, the context will be restored and the instruction that
72 caused the exception will be retried. If the handler makes any changes
73 to the saved state in the exception frame, the changes will be applied
74 when restoring the context.
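    For example (a sketch, not part of this port; it assumes the XtExcFrame
    layout from xtensa_context.h and the xt_set_exception_handler() prototype
    from xtensa_api.h; the cause being hooked is application-specific):

        #include <xtensa/xtensa_api.h>

        // Called with a pointer to the saved context of the faulting thread.
        static void my_exc_handler(XtExcFrame *frame)
        {
            // Skip the faulting instruction (assuming a 3-byte instruction);
            // the modified PC takes effect when the context is restored.
            frame->pc += 3;
        }

        void install_my_exc_handler(void)
        {
            xt_set_exception_handler(EXCCAUSE_ILLEGAL, my_exc_handler);
        }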
76 Because Xtensa is a configurable architecture, this port supports all user
77 generated configurations (except restrictions stated in the release notes).
78 This is accomplished by conditional compilation using macros and functions
79 defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
80 Only the relevant parts of this file will be included in your RTOS build.
81 For example, this file provides interrupt vector templates for all types and
82 all priority levels, but only the ones in your configuration are built.
84 NOTES on the use of 'call0' for long jumps instead of 'j':
85 1. This file should be assembled with the -mlongcalls option to xt-xcc.
86 2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to
87 a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the
88 distance from the call to the destination. The linker then relaxes
89 it back to 'call0 dest' if it determines that dest is within range.
90 This allows more flexibility in locating code without the performance
91 overhead of the 'l32r' literal data load in cases where the destination
92 is in range of 'call0'. There is an additional benefit in that 'call0'
93 has a longer range than 'j' due to the target being word-aligned, so
94 the 'l32r' sequence is less likely needed.
95 3. The use of 'call0' with -mlongcalls requires that register a0 not be
96 live at the time of the call, which is always the case for a function
97 call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
98 4. This use of 'call0' is independent of the C function call ABI.
100 *******************************************************************************/
102 #include "xtensa_rtos.h"
103 #include "esp_idf_version.h"
104 #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
105 #include "esp_panic.h"
107 #include "esp_private/panic_reason.h"
108 #endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
109 #include "sdkconfig.h"
113 Define for workaround: pin tasks that have no CPU affinity to a CPU when the FPU is used.
114 Update this offset whenever the TCB structure changes.
116 #define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3
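/*
Worked example (assuming the default configMAX_TASK_NAME_LEN of 16 and an
otherwise unchanged TCB layout):

    (0x38 + 16 + 3) & ~3  =  (56 + 19) & ~3  =  75 & ~3  =  72  =  0x48

i.e. xCoreID is assumed to sit at word-aligned offset 0x48 into the TCB.
*/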
120 --------------------------------------------------------------------------------
121 In order for backtracing to be able to trace from the pre-exception stack
122 across to the exception stack (including nested interrupts), we need to create
123 a pseudo base-save area to make it appear like the exception dispatcher was
124 triggered by a CALL4 from the pre-exception code. In reality, the exception
125 dispatcher uses the same window as pre-exception code, and only CALL0s are
126 used within the exception dispatcher.
128 To create the pseudo base-save area, we need to store a copy of the pre-exception's
129 base save area (a0 to a3) below the exception dispatcher's SP. EXCSAVE_x will
130 be used to store a copy of the SP that points to the interrupted code's exception
131 frame just in case the exception dispatcher's SP does not point to the exception
132 frame (which is the case when switching from task to interrupt stack).
134 Clearing the pseudo base-save area is unnecessary, as the interrupt dispatcher
135 will restore the current SP to that of the pre-exception SP.
136 --------------------------------------------------------------------------------
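/*
--------------------------------------------------------------------------------
Resulting layout, following the windowed-ABI base-save convention (offsets are
relative to the exception dispatcher's SP; illustrative sketch only):

    SP - 16 : copy of the pre-exception a0 (return address, encoded as if from a CALL4)
    SP - 12 : copy of the pre-exception a1 (pre-exception stack pointer)
--------------------------------------------------------------------------------
*/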
138 #ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
139 #define XT_DEBUG_BACKTRACE 1
144 --------------------------------------------------------------------------------
145 Defines used to access _xtos_interrupt_table.
146 --------------------------------------------------------------------------------
148 #define XIE_HANDLER 0
154 Macro get_percpu_entry_for - convert a table entry number into the per-CPU entry for the current core.
155 Basically does reg=reg*portNUM_PROCESSORS+current_core_id
156 Multiple versions here to optimize for specific portNUM_PROCESSORS values.
158 .macro get_percpu_entry_for reg scratch
159 #if (portNUM_PROCESSORS == 1)
160 /* No need to do anything */
161 #elif (portNUM_PROCESSORS == 2)
162 /* Optimized 2-core code. */
164 addx2 \reg,\reg,\scratch
166 /* Generalized n-core code. Untested! */
167 movi \scratch,portNUM_PROCESSORS
168 mull \scratch,\reg,\scratch
170 add \reg,\scratch,\reg
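/*
Equivalent indexing in C (illustrative only; the names below are not real symbols):

    entry = table[int_num * portNUM_PROCESSORS + core_id];

e.g. on a 2-core configuration, interrupt 13 taken on core 1 selects
entry 13 * 2 + 1 = 27.
*/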
174 --------------------------------------------------------------------------------
175 Macro extract_msb - return the input with only the highest bit set.
177 Input : "ain" - Input value, clobbered.
178 Output : "aout" - Output value, has only one bit set, MSB of "ain".
179 The two arguments must be different AR registers.
180 --------------------------------------------------------------------------------
183 .macro extract_msb aout ain
185 addi \aout, \ain, -1 /* aout = ain - 1 */
186 and \ain, \ain, \aout /* ain = ain & aout */
187 bnez \ain, 1b /* repeat until ain == 0 */
188 addi \aout, \aout, 1 /* return aout + 1 */
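/*
A C rendering of the same loop (a sketch); e.g. for ain = 0x2C (0b101100) it
returns 0x20:

    #include <stdint.h>

    uint32_t extract_msb(uint32_t ain)
    {
        uint32_t aout;
        do {
            aout = ain - 1;         // aout = ain - 1
            ain &= aout;            // clear the lowest set bit of ain
        } while (ain != 0);         // repeat until every bit has been cleared
        return aout + 1;            // aout + 1 == MSB of the original value
    }
*/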
192 --------------------------------------------------------------------------------
193 Macro dispatch_c_isr - dispatch interrupts to user ISRs.
194 This will dispatch to user handlers (if any) that are registered in the
195 XTOS dispatch table (_xtos_interrupt_table). These handlers would have
196 been registered by calling _xtos_set_interrupt_handler(). There is one
197 exception - the timer interrupt used by the OS will not be dispatched
198 to a user handler - this must be handled by the caller of this macro.
200 Level triggered and software interrupts are automatically deasserted by
204 -- PS.INTLEVEL is set to "level" at entry
205 -- PS.EXCM = 0, C calling enabled
207 NOTE: For CALL0 ABI, a12-a15 have not yet been saved.
209 NOTE: This macro will use registers a0 and a2-a7. The arguments are:
210 level -- interrupt level
211 mask -- interrupt bitmask for this level
212 --------------------------------------------------------------------------------
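/*
Pseudo-C outline of the dispatch loop implemented by this macro (illustrative
only; it omits the virtual-priority bookkeeping and the XT_INTEXC_HOOKS path,
and helpers such as msb(), bit_index() and core_id are not real symbols):

    while ((pend = INTERRUPT & INTENABLE & mask) != 0) {
        bit = msb(pend);                    // highest-numbered pending interrupt
        INTCLEAR = bit;                     // clears sw/edge-triggered interrupts
        if (bit == XT_TIMER_INTEN) {
            XT_RTOS_TIMER_INT();            // OS tick is handled specially
        } else {
            n = bit_index(bit);
            e = &_xt_interrupt_table[n * portNUM_PROCESSORS + core_id];
            e->handler(e->arg);             // user ISR, C calling convention
        }
    }
*/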
215 .macro dispatch_c_isr level mask
217 #ifdef CONFIG_PM_TRACE
218 movi a6, 0 /* = ESP_PM_TRACE_IDLE */
220 call4 esp_pm_trace_exit
221 #endif // CONFIG_PM_TRACE
223 /* Get mask of pending, enabled interrupts at this level into a2. */
225 .L_xt_user_int_&level&:
231 beqz a2, 9f /* nothing to do */
233 /* This bit of code provides a nice debug backtrace in the debugger.
234 It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
235 if you want to save the cycles.
236 At this point, the exception frame should have been allocated and filled,
237 and current sp points to the interrupt stack (for non-nested interrupt)
238 or below the allocated exception frame (for nested interrupts). Copy the
239 pre-exception's base save area below the current SP.
241 #ifdef XT_DEBUG_BACKTRACE
242 #ifndef __XTENSA_CALL0_ABI__
243 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
244 rsr a0, EXCSAVE_1 + \level - 1 /* Get exception frame pointer stored in EXCSAVE_x */
245 l32i a3, a0, XT_STK_A0 /* Copy pre-exception a0 (return address) */
247 l32i a3, a0, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
249 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
250 /* Backtracing only needs a0 and a1, no need to create full base save area.
251 Also need to change current frame's return address to point to pre-exception's
252 last run instruction.
254 rsr a0, EPC_1 + \level - 1 /* return address */
255 movi a4, 0xC0000000 /* constant with top 2 bits set (call size) */
256 or a0, a0, a4 /* set top 2 bits */
257 addx2 a0, a4, a0 /* clear top bit -- simulating call4 size */
261 #ifdef CONFIG_PM_ENABLE
262 call4 esp_pm_impl_isr_hook
265 #ifdef XT_INTEXC_HOOKS
266 /* Call interrupt hook if present to (pre)handle interrupts. */
267 movi a4, _xt_intexc_hooks
268 l32i a4, a4, \level << 2
270 #ifdef __XTENSA_CALL0_ABI__
282 /* Now look up in the dispatch table and call user ISR if any. */
283 /* If multiple bits are set then MSB has highest priority. */
285 extract_msb a4, a2 /* a4 = MSB of a2, a2 trashed */
288 /* Enable all interrupts at this level that are numerically higher
289 than the one we just selected, since they are treated as higher priority.
292 movi a3, \mask /* a3 = all interrupts at this level */
293 add a2, a4, a4 /* a2 = a4 << 1 */
294 addi a2, a2, -1 /* a2 = mask of 1's <= a4 bit */
295 and a2, a2, a3 /* a2 = mask of all bits <= a4 at this level */
297 l32i a6, a3, 4 /* a6 = _xt_vpri_mask */
299 addi a2, a2, -1 /* a2 = mask to apply */
300 and a5, a6, a2 /* mask off all bits <= a4 bit */
301 s32i a5, a3, 4 /* update _xt_vpri_mask */
303 and a3, a3, a2 /* mask off all bits <= a4 bit */
305 rsil a3, \level - 1 /* lower interrupt level by 1 */
308 movi a3, XT_TIMER_INTEN /* a3 = timer interrupt bit */
309 wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */
310 beq a3, a4, 7f /* if timer interrupt then skip table */
312 find_ms_setbit a3, a4, a3, 0 /* a3 = interrupt number */
314 get_percpu_entry_for a3, a12
315 movi a4, _xt_interrupt_table
316 addx8 a3, a3, a4 /* a3 = address of interrupt table entry */
317 l32i a4, a3, XIE_HANDLER /* a4 = handler address */
318 #ifdef __XTENSA_CALL0_ABI__
319 mov a12, a6 /* save in callee-saved reg */
320 l32i a2, a3, XIE_ARG /* a2 = handler arg */
321 callx0 a4 /* call handler */
324 mov a2, a6 /* save in windowed reg */
325 l32i a6, a3, XIE_ARG /* a6 = handler arg */
326 callx4 a4 /* call handler */
332 j .L_xt_user_int_&level& /* check for more interrupts */
337 .ifeq XT_TIMER_INTPRI - \level
338 .L_xt_user_int_timer_&level&:
340 Interrupt handler for the RTOS tick timer if at this level.
341 We'll be reading the interrupt state again after this call
342 so no need to preserve any registers except a6 (vpri_mask).
345 #ifdef __XTENSA_CALL0_ABI__
347 call0 XT_RTOS_TIMER_INT
351 call4 XT_RTOS_TIMER_INT
358 j .L_xt_user_int_&level& /* check for more interrupts */
363 /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
364 virtual _xt_intenable which _could_ have changed during interrupt processing.
368 l32i a4, a3, 0 /* a4 = _xt_intenable */
369 s32i a2, a3, 4 /* update _xt_vpri_mask */
370 and a4, a4, a2 /* a4 = masked intenable */
371 wsr a4, INTENABLE /* update INTENABLE */
381 --------------------------------------------------------------------------------
383 Should be reached by call0 (preferable) or jump only. If call0, a0 says where
384 from. If on simulator, display panic message and abort, else loop indefinitely.
385 --------------------------------------------------------------------------------
392 .type _xt_panic,@function
398 /* Allocate exception frame and save minimal context. */
400 addi sp, sp, -XT_STK_FRMSZ
401 s32i a0, sp, XT_STK_A1
402 #if XCHAL_HAVE_WINDOWED
403 s32e a0, sp, -12 /* for debug backtrace */
405 rsr a0, PS /* save interruptee's PS */
406 s32i a0, sp, XT_STK_PS
407 rsr a0, EPC_1 /* save interruptee's PC */
408 s32i a0, sp, XT_STK_PC
409 #if XCHAL_HAVE_WINDOWED
410 s32e a0, sp, -16 /* for debug backtrace */
412 s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
413 s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
414 call0 _xt_context_save
416 /* Save exc cause and vaddr into exception frame */
418 s32i a0, sp, XT_STK_EXCCAUSE
420 s32i a0, sp, XT_STK_EXCVADDR
422 /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
423 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
425 s32i a0, sp, XT_STK_A0
427 /* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
428 movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
437 //Call using call0. Prints the hex char in a2. Kills a3, a4, a5
441 panic_print_hex_loop:
444 bgei a5,64,panic_print_hex_loop
447 bgei a5,10,panic_print_hex_a
457 bnei a4,0,panic_print_hex_loop
465 .section .rodata, "a"
471 --------------------------------------------------------------------------------
472 Hooks to dynamically install handlers for exceptions and interrupts.
473 Allows automated regression frameworks to install handlers per test.
474 Consists of an array of function pointers indexed by interrupt level,
475 with index 0 containing the entry for user exceptions.
476 Initialized with all 0s, meaning no handler is installed at each level.
477 See comment in xtensa_rtos.h for more details.
479 *WARNING* This array is for all CPUs, that is, installing a hook for
480 one CPU will install it for all others as well!
481 --------------------------------------------------------------------------------
484 #ifdef XT_INTEXC_HOOKS
486 .global _xt_intexc_hooks
487 .type _xt_intexc_hooks,@object
491 .fill XT_INTEXC_HOOK_NUM, 4, 0
496 --------------------------------------------------------------------------------
497 EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS
498 (except window exception vectors).
500 Each vector goes at a predetermined location according to the Xtensa
501 hardware configuration, which is ensured by its placement in a special
502 section known to the Xtensa linker support package (LSP). It performs
503 the minimum necessary before jumping to the handler in the .text section.
505 The corresponding handler goes in the normal .text section. It sets up
506 the appropriate stack frame, saves a few vector-specific registers and
507 calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
508 and enter the RTOS, then sets up a C environment. It then calls the
509 user's interrupt handler code (which may be coded in C) and finally
510 calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
512 While XT_RTOS_INT_EXIT does not return directly to the interruptee,
513 eventually the RTOS scheduler will want to dispatch the interrupted
514 task or handler. The scheduler will return to the exit point that was
515 saved in the interrupt stack frame at XT_STK_EXIT.
516 --------------------------------------------------------------------------------
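/*
--------------------------------------------------------------------------------
For orientation, the resulting control flow for a level 1 interrupt is
(illustrative outline only):

    _UserExceptionVector
      -> _xt_user_exc            (EXCCAUSE == EXCCAUSE_LEVEL1INTERRUPT)
        -> _xt_lowint1           save PS/PC/a0, set XT_STK_EXIT = _xt_user_exit
          -> XT_RTOS_INT_ENTER   save the rest of the context, enter the RTOS
          -> dispatch_c_isr      call any registered C handlers
          -> XT_RTOS_INT_EXIT    may schedule; control eventually returns via
                                 the exit point saved at XT_STK_EXIT
--------------------------------------------------------------------------------
*/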
521 --------------------------------------------------------------------------------
523 --------------------------------------------------------------------------------
528 .begin literal_prefix .DebugExceptionVector
529 .section .DebugExceptionVector.text, "ax"
530 .global _DebugExceptionVector
532 .global xt_debugexception
533 _DebugExceptionVector:
534 wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* preserve a0 */
535 call0 xt_debugexception /* load exception handler */
542 --------------------------------------------------------------------------------
544 Double exceptions are not a normal occurrence. They indicate a bug of some kind.
545 --------------------------------------------------------------------------------
548 #ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR
550 .begin literal_prefix .DoubleExceptionVector
551 .section .DoubleExceptionVector.text, "ax"
552 .global _DoubleExceptionVector
555 _DoubleExceptionVector:
558 break 1, 4 /* unhandled double exception */
560 movi a0,PANIC_RSN_DOUBLEEXCEPTION
562 call0 _xt_panic /* does not return */
563 rfde /* make a0 point here not later */
567 #endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
570 --------------------------------------------------------------------------------
571 Kernel Exception (including Level 1 Interrupt from kernel mode).
572 --------------------------------------------------------------------------------
575 .begin literal_prefix .KernelExceptionVector
576 .section .KernelExceptionVector.text, "ax"
577 .global _KernelExceptionVector
580 _KernelExceptionVector:
582 wsr a0, EXCSAVE_1 /* preserve a0 */
583 call0 _xt_kernel_exc /* kernel exception handler */
584 /* never returns here - call0 is used as a jump (see note at top) */
593 break 1, 0 /* unhandled kernel exception */
595 movi a0,PANIC_RSN_KERNELEXCEPTION
597 call0 _xt_panic /* does not return */
598 rfe /* make a0 point here not there */
602 --------------------------------------------------------------------------------
603 User Exception (including Level 1 Interrupt from user mode).
604 --------------------------------------------------------------------------------
607 .begin literal_prefix .UserExceptionVector
608 .section .UserExceptionVector.text, "ax"
609 .global _UserExceptionVector
610 .type _UserExceptionVector,@function
613 _UserExceptionVector:
615 wsr a0, EXCSAVE_1 /* preserve a0 */
616 call0 _xt_user_exc /* user exception handler */
617 /* never returns here - call0 is used as a jump (see note at top) */
622 --------------------------------------------------------------------------------
623 Insert some waypoints for jumping beyond the signed 8-bit range of
624 conditional branch instructions, so the conditional branches to specific
625 exception handlers are not taken in the mainline. Saves some cycles in the mainline.
627 --------------------------------------------------------------------------------
630 #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
631 .global LoadStoreErrorHandler
632 .global AlignmentErrorHandler
637 #if XCHAL_HAVE_WINDOWED
640 call0 _xt_alloca_exc /* in window vectors section */
641 /* never returns here - call0 is used as a jump (see note at top) */
646 call0 _xt_syscall_exc
647 /* never returns here - call0 is used as a jump (see note at top) */
653 /* never returns here - call0 is used as a jump (see note at top) */
656 #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
658 _call_loadstore_handler:
659 call0 LoadStoreErrorHandler
660 /* This will return only if wrong opcode or address out of range*/
664 _call_alignment_handler:
665 call0 AlignmentErrorHandler
666 /* This will return only if wrong opcode or address out of range*/
672 --------------------------------------------------------------------------------
673 User exception handler.
674 --------------------------------------------------------------------------------
677 .type _xt_user_exc,@function
682 /* If level 1 interrupt then jump to the dispatcher */
684 beqi a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_lowint1
686 /* Handle any coprocessor exceptions. Rely on the fact that exception
687 numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
690 bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
693 /* Handle alloca and syscall exceptions */
694 #if XCHAL_HAVE_WINDOWED
695 beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc
697 beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc
699 #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
700 beqi a0, EXCCAUSE_LOAD_STORE_ERROR, _call_loadstore_handler
703 beqi a0, 8, _call_alignment_handler
708 /* Handle all other exceptions. All can have user-defined handlers. */
709 /* NOTE: we'll stay on the user stack for exception handling. */
711 /* Allocate exception frame and save minimal context. */
713 addi sp, sp, -XT_STK_FRMSZ
714 s32i a0, sp, XT_STK_A1
715 #if XCHAL_HAVE_WINDOWED
716 s32e a0, sp, -12 /* for debug backtrace */
718 rsr a0, PS /* save interruptee's PS */
719 s32i a0, sp, XT_STK_PS
720 rsr a0, EPC_1 /* save interruptee's PC */
721 s32i a0, sp, XT_STK_PC
722 #if XCHAL_HAVE_WINDOWED
723 s32e a0, sp, -16 /* for debug backtrace */
725 s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
726 s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
727 call0 _xt_context_save
729 /* Save exc cause and vaddr into exception frame */
731 s32i a0, sp, XT_STK_EXCCAUSE
733 s32i a0, sp, XT_STK_EXCVADDR
735 /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
736 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
737 s32i a0, sp, XT_STK_A0
739 /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */
740 #ifdef __XTENSA_CALL0_ABI__
741 movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM
743 movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE
748 Create pseudo base save area. At this point, sp is still pointing to the
749 allocated and filled exception stack frame.
751 #ifdef XT_DEBUG_BACKTRACE
752 #ifndef __XTENSA_CALL0_ABI__
753 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
754 l32i a3, sp, XT_STK_A0 /* Copy pre-exception a0 (return address) */
756 l32i a3, sp, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
758 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
759 rsr a0, EPC_1 /* return address for debug backtrace */
760 movi a5, 0xC0000000 /* constant with top 2 bits set (call size) */
761 rsync /* wait for WSR.PS to complete */
762 or a0, a0, a5 /* set top 2 bits */
763 addx2 a0, a5, a0 /* clear top bit -- thus simulating call4 size */
765 rsync /* wait for WSR.PS to complete */
769 rsr a2, EXCCAUSE /* recover exc cause */
771 #ifdef XT_INTEXC_HOOKS
773 Call exception hook to pre-handle exceptions (if installed).
774 Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
776 movi a4, _xt_intexc_hooks
777 l32i a4, a4, 0 /* user exception hook index 0 */
779 .Ln_xt_user_exc_call_hook:
780 #ifdef __XTENSA_CALL0_ABI__
782 beqi a2, -1, .L_xt_user_done
786 beqi a6, -1, .L_xt_user_done
792 rsr a2, EXCCAUSE /* recover exc cause */
793 movi a3, _xt_exception_table
794 get_percpu_entry_for a2, a4
795 addx4 a4, a2, a3 /* a4 = address of exception table entry */
796 l32i a4, a4, 0 /* a4 = handler address */
797 #ifdef __XTENSA_CALL0_ABI__
798 mov a2, sp /* a2 = pointer to exc frame */
799 callx0 a4 /* call handler */
801 mov a6, sp /* a6 = pointer to exc frame */
802 callx4 a4 /* call handler */
807 /* Restore context and return */
808 call0 _xt_context_restore
809 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
811 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
813 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
814 l32i sp, sp, XT_STK_A1 /* remove exception frame */
815 rsync /* ensure PS and EPC written */
816 rfe /* PS.EXCM is cleared */
820 --------------------------------------------------------------------------------
821 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
822 on entry and used to return to a thread or interrupted interrupt handler.
823 --------------------------------------------------------------------------------
826 .global _xt_user_exit
827 .type _xt_user_exit,@function
830 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
832 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
834 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
835 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
836 rsync /* ensure PS and EPC written */
837 rfe /* PS.EXCM is cleared */
842 --------------------------------------------------------------------------------
843 Syscall Exception Handler (jumped to from User Exception Handler).
844 Syscall 0 is required to spill the register windows (a no-op in the Call0 ABI).
845 Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.
846 --------------------------------------------------------------------------------
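/*
For illustration, a hypothetical way to request the spill from C (a sketch;
only the "syscall number and result in a2" convention is taken from this
handler, the helper itself is not part of this port):

    static inline int xt_syscall0(void)
    {
        register int a2 __asm__("a2") = 0;              // syscall number 0
        __asm__ volatile ("syscall" : "+r"(a2) :: "memory");
        return a2;                                      // 0, or -1 if unsupported
    }
*/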
850 .type _xt_syscall_exc,@function
854 #ifdef __XTENSA_CALL0_ABI__
856 Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
857 Use a minimal stack frame (16B) to save A2 & A3 for scratch.
858 PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
860 addi a0, a0, -PS_EXCM_MASK
866 #else /* Windowed ABI */
868 Save necessary context and spill the register windows.
869 PS.EXCM is still set and must remain set until after the spill.
870 Reuse context save function though it saves more than necessary.
871 For this reason, a full interrupt stack frame is allocated.
873 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
874 s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
875 s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
876 call0 _xt_context_save
880 Grab the interruptee's PC and skip over the 'syscall' instruction.
881 If it's at the end of a zero-overhead loop and it's not on the last
882 iteration, decrement loop counter and skip to beginning of loop.
884 rsr a2, EPC_1 /* a2 = PC of 'syscall' */
885 addi a3, a2, 3 /* ++PC */
887 rsr a0, LEND /* if (PC == LEND */
889 rsr a0, LCOUNT /* && LCOUNT != 0) */
891 addi a0, a0, -1 /* --LCOUNT */
892 rsr a3, LBEG /* PC = LBEG */
893 wsr a0, LCOUNT /* } */
895 1: wsr a3, EPC_1 /* update PC */
897 /* Restore interruptee's context and return from exception. */
898 #ifdef __XTENSA_CALL0_ABI__
903 call0 _xt_context_restore
904 addi sp, sp, XT_STK_FRMSZ
907 movnez a2, a0, a2 /* return -1 if not syscall 0 */
912 --------------------------------------------------------------------------------
913 Co-Processor Exception Handler (jumped to from User Exception Handler).
914 These exceptions are generated by co-processor instructions, which are only
915 allowed in thread code (not in interrupts or kernel code). This restriction is
916 deliberately imposed to reduce the burden of state-save/restore in interrupts.
917 --------------------------------------------------------------------------------
921 .section .rodata, "a"
923 /* Offset to CP n save area in thread's CP save area. */
924 .global _xt_coproc_sa_offset
925 .type _xt_coproc_sa_offset,@object
926 .align 16 /* minimize crossing cache boundaries */
927 _xt_coproc_sa_offset:
928 .word XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
929 .word XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA
931 /* Bitmask for CP n's CPENABLE bit. */
932 .type _xt_coproc_mask,@object
933 .align 16,,8 /* try to keep it all in one cache line */
937 .long (i<<16) | (1<<i) // upper 16-bits = i, lower = bitmask
943 /* Owner thread of CP n, identified by thread's CP save area (0 = unowned). */
944 .global _xt_coproc_owner_sa
945 .type _xt_coproc_owner_sa,@object
946 .align 16,,XCHAL_CP_MAX<<2 /* minimize crossing cache boundaries */
948 .space (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2
955 j .L_xt_coproc_invalid /* not in a thread (invalid) */
962 --------------------------------------------------------------------------------
963 Coprocessor exception handler.
964 At entry, only a0 has been saved (in EXCSAVE_1).
965 --------------------------------------------------------------------------------
968 .type _xt_coproc_exc,@function
973 /* Allocate interrupt stack frame and save minimal context. */
974 mov a0, sp /* sp == a1 */
975 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
976 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
977 #if XCHAL_HAVE_WINDOWED
978 s32e a0, sp, -12 /* for debug backtrace */
980 rsr a0, PS /* save interruptee's PS */
981 s32i a0, sp, XT_STK_PS
982 rsr a0, EPC_1 /* save interruptee's PC */
983 s32i a0, sp, XT_STK_PC
984 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
985 s32i a0, sp, XT_STK_A0
986 #if XCHAL_HAVE_WINDOWED
987 s32e a0, sp, -16 /* for debug backtrace */
989 movi a0, _xt_user_exit /* save exit point for dispatch */
990 s32i a0, sp, XT_STK_EXIT
993 s32i a5, sp, XT_STK_A5 /* save a5 */
994 addi a5, a0, -EXCCAUSE_CP0_DISABLED /* a5 = CP index */
996 /* Save a few more of interruptee's registers (a5 was already saved). */
997 s32i a2, sp, XT_STK_A2
998 s32i a3, sp, XT_STK_A3
999 s32i a4, sp, XT_STK_A4
1000 s32i a15, sp, XT_STK_A15
1002 /* Get co-processor state save area of new owner thread. */
1003 call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
1004 #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
1005 beqz a15, .L_goto_invalid /* not in a thread (invalid) */
1007 #ifndef CONFIG_FREERTOS_FPU_IN_ISR
1008 beqz a15, .L_goto_invalid
1010 #endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
1012 /* When FPU use in ISRs is enabled, a15 may legitimately be zero here; that case is handled below. */
1014 /* Enable the co-processor's bit in CPENABLE. */
1015 movi a0, _xt_coproc_mask
1016 rsr a4, CPENABLE /* a4 = CPENABLE */
1017 addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
1018 l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
1020 /* FPU operations are incompatible with tasks that have no CPU affinity. If an FPU
1021 operation reaches this point, pin the task to the core it is currently running on
1022 so that the coprocessor state stays consistent. */
1023 movi a2, pxCurrentTCB
1026 l32i a2, a2, 0 /* a2 = start of pxCurrentTCB[cpuid] */
1027 addi a2, a2, TASKTCB_XCOREID_OFFSET /* offset to xCoreID in tcb struct */
1028 s32i a3, a2, 0 /* store current cpuid */
1030 /* Grab correct xt_coproc_owner_sa for this core */
1031 movi a2, XCHAL_CP_MAX << 2
1032 mull a2, a2, a3 /* multiply by current processor id */
1033 movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
1034 add a3, a3, a2 /* a3 = owner area needed for this processor */
1036 extui a2, a0, 0, 16 /* coprocessor bitmask portion */
1037 or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
1041 Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value
1042 everywhere): _xt_coproc_release assumes it works like this in order not to need
1047 /* Get old coprocessor owner thread (save area ptr) and assign new one. */
1048 addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */
1049 l32i a2, a3, 0 /* a2 = old owner's save area */
1050 s32i a15, a3, 0 /* _xt_coproc_owner_sa[n] = new */
1051 rsync /* ensure wsr.CPENABLE is complete */
1053 /* Only need to context switch if new owner != old owner. */
1054 /* If floating-point use in ISRs is required, the check below must be removed,
1055 because when restoring from an ISR we may rely on the new == old condition
1056 being used to force the CP restore for the next thread.
1058 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1059 #ifndef CONFIG_FREERTOS_FPU_IN_ISR
1061 beq a15, a2, .L_goto_done /* new owner == old, we're done */
1062 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1066 /* If no old owner then nothing to save. */
1067 beqz a2, .L_check_new
1069 /* If old owner not actively using CP then nothing to save. */
1070 l16ui a4, a2, XT_CPENABLE /* a4 = old owner's CPENABLE */
1071 bnone a4, a0, .L_check_new /* old owner not using CP */
1074 /* Save old owner's coprocessor state. */
1076 movi a5, _xt_coproc_sa_offset
1078 /* Mark old owner state as no longer active (CPENABLE bit n clear). */
1079 xor a4, a4, a0 /* clear CP bit in CPENABLE */
1080 s16i a4, a2, XT_CPENABLE /* update old owner's CPENABLE */
1082 extui a4, a0, 16, 5 /* a4 = CP index = n */
1083 addx4 a5, a4, a5 /* a5 = &_xt_coproc_sa_offset[n] */
1085 /* Mark old owner state as saved (CPSTORED bit n set). */
1086 l16ui a4, a2, XT_CPSTORED /* a4 = old owner's CPSTORED */
1087 l32i a5, a5, 0 /* a5 = XT_CP[n]_SA offset */
1088 or a4, a4, a0 /* set CP in old owner's CPSTORED */
1089 s16i a4, a2, XT_CPSTORED /* update old owner's CPSTORED */
1090 l32i a2, a2, XT_CP_ASA /* ptr to actual (aligned) save area */
1091 extui a3, a0, 16, 5 /* a3 = CP index = n */
1092 add a2, a2, a5 /* a2 = old owner's area for CP n */
1095 The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
1096 It is theoretically possible for Xtensa processor designers to write TIE
1097 that causes more address registers to be affected, but it is generally
1098 unlikely. If that ever happens, more registers need to be saved/restored
1099 around this macro invocation, and the value in a15 needs to be recomputed.
1101 xchal_cpi_store_funcbody
1104 /* Check if any state has to be restored for new owner. */
1105 /* NOTE: a15 = new owner's save area, cannot be zero when we get here. */
1106 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1107 beqz a15, .L_xt_coproc_done
1108 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1110 l16ui a3, a15, XT_CPSTORED /* a3 = new owner's CPSTORED */
1111 movi a4, _xt_coproc_sa_offset
1112 bnone a3, a0, .L_check_cs /* full CP not saved, check callee-saved */
1113 xor a3, a3, a0 /* CPSTORED bit is set, clear it */
1114 s16i a3, a15, XT_CPSTORED /* update new owner's CPSTORED */
1116 /* Adjust new owner's save area pointers to area for CP n. */
1117 extui a3, a0, 16, 5 /* a3 = CP index = n */
1118 addx4 a4, a3, a4 /* a4 = &_xt_coproc_sa_offset[n] */
1119 l32i a4, a4, 0 /* a4 = XT_CP[n]_SA */
1120 l32i a5, a15, XT_CP_ASA /* ptr to actual (aligned) save area */
1121 add a2, a4, a5 /* a2 = new owner's area for CP */
1124 The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
1125 It is theoretically possible for Xtensa processor designers to write TIE
1126 that causes more address registers to be affected, but it is generally
1127 unlikely. If that ever happens, more registers need to be saved/restored
1128 around this macro invocation.
1130 xchal_cpi_load_funcbody
1132 /* Restore interruptee's saved registers. */
1133 /* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
1135 l32i a15, sp, XT_STK_A15
1136 l32i a5, sp, XT_STK_A5
1137 l32i a4, sp, XT_STK_A4
1138 l32i a3, sp, XT_STK_A3
1139 l32i a2, sp, XT_STK_A2
1140 call0 _xt_user_exit /* return via exit dispatcher */
1141 /* Never returns here - call0 is used as a jump (see note at top) */
1144 /* a0 = CP mask in low bits, a15 = new owner's save area */
1145 l16ui a2, a15, XT_CP_CS_ST /* a2 = mask of CPs saved */
1146 bnone a2, a0, .L_xt_coproc_done /* if no match then done */
1147 and a2, a2, a0 /* a2 = which CPs to restore */
1148 extui a2, a2, 0, 8 /* extract low 8 bits */
1149 s32i a6, sp, XT_STK_A6 /* save extra needed regs */
1150 s32i a7, sp, XT_STK_A7
1151 s32i a13, sp, XT_STK_A13
1152 s32i a14, sp, XT_STK_A14
1153 call0 _xt_coproc_restorecs /* restore CP registers */
1154 l32i a6, sp, XT_STK_A6 /* restore saved registers */
1155 l32i a7, sp, XT_STK_A7
1156 l32i a13, sp, XT_STK_A13
1157 l32i a14, sp, XT_STK_A14
1160 /* Co-processor exception occurred outside a thread (not supported). */
1161 .L_xt_coproc_invalid:
1162 movi a0,PANIC_RSN_COPROCEXCEPTION
1164 call0 _xt_panic /* not in a thread (invalid) */
1168 #endif /* XCHAL_CP_NUM */
1172 -------------------------------------------------------------------------------
1173 Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.
1174 -------------------------------------------------------------------------------
1177 .section .iram1,"ax"
1178 .type _xt_lowint1,@function
1182 mov a0, sp /* sp == a1 */
1183 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
1184 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
1185 rsr a0, PS /* save interruptee's PS */
1186 s32i a0, sp, XT_STK_PS
1187 rsr a0, EPC_1 /* save interruptee's PC */
1188 s32i a0, sp, XT_STK_PC
1189 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
1190 s32i a0, sp, XT_STK_A0
1191 movi a0, _xt_user_exit /* save exit point for dispatch */
1192 s32i a0, sp, XT_STK_EXIT
1194 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1195 /* EXCSAVE_1 should now be free to use. Use it to keep a copy of the
1196 current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1197 #ifdef XT_DEBUG_BACKTRACE
1198 #ifndef __XTENSA_CALL0_ABI__
1203 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1206 /* Save rest of interrupt context and enter RTOS. */
1207 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
1209 /* !! We are now on the RTOS system stack !! */
1211 /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1212 #ifdef __XTENSA_CALL0_ABI__
1213 movi a0, PS_INTLEVEL(1) | PS_UM
1215 movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE
1220 /* OK to call C code at this point, dispatch user ISRs */
1222 dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK
1224 /* Done handling interrupts, transfer control to OS */
1225 call0 XT_RTOS_INT_EXIT /* does not return directly here */
1229 -------------------------------------------------------------------------------
1230 MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.
1232 Medium priority interrupts are by definition those with priority greater
1233 than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by
1234 setting PS.EXCM and therefore can easily support a C environment for
1235 handlers in C, and interact safely with an RTOS.
1237 Each vector goes at a predetermined location according to the Xtensa
1238 hardware configuration, which is ensured by its placement in a special
1239 section known to the Xtensa linker support package (LSP). It performs
1240 the minimum necessary before jumping to the handler in the .text section.
1242 The corresponding handler goes in the normal .text section. It sets up
1243 the appropriate stack frame, saves a few vector-specific registers and
1244 calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
1245 and enter the RTOS, then sets up a C environment. It then calls the
1246 user's interrupt handler code (which may be coded in C) and finally
1247 calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
1249 While XT_RTOS_INT_EXIT does not return directly to the interruptee,
1250 eventually the RTOS scheduler will want to dispatch the interrupted
1251 task or handler. The scheduler will return to the exit point that was
1252 saved in the interrupt stack frame at XT_STK_EXIT.
1253 -------------------------------------------------------------------------------
1256 #if XCHAL_EXCM_LEVEL >= 2
1258 .begin literal_prefix .Level2InterruptVector
1259 .section .Level2InterruptVector.text, "ax"
1260 .global _Level2Vector
1261 .type _Level2Vector,@function
1264 wsr a0, EXCSAVE_2 /* preserve a0 */
1265 call0 _xt_medint2 /* load interrupt handler */
1266 /* never returns here - call0 is used as a jump (see note at top) */
1270 .section .iram1,"ax"
1271 .type _xt_medint2,@function
1274 mov a0, sp /* sp == a1 */
1275 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
1276 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
1277 rsr a0, EPS_2 /* save interruptee's PS */
1278 s32i a0, sp, XT_STK_PS
1279 rsr a0, EPC_2 /* save interruptee's PC */
1280 s32i a0, sp, XT_STK_PC
1281 rsr a0, EXCSAVE_2 /* save interruptee's a0 */
1282 s32i a0, sp, XT_STK_A0
1283 movi a0, _xt_medint2_exit /* save exit point for dispatch */
1284 s32i a0, sp, XT_STK_EXIT
1286 /* EXCSAVE_2 should now be free to use. Use it to keep a copy of the
1287 current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1288 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1289 #ifdef XT_DEBUG_BACKTRACE
1290 #ifndef __XTENSA_CALL0_ABI__
1295 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1298 /* Save rest of interrupt context and enter RTOS. */
1299 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
1301 /* !! We are now on the RTOS system stack !! */
1303 /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1304 #ifdef __XTENSA_CALL0_ABI__
1305 movi a0, PS_INTLEVEL(2) | PS_UM
1307 movi a0, PS_INTLEVEL(2) | PS_UM | PS_WOE
1312 /* OK to call C code at this point, dispatch user ISRs */
1314 dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK
1316 /* Done handling interrupts, transfer control to OS */
1317 call0 XT_RTOS_INT_EXIT /* does not return directly here */
1320 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1321 on entry and used to return to a thread or interrupted interrupt handler.
1323 .global _xt_medint2_exit
1324 .type _xt_medint2_exit,@function
1327 /* Restore only level-specific regs (the rest were already restored) */
1328 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
1330 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
1332 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
1333 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
1334 rsync /* ensure EPS and EPC written */
1337 #endif /* Level 2 */
1339 #if XCHAL_EXCM_LEVEL >= 3
1341 .begin literal_prefix .Level3InterruptVector
1342 .section .Level3InterruptVector.text, "ax"
1343 .global _Level3Vector
1344 .type _Level3Vector,@function
1347 wsr a0, EXCSAVE_3 /* preserve a0 */
1348 call0 _xt_medint3 /* load interrupt handler */
1349 /* never returns here - call0 is used as a jump (see note at top) */
1353 .section .iram1,"ax"
1354 .type _xt_medint3,@function
1357 mov a0, sp /* sp == a1 */
1358 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
1359 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
1360 rsr a0, EPS_3 /* save interruptee's PS */
1361 s32i a0, sp, XT_STK_PS
1362 rsr a0, EPC_3 /* save interruptee's PC */
1363 s32i a0, sp, XT_STK_PC
1364 rsr a0, EXCSAVE_3 /* save interruptee's a0 */
1365 s32i a0, sp, XT_STK_A0
1366 movi a0, _xt_medint3_exit /* save exit point for dispatch */
1367 s32i a0, sp, XT_STK_EXIT
1369 /* EXCSAVE_3 should now be free to use. Use it to keep a copy of the
1370 current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1371 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1372 #ifdef XT_DEBUG_BACKTRACE
1373 #ifndef __XTENSA_CALL0_ABI__
1378 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1381 /* Save rest of interrupt context and enter RTOS. */
1382 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
1384 /* !! We are now on the RTOS system stack !! */
1386 /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1387 #ifdef __XTENSA_CALL0_ABI__
1388 movi a0, PS_INTLEVEL(3) | PS_UM
1390 movi a0, PS_INTLEVEL(3) | PS_UM | PS_WOE
1395 /* OK to call C code at this point, dispatch user ISRs */
1397 dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK
1399 /* Done handling interrupts, transfer control to OS */
1400 call0 XT_RTOS_INT_EXIT /* does not return directly here */
1403 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1404 on entry and used to return to a thread or interrupted interrupt handler.
1406 .global _xt_medint3_exit
1407 .type _xt_medint3_exit,@function
1410 /* Restore only level-specific regs (the rest were already restored) */
1411 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
1413 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
1415 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
1416 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
1417 rsync /* ensure EPS and EPC written */
1420 #endif /* Level 3 */
1422 #if XCHAL_EXCM_LEVEL >= 4
1424 .begin literal_prefix .Level4InterruptVector
1425 .section .Level4InterruptVector.text, "ax"
1426 .global _Level4Vector
1427 .type _Level4Vector,@function
1430 wsr a0, EXCSAVE_4 /* preserve a0 */
1431 call0 _xt_medint4 /* load interrupt handler */
1435 .section .iram1,"ax"
1436 .type _xt_medint4,@function
1439 mov a0, sp /* sp == a1 */
1440 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
1441 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
1442 rsr a0, EPS_4 /* save interruptee's PS */
1443 s32i a0, sp, XT_STK_PS
1444 rsr a0, EPC_4 /* save interruptee's PC */
1445 s32i a0, sp, XT_STK_PC
1446 rsr a0, EXCSAVE_4 /* save interruptee's a0 */
1447 s32i a0, sp, XT_STK_A0
1448 movi a0, _xt_medint4_exit /* save exit point for dispatch */
1449 s32i a0, sp, XT_STK_EXIT
1451 /* EXCSAVE_4 should now be free to use. Use it to keep a copy of the
1452 current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1453 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1454 #ifdef XT_DEBUG_BACKTRACE
1455 #ifndef __XTENSA_CALL0_ABI__
1460 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1463 /* Save rest of interrupt context and enter RTOS. */
1464 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
1466 /* !! We are now on the RTOS system stack !! */
1468 /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1469 #ifdef __XTENSA_CALL0_ABI__
1470 movi a0, PS_INTLEVEL(4) | PS_UM
1472 movi a0, PS_INTLEVEL(4) | PS_UM | PS_WOE
1477 /* OK to call C code at this point, dispatch user ISRs */
1479 dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK
1481 /* Done handling interrupts, transfer control to OS */
1482 call0 XT_RTOS_INT_EXIT /* does not return directly here */
1485 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1486 on entry and used to return to a thread or interrupted interrupt handler.
1488 .global _xt_medint4_exit
1489 .type _xt_medint4_exit,@function
1492 /* Restore only level-specific regs (the rest were already restored) */
1493 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
1495 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
1497 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
1498 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
1499 rsync /* ensure EPS and EPC written */
1502 #endif /* Level 4 */
1504 #if XCHAL_EXCM_LEVEL >= 5
1506 .begin literal_prefix .Level5InterruptVector
1507 .section .Level5InterruptVector.text, "ax"
1508 .global _Level5Vector
1509 .type _Level5Vector,@function
1512 wsr a0, EXCSAVE_5 /* preserve a0 */
1513 call0 _xt_medint5 /* load interrupt handler */
1517 .section .iram1,"ax"
1518 .type _xt_medint5,@function
1521 mov a0, sp /* sp == a1 */
1522 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
1523 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
1524 rsr a0, EPS_5 /* save interruptee's PS */
1525 s32i a0, sp, XT_STK_PS
1526 rsr a0, EPC_5 /* save interruptee's PC */
1527 s32i a0, sp, XT_STK_PC
1528 rsr a0, EXCSAVE_5 /* save interruptee's a0 */
1529 s32i a0, sp, XT_STK_A0
1530 movi a0, _xt_medint5_exit /* save exit point for dispatch */
1531 s32i a0, sp, XT_STK_EXIT
1533 /* EXCSAVE_5 should now be free to use. Use it to keep a copy of the
1534 current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1535 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1536 #ifdef XT_DEBUG_BACKTRACE
1537 #ifndef __XTENSA_CALL0_ABI__
1542 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1544 /* Save rest of interrupt context and enter RTOS. */
1545 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
1547 /* !! We are now on the RTOS system stack !! */
1549 /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1550 #ifdef __XTENSA_CALL0_ABI__
1551 movi a0, PS_INTLEVEL(5) | PS_UM
1553 movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
1558 /* OK to call C code at this point, dispatch user ISRs */
1560 dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK
1562 /* Done handling interrupts, transfer control to OS */
1563 call0 XT_RTOS_INT_EXIT /* does not return directly here */
1566 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1567 on entry and used to return to a thread or interrupted interrupt handler.
1569 .global _xt_medint5_exit
1570 .type _xt_medint5_exit,@function
1573 /* Restore only level-specific regs (the rest were already restored) */
1574 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
1576 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
1578 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
1579 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
1580 rsync /* ensure EPS and EPC written */
1583 #endif /* Level 5 */
1585 #if XCHAL_EXCM_LEVEL >= 6
1587 .begin literal_prefix .Level6InterruptVector
1588 .section .Level6InterruptVector.text, "ax"
1589 .global _Level6Vector
1590 .type _Level6Vector,@function
1593 wsr a0, EXCSAVE_6 /* preserve a0 */
1594 call0 _xt_medint6 /* load interrupt handler */
1598 .section .iram1,"ax"
1599 .type _xt_medint6,@function
1602 mov a0, sp /* sp == a1 */
1603 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
1604 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
1605 rsr a0, EPS_6 /* save interruptee's PS */
1606 s32i a0, sp, XT_STK_PS
1607 rsr a0, EPC_6 /* save interruptee's PC */
1608 s32i a0, sp, XT_STK_PC
1609 rsr a0, EXCSAVE_6 /* save interruptee's a0 */
1610 s32i a0, sp, XT_STK_A0
1611 movi a0, _xt_medint6_exit /* save exit point for dispatch */
1612 s32i a0, sp, XT_STK_EXIT
1614 /* EXCSAVE_6 should now be free to use. Use it to keep a copy of the
1615 current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1616 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1617 #ifdef XT_DEBUG_BACKTRACE
1618 #ifndef __XTENSA_CALL0_ABI__
1623 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1625 /* Save rest of interrupt context and enter RTOS. */
1626 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
1628 /* !! We are now on the RTOS system stack !! */
1630 /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1631 #ifdef __XTENSA_CALL0_ABI__
1632 movi a0, PS_INTLEVEL(6) | PS_UM
1634 movi a0, PS_INTLEVEL(6) | PS_UM | PS_WOE
1639 /* OK to call C code at this point, dispatch user ISRs */
1641 dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK
1643 /* Done handling interrupts, transfer control to OS */
1644 call0 XT_RTOS_INT_EXIT /* does not return directly here */
1647 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1648 on entry and used to return to a thread or interrupted interrupt handler.
1650 .global _xt_medint6_exit
1651 .type _xt_medint6_exit,@function
1654 /* Restore only level-specific regs (the rest were already restored) */
1655 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
1657 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
1659 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
1660 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
1661 rsync /* ensure EPS and EPC written */
1664 #endif /* Level 6 */
1667 /*******************************************************************************
1669 HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS
1671 High priority interrupts are by definition those with priorities greater
1672 than XCHAL_EXCM_LEVEL. This includes the non-maskable interrupt (NMI). High priority
1673 interrupts cannot interact with the RTOS; that is, they must save all registers
1674 they use and must not call any RTOS function.
1676 A further restriction imposed by the Xtensa windowed architecture is that
1677 high priority interrupts must not modify the stack area even logically
1678 "above" the top of the interrupted stack (they need to provide their
1679 own stack or static save area).
1681 Cadence Design Systems recommends high priority interrupt handlers be coded in assembly
1682 and used for purposes requiring very short service times.
1684 Here are templates for high priority (level 2+) interrupt vectors.
1685 They assume only one interrupt per level to avoid the burden of identifying
1686 which interrupts at this level are pending and enabled. This allows for
1687 minimum latency and avoids having to save/restore a2 in addition to a0.
1688 If more than one interrupt per high priority level is configured, this burden
1689 is on the handler which in any case must provide a way to save and restore
1690 registers it uses without touching the interrupted stack.
1692 Each vector goes at a predetermined location according to the Xtensa
1693 hardware configuration, which is ensured by its placement in a special
1694 section known to the Xtensa linker support package (LSP). It performs
1695 the minimum necessary before jumping to the handler in the .text section.
1697 *******************************************************************************/
1700 These stubs just call xt_highintX/xt_nmi to handle the real interrupt. Please define
1701 these in an external assembly source file. If these symbols are not defined anywhere
1702 else, the defaults in xtensa_vector_defaults.S are used.
1705 #if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2
1707 .begin literal_prefix .Level2InterruptVector
1708 .section .Level2InterruptVector.text, "ax"
1709 .global _Level2Vector
1710 .type _Level2Vector,@function
1714 wsr a0, EXCSAVE_2 /* preserve a0 */
1715 call0 xt_highint2 /* load interrupt handler */
1719 #endif /* Level 2 */
1721 #if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3
1723 .begin literal_prefix .Level3InterruptVector
1724 .section .Level3InterruptVector.text, "ax"
1725 .global _Level3Vector
1726 .type _Level3Vector,@function
1730 wsr a0, EXCSAVE_3 /* preserve a0 */
1731 call0 xt_highint3 /* load interrupt handler */
1732 /* never returns here - call0 is used as a jump (see note at top) */
1736 #endif /* Level 3 */
1738 #if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4
1740 .begin literal_prefix .Level4InterruptVector
1741 .section .Level4InterruptVector.text, "ax"
1742 .global _Level4Vector
1743 .type _Level4Vector,@function
1747 wsr a0, EXCSAVE_4 /* preserve a0 */
1748 call0 xt_highint4 /* load interrupt handler */
1749 /* never returns here - call0 is used as a jump (see note at top) */
1753 #endif /* Level 4 */
1755 #if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5
1757 .begin literal_prefix .Level5InterruptVector
1758 .section .Level5InterruptVector.text, "ax"
1759 .global _Level5Vector
1760 .type _Level5Vector,@function
1764 wsr a0, EXCSAVE_5 /* preserve a0 */
1765 call0 xt_highint5 /* load interrupt handler */
1766 /* never returns here - call0 is used as a jump (see note at top) */
1770 #endif /* Level 5 */
1772 #if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6
1774 .begin literal_prefix .Level6InterruptVector
1775 .section .Level6InterruptVector.text, "ax"
1776 .global _Level6Vector
1777 .type _Level6Vector,@function
1781 wsr a0, EXCSAVE_6 /* preserve a0 */
1782 call0 xt_highint6 /* load interrupt handler */
1783 /* never returns here - call0 is used as a jump (see note at top) */
1787 #endif /* Level 6 */
1791 .begin literal_prefix .NMIExceptionVector
1792 .section .NMIExceptionVector.text, "ax"
1793 .global _NMIExceptionVector
1794 .type _NMIExceptionVector,@function
1797 _NMIExceptionVector:
1798 wsr a0, EXCSAVE + XCHAL_NMILEVEL /* preserve a0 */
1799 call0 xt_nmi /* load interrupt handler */
1800 /* never returns here - call0 is used as a jump (see note at top) */
/*******************************************************************************

WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER

Here is the code for each window overflow/underflow exception vector and
(interspersed) efficient code for handling the alloca exception cause.
Window exceptions are handled entirely in the vector area and are very
tight for performance. The alloca exception is also handled entirely in
the window vector area so comes at essentially no cost in code size.
Users should never need to modify them, and Cadence Design Systems recommends
they do not.

Window handlers go at predetermined vector locations according to the
Xtensa hardware configuration, which is ensured by their placement in a
special section known to the Xtensa linker support package (LSP). Since
their offsets in that section are always the same, the LSPs do not define
a section per vector.

These handlers are coded for XEA2 only (XEA1 is not supported).

Note on Underflow Handlers:
The underflow handler for returning from call[i+1] to call[i]
must preserve all the registers from call[i+1]'s window.
In particular, a0 and a1 must be preserved because the RETW instruction
will be re-executed (and may even underflow again if an intervening exception
has flushed call[i]'s registers).
Registers a2 and up may contain return values.

*******************************************************************************/
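
/*
--------------------------------------------------------------------------------
Illustrative sketch (not assembled from this comment) of the call[i]/call[j]
terminology used below, assuming the windowed ABI. The function names, frame
sizes and values are invented for the example. A call4 rotates the register
window by 4, so the callee sees the caller's a6 as its a2; which of the
_WindowOverflowN/_WindowUnderflowN handlers below is taken depends on the call
size used at the frame boundary involved, exactly as the per-handler comments
spell out.

        .text
        .align  4
        .global example_callee
        .type   example_callee,@function
    example_callee:
        entry   a1, 32              # rotate the window, reserve a 32-byte frame
        addi    a2, a2, 1           # argument arrived in a2, result left in a2
        retw                        # may take a window underflow if the caller's
                                    # registers were spilled in the meantime

        .global example_caller
        .type   example_caller,@function
        .align  4
    example_caller:
        entry   a1, 32
        movi    a6, 41              # the caller's a6 becomes the callee's a2
        call4   example_callee      # deep call chains make the callee's entry
                                    # spill an older frame via a window overflow
        mov     a2, a6              # the callee's result (its a2) is our a6
        retw
--------------------------------------------------------------------------------
*/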
#if XCHAL_HAVE_WINDOWED

    .section .WindowVectors.text, "ax"
/*
--------------------------------------------------------------------------------
Window Overflow Exception for Call4.

Invoked if a call[i] referenced a register (a4-a15)
that contains data from ancestor call[j];
call[j] had done a call4 to call[j+1].
On entry:
    window rotated to call[j] start point;
    a0-a3 are registers to be saved;
    a4-a15 must be preserved;
    a5 is call[j+1]'s stack pointer.
--------------------------------------------------------------------------------
*/
    .org    0x0
    .global _WindowOverflow4
_WindowOverflow4:

    s32e    a0, a5, -16     /* save a0 to call[j+1]'s stack frame */
    s32e    a1, a5, -12     /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a5, -8      /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a5, -4      /* save a3 to call[j+1]'s stack frame */
    rfwo                    /* rotates back to call[i] position */
/*
--------------------------------------------------------------------------------
Window Underflow Exception for Call4

Invoked by RETW returning from call[i+1] to call[i]
where call[i]'s registers must be reloaded (not live in ARs);
call[i] had done a call4 to call[i+1].
On entry:
    window rotated to call[i] start point;
    a0-a3 are undefined, must be reloaded with call[i].reg[0..3];
    a4-a15 must be preserved (they are call[i+1].reg[0..11]);
    a5 is call[i+1]'s stack pointer.
--------------------------------------------------------------------------------
*/
    .org    0x40
    .global _WindowUnderflow4
_WindowUnderflow4:

    l32e    a0, a5, -16     /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a5, -12     /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a5, -8      /* restore a2 from call[i+1]'s stack frame */
    l32e    a3, a5, -4      /* restore a3 from call[i+1]'s stack frame */
    rfwu                    /* return from window underflow */
/*
--------------------------------------------------------------------------------
Handle the alloca exception generated by the interruptee executing 'movsp'.
This uses space between the window vectors, so is essentially "free".
All of the interruptee's registers are intact except a0, which is saved in
EXCSAVE_1, and PS.EXCM has been set by the exception hardware (can't be
interrupted). The fact that the alloca exception was taken means the registers
associated with the base-save area have been spilled and will be restored by
the underflow handler, so those four registers are available for scratch.
The code is optimized to avoid unaligned branches and minimize cache misses.
--------------------------------------------------------------------------------
*/
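
/*
--------------------------------------------------------------------------------
Illustrative sketch (not assembled from this comment) of what reaches this
handler: for alloca() and variable-length arrays the compiler adjusts the stack
pointer of an already-entered windowed function with MOVSP, roughly as below
(the registers and size computation are invented for the example). MOVSP raises
the alloca exception when the registers associated with the base-save area have
been spilled; the handler below reloads them via the proper window underflow
handler, after which the MOVSP is re-executed.

        sub     a3, a1, a2          # a2 holds the run-time size requested by alloca()
        movsp   a1, a3              # move SP down; may raise the alloca exception
--------------------------------------------------------------------------------
*/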
    .align  4
    .global _xt_alloca_exc
_xt_alloca_exc:

    rsr     a0, WINDOWBASE  /* grab WINDOWBASE before rotw changes it */
    rotw    -1              /* WINDOWBASE goes to a4, new a0-a3 are scratch */
    rsr     a2, PS          /* a2 <- PS, for patching PS.OWB below */
    extui   a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
    xor     a3, a3, a4      /* bits changed from old to current windowbase */
    rsr     a4, EXCSAVE_1   /* restore original a0 (now in a4) */
    slli    a3, a3, XCHAL_PS_OWB_SHIFT
    xor     a2, a2, a3      /* flip changed bits in old window base */
    wsr     a2, PS          /* update PS.OWB to new window base */
    rsync                   /* wait for the PS update to take effect */

    _bbci.l a4, 31, _WindowUnderflow4
    rotw    -1              /* original a0 goes to a8 */
    _bbci.l a8, 30, _WindowUnderflow8
    rotw    -1              /* original a0 goes to a12 */
    j       _WindowUnderflow12
/*
--------------------------------------------------------------------------------
Window Overflow Exception for Call8

Invoked if a call[i] referenced a register (a4-a15)
that contains data from ancestor call[j];
call[j] had done a call8 to call[j+1].
On entry:
    window rotated to call[j] start point;
    a0-a7 are registers to be saved;
    a8-a15 must be preserved;
    a9 is call[j+1]'s stack pointer.
--------------------------------------------------------------------------------
*/
    .org    0x80
    .global _WindowOverflow8
_WindowOverflow8:

    s32e    a0, a9, -16     /* save a0 to call[j+1]'s stack frame */
    l32e    a0, a1, -12     /* a0 <- call[j-1]'s sp
                               (used to find end of call[j]'s frame) */
    s32e    a1, a9, -12     /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a9, -8      /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a9, -4      /* save a3 to call[j+1]'s stack frame */
    s32e    a4, a0, -32     /* save a4 to call[j]'s stack frame */
    s32e    a5, a0, -28     /* save a5 to call[j]'s stack frame */
    s32e    a6, a0, -24     /* save a6 to call[j]'s stack frame */
    s32e    a7, a0, -20     /* save a7 to call[j]'s stack frame */
    rfwo                    /* rotates back to call[i] position */
/*
--------------------------------------------------------------------------------
Window Underflow Exception for Call8

Invoked by RETW returning from call[i+1] to call[i]
where call[i]'s registers must be reloaded (not live in ARs);
call[i] had done a call8 to call[i+1].
On entry:
    window rotated to call[i] start point;
    a0-a7 are undefined, must be reloaded with call[i].reg[0..7];
    a8-a15 must be preserved (they are call[i+1].reg[0..7]);
    a9 is call[i+1]'s stack pointer.
--------------------------------------------------------------------------------
*/
    .org    0xC0
    .global _WindowUnderflow8
_WindowUnderflow8:

    l32e    a0, a9, -16     /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a9, -12     /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a9, -8      /* restore a2 from call[i+1]'s stack frame */
    l32e    a7, a1, -12     /* a7 <- call[i-1]'s sp
                               (used to find end of call[i]'s frame) */
    l32e    a3, a9, -4      /* restore a3 from call[i+1]'s stack frame */
    l32e    a4, a7, -32     /* restore a4 from call[i]'s stack frame */
    l32e    a5, a7, -28     /* restore a5 from call[i]'s stack frame */
    l32e    a6, a7, -24     /* restore a6 from call[i]'s stack frame */
    l32e    a7, a7, -20     /* restore a7 from call[i]'s stack frame */
    rfwu                    /* return from window underflow */
/*
--------------------------------------------------------------------------------
Window Overflow Exception for Call12

Invoked if a call[i] referenced a register (a4-a15)
that contains data from ancestor call[j];
call[j] had done a call12 to call[j+1].
On entry:
    window rotated to call[j] start point;
    a0-a11 are registers to be saved;
    a12-a15 must be preserved;
    a13 is call[j+1]'s stack pointer.
--------------------------------------------------------------------------------
*/
    .org    0x100
    .global _WindowOverflow12
_WindowOverflow12:

    s32e    a0, a13, -16    /* save a0 to call[j+1]'s stack frame */
    l32e    a0, a1, -12     /* a0 <- call[j-1]'s sp
                               (used to find end of call[j]'s frame) */
    s32e    a1, a13, -12    /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a13, -8     /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a13, -4     /* save a3 to call[j+1]'s stack frame */
    s32e    a4, a0, -48     /* save a4 to end of call[j]'s stack frame */
    s32e    a5, a0, -44     /* save a5 to end of call[j]'s stack frame */
    s32e    a6, a0, -40     /* save a6 to end of call[j]'s stack frame */
    s32e    a7, a0, -36     /* save a7 to end of call[j]'s stack frame */
    s32e    a8, a0, -32     /* save a8 to end of call[j]'s stack frame */
    s32e    a9, a0, -28     /* save a9 to end of call[j]'s stack frame */
    s32e    a10, a0, -24    /* save a10 to end of call[j]'s stack frame */
    s32e    a11, a0, -20    /* save a11 to end of call[j]'s stack frame */
    rfwo                    /* rotates back to call[i] position */
/*
--------------------------------------------------------------------------------
Window Underflow Exception for Call12

Invoked by RETW returning from call[i+1] to call[i]
where call[i]'s registers must be reloaded (not live in ARs);
call[i] had done a call12 to call[i+1].
On entry:
    window rotated to call[i] start point;
    a0-a11 are undefined, must be reloaded with call[i].reg[0..11];
    a12-a15 must be preserved (they are call[i+1].reg[0..3]);
    a13 is call[i+1]'s stack pointer.
--------------------------------------------------------------------------------
*/
    .org    0x140
    .global _WindowUnderflow12
_WindowUnderflow12:

    l32e    a0, a13, -16    /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a13, -12    /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a13, -8     /* restore a2 from call[i+1]'s stack frame */
    l32e    a11, a1, -12    /* a11 <- call[i-1]'s sp
                               (used to find end of call[i]'s frame) */
    l32e    a3, a13, -4     /* restore a3 from call[i+1]'s stack frame */
    l32e    a4, a11, -48    /* restore a4 from end of call[i]'s stack frame */
    l32e    a5, a11, -44    /* restore a5 from end of call[i]'s stack frame */
    l32e    a6, a11, -40    /* restore a6 from end of call[i]'s stack frame */
    l32e    a7, a11, -36    /* restore a7 from end of call[i]'s stack frame */
    l32e    a8, a11, -32    /* restore a8 from end of call[i]'s stack frame */
    l32e    a9, a11, -28    /* restore a9 from end of call[i]'s stack frame */
    l32e    a10, a11, -24   /* restore a10 from end of call[i]'s stack frame */
    l32e    a11, a11, -20   /* restore a11 from end of call[i]'s stack frame */
    rfwu                    /* return from window underflow */
#endif /* XCHAL_HAVE_WINDOWED */

    .section    .UserEnter.text, "ax"
    .global     call_user_start
    .type       call_user_start,@function