2 * FreeRTOS Kernel V10.4.6
3 * Copyright (C) 2006-2015 Cadence Design Systems, Inc.
4 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6 * SPDX-License-Identifier: MIT
8 * Permission is hereby granted, free of charge, to any person obtaining a copy of
9 * this software and associated documentation files (the "Software"), to deal in
10 * the Software without restriction, including without limitation the rights to
11 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
12 * the Software, and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in all
16 * copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
20 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
21 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
22 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * https://www.FreeRTOS.org
26 * https://github.com/FreeRTOS
30 /*******************************************************************************
32 XTENSA CONTEXT SAVE AND RESTORE ROUTINES
34 Low-level Call0 functions for handling generic context save and restore of
35 registers not specifically addressed by the interrupt vectors and handlers.
36 Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
37 Except for the calls to RTOS functions, this code is generic to Xtensa.
39 Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
40 save regs (A12-A15), which is always the case if the handlers are coded in C.
41 However A12, A13 are made available as scratch registers for interrupt dispatch
42 code, so are presumed saved anyway, and are always restored even in Call0 ABI.
43 Only A14, A15 are truly handled as callee-save regs.
45 Because Xtensa is a configurable architecture, this port supports all user
46 generated configurations (except restrictions stated in the release notes).
47 This is accomplished by conditional compilation using macros and functions
48 defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
49 Only the processor state included in your configuration is saved and restored,
50 including any processor state added by user configuration options or TIE.
52 *******************************************************************************/
54 /* Warn nicely if this file gets named with a lowercase .s instead of .S: */
56 NOERROR: .error "C preprocessor needed for this file: make sure its filename\
57 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."
60 #include "xtensa_rtos.h"
61 #include "xtensa_context.h"
62 #include "esp_idf_version.h"
63 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
64 #include "xt_asm_utils.h"
68 #include <xtensa/overlay_os_asm.h>
73 /*******************************************************************************
77 !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
79 Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
80 interrupt stack frame defined in xtensa_rtos.h.
81 Its counterpart is _xt_context_restore (which also restores A12, A13).
83 Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
84 This function preserves A12 & A13 in order to provide the caller with 2 scratch
85 regs that need not be saved over the call to this function. The choice of which
86 2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
87 to avoid moving data more than necessary. Caller can assign regs accordingly.
90 A0 = Return address in caller.
91 A1 = Stack pointer of interrupted thread or handler ("interruptee").
92 Original A12, A13 have already been saved in the interrupt stack frame.
93 Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
94 point of interruption.
95 If windowed ABI, PS.EXCM = 1 (exceptions disabled).
98 A0 = Return address in caller.
99 A1 = Stack pointer of interrupted thread or handler ("interruptee").
100 A12, A13 as at entry (preserved).
101 If windowed ABI, PS.EXCM = 1 (exceptions disabled).
103 *******************************************************************************/
105 .global _xt_context_save
106 .type _xt_context_save,@function
/* NOTE(review): this view appears to be missing interleaved lines — the
   _xt_context_save: entry label, the rsr instructions that load a3 before the
   SAR/LBEG/LEND/LCOUNT stores below, and some #endif lines. Confirm against
   the complete file before modifying any instruction here. */
/* Save general-purpose registers a2-a11 into the interrupt stack frame at SP. */
113 s32i a2, sp, XT_STK_A2
114 s32i a3, sp, XT_STK_A3
115 s32i a4, sp, XT_STK_A4
116 s32i a5, sp, XT_STK_A5
117 s32i a6, sp, XT_STK_A6
118 s32i a7, sp, XT_STK_A7
119 s32i a8, sp, XT_STK_A8
120 s32i a9, sp, XT_STK_A9
121 s32i a10, sp, XT_STK_A10
122 s32i a11, sp, XT_STK_A11
125 Call0 ABI callee-saved regs a12-15 do not need to be saved here.
126 a12-13 are the caller's responsibility so it can use them as scratch.
127 So only need to save a14-a15 here for Windowed ABI (not Call0).
129 #ifndef __XTENSA_CALL0_ABI__
130 s32i a14, sp, XT_STK_A14
131 s32i a15, sp, XT_STK_A15
/* Save special registers (a3 holds each value read just before its store;
   the rsr reads are not visible in this view — presumed dropped by extraction). */
135 s32i a3, sp, XT_STK_SAR
139 s32i a3, sp, XT_STK_LBEG
141 s32i a3, sp, XT_STK_LEND
143 s32i a3, sp, XT_STK_LCOUNT
147 /* Save virtual priority mask */
148 movi a3, _xt_vpri_mask
150 s32i a3, sp, XT_STK_VPRI
153 #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
154 mov a9, a0 /* preserve ret addr */
/* Pre-IDF-4.2 path: spill register windows via the HAL routine. */
157 #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
158 #ifndef __XTENSA_CALL0_ABI__
160 To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
161 Need to save a9,12,13 temporarily (in frame temps) and recover originals.
162 Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
163 and underflow exceptions disabled (assured by PS.EXCM == 1).
165 s32i a12, sp, XT_STK_TMP0 /* temp. save stuff in stack frame */
166 s32i a13, sp, XT_STK_TMP1
167 s32i a9, sp, XT_STK_TMP2
170 Save the overlay state if we are supporting overlays. Since we just saved
171 three registers, we can conveniently use them here. Note that as of now,
172 overlays only work for windowed calling ABI.
175 l32i a9, sp, XT_STK_PC /* recover saved PC */
176 _xt_overlay_get_state a9, a12, a13
177 s32i a9, sp, XT_STK_OVLY /* save overlay state */
180 l32i a12, sp, XT_STK_A12 /* recover original a9,12,13 */
181 l32i a13, sp, XT_STK_A13
182 l32i a9, sp, XT_STK_A9
183 addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */
184 call0 xthal_window_spill_nw /* preserves only a4,5,8,9,12,13 */
185 addi sp, sp, -XT_STK_FRMSZ
186 l32i a12, sp, XT_STK_TMP0 /* recover stuff from stack frame */
187 l32i a13, sp, XT_STK_TMP1
188 l32i a9, sp, XT_STK_TMP2
190 #endif /* (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) */
/* IDF >= 4.2 path: stash a12/a13/a9 in frame temps, then recover the
   interruptee's original values before spilling windows. */
192 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
193 s32i a12, sp, XT_STK_TMP0 /* temp. save stuff in stack frame */
194 s32i a13, sp, XT_STK_TMP1
195 s32i a9, sp, XT_STK_TMP2
197 l32i a12, sp, XT_STK_A12 /* recover original a9,12,13 */
198 l32i a13, sp, XT_STK_A13
199 l32i a9, sp, XT_STK_A9
/* Save any extra (TIE/user-configured) processor state via the HAL. */
202 #if XCHAL_EXTRA_SA_SIZE > 0
203 addi a2, sp, XT_STK_EXTRA /* where to save it */
204 # if XCHAL_EXTRA_SA_ALIGN > 16
205 movi a3, -XCHAL_EXTRA_SA_ALIGN
206 and a2, a2, a3 /* align dynamically >16 bytes */
208 call0 xthal_save_extra_nw /* destroys a0,2,3 */
211 #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
212 #ifndef __XTENSA_CALL0_ABI__
214 l32i a9, sp, XT_STK_PC /* recover saved PC */
215 _xt_overlay_get_state a9, a12, a13
216 s32i a9, sp, XT_STK_OVLY /* save overlay state */
219 /* SPILL_ALL_WINDOWS macro requires window overflow exceptions to be enabled,
220 * i.e. PS.EXCM cleared and PS.WOE set.
221 * Since we are going to clear PS.EXCM, we also need to increase INTLEVEL
222 * at least to XCHAL_EXCM_LEVEL. This matches that value of effective INTLEVEL
223 * at entry (CINTLEVEL=max(PS.INTLEVEL, XCHAL_EXCM_LEVEL)) when PS.EXCM is set.
224 * Since WindowOverflow exceptions will trigger inside SPILL_ALL_WINDOWS,
225 * need to save/restore EPC1 as well.
226 * Note: even though a4-a15 are saved into the exception frame, we should not
227 * clobber them until after SPILL_ALL_WINDOWS. This is because these registers
228 * may contain live windows belonging to previous frames in the call stack.
229 * These frames will be spilled by SPILL_ALL_WINDOWS, and if the register was
230 * used as a temporary by this code, the temporary value would get stored
231 * onto the stack, instead of the real value.
233 rsr a2, PS /* to be restored after SPILL_ALL_WINDOWS */
234 movi a0, PS_INTLEVEL_MASK
235 and a3, a2, a0 /* get the current INTLEVEL */
236 bgeui a3, XCHAL_EXCM_LEVEL, 1f /* calculate max(INTLEVEL, XCHAL_EXCM_LEVEL) */
237 movi a3, XCHAL_EXCM_LEVEL
239 movi a0, PS_UM | PS_WOE /* clear EXCM, enable window overflow, set new INTLEVEL */
242 rsr a0, EPC1 /* to be restored after SPILL_ALL_WINDOWS */
244 addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */
/* NOTE(review): the SPILL_ALL_WINDOWS invocation itself is not visible between
   these two SP adjustments in this view — presumed dropped by extraction. */
246 addi sp, sp, -XT_STK_FRMSZ /* return the current stack pointer and proceed with context save*/
249 wsr a2, PS /* restore to the value at entry */
251 wsr a0, EPC1 /* likewise */
253 #endif /* __XTENSA_CALL0_ABI__ */
255 l32i a12, sp, XT_STK_TMP0 /* restore the temp saved registers */
256 l32i a13, sp, XT_STK_TMP1 /* our return address is there */
257 l32i a9, sp, XT_STK_TMP2
258 #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
260 #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
261 mov a0, a9 /* retrieve ret addr */
266 /*******************************************************************************
270 !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
272 Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
273 ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
274 stack frame defined in xtensa_rtos.h.
275 Its counterpart is _xt_context_save (whose caller saved A12, A13).
277 Caller is responsible to restore PC, PS, A0, A1 (SP).
280 A0 = Return address in caller.
281 A1 = Stack pointer of interrupted thread or handler ("interruptee").
284 A0 = Return address in caller.
285 A1 = Stack pointer of interrupted thread or handler ("interruptee").
286 Other processor state except PC, PS, A0, A1 (SP), is as at the point
289 *******************************************************************************/
291 .global _xt_context_restore
292 .type _xt_context_restore,@function
/* NOTE(review): the _xt_context_restore: entry label, the wsr instructions
   paired with the LBEG/LEND/LCOUNT/SAR loads, some #endif lines, and the
   trailing RET0 are not visible in this view — confirm against the full file. */
/* Restore any extra (TIE/user-configured) processor state via the HAL. */
298 #if XCHAL_EXTRA_SA_SIZE > 0
300 NOTE: Normally the xthal_restore_extra_nw macro only affects address
301 registers a2-a5. It is theoretically possible for Xtensa processor
302 designers to write TIE that causes more address registers to be
303 affected, but it is generally unlikely. If that ever happens,
304 more registers need to be saved/restored around this macro invocation.
305 Here we only assume a13 is preserved.
306 Future Xtensa tools releases might limit the regs that can be affected.
308 mov a13, a0 /* preserve ret addr */
309 addi a2, sp, XT_STK_EXTRA /* where to find it */
310 # if XCHAL_EXTRA_SA_ALIGN > 16
311 movi a3, -XCHAL_EXTRA_SA_ALIGN
312 and a2, a2, a3 /* align dynamically >16 bytes */
314 call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */
315 mov a0, a13 /* retrieve ret addr */
/* Reload loop registers saved by _xt_context_save (writes back via wsr,
   not visible in this view). */
319 l32i a2, sp, XT_STK_LBEG
320 l32i a3, sp, XT_STK_LEND
322 l32i a2, sp, XT_STK_LCOUNT
329 If we are using overlays, this is a good spot to check if we need
330 to restore an overlay for the incoming task. Here we have a bunch
331 of registers to spare. Note that this step is going to use a few
332 bytes of storage below SP (SP-20 to SP-32) if an overlay is going
335 l32i a2, sp, XT_STK_PC /* retrieve PC */
336 l32i a3, sp, XT_STK_PS /* retrieve PS */
337 l32i a4, sp, XT_STK_OVLY /* retrieve overlay state */
338 l32i a5, sp, XT_STK_A1 /* retrieve stack ptr */
339 _xt_overlay_check_map a2, a3, a4, a5, a6
340 s32i a2, sp, XT_STK_PC /* save updated PC */
341 s32i a3, sp, XT_STK_PS /* save updated PS */
345 /* Restore virtual interrupt priority and interrupt enable */
347 l32i a4, a3, 0 /* a4 = _xt_intenable */
348 l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */
350 wsr a4, INTENABLE /* update INTENABLE */
351 s32i a5, a3, 4 /* restore _xt_vpri_mask */
/* Restore SAR, then the general-purpose registers a2-a11 from the frame. */
354 l32i a3, sp, XT_STK_SAR
355 l32i a2, sp, XT_STK_A2
357 l32i a3, sp, XT_STK_A3
358 l32i a4, sp, XT_STK_A4
359 l32i a5, sp, XT_STK_A5
360 l32i a6, sp, XT_STK_A6
361 l32i a7, sp, XT_STK_A7
362 l32i a8, sp, XT_STK_A8
363 l32i a9, sp, XT_STK_A9
364 l32i a10, sp, XT_STK_A10
365 l32i a11, sp, XT_STK_A11
368 Call0 ABI callee-saved regs a12-15 do not need to be restored here.
369 However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
370 so need to be restored anyway, despite being callee-saved in Call0.
372 l32i a12, sp, XT_STK_A12
373 l32i a13, sp, XT_STK_A13
374 #ifndef __XTENSA_CALL0_ABI__
375 l32i a14, sp, XT_STK_A14
376 l32i a15, sp, XT_STK_A15
382 /*******************************************************************************
386 Initializes global co-processor management data, setting all co-processors
387 to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
389 Called during initialization of the RTOS, before any threads run.
391 This may be called from normal Xtensa single-threaded application code which
392 might use co-processors. The Xtensa run-time initialization enables all
393 co-processors. They must remain enabled here, else a co-processor exception
394 might occur outside of a thread, which the exception handler doesn't expect.
397 Xtensa single-threaded run-time environment is in effect.
398 No thread is yet running.
403 Obeys ABI conventions per prototype:
404 void _xt_coproc_init(void)
406 *******************************************************************************/
410 .global _xt_coproc_init
411 .type _xt_coproc_init,@function
/* NOTE(review): the _xt_coproc_init: entry label, the store loop that walks
   a2 up to a3 writing a4, and the RET are not visible in this view. */
418 /* Initialize thread co-processor ownerships to 0 (unowned). */
419 movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */
420 addi a3, a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2 /* a3 = top+1 of owner array */
421 movi a4, 0 /* a4 = 0 (unowned) */
431 /*******************************************************************************
435 Releases any and all co-processors owned by a given thread. The thread is
436 identified by its co-processor state save area defined in xtensa_context.h.
438 Must be called before a thread's co-proc save area is deleted to avoid
439 memory corruption when the exception handler tries to save the state.
440 May be called when a thread terminates or completes but does not delete
441 the co-proc save area, to avoid the exception handler having to save the
442 thread's co-proc state before another thread can use it (optimization).
444 Needs to be called on the processor the thread was running on. Unpinned threads
445 won't have an entry here because they get pinned as soon as they use a coprocessor.
448 A2 = Pointer to base of co-processor state save area.
453 Obeys ABI conventions per prototype:
454 void _xt_coproc_release(void * coproc_sa_base)
456 *******************************************************************************/
460 .global _xt_coproc_release
461 .type _xt_coproc_release,@function
/* void _xt_coproc_release(void *coproc_sa_base)
   Scans this core's owner array and clears (sets to 0/unowned) every entry
   that matches the given save-area base, with interrupts masked at
   XCHAL_EXCM_LEVEL for the duration of the scan. */
466 ENTRY0 /* a2 = base of save area */
469 movi a3, XCHAL_CP_MAX << 2
/* NOTE(review): lines between the two movi/addi pairs are not visible here;
   the per-core offset computation presumably uses the value loaded above. */
471 movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
474 addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
475 movi a5, 0 /* a5 = 0 (unowned) */
477 rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */
479 1: l32i a7, a3, 0 /* a7 = owner at a3 */
480 bne a2, a7, 2f /* if (coproc_sa_base == owner) */
481 s32i a5, a3, 0 /* owner = unowned */
482 2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */
483 bltu a3, a4, 1b /* repeat until end of array */
485 3: wsr a6, PS /* restore interrupts */
492 /*******************************************************************************
495 If there is a current thread and it has a coprocessor state save area, then
496 save all callee-saved state into this area. This function is called from the
497 solicited context switch handler. It calls a system-specific function to get
498 the coprocessor save area base address.
501 - The thread being switched out is still the current thread.
502 - CPENABLE state reflects which coprocessors are active.
503 - Registers have been saved/spilled already.
506 - All necessary CP callee-saved state has been saved.
507 - Registers a2-a7, a13-a15 have been trashed.
509 Must be called from assembly code only, using CALL0.
510 *******************************************************************************/
513 .extern _xt_coproc_sa_offset /* external reference */
515 .global _xt_coproc_savecs
516 .type _xt_coproc_savecs,@function
/* Saves callee-saved coprocessor state for the outgoing thread.
   In:  CPENABLE = mask of enabled coprocessors.
   Out: CP state stored into the thread's CP save area; a2-a7, a13-a15 trashed.
   Must be reached via CALL0 (a0 = return address).
   NOTE(review): the _xt_coproc_savecs: entry label, the bbci.l/l32i/add
   preambles for CPs 2-7, the 2f labels, the .Ldone label and RET are not
   visible in this view — presumed dropped by extraction. */
522 /* At entry, CPENABLE should be showing which CPs are enabled. */
524 rsr a2, CPENABLE /* a2 = which CPs are enabled */
525 beqz a2, .Ldone /* quick exit if none */
526 mov a14, a0 /* save return address */
527 call0 XT_RTOS_CP_STATE /* get address of CP save area */
528 mov a0, a14 /* restore return address */
529 beqz a15, .Ldone /* if none then nothing to do */
530 s16i a2, a15, XT_CP_CS_ST /* save mask of CPs being stored */
531 movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
532 l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
/* One conditionally-compiled store block per configured coprocessor. */
534 #if XCHAL_CP0_SA_SIZE
535 bbci.l a2, 0, 2f /* CP 0 not enabled */
536 l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
537 add a3, a14, a15 /* a3 = save area for CP 0 */
538 xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
542 #if XCHAL_CP1_SA_SIZE
543 bbci.l a2, 1, 2f /* CP 1 not enabled */
544 l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
545 add a3, a14, a15 /* a3 = save area for CP 1 */
546 xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
550 #if XCHAL_CP2_SA_SIZE
554 xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
558 #if XCHAL_CP3_SA_SIZE
562 xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
566 #if XCHAL_CP4_SA_SIZE
570 xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
574 #if XCHAL_CP5_SA_SIZE
578 xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
582 #if XCHAL_CP6_SA_SIZE
586 xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
590 #if XCHAL_CP7_SA_SIZE
594 xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
603 /*******************************************************************************
606 Restore any callee-saved coprocessor state for the incoming thread.
607 This function is called from coprocessor exception handling, when giving
608 ownership to a thread that solicited a context switch earlier. It calls a
609 system-specific function to get the coprocessor save area base address.
612 - The incoming thread is set as the current thread.
613 - CPENABLE is set up correctly for all required coprocessors.
614 - a2 = mask of coprocessors to be restored.
617 - All necessary CP callee-saved state has been restored.
618 - CPENABLE - unchanged.
619 - Registers a2-a7, a13-a15 have been trashed.
621 Must be called from assembly code only, using CALL0.
622 *******************************************************************************/
625 .global _xt_coproc_restorecs
626 .type _xt_coproc_restorecs,@function
/* Restores callee-saved coprocessor state for the incoming thread.
   In:  a2 = mask of coprocessors to restore.
   Out: CP state reloaded from the thread's CP save area; saved-CP mask in the
        save area updated (restored bits cleared via xor); a2-a7, a13-a15 trashed.
   Must be reached via CALL0 (a0 = return address).
   NOTE(review): the bbci.l/l32i/add preambles for CPs 2-7, the 2f labels,
   .Ldone2 and the final RET are not visible in this view. */
630 _xt_coproc_restorecs:
632 mov a14, a0 /* save return address */
633 call0 XT_RTOS_CP_STATE /* get address of CP save area */
634 mov a0, a14 /* restore return address */
635 beqz a15, .Ldone2 /* if none then nothing to do */
636 l16ui a3, a15, XT_CP_CS_ST /* a3 = which CPs have been saved */
637 xor a3, a3, a2 /* clear the ones being restored */
638 s32i a3, a15, XT_CP_CS_ST /* update saved CP mask */
639 movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
640 l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
/* One conditionally-compiled load block per configured coprocessor. */
642 #if XCHAL_CP0_SA_SIZE
643 bbci.l a2, 0, 2f /* CP 0 not enabled */
644 l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
645 add a3, a14, a15 /* a3 = save area for CP 0 */
646 xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
650 #if XCHAL_CP1_SA_SIZE
651 bbci.l a2, 1, 2f /* CP 1 not enabled */
652 l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
653 add a3, a14, a15 /* a3 = save area for CP 1 */
654 xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
658 #if XCHAL_CP2_SA_SIZE
662 xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
666 #if XCHAL_CP3_SA_SIZE
670 xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
674 #if XCHAL_CP4_SA_SIZE
678 xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
682 #if XCHAL_CP5_SA_SIZE
686 xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
690 #if XCHAL_CP6_SA_SIZE
694 xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
698 #if XCHAL_CP7_SA_SIZE
702 xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL