/*
 * FreeRTOS Kernel V10.4.4
 * Copyright (C) 2015-2019 Cadence Design Systems, Inc.
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 */
/*
 * XTENSA CONTEXT SAVE AND RESTORE ROUTINES
 *
 * Low-level Call0 functions for handling generic context save and restore of
 * registers not specifically addressed by the interrupt vectors and handlers.
 * Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
 * Except for the calls to RTOS functions, this code is generic to Xtensa.
 *
 * Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
 * save regs (A12-A15), which is always the case if the handlers are coded in C.
 * However A12, A13 are made available as scratch registers for interrupt dispatch
 * code, so are presumed saved anyway, and are always restored even in Call0 ABI.
 * Only A14, A15 are truly handled as callee-save regs.
 *
 * Because Xtensa is a configurable architecture, this port supports all user
 * generated configurations (except restrictions stated in the release notes).
 * This is accomplished by conditional compilation using macros and functions
 * defined in the Xtensa HAL (hardware abstraction layer) for your configuration.
 * Only the processor state included in your configuration is saved and restored,
 * including any processor state added by user configuration options or TIE.
 */
/*  Warn nicely if this file gets named with a lowercase .s instead of .S:  */
#define NOERROR #
NOERROR: .error "C preprocessor needed for this file: make sure its filename\
 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."


#include "xtensa_rtos.h"

#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif
/*******************************************************************************

_xt_context_save

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).

Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.
An illustrative calling sketch follows this comment block.

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Original A12, A13 have already been saved in the interrupt stack frame.
    Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
    point of interruption.
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    A12, A13 as at entry (preserved).
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/
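/*
    Illustrative calling sketch only (comment, not assembled). The real dispatch
    code lives in the port's interrupt vectors and may differ in detail; the
    XT_STK_* offsets are the ones already used in this file, while EPC_1 and the
    level-1 flavor shown here are just an example:

        addi    sp,  sp, -XT_STK_FRMSZ         allocate interrupt stack frame
        s32i    a0,  sp, XT_STK_A0             save interruptee's A0
        s32i    a12, sp, XT_STK_A12            save A12, A13 so _xt_context_save
        s32i    a13, sp, XT_STK_A13              may keep them as caller scratch
        addi    a0,  sp, XT_STK_FRMSZ          interruptee's SP = SP + frame size
        s32i    a0,  sp, XT_STK_A1             save interruptee's A1 (SP)
        rsr     a0,  EPC_1                     example: level-1 interrupt PC
        s32i    a0,  sp, XT_STK_PC             save interruptee's PC
        rsr     a0,  PS
        s32i    a0,  sp, XT_STK_PS             save interruptee's PS
        call0   _xt_context_save               save the remaining state
*/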
    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
_xt_context_save:

    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
    #ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
    #endif

    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

    #if XCHAL_HAVE_LOOPS
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
    #endif

    #ifdef XT_USE_SWPRI
    /* Save virtual priority mask */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a9,  a0                         /* preserve ret addr */
    #endif

    #ifndef __XTENSA_CALL0_ABI__
    /*
    To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
    Need to save a9,12,13 temporarily (in frame temps) and recover originals.
    Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
    and underflow exceptions disabled (assured by PS.EXCM == 1).
    */
    s32i    a12, sp, XT_STK_TMP0            /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    /*
    Save the overlay state if we are supporting overlays. Since we just saved
    three registers, we can conveniently use them here. Note that as of now,
    overlays only work for windowed calling ABI.
    */
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC              /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY            /* save overlay state */
    #endif

    l32i    a12, sp, XT_STK_A12             /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9
    addi    sp,  sp, XT_STK_FRMSZ           /* restore the interruptee's SP */
    call0   xthal_window_spill_nw           /* preserves only a4,5,8,9,12,13 */
    addi    sp,  sp, -XT_STK_FRMSZ
    l32i    a12, sp, XT_STK_TMP0            /* recover stuff from stack frame */
    l32i    a13, sp, XT_STK_TMP1
    l32i    a9,  sp, XT_STK_TMP2
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_save_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we assume a9,12,13 are preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    addi    a2,  sp, XT_STK_EXTRA           /* where to save it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3,  -XCHAL_EXTRA_SA_ALIGN
    and     a2,  a2, a3                     /* align dynamically >16 bytes */
    # endif
    call0   xthal_save_extra_nw             /* destroys a0,2,3,4,5 */
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0,  a9                         /* retrieve ret addr */
    #endif

    ret
/*******************************************************************************

_xt_context_restore

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_save (whose caller saved A12, A13).

Caller is responsible for restoring PC, PS, A0, A1 (SP).
An illustrative exit sketch follows this comment block.

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Other processor state except PC, PS, A0, A1 (SP), is as at the point
    of interruption.

*******************************************************************************/
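/*
    Illustrative exit sketch only (comment, not assembled). After RTOS exit
    processing, a dispatcher is assumed to finish roughly as shown below for a
    hypothetical level-2 interrupt; the level and the EPS/EPC register numbers
    vary per vector:

        call0   _xt_context_restore            restore everything saved above
        l32i    a0,  sp, XT_STK_PS             retrieve interruptee's PS
        wsr     a0,  EPS_2
        l32i    a0,  sp, XT_STK_PC             retrieve interruptee's PC
        wsr     a0,  EPC_2
        l32i    a0,  sp, XT_STK_A0             restore A0
        addi    sp,  sp, XT_STK_FRMSZ          release interrupt stack frame
        rsync
        rfi     2                              return from level-2 interrupt
*/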
    .global _xt_context_restore
    .type   _xt_context_restore,@function
    .align  4
_xt_context_restore:

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_restore_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we only assume a13 is preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    mov     a13, a0                         /* preserve ret addr */
    addi    a2,  sp, XT_STK_EXTRA           /* where to find it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3,  -XCHAL_EXTRA_SA_ALIGN
    and     a2,  a2, a3                     /* align dynamically >16 bytes */
    # endif
    call0   xthal_restore_extra_nw          /* destroys a0,2,3,4,5 */
    mov     a0,  a13                        /* retrieve ret addr */
    #endif

    #if XCHAL_HAVE_LOOPS
    l32i    a2,  sp, XT_STK_LBEG
    l32i    a3,  sp, XT_STK_LEND
    wsr     a2,  LBEG
    l32i    a2,  sp, XT_STK_LCOUNT
    wsr     a3,  LEND
    wsr     a2,  LCOUNT
    #endif

    #ifdef XT_USE_OVLY
    /*
    If we are using overlays, this is a good spot to check if we need
    to restore an overlay for the incoming task. Here we have a bunch
    of registers to spare. Note that this step is going to use a few
    bytes of storage below SP (SP-20 to SP-32) if an overlay is going
    to be restored.
    */
    l32i    a2,  sp, XT_STK_PC              /* retrieve PC */
    l32i    a3,  sp, XT_STK_PS              /* retrieve PS */
    l32i    a4,  sp, XT_STK_OVLY            /* retrieve overlay state */
    l32i    a5,  sp, XT_STK_A1              /* retrieve stack ptr */
    _xt_overlay_check_map    a2, a3, a4, a5, a6
    s32i    a2,  sp, XT_STK_PC              /* save updated PC */
    s32i    a3,  sp, XT_STK_PS              /* save updated PS */
    #endif

    #ifdef XT_USE_SWPRI
    /* Restore virtual interrupt priority and interrupt enable */
    movi    a3,  _xt_intdata
    l32i    a4,  a3, 0                      /* a4 = _xt_intenable */
    l32i    a5,  sp, XT_STK_VPRI            /* a5 = saved _xt_vpri_mask */
    and     a4,  a4, a5
    wsr     a4,  INTENABLE                  /* update INTENABLE */
    s32i    a5,  a3, 4                      /* restore _xt_vpri_mask */
    #endif

    l32i    a3,  sp, XT_STK_SAR
    l32i    a2,  sp, XT_STK_A2
    wsr     a3,  SAR
    l32i    a3,  sp, XT_STK_A3
    l32i    a4,  sp, XT_STK_A4
    l32i    a5,  sp, XT_STK_A5
    l32i    a6,  sp, XT_STK_A6
    l32i    a7,  sp, XT_STK_A7
    l32i    a8,  sp, XT_STK_A8
    l32i    a9,  sp, XT_STK_A9
    l32i    a10, sp, XT_STK_A10
    l32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be restored here.
    However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
    so need to be restored anyway, despite being callee-saved in Call0.
    */
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13
    #ifndef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    ret
/*******************************************************************************

_xt_coproc_init

Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).

Called during initialization of the RTOS, before any threads run.

This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
    Xtensa single-threaded run-time environment is in effect.
    No thread is yet running.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_init(void)

An illustrative calling sketch follows this comment block.

*******************************************************************************/
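/*
    Illustrative calling sketch only (comment, not assembled). Because
    _xt_coproc_init obeys the compiler's ABI, RTOS startup code is assumed to
    be able to invoke it from C as a plain call, or from assembly, e.g.:

        #ifdef __XTENSA_CALL0_ABI__
        call0   _xt_coproc_init
        #else
        call4   _xt_coproc_init
        #endif
*/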
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_init
    .type   _xt_coproc_init,@function
    .align  4
_xt_coproc_init:
    ENTRY0

    /* Initialize thread co-processor ownerships to 0 (unowned). */
    movi    a2,  _xt_coproc_owner_sa        /* a2 = base of owner array */
    addi    a3,  a2, XCHAL_CP_MAX << 2      /* a3 = top+1 of owner array */
    movi    a4,  0                          /* a4 = 0 (unowned) */
1:  s32i    a4,  a2, 0                      /* clear the current entry */
    addi    a2,  a2, 4                      /* advance to next entry */
    bltu    a2,  a3, 1b                     /* repeat until end of array */

    RET0

#endif
/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h.

Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).

Entry Conditions:
    A2  = Pointer to base of co-processor state save area.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_release(void * coproc_sa_base)

An illustrative calling sketch follows this comment block.

*******************************************************************************/
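/*
    Illustrative calling sketch only (comment, not assembled). A thread-delete
    path is assumed to pass the thread's co-processor save area base as the
    single argument; here the base is assumed to already be in a3:

        #ifdef __XTENSA_CALL0_ABI__
        mov     a2,  a3                     argument in a2 for Call0 ABI
        call0   _xt_coproc_release
        #else
        mov     a6,  a3                     caller's a6 = callee's a2 after call4
        call4   _xt_coproc_release
        #endif
*/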
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
_xt_coproc_release:
    ENTRY0                                  /* a2 = base of save area */

    movi    a3,  _xt_coproc_owner_sa        /* a3 = base of owner array */
    addi    a4,  a3, XCHAL_CP_MAX << 2      /* a4 = top+1 of owner array */
    movi    a5,  0                          /* a5 = 0 (unowned) */

    rsil    a6,  XCHAL_EXCM_LEVEL           /* lock interrupts */

1:  l32i    a7,  a3, 0                      /* a7 = owner at a3 */
    bne     a2,  a7, 2f                     /* if (coproc_sa_base == owner) */
    s32i    a5,  a3, 0                      /*   owner = unowned */
2:  addi    a3,  a3, 1<<2                   /* a3 = next entry in owner array */
    bltu    a3,  a4, 1b                     /* repeat until end of array */

3:  wsr     a6,  PS                         /* restore interrupts */
    rsync

    RET0

#endif
/*******************************************************************************

_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
    - The thread being switched out is still the current thread.
    - CPENABLE state reflects which coprocessors are active.
    - Registers have been saved/spilled already.

Exit conditions:
    - All necessary CP callee-saved state has been saved.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
An illustrative calling sketch follows this comment block.
*******************************************************************************/
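/*
    Illustrative calling sketch only (comment, not assembled). From the
    solicited context-switch path, once the address registers have been saved
    or spilled, the call is assumed to be simply:

        call0   _xt_coproc_savecs           save callee-saved CP state, if any

    Afterwards a2-a7 and a13-a15 must be treated as clobbered.
*/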
#if XCHAL_CP_NUM > 0

    .extern _xt_coproc_sa_offset            /* external reference */

    .global _xt_coproc_savecs
    .type   _xt_coproc_savecs,@function
    .align  4
_xt_coproc_savecs:

    /* At entry, CPENABLE should be showing which CPs are enabled. */

    rsr     a2,  CPENABLE                   /* a2 = which CPs are enabled */
    beqz    a2,  .Ldone                     /* quick exit if none */
    mov     a14, a0                         /* save return address */
    call0   XT_RTOS_CP_STATE                /* get address of CP save area */
    mov     a0,  a14                        /* restore return address */
    beqz    a15, .Ldone                     /* if none then nothing to do */
    s16i    a2,  a15, XT_CP_CS_ST           /* save mask of CPs being stored */
    movi    a13, _xt_coproc_sa_offset       /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA             /* a15 = base of aligned save area */

    #if XCHAL_CP0_SA_SIZE
    bbci.l  a2,  0, 2f                      /* CP 0 not enabled */
    l32i    a14, a13, 0                     /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15                   /* a3 = save area for CP 0 */
    xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP1_SA_SIZE
    bbci.l  a2,  1, 2f                      /* CP 1 not enabled */
    l32i    a14, a13, 4                     /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15                   /* a3 = save area for CP 1 */
    xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP2_SA_SIZE
    bbci.l  a2,  2, 2f
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP3_SA_SIZE
    bbci.l  a2,  3, 2f
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP4_SA_SIZE
    bbci.l  a2,  4, 2f
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP5_SA_SIZE
    bbci.l  a2,  5, 2f
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP6_SA_SIZE
    bbci.l  a2,  6, 2f
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP7_SA_SIZE
    bbci.l  a2,  7, 2f
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

.Ldone:
    ret
#endif
/*******************************************************************************

_xt_coproc_restorecs

Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.

Entry conditions:
    - The incoming thread is set as the current thread.
    - CPENABLE is set up correctly for all required coprocessors.
    - a2 = mask of coprocessors to be restored.

Exit conditions:
    - All necessary CP callee-saved state has been restored.
    - CPENABLE is unchanged.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
An illustrative calling sketch follows this comment block.
*******************************************************************************/
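/*
    Illustrative calling sketch only (comment, not assembled). One plausible
    caller loads the mask of previously saved coprocessors from the thread's
    CP save area (assumed here to be addressed by a3) and restores them all:

        l16ui   a2,  a3, XT_CP_CS_ST        a2 = mask of CPs that were saved
        call0   _xt_coproc_restorecs        restore their callee-saved state

    Afterwards a2-a7 and a13-a15 must be treated as clobbered.
*/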
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_restorecs
    .type   _xt_coproc_restorecs,@function
    .align  4
_xt_coproc_restorecs:

    mov     a14, a0                         /* save return address */
    call0   XT_RTOS_CP_STATE                /* get address of CP save area */
    mov     a0,  a14                        /* restore return address */
    beqz    a15, .Ldone2                    /* if none then nothing to do */
    l16ui   a3,  a15, XT_CP_CS_ST           /* a3 = which CPs have been saved */
    xor     a3,  a3, a2                     /* clear the ones being restored */
    s32i    a3,  a15, XT_CP_CS_ST           /* update saved CP mask */
    movi    a13, _xt_coproc_sa_offset       /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA             /* a15 = base of aligned save area */

    #if XCHAL_CP0_SA_SIZE
    bbci.l  a2,  0, 2f                      /* CP 0 not enabled */
    l32i    a14, a13, 0                     /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15                   /* a3 = save area for CP 0 */
    xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP1_SA_SIZE
    bbci.l  a2,  1, 2f                      /* CP 1 not enabled */
    l32i    a14, a13, 4                     /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15                   /* a3 = save area for CP 1 */
    xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP2_SA_SIZE
    bbci.l  a2,  2, 2f
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP3_SA_SIZE
    bbci.l  a2,  3, 2f
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP4_SA_SIZE
    bbci.l  a2,  4, 2f
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP5_SA_SIZE
    bbci.l  a2,  5, 2f
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP6_SA_SIZE
    bbci.l  a2,  6, 2f
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP7_SA_SIZE
    bbci.l  a2,  7, 2f
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

.Ldone2:
    ret
#endif