1 /**************************************************************************//**
3 * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File
6 ******************************************************************************/
8 * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
10 * SPDX-License-Identifier: Apache-2.0
12 * Licensed under the Apache License, Version 2.0 (the License); you may
13 * not use this file except in compliance with the License.
14 * You may obtain a copy of the License at
16 * www.apache.org/licenses/LICENSE-2.0
18 * Unless required by applicable law or agreed to in writing, software
19 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21 * See the License for the specific language governing permissions and
22 * limitations under the License.
25 #if defined ( __ICCARM__ )
26 #pragma system_include /* treat file as system include file for MISRA check */
33 #ifndef __CORE_CA_H_GENERIC
34 #define __CORE_CA_H_GENERIC
37 /*******************************************************************************
39 ******************************************************************************/
41 /* CMSIS CA definitions */
42 #define __CA_CMSIS_VERSION_MAIN (1U) /*!< \brief [31:16] CMSIS HAL main version */
43 #define __CA_CMSIS_VERSION_SUB (0U) /*!< \brief [15:0] CMSIS HAL sub version */
44 #define __CA_CMSIS_VERSION ((__CA_CMSIS_VERSION_MAIN << 16U) | \
45 __CA_CMSIS_VERSION_SUB ) /*!< \brief CMSIS HAL version number */
47 #if defined ( __CC_ARM )
48 #if defined __TARGET_FPU_VFP
49 #if (__FPU_PRESENT == 1)
52 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
59 #elif defined ( __ICCARM__ )
60 #if defined __ARMVFP__
61 #if (__FPU_PRESENT == 1)
64 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
71 #elif defined ( __TMS470__ )
72 #if defined __TI_VFP_SUPPORT__
73 #if (__FPU_PRESENT == 1)
76 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
83 #elif defined ( __GNUC__ )
84 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
85 #if (__FPU_PRESENT == 1)
88 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
95 #elif defined ( __TASKING__ )
96 #if defined __FPU_VFP__
97 #if (__FPU_PRESENT == 1)
100 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
101 #define __FPU_USED 0U
104 #define __FPU_USED 0U
108 #include "cmsis_compiler.h" /* CMSIS compiler specific defines */
114 #endif /* __CORE_CA_H_GENERIC */
116 #ifndef __CMSIS_GENERIC
118 #ifndef __CORE_CA_H_DEPENDANT
119 #define __CORE_CA_H_DEPENDANT
125 /* check device defines and use defaults */
126 #if defined __CHECK_DEVICE_DEFINES
128 #define __CA_REV 0x0000U
129 #warning "__CA_REV not defined in device header file; using default!"
132 #ifndef __FPU_PRESENT
133 #define __FPU_PRESENT 0U
134 #warning "__FPU_PRESENT not defined in device header file; using default!"
137 #ifndef __MPU_PRESENT
138 #define __MPU_PRESENT 0U
139 #warning "__MPU_PRESENT not defined in device header file; using default!"
142 #ifndef __GIC_PRESENT
143 #define __GIC_PRESENT 1U
144 #warning "__GIC_PRESENT not defined in device header file; using default!"
147 #ifndef __TIM_PRESENT
148 #define __TIM_PRESENT 1U
149 #warning "__TIM_PRESENT not defined in device header file; using default!"
152 #ifndef __L2C_PRESENT
153 #define __L2C_PRESENT 0U
154 #warning "__L2C_PRESENT not defined in device header file; using default!"
158 /* IO definitions (access restrictions to peripheral registers) */
159 #ifdef __cplusplus
160 #define __I volatile /*!< \brief Defines 'read only' permissions */
161 #else
162 #define __I volatile const /*!< \brief Defines 'read only' permissions */
163 #endif
164 #define __O volatile /*!< \brief Defines 'write only' permissions */
165 #define __IO volatile /*!< \brief Defines 'read / write' permissions */
167 /* following defines should be used for structure members */
168 #define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */
169 #define __OM volatile /*!< \brief Defines 'write only' structure member permissions */
170 #define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */
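/* Example (illustrative only, hypothetical peripheral): how the qualifiers above are
   typically applied to register structure members.

     typedef struct
     {
       __IM  uint32_t STATUS;                 // read-only status register
       __OM  uint32_t CLEAR;                  // write-only interrupt clear register
       __IOM uint32_t CONTROL;                // read/write control register
     } ExamplePeripheral_Type;
*/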
173 /*******************************************************************************
174 * Register Abstraction
175 Core Registers contain:
178 - L2C-310 Cache Controller
179 - Generic Interrupt Controller Distributor
180 - Generic Interrupt Controller Interface
181 ******************************************************************************/
183 /* Core Register CPSR */
188 uint32_t M:5; /*!< \brief bit: 0.. 4 Mode field */
189 uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */
190 uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */
191 uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */
192 uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */
193 uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */
194 uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */
195 uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */
196 uint32_t _reserved0:4; /*!< \brief bit: 20..23 Reserved */
197 uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */
198 uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */
199 uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */
200 uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */
201 uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */
202 uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */
203 uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */
204 } b; /*!< \brief Structure used for bit access */
205 uint32_t w; /*!< \brief Type used for word access */
208 /* CPSR Register Definitions */
209 #define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */
210 #define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */
212 #define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */
213 #define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */
215 #define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */
216 #define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */
218 #define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */
219 #define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */
221 #define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */
222 #define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */
224 #define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */
225 #define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */
227 #define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */
228 #define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask */
230 #define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */
231 #define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */
233 #define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */
234 #define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */
236 #define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */
237 #define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */
239 #define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */
240 #define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */
242 #define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */
243 #define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */
245 #define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */
246 #define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */
248 #define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */
249 #define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */
251 #define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */
252 #define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */
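/* Example (illustrative only, not part of the CMSIS API): decode fields of the current
   CPSR using the _Pos/_Msk macros above. Assumes the __get_CPSR() accessor provided by
   the CMSIS compiler header.

     uint32_t cpsr_val = __get_CPSR();
     uint32_t mode     = (cpsr_val & CPSR_M_Msk) >> CPSR_M_Pos;   // current processor mode
     uint32_t irq_mask = (cpsr_val & CPSR_I_Msk) >> CPSR_I_Pos;   // 1 = IRQs masked
*/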
254 /* CP15 Register SCTLR */
259 uint32_t M:1; /*!< \brief bit: 0 MMU enable */
260 uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */
261 uint32_t C:1; /*!< \brief bit: 2 Cache enable */
262 uint32_t _reserved0:2; /*!< \brief bit: 3.. 4 Reserved */
263 uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */
264 uint32_t _reserved1:1; /*!< \brief bit: 6 Reserved */
265 uint32_t B:1; /*!< \brief bit: 7 Endianness model */
266 uint32_t _reserved2:2; /*!< \brief bit: 8.. 9 Reserved */
267 uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */
268 uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */
269 uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */
270 uint32_t V:1; /*!< \brief bit: 13 Vectors bit */
271 uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */
272 uint32_t _reserved3:2; /*!< \brief bit:15..16 Reserved */
273 uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */
274 uint32_t _reserved4:1; /*!< \brief bit: 18 Reserved */
275 uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */
276 uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */
277 uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */
278 uint32_t U:1; /*!< \brief bit: 22 Alignment model */
279 uint32_t _reserved5:1; /*!< \brief bit: 23 Reserved */
280 uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */
281 uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */
282 uint32_t _reserved6:1; /*!< \brief bit: 26 Reserved */
283 uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */
284 uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. */
285 uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */
286 uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */
287 uint32_t _reserved7:1; /*!< \brief bit: 31 Reserved */
288 } b; /*!< \brief Structure used for bit access */
289 uint32_t w; /*!< \brief Type used for word access */
292 #define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */
293 #define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */
295 #define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */
296 #define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */
298 #define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */
299 #define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */
301 #define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */
302 #define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */
304 #define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */
305 #define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */
307 #define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */
308 #define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */
310 #define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */
311 #define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */
313 #define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */
314 #define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */
316 #define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */
317 #define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */
319 #define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */
320 #define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */
322 #define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */
323 #define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */
325 #define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */
326 #define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */
328 #define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */
329 #define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */
331 #define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */
332 #define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */
334 #define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */
335 #define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */
337 #define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */
338 #define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */
340 #define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */
341 #define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */
343 #define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */
344 #define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */
346 #define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */
347 #define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */
349 #define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */
350 #define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */
352 #define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */
353 #define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */
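/* Example (illustrative only): modify individual SCTLR bits with the mask macros above
   and the __get_SCTLR()/__set_SCTLR() accessors used elsewhere in this file.

     uint32_t sctlr = __get_SCTLR();
     __set_SCTLR(sctlr | SCTLR_A_Msk);        // enable alignment fault checking
*/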
355 /* CP15 Register ACTLR */
360 uint32_t _reserved0:6; /*!< bit: 0.. 5 Reserved */
361 uint32_t SMP:1; /*!< bit: 6 Enables coherent requests to the processor */
362 uint32_t _reserved1:3; /*!< bit: 7.. 9 Reserved */
363 uint32_t DODMBS:1; /*!< bit: 10 Disable optimized data memory barrier behavior */
364 uint32_t L2RADIS:1; /*!< bit: 11 L2 Data Cache read-allocate mode disable */
365 uint32_t L1RADIS:1; /*!< bit: 12 L1 Data Cache read-allocate mode disable */
366 uint32_t L1PCTL:2; /*!< bit:13..14 L1 Data prefetch control */
367 uint32_t DDVM:1; /*!< bit: 15 Disable Distributed Virtual Memory (DVM) transactions */
368 uint32_t _reserved3:12; /*!< bit:16..27 Reserved */
369 uint32_t DDI:1; /*!< bit: 28 Disable dual issue */
370 uint32_t _reserved7:3; /*!< bit:29..31 Reserved */
371 } b; /*!< Structure used for bit access */
372 uint32_t w; /*!< Type used for word access */
375 #define ACTLR_DDI_Pos 28U /*!< ACTLR: DDI Position */
376 #define ACTLR_DDI_Msk (1UL << ACTLR_DDI_Pos) /*!< ACTLR: DDI Mask */
378 #define ACTLR_DDVM_Pos 15U /*!< ACTLR: DDVM Position */
379 #define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< ACTLR: DDVM Mask */
381 #define ACTLR_L1PCTL_Pos 13U /*!< ACTLR: L1PCTL Position */
382 #define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< ACTLR: L1PCTL Mask */
384 #define ACTLR_L1RADIS_Pos 12U /*!< ACTLR: L1RADIS Position */
385 #define ACTLR_L1RADIS_Msk (1UL << ACTLR_L1RADIS_Pos) /*!< ACTLR: L1RADIS Mask */
387 #define ACTLR_L2RADIS_Pos 11U /*!< ACTLR: L2RADIS Position */
388 #define ACTLR_L2RADIS_Msk (1UL << ACTLR_L2RADIS_Pos) /*!< ACTLR: L2RADIS Mask */
390 #define ACTLR_DODMBS_Pos 10U /*!< ACTLR: DODMBS Position */
391 #define ACTLR_DODMBS_Msk (1UL << ACTLR_DODMBS_Pos) /*!< ACTLR: DODMBS Mask */
393 #define ACTLR_SMP_Pos 6U /*!< ACTLR: SMP Position */
394 #define ACTLR_SMP_Msk (1UL << ACTLR_SMP_Pos) /*!< ACTLR: SMP Mask */
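/* Example (illustrative only): set ACTLR.SMP before enabling the caches on an SMP-capable
   core. Assumes the __get_ACTLR()/__set_ACTLR() CP15 accessors from the CMSIS compiler
   headers; the register is only writable at the appropriate privilege level.

     __set_ACTLR(__get_ACTLR() | ACTLR_SMP_Msk);   // join coherency domain before caches are enabled
*/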
396 /* CP15 Register CPACR */
401 uint32_t _reserved0:20; /*!< \brief bit: 0..19 Reserved */
402 uint32_t cp10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */
403 uint32_t cp11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */
404 uint32_t _reserved1:6; /*!< \brief bit:24..29 Reserved */
405 uint32_t D32DIS:1; /*!< \brief bit: 30 Disable use of registers D16-D31 of the VFP register file */
406 uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */
407 } b; /*!< \brief Structure used for bit access */
408 uint32_t w; /*!< \brief Type used for word access */
411 #define CPACR_ASEDIS_Pos 31U /*!< \brief CPACR: ASEDIS Position */
412 #define CPACR_ASEDIS_Msk (1UL << CPACR_ASEDIS_Pos) /*!< \brief CPACR: ASEDIS Mask */
414 #define CPACR_D32DIS_Pos 30U /*!< \brief CPACR: D32DIS Position */
415 #define CPACR_D32DIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */
417 #define CPACR_cp11_Pos 22U /*!< \brief CPACR: cp11 Position */
418 #define CPACR_cp11_Msk (3UL << CPACR_cp11_Pos) /*!< \brief CPACR: cp11 Mask */
420 #define CPACR_cp10_Pos 20U /*!< \brief CPACR: cp10 Position */
421 #define CPACR_cp10_Msk (3UL << CPACR_cp10_Pos) /*!< \brief CPACR: cp10 Mask */
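/* Example (illustrative only): grant full access rights to coprocessors 10 and 11
   (VFP/NEON) through CPACR before executing floating-point code. Assumes the
   __get_CPACR()/__set_CPACR() CP15 accessors from the CMSIS compiler headers.

     __set_CPACR(__get_CPACR() | CPACR_cp10_Msk | CPACR_cp11_Msk);  // cp10/cp11 = 0b11 (full access)
     __ISB();                                                        // make the new access rights visible
*/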
423 /* CP15 Register DFSR */
428 uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
429 uint32_t Domain:4; /*!< \brief bit: 4.. 7 Fault on which domain */
430 uint32_t _reserved0:2; /*!< \brief bit: 8.. 9 Reserved */
431 uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
432 uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
433 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
434 uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
435 uint32_t _reserved1:18; /*!< \brief bit:14..31 Reserved */
436 } b; /*!< \brief Structure used for bit access */
437 uint32_t w; /*!< \brief Type used for word access */
440 #define DFSR_CM_Pos 13U /*!< \brief DFSR: CM Position */
441 #define DFSR_CM_Msk (1UL << DFSR_CM_Pos) /*!< \brief DFSR: CM Mask */
443 #define DFSR_Ext_Pos 12U /*!< \brief DFSR: Ext Position */
444 #define DFSR_Ext_Msk (1UL << DFSR_Ext_Pos) /*!< \brief DFSR: Ext Mask */
446 #define DFSR_WnR_Pos 11U /*!< \brief DFSR: WnR Position */
447 #define DFSR_WnR_Msk (1UL << DFSR_WnR_Pos) /*!< \brief DFSR: WnR Mask */
449 #define DFSR_FS1_Pos 10U /*!< \brief DFSR: FS1 Position */
450 #define DFSR_FS1_Msk (1UL << DFSR_FS1_Pos) /*!< \brief DFSR: FS1 Mask */
452 #define DFSR_Domain_Pos 4U /*!< \brief DFSR: Domain Position */
453 #define DFSR_Domain_Msk (0xFUL << DFSR_Domain_Pos) /*!< \brief DFSR: Domain Mask */
455 #define DFSR_FS0_Pos 0U /*!< \brief DFSR: FS0 Position */
456 #define DFSR_FS0_Msk (0xFUL << DFSR_FS0_Pos) /*!< \brief DFSR: FS0 Mask */
458 /* CP15 Register IFSR */
463 uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
464 uint32_t _reserved0:6; /*!< \brief bit: 4.. 9 Reserved */
465 uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
466 uint32_t _reserved1:1; /*!< \brief bit: 11 Reserved */
467 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
468 uint32_t _reserved2:19; /*!< \brief bit:13..31 Reserved */
469 } b; /*!< \brief Structure used for bit access */
470 uint32_t w; /*!< \brief Type used for word access */
473 #define IFSR_ExT_Pos 12U /*!< \brief IFSR: ExT Position */
474 #define IFSR_ExT_Msk (1UL << IFSR_ExT_Pos) /*!< \brief IFSR: ExT Mask */
476 #define IFSR_FS1_Pos 10U /*!< \brief IFSR: FS1 Position */
477 #define IFSR_FS1_Msk (1UL << IFSR_FS1_Pos) /*!< \brief IFSR: FS1 Mask */
479 #define IFSR_FS0_Pos 0U /*!< \brief IFSR: FS0 Position */
480 #define IFSR_FS0_Msk (0xFUL << IFSR_FS0_Pos) /*!< \brief IFSR: FS0 Mask */
482 /* CP15 Register ISR */
487 uint32_t _reserved0:6; /*!< \brief bit: 0.. 5 Reserved */
488 uint32_t F:1; /*!< \brief bit: 6 FIQ pending bit */
489 uint32_t I:1; /*!< \brief bit: 7 IRQ pending bit */
490 uint32_t A:1; /*!< \brief bit: 8 External abort pending bit */
491 uint32_t _reserved1:23; /*!< \brief bit: 9..31 Reserved */
492 } b; /*!< \brief Structure used for bit access */
493 uint32_t w; /*!< \brief Type used for word access */
496 #define ISR_A_Pos 8U /*!< \brief ISR: A Position */
497 #define ISR_A_Msk (1UL << ISR_A_Pos) /*!< \brief ISR: A Mask */
499 #define ISR_I_Pos 7U /*!< \brief ISR: I Position */
500 #define ISR_I_Msk (1UL << ISR_I_Pos) /*!< \brief ISR: I Mask */
502 #define ISR_F_Pos 6U /*!< \brief ISR: F Position */
503 #define ISR_F_Msk (1UL << ISR_F_Pos) /*!< \brief ISR: F Mask */
507 \brief Structure type to access the L2C_310 Cache Controller.
509 #if (__L2C_PRESENT == 1U)
512 __I uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 Cache ID Register */
513 __I uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 Cache Type Register */
514 uint32_t RESERVED0[0x3e];
515 __IO uint32_t CONTROL; /*!< \brief Offset: 0x0100 Control Register */
516 __IO uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 Auxiliary Control */
517 uint32_t RESERVED1[0x3e];
518 __IO uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 Event Counter Control */
519 __IO uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 Event Counter 1 Configuration */
520 __IO uint32_t EVENT_COUNTER0_CONF; /*!< \brief Offset: 0x0208 Event Counter 0 Configuration */
521 uint32_t RESERVED2[0x2];
522 __IO uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 Interrupt Mask */
523 __I uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 Masked Interrupt Status */
524 __I uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c Raw Interrupt Status */
525 __O uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 Interrupt Clear */
526 uint32_t RESERVED3[0x143];
527 __IO uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 Cache Sync */
528 uint32_t RESERVED4[0xf];
529 __IO uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 Invalidate Line By PA */
530 uint32_t RESERVED6[2];
531 __IO uint32_t INV_WAY; /*!< \brief Offset: 0x077c Invalidate by Way */
532 uint32_t RESERVED5[0xc];
533 __IO uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 Clean Line by PA */
534 uint32_t RESERVED7[1];
535 __IO uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 Clean Line by Index/Way */
536 __IO uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc Clean by Way */
537 uint32_t RESERVED8[0xc];
538 __IO uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 Clean and Invalidate Line by PA */
539 uint32_t RESERVED9[1];
540 __IO uint32_t CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 Clean and Invalidate Line by Index/Way */
541 __IO uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc Clean and Invalidate by Way */
542 uint32_t RESERVED10[0x40];
543 __IO uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 Data Lockdown 0 by Way */
544 __IO uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 Instruction Lockdown 0 by Way */
545 __IO uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 Data Lockdown 1 by Way */
546 __IO uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c Instruction Lockdown 1 by Way */
547 __IO uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 Data Lockdown 2 by Way */
548 __IO uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 Instruction Lockdown 2 by Way */
549 __IO uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 Data Lockdown 3 by Way */
550 __IO uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c Instruction Lockdown 3 by Way */
551 __IO uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 Data Lockdown 4 by Way */
552 __IO uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 Instruction Lockdown 4 by Way */
553 __IO uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 Data Lockdown 5 by Way */
554 __IO uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c Instruction Lockdown 5 by Way */
555 __IO uint32_t DATA_LOCK_6_WAY; /*!< \brief Offset: 0x0930 Data Lockdown 6 by Way */
556 __IO uint32_t INST_LOCK_6_WAY; /*!< \brief Offset: 0x0934 Instruction Lockdown 6 by Way */
557 __IO uint32_t DATA_LOCK_7_WAY; /*!< \brief Offset: 0x0938 Data Lockdown 7 by Way */
558 __IO uint32_t INST_LOCK_7_WAY; /*!< \brief Offset: 0x093c Instruction Lockdown 7 by Way */
559 uint32_t RESERVED11[0x4];
560 __IO uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 Lockdown by Line Enable */
561 __IO uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 Unlock All Lines by Way */
562 uint32_t RESERVED12[0xaa];
563 __IO uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 Address Filtering Start */
564 __IO uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 Address Filtering End */
565 uint32_t RESERVED13[0xce];
566 __IO uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 Debug Control Register */
569 #define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 Declaration */
572 #if (__GIC_PRESENT == 1U)
573 /** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD)
577 __IOM uint32_t D_CTLR; /*!< \brief +0x000 (R/W) Distributor Control Register */
578 __IM uint32_t D_TYPER; /*!< \brief +0x004 (R/ ) Interrupt Controller Type Register */
579 __IM uint32_t D_IIDR; /*!< \brief +0x008 (R/ ) Distributor Implementer Identification Register */
580 uint32_t RESERVED1[29];
581 __IOM uint32_t D_IGROUPR[16]; /*!< \brief +0x080 - 0x0BC (R/W) Interrupt Group Registers */
582 uint32_t RESERVED2[16];
583 __IOM uint32_t D_ISENABLER[16]; /*!< \brief +0x100 - 0x13C (R/W) Interrupt Set-Enable Registers */
584 uint32_t RESERVED3[16];
585 __IOM uint32_t D_ICENABLER[16]; /*!< \brief +0x180 - 0x1BC (R/W) Interrupt Clear-Enable Registers */
586 uint32_t RESERVED4[16];
587 __IOM uint32_t D_ISPENDR[16]; /*!< \brief +0x200 - 0x23C (R/W) Interrupt Set-Pending Registers */
588 uint32_t RESERVED5[16];
589 __IOM uint32_t D_ICPENDR[16]; /*!< \brief +0x280 - 0x2BC (R/W) Interrupt Clear-Pending Registers */
590 uint32_t RESERVED6[16];
591 __IOM uint32_t D_ISACTIVER[16]; /*!< \brief +0x300 - 0x33C (R/W) Interrupt Set-Active Registers */
592 uint32_t RESERVED7[16];
593 __IOM uint32_t D_ICACTIVER[16]; /*!< \brief +0x380 - 0x3BC (R/W) Interrupt Clear-Active Registers */
594 uint32_t RESERVED8[16];
595 __IOM uint8_t D_IPRIORITYR[512]; /*!< \brief +0x400 - 0x5FC (R/W) Interrupt Priority Registers */
596 uint32_t RESERVED9[128];
597 __IOM uint8_t D_ITARGETSR[512]; /*!< \brief +0x800 - 0x9FC (R/W) Interrupt Targets Registers */
598 uint32_t RESERVED10[128];
599 __IOM uint32_t D_ICFGR[32]; /*!< \brief +0xC00 - 0xC7C (R/W) Interrupt Configuration Registers */
600 uint32_t RESERVED11[32];
601 __IM uint32_t D_PPISR; /*!< \brief +0xD00 (R/ ) Private Peripheral Interrupt Status Register */
602 __IM uint32_t D_SPISR[15]; /*!< \brief +0xD04 - 0xD3C (R/ ) Shared Peripheral Interrupt Status Registers */
603 uint32_t RESERVED12[112];
604 __OM uint32_t D_SGIR; /*!< \brief +0xF00 ( /W) Software Generated Interrupt Register */
605 uint32_t RESERVED13[3];
606 __IOM uint8_t D_CPENDSGIR[16]; /*!< \brief +0xF10 - 0xF1C (R/W) SGI Clear-Pending Registers */
607 __IOM uint8_t D_SPENDSGIR[16]; /*!< \brief +0xF20 - 0xF2C (R/W) SGI Set-Pending Registers */
608 uint32_t RESERVED14[40];
609 __IM uint32_t D_PIDR4; /*!< \brief +0xFD0 (R/ ) Peripheral ID4 Register */
610 __IM uint32_t D_PIDR5; /*!< \brief +0xFD4 (R/ ) Peripheral ID5 Register */
611 __IM uint32_t D_PIDR6; /*!< \brief +0xFD8 (R/ ) Peripheral ID6 Register */
612 __IM uint32_t D_PIDR7; /*!< \brief +0xFDC (R/ ) Peripheral ID7 Register */
613 __IM uint32_t D_PIDR0; /*!< \brief +0xFE0 (R/ ) Peripheral ID0 Register */
614 __IM uint32_t D_PIDR1; /*!< \brief +0xFE4 (R/ ) Peripheral ID1 Register */
615 __IM uint32_t D_PIDR2; /*!< \brief +0xFE8 (R/ ) Peripheral ID2 Register */
616 __IM uint32_t D_PIDR3; /*!< \brief +0xFEC (R/ ) Peripheral ID3 Register */
617 __IM uint32_t D_CIDR0; /*!< \brief +0xFF0 (R/ ) Component ID0 Register */
618 __IM uint32_t D_CIDR1; /*!< \brief +0xFF4 (R/ ) Component ID1 Register */
619 __IM uint32_t D_CIDR2; /*!< \brief +0xFF8 (R/ ) Component ID2 Register */
620 __IM uint32_t D_CIDR3; /*!< \brief +0xFFC (R/ ) Component ID3 Register */
621 } GICDistributor_Type;
623 #define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< GIC Distributor configuration struct */
625 /** \brief Structure type to access the Generic Interrupt Controller Interface (GICC)
629 __IOM uint32_t C_CTLR; /*!< \brief +0x000 (R/W) CPU Interface Control Register */
630 __IOM uint32_t C_PMR; /*!< \brief +0x004 (R/W) Interrupt Priority Mask Register */
631 __IOM uint32_t C_BPR; /*!< \brief +0x008 (R/W) Binary Point Register */
632 __IM uint32_t C_IAR; /*!< \brief +0x00C (R/ ) Interrupt Acknowledge Register */
633 __OM uint32_t C_EOIR; /*!< \brief +0x010 ( /W) End Of Interrupt Register */
634 __IM uint32_t C_RPR; /*!< \brief +0x014 (R/ ) Running Priority Register */
635 __IM uint32_t C_HPPIR; /*!< \brief +0x018 (R/ ) Highest Priority Pending Interrupt Register */
636 __IOM uint32_t C_ABPR; /*!< \brief +0x01C (R/W) Aliased Binary Point Register */
637 __IM uint32_t C_AIAR; /*!< \brief +0x020 (R/ ) Aliased Interrupt Acknowledge Register */
638 __OM uint32_t C_AEOIR; /*!< \brief +0x024 ( /W) Aliased End Of Interrupt Register */
639 __IM uint32_t C_AHPPIR; /*!< \brief +0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */
640 uint32_t RESERVED15[41];
641 __IOM uint32_t C_APR0; /*!< \brief +0x0D0 (R/W) Active Priority Register */
642 uint32_t RESERVED16[3];
643 __IOM uint32_t C_NSAPR0; /*!< \brief +0x0E0 (R/W) Non-secure Active Priority Register */
644 uint32_t RESERVED17[6];
645 __IM uint32_t C_IIDR; /*!< \brief +0x0FC (R/ ) CPU Interface Identification Register */
646 uint32_t RESERVED18[960];
647 __OM uint32_t C_DIR; /*!< \brief +0x1000 ( /W) Deactivate Interrupt Register */
650 #define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< GIC Interface configuration struct */
653 #if (__TIM_PRESENT == 1U)
654 #if ((__CORTEX_A == 5U)||(__CORTEX_A == 9U))
655 /** \brief Structure type to access the Private Timer
659 __IO uint32_t LOAD; //!< \brief +0x000 - RW - Private Timer Load Register
660 __IO uint32_t COUNTER; //!< \brief +0x004 - RW - Private Timer Counter Register
661 __IO uint32_t CONTROL; //!< \brief +0x008 - RW - Private Timer Control Register
662 __IO uint32_t ISR; //!< \brief +0x00C - RW - Private Timer Interrupt Status Register
663 uint32_t RESERVED[4];
664 __IO uint32_t WLOAD; //!< \brief +0x020 - RW - Watchdog Load Register
665 __IO uint32_t WCOUNTER; //!< \brief +0x024 - RW - Watchdog Counter Register
666 __IO uint32_t WCONTROL; //!< \brief +0x028 - RW - Watchdog Control Register
667 __IO uint32_t WISR; //!< \brief +0x02C - RW - Watchdog Interrupt Status Register
668 __IO uint32_t WRESET; //!< \brief +0x030 - RW - Watchdog Reset Status Register
669 __I uint32_t WDISABLE; //!< \brief +0x034 - RO - Watchdog Disable Register
671 #define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer configuration struct */
675 /*******************************************************************************
676 * Hardware Abstraction Layer
677 Core Function Interface contains:
679 - L2C-310 Cache Controller Functions
680 - PL1 Timer Functions
683 ******************************************************************************/
685 /* ########################## L1 Cache functions ################################# */
687 /** \brief Enable Caches
691 __STATIC_INLINE void L1C_EnableCaches(void) {
692 // Set I bit 12 to enable I Cache
693 // Set C bit 2 to enable D Cache
694 __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
697 /** \brief Disable Caches
701 __STATIC_INLINE void L1C_DisableCaches(void) {
702 // Clear I bit 12 to disable I Cache
703 // Clear C bit 2 to disable D Cache
704 __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
708 /** \brief Enable BTAC
712 __STATIC_INLINE void L1C_EnableBTAC(void) {
713 // Set Z bit 11 to enable branch prediction
714 __set_SCTLR( __get_SCTLR() | (1 << 11));
718 /** \brief Disable BTAC
722 __STATIC_INLINE void L1C_DisableBTAC(void) {
723 // Clear Z bit 11 to disable branch prediction
724 __set_SCTLR( __get_SCTLR() & ~(1 << 11));
727 /** \brief Invalidate entire branch predictor array
729 BPIALL. Branch Predictor Invalidate All.
732 __STATIC_INLINE void L1C_InvalidateBTAC(void) {
734 __DSB(); //ensure completion of the invalidation
735 __ISB(); //ensure instruction fetch path sees new state
738 /** \brief Invalidate the whole I$
740 ICIALLU. Instruction Cache Invalidate All to PoU
742 __STATIC_INLINE void L1C_InvalidateICacheAll(void) {
744 __DSB(); //ensure completion of the invalidation
745 __ISB(); //ensure instruction fetch path sees new I cache state
748 /** \brief Clean D$ by MVA
750 DCCMVAC. Data cache clean by MVA to PoC
752 __STATIC_INLINE void L1C_CleanDCacheMVA(void *va) {
753 __set_DCCMVAC((uint32_t)va);
754 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
757 /** \brief Invalidate D$ by MVA
759 DCIMVAC. Data cache invalidate by MVA to PoC
761 __STATIC_INLINE void L1C_InvalidateDCacheMVA(void *va) {
762 __set_DCIMVAC((uint32_t)va);
763 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
766 /** \brief Clean and Invalidate D$ by MVA
768 DCCIMVAC. Data cache clean and invalidate by MVA to PoC
770 __STATIC_INLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
771 __set_DCCIMVAC((uint32_t)va);
772 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
775 /** \brief Clean and Invalidate the entire data or unified cache
777 Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
779 __STATIC_INLINE void L1C_CleanInvalidateCache(uint32_t op) {
780 __L1C_CleanInvalidateCache(op); // compiler specific call
784 /** \brief Invalidate the whole D$
786 DCISW. Invalidate by Set/Way
789 __STATIC_INLINE void L1C_InvalidateDCacheAll(void) {
790 L1C_CleanInvalidateCache(0);
793 /** \brief Clean the whole D$
795 DCCSW. Clean by Set/Way
798 __STATIC_INLINE void L1C_CleanDCacheAll(void) {
799 L1C_CleanInvalidateCache(1);
802 /** \brief Clean and invalidate the whole D$
804 DCCISW. Clean and Invalidate by Set/Way
807 __STATIC_INLINE void L1C_CleanInvalidateDCacheAll(void) {
808 L1C_CleanInvalidateCache(2);
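/* Example (illustrative only): a typical L1 cache bring-up sequence built from the
   functions above. Assumes the MMU and translation tables have already been set up by
   the device start-up code.

     L1C_InvalidateBTAC();                     // discard stale branch predictor entries
     L1C_InvalidateICacheAll();                // discard stale instruction cache contents
     L1C_InvalidateDCacheAll();                // discard stale data cache contents
     L1C_EnableBTAC();                         // enable branch prediction
     L1C_EnableCaches();                       // enable instruction and data caches
*/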
812 /* ########################## L2 Cache functions ################################# */
813 #if (__L2C_PRESENT == 1U)
814 //Cache Sync operation
815 __STATIC_INLINE void L2C_Sync(void)
817 L2C_310->CACHE_SYNC = 0x0;
820 //return Cache controller cache ID
821 __STATIC_INLINE int L2C_GetID (void)
823 return L2C_310->CACHE_ID;
826 //return Cache controller cache Type
827 __STATIC_INLINE int L2C_GetType (void)
829 return L2C_310->CACHE_TYPE;
832 //Invalidate all cache by way
833 __STATIC_INLINE void L2C_InvAllByWay (void)
837 if (L2C_310->AUX_CNT & (1<<16))
842 L2C_310->INV_WAY = (1 << assoc) - 1;
843 while(L2C_310->INV_WAY & ((1 << assoc) - 1)); //poll invalidate
848 //Clean and Invalidate all cache by way
849 __STATIC_INLINE void L2C_CleanInvAllByWay (void)
853 if (L2C_310->AUX_CNT & (1<<16))
858 L2C_310->CLEAN_INV_WAY = (1 << assoc) - 1;
859 while(L2C_310->CLEAN_INV_WAY & ((1 << assoc) - 1)); //poll invalidate
865 __STATIC_INLINE void L2C_Enable(void)
867 L2C_310->CONTROL = 0;
868 L2C_310->INTERRUPT_CLEAR = 0x000001FFuL;
869 L2C_310->DEBUG_CONTROL = 0;
870 L2C_310->DATA_LOCK_0_WAY = 0;
871 L2C_310->CACHE_SYNC = 0;
872 L2C_310->CONTROL = 0x01;
876 __STATIC_INLINE void L2C_Disable(void)
878 L2C_310->CONTROL = 0x00;
882 //Invalidate cache by physical address
883 __STATIC_INLINE void L2C_InvPa (void *pa)
885 L2C_310->INV_LINE_PA = (unsigned int)pa;
889 //Clean cache by physical address
890 __STATIC_INLINE void L2C_CleanPa (void *pa)
892 L2C_310->CLEAN_LINE_PA = (unsigned int)pa;
896 //Clean and invalidate cache by physical address
897 __STATIC_INLINE void L2C_CleanInvPa (void *pa)
899 L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa;
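/* Example (illustrative only): enable the L2C-310 and clean/invalidate one cache line by
   physical address, e.g. before handing a buffer to a DMA engine. The buffer address is
   hypothetical and assumes a flat (identity) mapping; otherwise the virtual address must
   be translated to a physical address first.

     void *buffer_pa = (void *)0x80100000U;    // physical address of a DMA buffer (hypothetical)
     L2C_InvAllByWay();                        // start from a known-clean state
     L2C_Enable();                             // enable the L2C-310
     L2C_CleanInvPa(buffer_pa);                // clean+invalidate the line holding buffer_pa
     L2C_Sync();                               // drain the controller before starting the DMA
*/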
904 /* ########################## GIC functions ###################################### */
905 #if (__GIC_PRESENT == 1U)
907 __STATIC_INLINE void GIC_EnableDistributor(void)
909 GICDistributor->D_CTLR |= 1; //enable distributor
912 __STATIC_INLINE void GIC_DisableDistributor(void)
914 GICDistributor->D_CTLR &=~1; //disable distributor
917 __STATIC_INLINE uint32_t GIC_DistributorInfo(void)
919 return (uint32_t)(GICDistributor->D_TYPER);
922 __STATIC_INLINE uint32_t GIC_DistributorImplementer(void)
924 return (uint32_t)(GICDistributor->D_IIDR);
927 __STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target)
929 GICDistributor->D_ITARGETSR[((uint32_t)(int32_t)IRQn)] = (uint8_t)(cpu_target & 0x0f);
932 __STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn)
934 return ((uint32_t) GICDistributor->D_ITARGETSR[((uint32_t)(int32_t)IRQn)] & 0x0f);
937 __STATIC_INLINE void GIC_EnableInterface(void)
939 GICInterface->C_CTLR |= 1; //enable interface
942 __STATIC_INLINE void GIC_DisableInterface(void)
944 GICInterface->C_CTLR &=~1; //disable interface
947 __STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
949 return (IRQn_Type)(GICInterface->C_IAR);
952 __STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
954 GICInterface->C_EOIR = IRQn;
957 __STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
959 GICDistributor->D_ISENABLER[IRQn / 32] = 1 << (IRQn % 32);
962 __STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
964 GICDistributor->D_ICENABLER[IRQn / 32] = 1 << (IRQn % 32);
967 __STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
969 GICDistributor->D_ISPENDR[IRQn / 32] = 1 << (IRQn % 32);
972 __STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
974 GICDistributor->D_ICPENDR[IRQn / 32] = 1 << (IRQn % 32);
977 __STATIC_INLINE void GIC_SetLevelModel(IRQn_Type IRQn, int8_t edge_level, int8_t model)
979 // Word-size read/writes must be used to access this register
980 volatile uint32_t * field = &(GICDistributor->D_ICFGR[IRQn / 16]);
981 unsigned bit_shift = (IRQn % 16)<<1;
982 unsigned int save_word;
984 save_word = *field;
985 save_word &= (~(3 << bit_shift));
987 *field = (save_word | (((edge_level<<1) | model) << bit_shift));
990 __STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
992 GICDistributor->D_IPRIORITYR[((uint32_t)(int32_t)IRQn)] = (uint8_t)(priority);
995 __STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
997 return((uint32_t)GICDistributor->D_IPRIORITYR[((uint32_t)(int32_t)IRQn)]);
1000 __STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
1002 GICInterface->C_PMR = priority & 0xff; //set priority mask
1005 __STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
1007 return (uint32_t)GICInterface->C_PMR;
1010 __STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
1012 GICInterface->C_BPR = binary_point & 0x07; //set binary point
1015 __STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
1017 return (uint32_t)GICInterface->C_BPR;
1020 __STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
1022 uint32_t pending, active;
1024 active = ((GICDistributor->D_ISACTIVER[IRQn / 32]) >> (IRQn % 32)) & 0x1;
1025 pending =((GICDistributor->D_ISPENDR[IRQn / 32]) >> (IRQn % 32)) & 0x1;
1027 return ((active<<1) | pending);
1030 __STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list)
1032 GICDistributor->D_SGIR = ((filter_list & 0x3) << 24) | ((target_list & 0xff) << 16) | (IRQn & 0xf);
1035 __STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void)
1037 return GICInterface->C_HPPIR;
1040 __STATIC_INLINE uint32_t GIC_GetInterfaceId(void)
1042 return GICInterface->C_IIDR;
1046 __STATIC_INLINE void GIC_DistInit(void)
1049 uint32_t num_irq = 0;
1050 uint32_t priority_field;
1052 //A reset sets all bits in the D_IGROUPRs corresponding to the SPIs to 0,
1053 //configuring all of the interrupts as Secure.
1055 //Disable interrupt forwarding
1056 GIC_DisableDistributor();
1057 //Get the maximum number of interrupts that the GIC supports
1058 num_irq = 32 * ((GIC_DistributorInfo() & 0x1f) + 1);
1060 /* Priority level is implementation defined.
1061 To determine the number of priority bits implemented write 0xFF to a D_IPRIORITYR
1062 priority field and read back the value stored.*/
1063 GIC_SetPriority((IRQn_Type)0, 0xff);
1064 priority_field = GIC_GetPriority((IRQn_Type)0);
1066 for (i = (IRQn_Type)32; i < num_irq; i++)
1068 //Disable the SPI interrupt
1070 //Set level-sensitive and 1-N model
1071 GIC_SetLevelModel(i, 0, 1);
1073 GIC_SetPriority(i, priority_field/2);
1074 //Set target list to CPU0
1075 GIC_SetTarget(i, 1);
1077 //Enable distributor
1078 GIC_EnableDistributor();
1081 __STATIC_INLINE void GIC_CPUInterfaceInit(void)
1084 uint32_t priority_field;
1086 //A reset sets all bits in the D_IGROUPRs corresponding to the SPIs to 0,
1087 //configuring all of the interrupts as Secure.
1089 //Disable interrupt forwarding
1090 GIC_DisableInterface();
1092 /* Priority level is implementation defined.
1093 To determine the number of priority bits implemented write 0xFF to a D_IPRIORITYR
1094 priority field and read back the value stored.*/
1095 GIC_SetPriority((IRQn_Type)0, 0xff);
1096 priority_field = GIC_GetPriority((IRQn_Type)0);
1099 for (i = (IRQn_Type)0; i < 32; i++)
1101 //Set level-sensitive and 1-N model for PPI
1103 GIC_SetLevelModel(i, 0, 1);
1104 //Disable SGI and PPI interrupts
1107 GIC_SetPriority(i, priority_field/2);
1110 GIC_EnableInterface();
1111 //Set binary point to 0
1112 GIC_SetBinaryPoint(0);
1114 GIC_SetInterfacePriorityMask(0xff);
1117 __STATIC_INLINE void GIC_Enable(void)
1120 GIC_CPUInterfaceInit(); //per CPU
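/* Example (illustrative only): bring up the GIC and route one peripheral interrupt to
   CPU0. Interrupt0_IRQn is a hypothetical IRQ number; real values come from the device
   header file.

     GIC_Enable();                             // initialize distributor and CPU interface
     GIC_SetLevelModel(Interrupt0_IRQn, 0, 1); // level-sensitive, 1-N model
     GIC_SetPriority(Interrupt0_IRQn, 0x80U);  // mid-range priority
     GIC_SetTarget(Interrupt0_IRQn, 1U);       // forward to CPU0
     GIC_EnableIRQ(Interrupt0_IRQn);           // enable forwarding of the interrupt
*/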
1124 /* ########################## Generic Timer functions ############################ */
1125 #if (__TIM_PRESENT == 1U)
1127 /* PL1 Physical Timer */
1128 #if (__CORTEX_A == 7U)
1129 __STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value) {
1130 __set_CNTFRQ(value);
1134 __STATIC_INLINE void PL1_SetLoadValue(uint32_t value) {
1135 __set_CNTP_TVAL(value);
1139 __STATIC_INLINE uint32_t PL1_GetCurrentValue() {
1140 return(__get_CNTP_TVAL());
1143 __STATIC_INLINE void PL1_SetControl(uint32_t value) {
1144 __set_CNTP_CTL(value);
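/* Example (illustrative only): program the PL1 physical timer for a timer tick using the
   functions above. The reload value is hypothetical and depends on the counter frequency
   (CNTFRQ).

     uint32_t tick_reload = 0x00100000U;       // hypothetical reload value
     PL1_SetLoadValue(tick_reload);            // CNTP_TVAL counts down from this value
     PL1_SetControl(1U);                       // CNTP_CTL: enable timer, interrupt not masked
*/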
1149 #elif ((__CORTEX_A == 5U)||(__CORTEX_A == 9U))
1150 __STATIC_INLINE void PTIM_SetLoadValue(uint32_t value) {
1154 __STATIC_INLINE uint32_t PTIM_GetLoadValue() {
1158 __STATIC_INLINE uint32_t PTIM_GetCurrentValue() {
1159 return(PTIM->COUNTER);
1162 __STATIC_INLINE void PTIM_SetControl(uint32_t value) {
1163 PTIM->CONTROL = value;
1166 __STATIC_INLINE uint32_t PTIM_GetControl(void) {
1167 return(PTIM->CONTROL);
1170 __STATIC_INLINE void PTIM_ClearEventFlag(void) {
1176 /* ########################## MMU functions ###################################### */
1178 #define SECTION_DESCRIPTOR (0x2)
1179 #define SECTION_MASK (0xFFFFFFFC)
1181 #define SECTION_TEXCB_MASK (0xFFFF8FF3)
1182 #define SECTION_B_SHIFT (2)
1183 #define SECTION_C_SHIFT (3)
1184 #define SECTION_TEX0_SHIFT (12)
1185 #define SECTION_TEX1_SHIFT (13)
1186 #define SECTION_TEX2_SHIFT (14)
1188 #define SECTION_XN_MASK (0xFFFFFFEF)
1189 #define SECTION_XN_SHIFT (4)
1191 #define SECTION_DOMAIN_MASK (0xFFFFFE1F)
1192 #define SECTION_DOMAIN_SHIFT (5)
1194 #define SECTION_P_MASK (0xFFFFFDFF)
1195 #define SECTION_P_SHIFT (9)
1197 #define SECTION_AP_MASK (0xFFFF73FF)
1198 #define SECTION_AP_SHIFT (10)
1199 #define SECTION_AP2_SHIFT (15)
1201 #define SECTION_S_MASK (0xFFFEFFFF)
1202 #define SECTION_S_SHIFT (16)
1204 #define SECTION_NG_MASK (0xFFFDFFFF)
1205 #define SECTION_NG_SHIFT (17)
1207 #define SECTION_NS_MASK (0xFFF7FFFF)
1208 #define SECTION_NS_SHIFT (19)
1210 #define PAGE_L1_DESCRIPTOR (0x1)
1211 #define PAGE_L1_MASK (0xFFFFFFFC)
1213 #define PAGE_L2_4K_DESC (0x2)
1214 #define PAGE_L2_4K_MASK (0xFFFFFFFD)
1216 #define PAGE_L2_64K_DESC (0x1)
1217 #define PAGE_L2_64K_MASK (0xFFFFFFFC)
1219 #define PAGE_4K_TEXCB_MASK (0xFFFFFE33)
1220 #define PAGE_4K_B_SHIFT (2)
1221 #define PAGE_4K_C_SHIFT (3)
1222 #define PAGE_4K_TEX0_SHIFT (6)
1223 #define PAGE_4K_TEX1_SHIFT (7)
1224 #define PAGE_4K_TEX2_SHIFT (8)
1226 #define PAGE_64K_TEXCB_MASK (0xFFFF8FF3)
1227 #define PAGE_64K_B_SHIFT (2)
1228 #define PAGE_64K_C_SHIFT (3)
1229 #define PAGE_64K_TEX0_SHIFT (12)
1230 #define PAGE_64K_TEX1_SHIFT (13)
1231 #define PAGE_64K_TEX2_SHIFT (14)
1233 #define PAGE_TEXCB_MASK (0xFFFF8FF3)
1234 #define PAGE_B_SHIFT (2)
1235 #define PAGE_C_SHIFT (3)
1236 #define PAGE_TEX_SHIFT (12)
1238 #define PAGE_XN_4K_MASK (0xFFFFFFFE)
1239 #define PAGE_XN_4K_SHIFT (0)
1240 #define PAGE_XN_64K_MASK (0xFFFF7FFF)
1241 #define PAGE_XN_64K_SHIFT (15)
1243 #define PAGE_DOMAIN_MASK (0xFFFFFE1F)
1244 #define PAGE_DOMAIN_SHIFT (5)
1246 #define PAGE_P_MASK (0xFFFFFDFF)
1247 #define PAGE_P_SHIFT (9)
1249 #define PAGE_AP_MASK (0xFFFFFDCF)
1250 #define PAGE_AP_SHIFT (4)
1251 #define PAGE_AP2_SHIFT (9)
1253 #define PAGE_S_MASK (0xFFFFFBFF)
1254 #define PAGE_S_SHIFT (10)
1256 #define PAGE_NG_MASK (0xFFFFF7FF)
1257 #define PAGE_NG_SHIFT (11)
1259 #define PAGE_NS_MASK (0xFFFFFFF7)
1260 #define PAGE_NS_SHIFT (3)
1262 #define OFFSET_1M (0x00100000)
1263 #define OFFSET_64K (0x00010000)
1264 #define OFFSET_4K (0x00001000)
1266 #define DESCRIPTOR_FAULT (0x00000000)
1268 /* Attributes enumerations */
1270 /* Region size attributes */
1276 } mmu_region_size_Type;
1278 /* Region type attributes */
1288 /* Region cacheability attributes */
1295 } mmu_cacheability_Type;
1297 /* Region parity check attributes */
1302 } mmu_ecc_check_Type;
1304 /* Region execution attributes */
1311 /* Region global attributes */
1318 /* Region shareability attributes */
1325 /* Region security attributes */
1332 /* Region access attributes */
1340 /* Memory Region definition */
1341 typedef struct RegionStruct {
1342 mmu_region_size_Type rg_t;
1343 mmu_memory_Type mem_t;
1345 mmu_cacheability_Type inner_norm_t;
1346 mmu_cacheability_Type outer_norm_t;
1347 mmu_ecc_check_Type e_t;
1348 mmu_execute_Type xn_t;
1349 mmu_global_Type g_t;
1350 mmu_secure_Type sec_t;
1351 mmu_access_Type priv_t;
1352 mmu_access_Type user_t;
1353 mmu_shared_Type sh_t;
1355 } mmu_region_attributes_Type;
1357 //Following macros define the descriptors and attributes
1358 //Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0
1359 #define section_normal(descriptor_l1, region) region.rg_t = SECTION; \
1360 region.domain = 0x0; \
1361 region.e_t = ECC_DISABLED; \
1362 region.g_t = GLOBAL; \
1363 region.inner_norm_t = WB_WA; \
1364 region.outer_norm_t = WB_WA; \
1365 region.mem_t = NORMAL; \
1366 region.sec_t = SECURE; \
1367 region.xn_t = EXECUTE; \
1368 region.priv_t = RW; \
1369 region.user_t = RW; \
1370 region.sh_t = NON_SHARED; \
1371 MMU_GetSectionDescriptor(&descriptor_l1, region);
1373 //Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0
1374 #define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \
1375 region.domain = 0x0; \
1376 region.e_t = ECC_DISABLED; \
1377 region.g_t = GLOBAL; \
1378 region.inner_norm_t = WB_WA; \
1379 region.outer_norm_t = WB_WA; \
1380 region.mem_t = NORMAL; \
1381 region.sec_t = SECURE; \
1382 region.xn_t = EXECUTE; \
1383 region.priv_t = READ; \
1384 region.user_t = READ; \
1385 region.sh_t = NON_SHARED; \
1386 MMU_GetSectionDescriptor(&descriptor_l1, region);
1388 //Sect_Normal_RO. Sect_Normal_Cod, but not executable
1389 #define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \
1390 region.domain = 0x0; \
1391 region.e_t = ECC_DISABLED; \
1392 region.g_t = GLOBAL; \
1393 region.inner_norm_t = WB_WA; \
1394 region.outer_norm_t = WB_WA; \
1395 region.mem_t = NORMAL; \
1396 region.sec_t = SECURE; \
1397 region.xn_t = NON_EXECUTE; \
1398 region.priv_t = READ; \
1399 region.user_t = READ; \
1400 region.sh_t = NON_SHARED; \
1401 MMU_GetSectionDescriptor(&descriptor_l1, region);
1403 //Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable
1404 #define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \
1405 region.domain = 0x0; \
1406 region.e_t = ECC_DISABLED; \
1407 region.g_t = GLOBAL; \
1408 region.inner_norm_t = WB_WA; \
1409 region.outer_norm_t = WB_WA; \
1410 region.mem_t = NORMAL; \
1411 region.sec_t = SECURE; \
1412 region.xn_t = NON_EXECUTE; \
1413 region.priv_t = RW; \
1414 region.user_t = RW; \
1415 region.sh_t = NON_SHARED; \
1416 MMU_GetSectionDescriptor(&descriptor_l1, region);
1417 //Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
1418 #define section_so(descriptor_l1, region) region.rg_t = SECTION; \
1419 region.domain = 0x0; \
1420 region.e_t = ECC_DISABLED; \
1421 region.g_t = GLOBAL; \
1422 region.inner_norm_t = NON_CACHEABLE; \
1423 region.outer_norm_t = NON_CACHEABLE; \
1424 region.mem_t = STRONGLY_ORDERED; \
1425 region.sec_t = SECURE; \
1426 region.xn_t = NON_EXECUTE; \
1427 region.priv_t = RW; \
1428 region.user_t = RW; \
1429 region.sh_t = NON_SHARED; \
1430 MMU_GetSectionDescriptor(&descriptor_l1, region);
1432 //Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0
1433 #define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \
1434 region.domain = 0x0; \
1435 region.e_t = ECC_DISABLED; \
1436 region.g_t = GLOBAL; \
1437 region.inner_norm_t = NON_CACHEABLE; \
1438 region.outer_norm_t = NON_CACHEABLE; \
1439 region.mem_t = STRONGLY_ORDERED; \
1440 region.sec_t = SECURE; \
1441 region.xn_t = NON_EXECUTE; \
1442 region.priv_t = READ; \
1443 region.user_t = READ; \
1444 region.sh_t = NON_SHARED; \
1445 MMU_GetSectionDescriptor(&descriptor_l1, region);
1447 //Sect_Device_RW. Sect_Device_RO, but writeable
1448 #define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \
1449 region.domain = 0x0; \
1450 region.e_t = ECC_DISABLED; \
1451 region.g_t = GLOBAL; \
1452 region.inner_norm_t = NON_CACHEABLE; \
1453 region.outer_norm_t = NON_CACHEABLE; \
1454 region.mem_t = STRONGLY_ORDERED; \
1455 region.sec_t = SECURE; \
1456 region.xn_t = NON_EXECUTE; \
1457 region.priv_t = RW; \
1458 region.user_t = RW; \
1459 region.sh_t = NON_SHARED; \
1460 MMU_GetSectionDescriptor(&descriptor_l1, region);
1461 //Page_4k_Device_RW. Shared device, not executable, rw, domain 0
1462 #define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \
1463 region.domain = 0x0; \
1464 region.e_t = ECC_DISABLED; \
1465 region.g_t = GLOBAL; \
1466 region.inner_norm_t = NON_CACHEABLE; \
1467 region.outer_norm_t = NON_CACHEABLE; \
1468 region.mem_t = SHARED_DEVICE; \
1469 region.sec_t = SECURE; \
1470 region.xn_t = NON_EXECUTE; \
1471 region.priv_t = RW; \
1472 region.user_t = RW; \
1473 region.sh_t = NON_SHARED; \
1474 MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
1476 //Page_64k_Device_RW. Shared device, not executable, rw, domain 0
1477 #define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \
1478 region.domain = 0x0; \
1479 region.e_t = ECC_DISABLED; \
1480 region.g_t = GLOBAL; \
1481 region.inner_norm_t = NON_CACHEABLE; \
1482 region.outer_norm_t = NON_CACHEABLE; \
1483 region.mem_t = SHARED_DEVICE; \
1484 region.sec_t = SECURE; \
1485 region.xn_t = NON_EXECUTE; \
1486 region.priv_t = RW; \
1487 region.user_t = RW; \
1488 region.sh_t = NON_SHARED; \
1489 MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
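/* Example (illustrative only): use the helper macro above to build a Sect_Normal L1
   descriptor. The variable names are hypothetical; MMU_GetSectionDescriptor(), which the
   macro calls, is defined further down in this file.

     uint32_t Sect_Normal;                     // encoded descriptor template
     mmu_region_attributes_Type region;        // scratch attribute set filled by the macro
     section_normal(Sect_Normal, region);      // normal memory, WB/WA, RW, executable, domain 0
*/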
1491 /** \brief Set section execution-never attribute
1493 \param [out] descriptor_l1 L1 descriptor.
1494 \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE.
1498 __STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn)
1500 *descriptor_l1 &= SECTION_XN_MASK;
1501 *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT);
1505 /** \brief Set section domain
1507 \param [out] descriptor_l1 L1 descriptor.
1508 \param [in] domain Section domain
1512 __STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain)
1514 *descriptor_l1 &= SECTION_DOMAIN_MASK;
1515 *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT);
1519 /** \brief Set section parity check
1521 \param [out] descriptor_l1 L1 descriptor.
1522 \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
1526 __STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
1528 *descriptor_l1 &= SECTION_P_MASK;
1529 *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
1533 /** \brief Set section access privileges
1535 \param [out] descriptor_l1 L1 descriptor.
1536 \param [in] user User Level Access: NO_ACCESS, RW, READ
1537 \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
1538 \param [in] afe Access flag enable
1542 __STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
1546 if (afe == 0) { //full access
1547 if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
1548 else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
1549 else if ((priv == RW) && (user == READ)) { ap = 0x2; }
1550 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
1551 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
1552 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
1555 else { //Simplified access
1556 if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
1557 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
1558 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
1559 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
1562 *descriptor_l1 &= SECTION_AP_MASK;
1563 *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT;
1564 *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT;
1569 /** \brief Set section shareability
1571 \param [out] descriptor_l1 L1 descriptor.
1572 \param [in] s_bit Section shareability: NON_SHARED, SHARED
1576 __STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit)
1578 *descriptor_l1 &= SECTION_S_MASK;
1579 *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT);
1583 /** \brief Set section Global attribute
1585 \param [out] descriptor_l1 L1 descriptor.
1586 \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL
1590 __STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit)
1592 *descriptor_l1 &= SECTION_NG_MASK;
1593 *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT);
1597 /** \brief Set section Security attribute
1599 \param [out] descriptor_l1 L1 descriptor.
1600 \param [in] s_bit Section Security attribute: SECURE, NON_SECURE
1604 __STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
1606 *descriptor_l1 &= SECTION_NS_MASK;
1607 *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT);
1611 /* Page 4k or 64k */
1612 /** \brief Set 4k/64k page execution-never attribute
1614 \param [out] descriptor_l2 L2 descriptor.
1615 \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE.
1616 \param [in] page Page size: PAGE_4k, PAGE_64k,
1620 __STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page)
1622 if (page == PAGE_4k)
1624 *descriptor_l2 &= PAGE_XN_4K_MASK;
1625 *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT);
1629 *descriptor_l2 &= PAGE_XN_64K_MASK;
1630 *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT);
1635 /** \brief Set 4k/64k page domain
1637 \param [out] descriptor_l1 L1 descriptor.
1638 \param [in] domain Page domain
1642 __STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain)
1644 *descriptor_l1 &= PAGE_DOMAIN_MASK;
1645 *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT);
1649 /** \brief Set 4k/64k page parity check
1651 \param [out] descriptor_l1 L1 descriptor.
1652 \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
1656 __STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
1658 *descriptor_l1 &= SECTION_P_MASK;
1659 *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
1663 /** \brief Set 4k/64k page access privileges
1665 \param [out] descriptor_l2 L2 descriptor.
1666 \param [in] user User Level Access: NO_ACCESS, RW, READ
1667 \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
1668 \param [in] afe Access flag enable
1672 __STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
1676 if (afe == 0) { //full access
1677 if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
1678 else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
1679 else if ((priv == RW) && (user == READ)) { ap = 0x2; }
1680 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
1681 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
1682 else if ((priv == READ) && (user == READ)) { ap = 0x6; }
1685 else { //Simplified access
1686 if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
1687 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
1688 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
1689 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
1692 *descriptor_l2 &= PAGE_AP_MASK;
1693 *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT;
1694 *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT;
1699 /** \brief Set 4k/64k page shareability
1701 \param [out] descriptor_l2 L2 descriptor.
1702 \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED
1706 __STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit)
1708 *descriptor_l2 &= PAGE_S_MASK;
1709 *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT);
1713 /** \brief Set 4k/64k page Global attribute
1715 \param [out] descriptor_l2 L2 descriptor.
1716 \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL
1720 __STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit)
1722 *descriptor_l2 &= PAGE_NG_MASK;
1723 *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT);
1727 /** \brief Set 4k/64k page Security attribute
1729 \param [out] descriptor_l1 L1 descriptor.
1730 \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE
1734 __STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
1736 *descriptor_l1 &= PAGE_NS_MASK;
1737 *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
/** \brief  Set Section memory attributes

  \param [out]    descriptor_l1  L1 descriptor.
  \param [in]               mem  Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
  \param [in]             outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in]             inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA

  \return          0
*/
__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
{
  *descriptor_l1 &= SECTION_TEXCB_MASK;

  if (STRONGLY_ORDERED == mem)
  {
    return 0;                                     // TEX = 0, C = 0, B = 0
  }
  else if (SHARED_DEVICE == mem)
  {
    *descriptor_l1 |= (1 << SECTION_B_SHIFT);
  }
  else if (NON_SHARED_DEVICE == mem)
  {
    *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
  }
  else if (NORMAL == mem)
  {
    *descriptor_l1 |= (1 << SECTION_TEX2_SHIFT);  // cacheable memory: TEX[1:0] = outer, C/B = inner
    switch (inner)
    {
      case NON_CACHEABLE: break;
      case WB_WA:    *descriptor_l1 |= (1 << SECTION_B_SHIFT); break;
      case WT:       *descriptor_l1 |= (1 << SECTION_C_SHIFT); break;
      case WB_NO_WA: *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT); break;
    }
    switch (outer)
    {
      case NON_CACHEABLE: break;
      case WB_WA:    *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT); break;
      case WT:       *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT); break;
      case WB_NO_WA: *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT); break;
    }
  }
  return 0;
}

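/*
  Worked example (illustrative only): NORMAL memory with write-back/write-allocate for
  both inner and outer cacheability yields TEX = 0b101, C = 0, B = 1, i.e. the TEX[2],
  TEX[0] and B bits of the section descriptor are set. `l1` is a hypothetical descriptor.

    uint32_t l1 = 0;
    MMU_MemorySection(&l1, NORMAL, WB_WA, WB_WA);   // outer = WB_WA, inner = WB_WA
*/
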
/** \brief  Set 4k/64k page memory attributes

  \param [out]    descriptor_l2  L2 descriptor.
  \param [in]               mem  4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
  \param [in]             outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in]             inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in]              page  Page size

  \return          0
*/
__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
{
  *descriptor_l2 &= PAGE_4K_TEXCB_MASK;

  if (page == PAGE_64k)
  {
    //same bit layout as a section
    MMU_MemorySection(descriptor_l2, mem, outer, inner);
  }
  else
  {
    if (STRONGLY_ORDERED == mem)
    {
      return 0;                                        // TEX = 0, C = 0, B = 0
    }
    else if (SHARED_DEVICE == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
    }
    else if (NON_SHARED_DEVICE == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
    }
    else if (NORMAL == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_TEX2_SHIFT);     // cacheable memory: TEX[1:0] = outer, C/B = inner
      switch (inner)
      {
        case NON_CACHEABLE: break;
        case WB_WA:    *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT); break;
        case WT:       *descriptor_l2 |= (1 << PAGE_4K_C_SHIFT); break;
        case WB_NO_WA: *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT); break;
      }
      switch (outer)
      {
        case NON_CACHEABLE: break;
        case WB_WA:    *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT); break;
        case WT:       *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT); break;
        case WB_NO_WA: *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT); break;
      }
    }
  }
  return 0;
}

/** \brief  Create a L1 section descriptor

  \param [out]     descriptor  L1 descriptor
  \param [in]             reg  Section attributes

  \return          0
*/
__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
{
  *descriptor = 0;

  MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
  MMU_XNSection(descriptor, reg.xn_t);
  MMU_DomainSection(descriptor, reg.domain);
  MMU_PSection(descriptor, reg.e_t);
  MMU_APSection(descriptor, reg.priv_t, reg.user_t, 1);
  MMU_SharedSection(descriptor, reg.sh_t);
  MMU_GlobalSection(descriptor, reg.g_t);
  MMU_SecureSection(descriptor, reg.sec_t);
  *descriptor &= SECTION_MASK;
  *descriptor |= SECTION_DESCRIPTOR;

  return 0;
}

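/*
  Usage sketch (illustrative only, not part of the CMSIS API): fill the attribute
  structure defined earlier in this header and derive a section descriptor from it.
  The resulting value can then be written into an L1 table with MMU_TTSection() below.

    mmu_region_attributes_Type region;
    uint32_t descriptor;

    region.rg_t         = SECTION;
    region.domain       = 0x0;
    region.e_t          = ECC_DISABLED;
    region.g_t          = GLOBAL;
    region.inner_norm_t = WB_WA;
    region.outer_norm_t = WB_WA;
    region.mem_t        = NORMAL;
    region.sec_t        = SECURE;
    region.xn_t         = EXECUTE;
    region.priv_t       = RW;
    region.user_t       = RW;
    region.sh_t         = NON_SHARED;

    MMU_GetSectionDescriptor(&descriptor, region);
*/
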
/** \brief  Create a L1 and L2 4k/64k page descriptor

  \param [out]     descriptor  L1 descriptor
  \param [out]    descriptor2  L2 descriptor
  \param [in]             reg  4k/64k page attributes

  \return          0
*/
__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg)
{
  *descriptor  = 0;
  *descriptor2 = 0;

  switch (reg.rg_t)
  {
    case PAGE_4k:
      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k);
      MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k);
      MMU_DomainPage(descriptor, reg.domain);
      MMU_PPage(descriptor, reg.e_t);
      MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
      MMU_SharedPage(descriptor2, reg.sh_t);
      MMU_GlobalPage(descriptor2, reg.g_t);
      MMU_SecurePage(descriptor, reg.sec_t);
      *descriptor  &= PAGE_L1_MASK;
      *descriptor  |= PAGE_L1_DESCRIPTOR;
      *descriptor2 &= PAGE_L2_4K_MASK;
      *descriptor2 |= PAGE_L2_4K_DESC;
      break;

    case PAGE_64k:
      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k);
      MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k);
      MMU_DomainPage(descriptor, reg.domain);
      MMU_PPage(descriptor, reg.e_t);
      MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
      MMU_SharedPage(descriptor2, reg.sh_t);
      MMU_GlobalPage(descriptor2, reg.g_t);
      MMU_SecurePage(descriptor, reg.sec_t);
      *descriptor  &= PAGE_L1_MASK;
      *descriptor  |= PAGE_L1_DESCRIPTOR;
      *descriptor2 &= PAGE_L2_64K_MASK;
      *descriptor2 |= PAGE_L2_64K_DESC;
      break;

    case SECTION:
      //sections are handled by MMU_GetSectionDescriptor
      break;
  }

  return 0;
}

/** \brief  Create a 1MB Section

  \param [in]               ttb  Translation table base address
  \param [in]      base_address  Section base address
  \param [in]             count  Number of sections to create
  \param [in]     descriptor_l1  L1 descriptor (region attributes)
*/
__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
{
  uint32_t offset;
  uint32_t entry;
  uint32_t i;

  offset = base_address >> 20;
  entry  = (base_address & 0xFFF00000) | descriptor_l1;

  //4 bytes aligned
  ttb = ttb + offset;

  for (i = 0; i < count; i++ )
  {
    //4 bytes aligned
    *ttb++ = entry;
    entry += OFFSET_1M;
  }
}

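/*
  Usage sketch (illustrative only): flat-map 16MB of RAM at 0x80000000 with 1MB sections.
  `ttb_l1` is a hypothetical 16kB-aligned, 4096-entry L1 table and `descriptor` a value
  produced by MMU_GetSectionDescriptor() above.

    MMU_TTSection(ttb_l1, 0x80000000U, 16U, descriptor);   // 16 consecutive 1MB entries
*/
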
/** \brief  Create a 4k page entry

  \param [in]               ttb  L1 table base address
  \param [in]      base_address  4k base address
  \param [in]             count  Number of 4k pages to create
  \param [in]     descriptor_l1  L1 descriptor (region attributes)
  \param [in]            ttb_l2  L2 table base address
  \param [in]     descriptor_l2  L2 descriptor (region attributes)
*/
__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
{
  uint32_t offset, offset2;
  uint32_t entry, entry2;
  uint32_t i;

  offset = base_address >> 20;
  entry  = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;

  //4 bytes aligned
  ttb += offset;
  //create l1_entry
  *ttb = entry;

  offset2 = (base_address & 0xff000) >> 12;
  ttb_l2 += offset2;
  entry2 = (base_address & 0xFFFFF000) | descriptor_l2;
  for (i = 0; i < count; i++ )
  {
    //4 bytes aligned
    *ttb_l2++ = entry2;
    entry2 += OFFSET_4K;
  }
}

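/*
  Usage sketch (illustrative only): map 64kB of peripheral space at 0x1FF00000 with 4k
  pages. `ttb_l1` and `ttb_l2` are hypothetical, suitably aligned L1/L2 tables; the two
  descriptors come from MMU_GetPageDescriptor() above with region.rg_t set to PAGE_4k.

    uint32_t l1_desc, l2_desc;
    MMU_GetPageDescriptor(&l1_desc, &l2_desc, region);
    MMU_TTPage4k(ttb_l1, 0x1FF00000U, 16U, l1_desc, ttb_l2, l2_desc);   // 16 x 4kB pages
*/
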
/** \brief  Create a 64k page entry

  \param [in]               ttb  L1 table base address
  \param [in]      base_address  64k base address
  \param [in]             count  Number of 64k pages to create
  \param [in]     descriptor_l1  L1 descriptor (region attributes)
  \param [in]            ttb_l2  L2 table base address
  \param [in]     descriptor_l2  L2 descriptor (region attributes)
*/
__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
{
  uint32_t offset, offset2;
  uint32_t entry, entry2;
  uint32_t i, j;

  offset = base_address >> 20;
  entry  = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;

  //4 bytes aligned
  ttb += offset;
  //create l1_entry
  *ttb = entry;

  offset2 = (base_address & 0xff000) >> 12;
  ttb_l2 += offset2;
  entry2 = (base_address & 0xFFFF0000) | descriptor_l2;
  for (i = 0; i < count; i++ )
  {
    //a 64k (large) page is replicated in 16 consecutive L2 entries
    for (j = 0; j < 16; j++)
    {
      //4 bytes aligned
      *ttb_l2++ = entry2;
    }
    entry2 += OFFSET_64K;
  }
}

/** \brief  Enable MMU
*/
__STATIC_INLINE void MMU_Enable(void) {
  // Set M bit 0 to enable the MMU
  // Set AFE bit to enable simplified access permissions model
  // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
  __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
  __ISB();   // ensure subsequent instructions execute with the MMU enabled
}

/** \brief  Disable MMU
*/
__STATIC_INLINE void MMU_Disable(void) {
  // Clear M bit 0 to disable the MMU
  __set_SCTLR( __get_SCTLR() & ~1);
  __ISB();   // ensure subsequent instructions execute with the MMU disabled
}

/** \brief  Invalidate entire unified TLB

  TLBIALL. Invalidate entire unified TLB
*/
__STATIC_INLINE void MMU_InvalidateTLB(void) {
  __set_TLBIALL(0);
  __DSB();   //ensure completion of the invalidation
  __ISB();   //ensure instruction fetch path sees new state
}

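/*
  Typical bring-up sequence (illustrative sketch, not a mandated order; assumes the
  translation tables have already been populated with the MMU_TT* helpers above and that
  the CP15 accessors __set_TTBR0()/__set_DACR() from earlier in this header are used):

    __set_TTBR0((uint32_t)ttb_l1);   // point TTBR0 at the L1 table
    __set_DACR(1);                   // domain 0 = client: access permissions are checked
    MMU_InvalidateTLB();             // discard stale translations
    MMU_Enable();                    // SCTLR: M = 1, AFE = 1
*/
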
#endif /* __CORE_CA_H_DEPENDANT */

#endif /* __CMSIS_GENERIC */