2 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
3 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 * SPDX-License-Identifier: MIT
7 * Permission is hereby granted, free of charge, to any person obtaining a copy of
8 * this software and associated documentation files (the "Software"), to deal in
9 * the Software without restriction, including without limitation the rights to
10 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11 * the Software, and to permit persons to whom the Software is furnished to do so,
12 * subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in all
15 * copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * https://www.FreeRTOS.org
25 * https://github.com/FreeRTOS
29 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
30 * all the API functions to use the MPU wrappers. That should only be done when
31 * task.h is included from an application file. */
32 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
34 /* Scheduler includes. */
39 #include "mpu_wrappers.h"
40 #include "mpu_syscall_numbers.h"
42 /* Portasm includes. */
45 #if ( configENABLE_TRUSTZONE == 1 )
46 /* Secure components includes. */
47 #include "secure_context.h"
48 #include "secure_init.h"
49 #endif /* configENABLE_TRUSTZONE */
51 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
54 * The FreeRTOS Cortex-M33 port can be configured to run on the Secure Side only
55 * i.e. the processor boots as secure and never jumps to the non-secure side.
56 * The TrustZone support in the port must be disabled in order to run FreeRTOS
57 * on the secure side. The following are the valid configuration settings:
59 * 1. Run FreeRTOS on the Secure Side:
60 * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
62 * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
63 * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
65 * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
66 * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
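 *
 * For illustration, a minimal FreeRTOSConfig.h sketch for configuration 2 above
 * (the only assumption made by the example is that the two symbols live in the
 * application's FreeRTOSConfig.h):
 *
 *     #define configRUN_FREERTOS_SECURE_ONLY    0
 *     #define configENABLE_TRUSTZONE            1
 */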
68 #if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
69 #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
73 * Cortex-M23 does not have non-secure PSPLIM. We should use PSPLIM on Cortex-M23
74 * only when FreeRTOS runs on the secure side.
76 #if ( ( portHAS_ARMV8M_MAIN_EXTENSION == 0 ) && ( configRUN_FREERTOS_SECURE_ONLY == 0 ) )
77 #define portUSE_PSPLIM_REGISTER 0
79 #define portUSE_PSPLIM_REGISTER 1
81 /*-----------------------------------------------------------*/
84 * @brief Prototype of all Interrupt Service Routines (ISRs).
86 typedef void ( * portISR_t )( void );
87 /*-----------------------------------------------------------*/
90 * @brief Constants required to manipulate the NVIC.
92 #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
93 #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
94 #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
95 #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
96 #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
97 #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
98 #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
99 #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
100 #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
101 #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
102 #define portMIN_INTERRUPT_PRIORITY ( 255UL )
103 #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
104 #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
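/*
 * Illustrative note, not additional port code: ORing the two constants above
 * into SHPR3 programs the PendSV priority byte (bits 23:16) and the SysTick
 * priority byte (bits 31:24) to the lowest possible priority, e.g.
 *
 *     portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
 *     portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
 */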
105 /*-----------------------------------------------------------*/
108 * @brief Constants required to manipulate the SCB.
110 #define portSCB_VTOR_REG ( *( ( portISR_t ** ) 0xe000ed08 ) )
111 #define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( ( volatile uint32_t * ) 0xe000ed24 ) )
112 #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
113 /*-----------------------------------------------------------*/
116 * @brief Constants used to check the installation of the FreeRTOS interrupt handlers.
118 #define portVECTOR_INDEX_SVC ( 11 )
119 #define portVECTOR_INDEX_PENDSV ( 14 )
120 /*-----------------------------------------------------------*/
123 * @brief Constants required to check the validity of an interrupt priority.
125 #define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
126 #define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
127 #define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
128 #define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
129 #define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
130 #define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
131 #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
132 #define portPRIGROUP_SHIFT ( 8UL )
133 /*-----------------------------------------------------------*/
136 * @brief Constants used during system call enter and exit.
138 #define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
139 #define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
140 /*-----------------------------------------------------------*/
143 * @brief Constants required to manipulate the FPU.
145 #define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
146 #define portCPACR_CP10_VALUE ( 3UL )
147 #define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
148 #define portCPACR_CP10_POS ( 20UL )
149 #define portCPACR_CP11_POS ( 22UL )
151 #define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
152 #define portFPCCR_ASPEN_POS ( 31UL )
153 #define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
154 #define portFPCCR_LSPEN_POS ( 30UL )
155 #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
156 /*-----------------------------------------------------------*/
159 * @brief Offsets in the stack to the parameters when inside the SVC handler.
161 #define portOFFSET_TO_LR ( 5 )
162 #define portOFFSET_TO_PC ( 6 )
163 #define portOFFSET_TO_PSR ( 7 )
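/*
 * For reference, not additional port code: on exception entry the hardware
 * stacks R0, R1, R2, R3, R12, LR, PC and xPSR in that order, so with a pointer
 * to the stacked R0 the offsets above index the frame as, for example,
 *
 *     ulPC  = pulCallerStackAddress[ portOFFSET_TO_PC ];   ( word index 6 )
 *     ulPSR = pulCallerStackAddress[ portOFFSET_TO_PSR ];  ( word index 7 )
 */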
164 /*-----------------------------------------------------------*/
167 * @brief Constants required to manipulate the MPU.
169 #define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
170 #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
171 #define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
173 #define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
174 #define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
176 #define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
177 #define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
179 #define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
180 #define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
182 #define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
183 #define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
185 #define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
186 #define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
188 #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
189 #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
191 #define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
193 #define portMPU_MAIR_ATTR0_POS ( 0UL )
194 #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
196 #define portMPU_MAIR_ATTR1_POS ( 8UL )
197 #define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
199 #define portMPU_MAIR_ATTR2_POS ( 16UL )
200 #define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
202 #define portMPU_MAIR_ATTR3_POS ( 24UL )
203 #define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
205 #define portMPU_MAIR_ATTR4_POS ( 0UL )
206 #define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
208 #define portMPU_MAIR_ATTR5_POS ( 8UL )
209 #define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
211 #define portMPU_MAIR_ATTR6_POS ( 16UL )
212 #define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
214 #define portMPU_MAIR_ATTR7_POS ( 24UL )
215 #define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
217 #define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
218 #define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
219 #define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
220 #define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
221 #define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
222 #define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
223 #define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
224 #define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
226 #define portMPU_RLAR_REGION_ENABLE ( 1UL )
228 #if ( portARMV8M_MINOR_VERSION >= 1 )
229 /* Enable Privileged eXecute Never MPU attribute for the selected memory region. */
231 #define portMPU_RLAR_PRIVILEGED_EXECUTE_NEVER ( 1UL << 4UL )
232 #endif /* portARMV8M_MINOR_VERSION >= 1 */
234 /* Enable privileged access to unmapped region. */
235 #define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
238 #define portMPU_ENABLE_BIT ( 1UL << 0UL )
240 /* Expected value of the portMPU_TYPE register. */
241 #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
243 /* Extract first address of the MPU region as encoded in the
244 * RBAR (Region Base Address Register) value. */
245 #define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
246 ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
248 /* Extract last address of the MPU region as encoded in the
249 * RLAR (Region Limit Address Register) value. */
250 #define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
251 ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
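/*
 * Worked example with illustrative values only: a region programmed with
 * RBAR = 0x20000000 plus attribute bits and RLAR = 0x20001FE0 plus attribute
 * bits covers [ 0x20000000, 0x20001FFF ]:
 *
 *     portEXTRACT_FIRST_ADDRESS_FROM_RBAR( 0x20000000UL ) == 0x20000000UL
 *     portEXTRACT_LAST_ADDRESS_FROM_RLAR( 0x20001FE0UL )  == 0x20001FFFUL
 */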
253 /* Does addr lie within the [start, end] address range? */
254 #define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
255 ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
257 /* Is the access request satisfied by the available permissions? */
258 #define portIS_AUTHORIZED( accessRequest, permissions ) \
259 ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
261 /* Max value that fits in a uint32_t type. */
262 #define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
264 /* Check if adding a and b will result in overflow. */
265 #define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
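/*
 * Illustrative sketch only - the variable names are hypothetical. The two
 * macros above can be combined when checking that a buffer of ulBufferLength
 * bytes starting at ulBufferStart stays inside a region without the end
 * address wrapping past portUINT32_MAX:
 *
 *     if( portADD_UINT32_WILL_OVERFLOW( ulBufferStart, ulBufferLength - 1UL ) == pdFALSE )
 *     {
 *         ulBufferEnd = ulBufferStart + ulBufferLength - 1UL;
 *         xWithinRange = portIS_ADDRESS_WITHIN_RANGE( ulBufferEnd, ulRegionStart, ulRegionEnd );
 *     }
 */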
266 /*-----------------------------------------------------------*/
269 * @brief The maximum 24-bit number.
271 * It is needed because the SysTick is a 24-bit counter.
273 #define portMAX_24_BIT_NUMBER ( 0xffffffUL )
276 * @brief A fiddle factor to estimate the number of SysTick counts that would
277 * have occurred while the SysTick counter is stopped during tickless idle calculations.
280 #define portMISSED_COUNTS_FACTOR ( 94UL )
281 /*-----------------------------------------------------------*/
284 * @brief Constants required to set up the initial stack.
286 #define portINITIAL_XPSR ( 0x01000000 )
288 #if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
291 * @brief Initial EXC_RETURN value.
294 * 1111 1111 1111 1111 1111 1111 1111 1101
296 * Bit[6] - 1 --> The exception was taken from the Secure state.
297 * Bit[5] - 1 --> Do not skip stacking of additional state context.
298 * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
299 * Bit[3] - 1 --> Return to the Thread mode.
300 * Bit[2] - 1 --> Restore registers from the process stack.
301 * Bit[1] - 0 --> Reserved, 0.
302 * Bit[0] - 1 --> The exception was taken to the Secure state.
304 #define portINITIAL_EXC_RETURN ( 0xfffffffd )
308 * @brief Initial EXC_RETURN value.
311 * 1111 1111 1111 1111 1111 1111 1011 1100
313 * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
314 * Bit[5] - 1 --> Do not skip stacking of additional state context.
315 * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
316 * Bit[3] - 1 --> Return to the Thread mode.
317 * Bit[2] - 1 --> Restore registers from the process stack.
318 * Bit[1] - 0 --> Reserved, 0.
319 * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
321 #define portINITIAL_EXC_RETURN ( 0xffffffbc )
322 #endif /* configRUN_FREERTOS_SECURE_ONLY */
325 * @brief CONTROL register privileged bit mask.
327 * Bit[0] in CONTROL register tells the privilege:
328 * Bit[0] = 0 ==> The task is privileged.
329 * Bit[0] = 1 ==> The task is not privileged.
331 #define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
334 * @brief Initial CONTROL register values.
336 #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
337 #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
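/*
 * For reference, not additional port code: in the CONTROL register bit[1]
 * (SPSEL) selects the process stack in Thread mode and bit[0] (nPRIV) selects
 * unprivileged execution, so 0x2 above means PSP + privileged and 0x3 means
 * PSP + unprivileged.
 */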
340 * @brief Let the user override the default SysTick clock rate. If defined by the
341 * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
342 * configuration register.
344 #ifndef configSYSTICK_CLOCK_HZ
345 #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
346 /* Ensure the SysTick is clocked at the same frequency as the core. */
347 #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
349 /* The SysTick is not clocked at the same frequency as the core, so do not set the CLKSOURCE bit. */
350 #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
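/*
 * Illustrative sketch only - the 32768UL value is an assumption for the
 * example. An application that clocks the SysTick from a slower reference
 * would add the following to its FreeRTOSConfig.h so that the calculations in
 * this file use the correct rate:
 *
 *     #define configSYSTICK_CLOCK_HZ    32768UL
 */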
354 * @brief Let the user override the pre-loading of the initial LR with the
355 * address of prvTaskExitError() in case it messes up unwinding of the stack in the debugger.
358 #ifdef configTASK_RETURN_ADDRESS
359 #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
361 #define portTASK_RETURN_ADDRESS prvTaskExitError
365 * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
366 * when a task is created. This helps in debugging at the cost of code size.
368 #define portPRELOAD_REGISTERS 1
371 * @brief A task is created without a secure context, and must call
372 * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes any secure calls.
375 #define portNO_SECURE_CONTEXT 0
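/*
 * Illustrative sketch only - the task function, stack size constant and the
 * secure function are hypothetical names. A non-secure task allocates its
 * secure context once before calling into the secure side:
 *
 *     void vExampleTask( void * pvParameters )
 *     {
 *         portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
 *
 *         for( ; ; )
 *         {
 *             vExampleSecureSideFunction();
 *         }
 *     }
 */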
376 /*-----------------------------------------------------------*/
379 * @brief Used to catch tasks that attempt to return from their implementing functions.
382 static void prvTaskExitError( void );
384 #if ( configENABLE_MPU == 1 )
387 * @brief Extract MPU region's access permissions from the Region Base Address
388 * Register (RBAR) value.
390 * @param ulRBARValue RBAR value for the MPU region.
392 * @return uint32_t Access permissions.
394 static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
395 #endif /* configENABLE_MPU */
397 #if ( configENABLE_MPU == 1 )
400 * @brief Setup the Memory Protection Unit (MPU).
402 static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
403 #endif /* configENABLE_MPU */
405 #if ( configENABLE_FPU == 1 )
408 * @brief Setup the Floating Point Unit (FPU).
410 static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
411 #endif /* configENABLE_FPU */
414 * @brief Setup the timer to generate the tick interrupts.
416 * The implementation in this file is weak to allow application writers to
417 * change the timer used to generate the tick interrupt.
419 void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
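/*
 * Because the definition in this file is weak, an application can supply its
 * own tick source. A minimal sketch, assuming a hypothetical vendor timer
 * driver (vVendorTimerStart and its IRQ wiring are assumptions, and the vendor
 * timer's interrupt handler must still call the kernel tick handler):
 *
 *     void vPortSetupTimerInterrupt( void )
 *     {
 *         vVendorTimerStart( configTICK_RATE_HZ );
 *     }
 */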
422 * @brief Checks whether the current execution context is interrupt.
424 * @return pdTRUE if the current execution context is interrupt, pdFALSE
427 BaseType_t xPortIsInsideInterrupt( void );
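/*
 * Illustrative sketch only - the queue handle and item are hypothetical. Code
 * shared between task and interrupt context can use xPortIsInsideInterrupt()
 * to pick the correct API variant:
 *
 *     if( xPortIsInsideInterrupt() == pdTRUE )
 *     {
 *         xQueueSendFromISR( xExampleQueue, &( xItem ), NULL );
 *     }
 *     else
 *     {
 *         xQueueSend( xExampleQueue, &( xItem ), 0 );
 *     }
 */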
430 * @brief Yield the processor.
432 void vPortYield( void ) PRIVILEGED_FUNCTION;
435 * @brief Enter critical section.
437 void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
440 * @brief Exit from critical section.
442 void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
445 * @brief SysTick handler.
447 void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
450 * @brief C part of SVC handler.
452 portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
454 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
457 * @brief Sets up the system call stack so that upon returning from
458 * SVC, the system call stack is used.
460 * @param pulTaskStack The current SP when the SVC was raised.
461 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
462 * @param ucSystemCallNumber The system call number of the system call.
464 void vSystemCallEnter( uint32_t * pulTaskStack,
466 uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
468 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
470 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
473 * @brief Raise SVC for exiting from a system call.
475 void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
477 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
479 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
482 * @brief Sets up the task stack so that upon returning from
483 * SVC, the task stack is used again.
485 * @param pulSystemCallStack The current SP when the SVC was raised.
486 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
488 void vSystemCallExit( uint32_t * pulSystemCallStack,
489 uint32_t ulLR ) PRIVILEGED_FUNCTION;
491 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
493 #if ( configENABLE_MPU == 1 )
496 * @brief Checks whether or not the calling task is privileged.
498 * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
500 BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
502 #endif /* configENABLE_MPU == 1 */
503 /*-----------------------------------------------------------*/
505 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
508 * @brief This variable is set to pdTRUE when the scheduler is started.
510 PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
512 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
515 * @brief Each task maintains its own interrupt status in the critical nesting variable.
518 PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
520 #if ( configENABLE_TRUSTZONE == 1 )
523 * @brief Saved as part of the task context to indicate which context the
524 * task is using on the secure side.
526 PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
527 #endif /* configENABLE_TRUSTZONE */
530 * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
531 * FreeRTOS API functions are not called from interrupts that have been assigned
532 * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
534 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
536 static uint8_t ucMaxSysCallPriority = 0;
537 static uint32_t ulMaxPRIGROUPValue = 0;
538 static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
540 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
542 #if ( configUSE_TICKLESS_IDLE == 1 )
545 * @brief The number of SysTick increments that make up one tick period.
547 PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
550 * @brief The maximum number of tick periods that can be suppressed is
551 * limited by the 24 bit resolution of the SysTick timer.
553 PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
556 * @brief Compensate for the CPU cycles that pass while the SysTick is
557 * stopped (low power functionality only).
559 PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
560 #endif /* configUSE_TICKLESS_IDLE */
561 /*-----------------------------------------------------------*/
563 #if ( configUSE_TICKLESS_IDLE == 1 )
565 __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
567 uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
568 TickType_t xModifiableIdleTime;
570 /* Make sure the SysTick reload value does not overflow the counter. */
571 if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
573 xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
576 /* Enter a critical section but don't use the taskENTER_CRITICAL()
577 * method as that will mask interrupts that should exit sleep mode. */
578 __asm volatile ( "cpsid i" ::: "memory" );
579 __asm volatile ( "dsb" );
580 __asm volatile ( "isb" );
582 /* If a context switch is pending or a task is waiting for the scheduler
583 * to be unsuspended then abandon the low power entry. */
584 if( eTaskConfirmSleepModeStatus() == eAbortSleep )
586 /* Re-enable interrupts - see comments above the cpsid instruction. */
588 __asm volatile ( "cpsie i" ::: "memory" );
592 /* Stop the SysTick momentarily. The time the SysTick is stopped for
593 * is accounted for as best it can be, but using the tickless mode will
594 * inevitably result in some tiny drift of the time maintained by the
595 * kernel with respect to calendar time. */
596 portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
598 /* Use the SysTick current-value register to determine the number of
599 * SysTick decrements remaining until the next tick interrupt. If the
600 * current-value register is zero, then there are actually
601 * ulTimerCountsForOneTick decrements remaining, not zero, because the
602 * SysTick requests the interrupt when decrementing from 1 to 0. */
603 ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
605 if( ulSysTickDecrementsLeft == 0 )
607 ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
610 /* Calculate the reload value required to wait xExpectedIdleTime
611 * tick periods. -1 is used because this code normally executes part
612 * way through the first tick period. But if the SysTick IRQ is now
613 * pending, then clear the IRQ, suppressing the first tick, and correct
614 * the reload value to reflect that the second tick period is already
615 * underway. The expected idle time is always at least two ticks. */
616 ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
618 if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
620 portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
621 ulReloadValue -= ulTimerCountsForOneTick;
624 if( ulReloadValue > ulStoppedTimerCompensation )
626 ulReloadValue -= ulStoppedTimerCompensation;
629 /* Set the new reload value. */
630 portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
632 /* Clear the SysTick count flag and set the count value back to zero. */
634 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
636 /* Restart SysTick. */
637 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
639 /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
640 * set its parameter to 0 to indicate that its implementation contains
641 * its own wait for interrupt or wait for event instruction, and so wfi
642 * should not be executed again. However, the original expected idle
643 * time variable must remain unmodified, so a copy is taken. */
644 xModifiableIdleTime = xExpectedIdleTime;
645 configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
647 if( xModifiableIdleTime > 0 )
649 __asm volatile ( "dsb" ::: "memory" );
650 __asm volatile ( "wfi" );
651 __asm volatile ( "isb" );
654 configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
656 /* Re-enable interrupts to allow the interrupt that brought the MCU
657 * out of sleep mode to execute immediately. See comments above
658 * the cpsid instruction above. */
659 __asm volatile ( "cpsie i" ::: "memory" );
660 __asm volatile ( "dsb" );
661 __asm volatile ( "isb" );
663 /* Disable interrupts again because the clock is about to be stopped
664 * and interrupts that execute while the clock is stopped will increase
665 * any slippage between the time maintained by the RTOS and calendar time. */
667 __asm volatile ( "cpsid i" ::: "memory" );
668 __asm volatile ( "dsb" );
669 __asm volatile ( "isb" );
671 /* Disable the SysTick clock without reading the
672 * portNVIC_SYSTICK_CTRL_REG register to ensure the
673 * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
674 * the time the SysTick is stopped for is accounted for as best it can
675 * be, but using the tickless mode will inevitably result in some tiny
676 * drift of the time maintained by the kernel with respect to calendar time. */
678 portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
680 /* Determine whether the SysTick has already counted to zero. */
681 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
683 uint32_t ulCalculatedLoadValue;
685 /* The tick interrupt ended the sleep (or is now pending), and
686 * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
687 * with whatever remains of the new tick period. */
688 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
690 /* Don't allow a tiny value, or values that have somehow
691 * underflowed because the post sleep hook did something
692 * that took too long or because the SysTick current-value register is zero. */
694 if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
696 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
699 portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
701 /* As the pending tick will be processed as soon as this
702 * function exits, the tick value maintained by the tick is stepped
703 * forward by one less than the time spent waiting. */
704 ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
708 /* Something other than the tick interrupt ended the sleep. */
710 /* Use the SysTick current-value register to determine the
711 * number of SysTick decrements remaining until the expected idle
712 * time would have ended. */
713 ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
714 #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
716 /* If the SysTick is not using the core clock, the current-
717 * value register might still be zero here. In that case, the
718 * SysTick didn't load from the reload register, and there are
719 * ulReloadValue decrements remaining in the expected idle time. */
721 if( ulSysTickDecrementsLeft == 0 )
723 ulSysTickDecrementsLeft = ulReloadValue;
726 #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
728 /* Work out how long the sleep lasted rounded to complete tick
729 * periods (not the ulReload value which accounted for part
731 ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
733 /* How many complete tick periods passed while the processor was in its low power state? */
735 ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
737 /* The reload value is set to whatever fraction of a single tick period remains. */
739 portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
742 /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
743 * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
744 * the SysTick is not using the core clock, temporarily configure it to
745 * use the core clock. This configuration forces the SysTick to load
746 * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
747 * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
748 * to receive the standard value immediately. */
749 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
750 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
751 #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
753 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
757 /* The temporary usage of the core clock has served its purpose,
758 * as described above. Resume usage of the other clock. */
759 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
761 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
763 /* The partial tick period already ended. Be sure the SysTick
764 * counts it only once. */
765 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
768 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
769 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
771 #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
773 /* Step the tick to account for any tick periods that elapsed. */
774 vTaskStepTick( ulCompleteTickPeriods );
776 /* Exit with interrupts enabled. */
777 __asm volatile ( "cpsie i" ::: "memory" );
781 #endif /* configUSE_TICKLESS_IDLE */
782 /*-----------------------------------------------------------*/
784 __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
786 /* Calculate the constants required to configure the tick interrupt. */
787 #if ( configUSE_TICKLESS_IDLE == 1 )
789 ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
790 xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
791 ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
793 #endif /* configUSE_TICKLESS_IDLE */
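/* Worked example with illustrative numbers only: with configSYSTICK_CLOCK_HZ
 * equal to configCPU_CLOCK_HZ at 25000000 and configTICK_RATE_HZ at 1000, the
 * values above become
 *
 *     ulTimerCountsForOneTick         = 25000000 / 1000   = 25000
 *     xMaximumPossibleSuppressedTicks = 0xFFFFFF / 25000   = 671
 *     ulStoppedTimerCompensation      = 94 / ( 25000000 / 25000000 ) = 94
 */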
795 /* Stop and reset SysTick.
797 * QEMU versions older than 7.0.0 contain a bug which causes an error if we
798 * enable SysTick without first selecting a valid clock source. We trigger
799 * the bug if we change clock sources from a clock with a zero clock period
800 * to one with a nonzero clock period and enable Systick at the same time.
801 * So we configure the CLKSOURCE bit here, prior to setting the ENABLE bit.
802 * This workaround avoids the bug in QEMU versions older than 7.0.0. */
803 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG;
804 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
806 /* Configure SysTick to interrupt at the requested rate. */
807 portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
808 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
810 /*-----------------------------------------------------------*/
812 static void prvTaskExitError( void )
814 volatile uint32_t ulDummy = 0UL;
816 /* A function that implements a task must not exit or attempt to return to
817 * its caller as there is nothing to return to. If a task wants to exit it
818 * should instead call vTaskDelete( NULL ). Artificially force an assert()
819 * to be triggered if configASSERT() is defined, then stop here so
820 * application writers can catch the error. */
821 configASSERT( ulCriticalNesting == ~0UL );
822 portDISABLE_INTERRUPTS();
824 while( ulDummy == 0 )
826 /* This file calls prvTaskExitError() after the scheduler has been
827 * started to remove a compiler warning about the function being
828 * defined but never called. ulDummy is used purely to quieten other
829 * warnings about code appearing after this function is called - making
830 * ulDummy volatile makes the compiler think the function could return
831 * and therefore not output an 'unreachable code' warning for code that
832 * appears after it. */
835 /*-----------------------------------------------------------*/
837 #if ( configENABLE_MPU == 1 )
839 static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
841 uint32_t ulAccessPermissions = 0;
843 if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
845 ulAccessPermissions = tskMPU_READ_PERMISSION;
848 if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
850 ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
853 return ulAccessPermissions;
856 #endif /* configENABLE_MPU */
857 /*-----------------------------------------------------------*/
859 #if ( configENABLE_MPU == 1 )
861 static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
863 #if defined( __ARMCC_VERSION )
865 /* Declaration when these variables are defined in code instead of being
866 * exported from linker scripts. */
867 extern uint32_t * __privileged_functions_start__;
868 extern uint32_t * __privileged_functions_end__;
869 extern uint32_t * __syscalls_flash_start__;
870 extern uint32_t * __syscalls_flash_end__;
871 extern uint32_t * __unprivileged_flash_start__;
872 extern uint32_t * __unprivileged_flash_end__;
873 extern uint32_t * __privileged_sram_start__;
874 extern uint32_t * __privileged_sram_end__;
875 #else /* if defined( __ARMCC_VERSION ) */
876 /* Declaration when these variables are exported from linker scripts. */
877 extern uint32_t __privileged_functions_start__[];
878 extern uint32_t __privileged_functions_end__[];
879 extern uint32_t __syscalls_flash_start__[];
880 extern uint32_t __syscalls_flash_end__[];
881 extern uint32_t __unprivileged_flash_start__[];
882 extern uint32_t __unprivileged_flash_end__[];
883 extern uint32_t __privileged_sram_start__[];
884 extern uint32_t __privileged_sram_end__[];
885 #endif /* defined( __ARMCC_VERSION ) */
887 /* The only permitted numbers of regions are 8 and 16. */
888 configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
890 /* Ensure that configTOTAL_MPU_REGIONS is configured correctly. */
891 configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
893 /* Check that the MPU is present. */
894 if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
896 /* MAIR0 - Index 0. */
897 portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
898 /* MAIR0 - Index 1. */
899 portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
901 /* Setup privileged flash as Read Only so that privileged tasks can
902 * read it but not modify it. */
903 portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
904 portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
905 ( portMPU_REGION_NON_SHAREABLE ) |
906 ( portMPU_REGION_PRIVILEGED_READ_ONLY );
907 portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
908 ( portMPU_RLAR_ATTR_INDEX0 ) |
909 ( portMPU_RLAR_REGION_ENABLE );
911 /* Setup unprivileged flash as Read Only by both privileged and
912 * unprivileged tasks. All tasks can read it but none can modify it. */
913 portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
914 portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
915 ( portMPU_REGION_NON_SHAREABLE ) |
916 ( portMPU_REGION_READ_ONLY );
917 portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
918 ( portMPU_RLAR_ATTR_INDEX0 ) |
919 ( portMPU_RLAR_REGION_ENABLE );
921 /* Setup unprivileged syscalls flash as Read Only by both privileged
922 * and unprivileged tasks. All tasks can read it but none can modify it. */
923 portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
924 portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
925 ( portMPU_REGION_NON_SHAREABLE ) |
926 ( portMPU_REGION_READ_ONLY );
927 portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
928 ( portMPU_RLAR_ATTR_INDEX0 ) |
929 ( portMPU_RLAR_REGION_ENABLE );
931 /* Setup RAM containing kernel data for privileged access only. */
932 portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
933 portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
934 ( portMPU_REGION_NON_SHAREABLE ) |
935 ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
936 ( portMPU_REGION_EXECUTE_NEVER );
937 portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
938 ( portMPU_RLAR_ATTR_INDEX0 ) |
939 ( portMPU_RLAR_REGION_ENABLE );
941 /* Enable mem fault. */
942 portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
944 /* Enable MPU with privileged background access i.e. unmapped
945 * regions have privileged access. */
946 portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
950 #endif /* configENABLE_MPU */
951 /*-----------------------------------------------------------*/
953 #if ( configENABLE_FPU == 1 )
955 static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
957 #if ( configENABLE_TRUSTZONE == 1 )
959 /* Enable non-secure access to the FPU. */
960 SecureInit_EnableNSFPUAccess();
962 #endif /* configENABLE_TRUSTZONE */
964 /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
965 * unprivileged code should be able to access FPU. CP11 should be
966 * programmed to the same value as CP10. */
967 *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
968 ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
971 /* ASPEN = 1 ==> Hardware should automatically preserve floating point
972 * context on exception entry and restore on exception return.
973 * LSPEN = 1 ==> Enable lazy context save of FP state. */
974 *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
977 #endif /* configENABLE_FPU */
978 /*-----------------------------------------------------------*/
980 void vPortYield( void ) /* PRIVILEGED_FUNCTION */
982 /* Set a PendSV to request a context switch. */
983 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
985 /* Barriers are normally not required but do ensure the code is
986 * completely within the specified behaviour for the architecture. */
987 __asm volatile ( "dsb" ::: "memory" );
988 __asm volatile ( "isb" );
990 /*-----------------------------------------------------------*/
992 void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
994 portDISABLE_INTERRUPTS();
997 /* Barriers are normally not required but do ensure the code is
998 * completely within the specified behaviour for the architecture. */
999 __asm volatile ( "dsb" ::: "memory" );
1000 __asm volatile ( "isb" );
1002 /*-----------------------------------------------------------*/
1004 void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
1006 configASSERT( ulCriticalNesting );
1007 ulCriticalNesting--;
1009 if( ulCriticalNesting == 0 )
1011 portENABLE_INTERRUPTS();
1014 /*-----------------------------------------------------------*/
1016 void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
1018 uint32_t ulPreviousMask;
1020 ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
1023 /* Increment the RTOS tick. */
1024 if( xTaskIncrementTick() != pdFALSE )
1026 traceISR_EXIT_TO_SCHEDULER();
1027 /* Pend a context switch. */
1028 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
1035 portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
1037 /*-----------------------------------------------------------*/
1039 void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
1041 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1042 #if defined( __ARMCC_VERSION )
1044 /* Declaration when these variables are defined in code instead of being
1045 * exported from linker scripts. */
1046 extern uint32_t * __syscalls_flash_start__;
1047 extern uint32_t * __syscalls_flash_end__;
1049 /* Declaration when these variables are exported from linker scripts. */
1050 extern uint32_t __syscalls_flash_start__[];
1051 extern uint32_t __syscalls_flash_end__[];
1052 #endif /* defined( __ARMCC_VERSION ) */
1053 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1057 #if ( configENABLE_TRUSTZONE == 1 )
1058 uint32_t ulR0, ulR1;
1059 extern TaskHandle_t pxCurrentTCB;
1060 #if ( configENABLE_MPU == 1 )
1061 uint32_t ulControl, ulIsTaskPrivileged;
1062 #endif /* configENABLE_MPU */
1063 #endif /* configENABLE_TRUSTZONE */
1064 uint8_t ucSVCNumber;
1066 /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
1067 * R12, LR, PC, xPSR. */
1068 ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
1069 ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
1071 switch( ucSVCNumber )
1073 #if ( configENABLE_TRUSTZONE == 1 )
1074 case portSVC_ALLOCATE_SECURE_CONTEXT:
1076 /* R0 contains the stack size passed as parameter to the
1077 * vPortAllocateSecureContext function. */
1078 ulR0 = pulCallerStackAddress[ 0 ];
1080 #if ( configENABLE_MPU == 1 )
1082 /* Read the CONTROL register value. */
1083 __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
1085 /* The task that raised the SVC is privileged if Bit[0]
1086 * in the CONTROL register is 0. */
1087 ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
1089 /* Allocate and load a context for the secure task. */
1090 xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
1092 #else /* if ( configENABLE_MPU == 1 ) */
1094 /* Allocate and load a context for the secure task. */
1095 xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
1097 #endif /* configENABLE_MPU */
1099 configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
1100 SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
1103 case portSVC_FREE_SECURE_CONTEXT:
1105 /* R0 contains the TCB being freed and R1 contains the secure
1106 * context handle to be freed. */
1107 ulR0 = pulCallerStackAddress[ 0 ];
1108 ulR1 = pulCallerStackAddress[ 1 ];
1110 /* Free the secure context. */
1111 SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
1113 #endif /* configENABLE_TRUSTZONE */
1115 case portSVC_START_SCHEDULER:
1116 #if ( configENABLE_TRUSTZONE == 1 )
1118 /* De-prioritize the non-secure exceptions so that the
1119 * non-secure pendSV runs at the lowest priority. */
1120 SecureInit_DePrioritizeNSExceptions();
1122 /* Initialize the secure context management system. */
1123 SecureContext_Init();
1125 #endif /* configENABLE_TRUSTZONE */
1127 #if ( configENABLE_FPU == 1 )
1129 /* Setup the Floating Point Unit (FPU). */
1132 #endif /* configENABLE_FPU */
1134 /* Setup the context of the first task so that the first task starts executing. */
1136 vRestoreContextOfFirstTask();
1139 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1140 case portSVC_RAISE_PRIVILEGE:
1142 /* Only raise the privilege if the SVC was raised from one of
1143 * the system calls. */
1144 if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
1145 ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
1150 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1152 #if ( configENABLE_MPU == 1 )
1156 #endif /* configENABLE_MPU == 1 */
1159 /* Incorrect SVC call. */
1160 configASSERT( pdFALSE );
1163 /*-----------------------------------------------------------*/
1165 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1167 void vSystemCallEnter( uint32_t * pulTaskStack,
1169 uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
1171 extern TaskHandle_t pxCurrentTCB;
1172 extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
1173 xMPU_SETTINGS * pxMpuSettings;
1174 uint32_t * pulSystemCallStack;
1175 uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1177 #if defined( __ARMCC_VERSION )
1178 /* Declaration when these variables are defined in code instead of being
1179 * exported from linker scripts. */
1180 extern uint32_t * __syscalls_flash_start__;
1181 extern uint32_t * __syscalls_flash_end__;
1183 /* Declaration when these variables are exported from linker scripts. */
1184 extern uint32_t __syscalls_flash_start__[];
1185 extern uint32_t __syscalls_flash_end__[];
1186 #endif /* #if defined( __ARMCC_VERSION ) */
1188 ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
1189 pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1192 * 1. SVC is raised from the system call section (i.e. application is
1193 * not raising SVC directly).
1194 * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
1195 * it is non-NULL only during the execution of a system call (i.e.
1196 * between system call enter and exit).
1197 * 3. System call is not for a kernel API disabled by the configuration
1198 * in FreeRTOSConfig.h.
1199 * 4. We do not need to check that ucSystemCallNumber is within range
1200 * because the assembly SVC handler checks that before calling this function. */
1203 if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
1204 ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
1205 ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
1206 ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
1208 pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
1210 #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1212 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1214 /* Extended frame i.e. FPU in use. */
1215 ulStackFrameSize = 26;
1217 " vpush {s0} \n" /* Trigger lazy stacking. */
1218 " vpop {s0} \n" /* Nullify the affect of the above instruction. */
1224 /* Standard frame i.e. FPU not in use. */
1225 ulStackFrameSize = 8;
1228 #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1230 ulStackFrameSize = 8;
1232 #endif /* configENABLE_FPU || configENABLE_MVE */
1234 /* Make space on the system call stack for the stack frame. */
1235 pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
1237 /* Copy the stack frame. */
1238 for( i = 0; i < ulStackFrameSize; i++ )
1240 pulSystemCallStack[ i ] = pulTaskStack[ i ];
1243 /* Store the value of the Link Register before the SVC was raised.
1244 * It contains the address of the caller of the System Call entry
1245 * point (i.e. the caller of the MPU_<API>). We need to restore it
1246 * when we exit from the system call. */
1247 pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
1248 /* Store the value of the PSPLIM register before the SVC was raised.
1249 * We need to restore it when we exit from the system call. */
1250 #if ( portUSE_PSPLIM_REGISTER == 1 )
1252 __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1256 /* Use the pulSystemCallStack in thread mode. */
1257 __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
1258 #if ( portUSE_PSPLIM_REGISTER == 1 )
1260 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
1264 /* Start executing the system call upon returning from this handler. */
1265 pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
1266 /* Raise a request to exit from the system call upon finishing the system call. */
1268 pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
1270 /* Remember the location where we should copy the stack frame when we exit from
1271 * the system call. */
1272 pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
1274 /* Record if the hardware used padding to force the stack pointer
1275 * to be double word aligned. */
1276 if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
1278 pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
1282 pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
1285 /* We ensure in pxPortInitialiseStack that the system call stack is
1286 * double word aligned and therefore, there is no need for padding.
1287 * Clear bit[9] of the stacked xPSR. */
1288 pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1290 /* Raise the privilege for the duration of the system call. */
1292 " mrs r0, control \n" /* Obtain current control value. */
1293 " movs r1, #1 \n" /* r1 = 1. */
1294 " bics r0, r1 \n" /* Clear nPRIV bit. */
1295 " msr control, r0 \n" /* Write back new control value. */
1296 ::: "r0", "r1", "memory"
1301 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1302 /*-----------------------------------------------------------*/
1304 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1306 void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
1308 __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
1311 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1312 /*-----------------------------------------------------------*/
1314 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1316 void vSystemCallExit( uint32_t * pulSystemCallStack,
1317 uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
1319 extern TaskHandle_t pxCurrentTCB;
1320 xMPU_SETTINGS * pxMpuSettings;
1321 uint32_t * pulTaskStack;
1322 uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1324 #if defined( __ARMCC_VERSION )
1325 /* Declaration when these variables are defined in code instead of being
1326 * exported from linker scripts. */
1327 extern uint32_t * __privileged_functions_start__;
1328 extern uint32_t * __privileged_functions_end__;
1330 /* Declaration when these variables are exported from linker scripts. */
1331 extern uint32_t __privileged_functions_start__[];
1332 extern uint32_t __privileged_functions_end__[];
1333 #endif /* #if defined( __ARMCC_VERSION ) */
1335 ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
1336 pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1339 * 1. SVC is raised from the privileged code (i.e. application is not
1340 * raising SVC directly). This SVC is only raised from
1341 * vRequestSystemCallExit which is in the privileged code section.
1342 * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
1343 * this means that we previously entered a system call and the
1344 * application is not attempting to exit without entering a system call. */
1347 if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
1348 ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
1349 ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
1351 pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
1353 #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1355 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1357 /* Extended frame i.e. FPU in use. */
1358 ulStackFrameSize = 26;
1360 " vpush {s0} \n" /* Trigger lazy stacking. */
1361 " vpop {s0} \n" /* Nullify the affect of the above instruction. */
1367 /* Standard frame i.e. FPU not in use. */
1368 ulStackFrameSize = 8;
1371 #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1373 ulStackFrameSize = 8;
1375 #endif /* configENABLE_FPU || configENABLE_MVE */
1377 /* Make space on the task stack for the stack frame. */
1378 pulTaskStack = pulTaskStack - ulStackFrameSize;
1380 /* Copy the stack frame. */
1381 for( i = 0; i < ulStackFrameSize; i++ )
1383 pulTaskStack[ i ] = pulSystemCallStack[ i ];
1386 /* Use the pulTaskStack in thread mode. */
1387 __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
1389 /* Return to the caller of the System Call entry point (i.e. the
1390 * caller of the MPU_<API>). */
1391 pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1392 /* Ensure that LR has a valid value.*/
1393 pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1395 /* Restore the PSPLIM register to what it was at the time of
1396 * system call entry. */
1397 #if ( portUSE_PSPLIM_REGISTER == 1 )
1399 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1403 /* If the hardware used padding to force the stack pointer
1404 * to be double word aligned, set the stacked xPSR bit[9],
1405 * otherwise clear it. */
1406 if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
1408 pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
1412 pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1415 /* This is not NULL only for the duration of the system call. */
1416 pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1418 /* Drop the privilege before returning to the thread mode. */
1420 " mrs r0, control \n" /* Obtain current control value. */
1421 " movs r1, #1 \n" /* r1 = 1. */
1422 " orrs r0, r1 \n" /* Set nPRIV bit. */
1423 " msr control, r0 \n" /* Write back new control value. */
1424 ::: "r0", "r1", "memory"
1429 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1430 /*-----------------------------------------------------------*/
1432 #if ( configENABLE_MPU == 1 )
1434 BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
1436 BaseType_t xTaskIsPrivileged = pdFALSE;
1437 const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1439 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1441 xTaskIsPrivileged = pdTRUE;
1444 return xTaskIsPrivileged;
1447 #endif /* configENABLE_MPU == 1 */
1448 /*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )

    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                         StackType_t * pxEndOfStack,
                                         TaskFunction_t pxCode,
                                         void * pvParameters,
                                         BaseType_t xRunPrivileged,
                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
    {
        uint32_t ulIndex = 0;

        xMPUSettings->ulContext[ ulIndex ] = 0x04040404;                           /* r4. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x05050505;                           /* r5. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x06060606;                           /* r6. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x07070707;                           /* r7. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x08080808;                           /* r8. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x09090909;                           /* r9. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x10101010;                           /* r10. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x11111111;                           /* r11. */
        ulIndex++;

        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters;            /* r0. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x01010101;                           /* r1. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x02020202;                           /* r2. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x03030303;                           /* r3. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x12121212;                           /* r12. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode;                  /* PC. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR;                     /* xPSR. */
        ulIndex++;

        #if ( configENABLE_TRUSTZONE == 1 )
        {
            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT;            /* xSecureContext. */
            ulIndex++;
        }
        #endif /* configENABLE_TRUSTZONE */
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 );    /* PSP with the hardware saved stack. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack;            /* PSPLIM. */
        ulIndex++;

        if( xRunPrivileged == pdTRUE )
        {
            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED;   /* CONTROL. */
            ulIndex++;
        }
        else
        {
            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
            ulIndex++;
        }

        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN;               /* LR (EXC_RETURN). */
        ulIndex++;

        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
        {
            /* Ensure that the system call stack is double word aligned. */
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
                                                                                     ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );

            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
                                                                                            ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
                                                                                          ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );

            /* This is not NULL only for the duration of a system call. */
            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
        }
        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */

        return &( xMPUSettings->ulContext[ ulIndex ] );
    }

#else /* configENABLE_MPU */

    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                         StackType_t * pxEndOfStack,
                                         TaskFunction_t pxCode,
                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
    {
        /* Simulate the stack frame as it would be created by a context switch
         * interrupt. */
        #if ( portPRELOAD_REGISTERS == 0 )
        {
            pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
            *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
            pxTopOfStack -= 5;                                       /* R12, R3, R2 and R1. */
            *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
            pxTopOfStack -= 9;                                       /* R11..R4, EXC_RETURN. */
            *pxTopOfStack = portINITIAL_EXC_RETURN;
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */

            #if ( configENABLE_TRUSTZONE == 1 )
            {
                pxTopOfStack--;
                *pxTopOfStack = portNO_SECURE_CONTEXT;               /* Slot used to hold this task's xSecureContext value. */
            }
            #endif /* configENABLE_TRUSTZONE */
        }
        #else /* portPRELOAD_REGISTERS */
        {
            pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
            *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x12121212UL;            /* R12. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x03030303UL;            /* R3. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x02020202UL;            /* R2. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x01010101UL;            /* R1. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x11111111UL;            /* R11. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x10101010UL;            /* R10. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x09090909UL;            /* R09. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x08080808UL;            /* R08. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x07070707UL;            /* R07. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x06060606UL;            /* R06. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x05050505UL;            /* R05. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x04040404UL;            /* R04. */
            pxTopOfStack--;
            *pxTopOfStack = portINITIAL_EXC_RETURN;                  /* EXC_RETURN. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */

            #if ( configENABLE_TRUSTZONE == 1 )
            {
                pxTopOfStack--;
                *pxTopOfStack = portNO_SECURE_CONTEXT;               /* Slot used to hold this task's xSecureContext value. */
            }
            #endif /* configENABLE_TRUSTZONE */
        }
        #endif /* portPRELOAD_REGISTERS */

        return pxTopOfStack;
    }

#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
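/* Example usage (illustrative only - not compiled as part of this port):
 * pxPortInitialiseStack() is invoked by the kernel when a task is created with
 * xTaskCreate() / xTaskCreateStatic(); applications never call it directly.
 * A minimal sketch is shown below, in which the task function and name are
 * placeholders. The frame seeded above is what the first context restore pops
 * for this task (r0 = pvParameters, PC = task entry point).
 *
 * @code{c}
 * // Placeholder task function.
 * static void prvExampleTask( void * pvParameters )
 * {
 *     ( void ) pvParameters;
 *
 *     for( ; ; )
 *     {
 *         // Application work goes here.
 *     }
 * }
 *
 * void vCreateExampleTask( void )
 * {
 *     ( void ) xTaskCreate( prvExampleTask, "Example", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
 * }
 * @endcode
 */
/*-----------------------------------------------------------*/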
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
    /* An application can install FreeRTOS interrupt handlers in one of the
     * following ways:
     * 1. Direct Routing - Install the functions SVC_Handler and PendSV_Handler
     *    for SVCall and PendSV interrupts respectively.
     * 2. Indirect Routing - Install separate handlers for SVCall and PendSV
     *    interrupts and route program control from those handlers to
     *    SVC_Handler and PendSV_Handler functions.
     *
     * Applications that use Indirect Routing must set
     * configCHECK_HANDLER_INSTALLATION to 0 in their FreeRTOSConfig.h. Direct
     * routing, which is validated here when configCHECK_HANDLER_INSTALLATION
     * is 1, should be preferred when possible. */
    #if ( configCHECK_HANDLER_INSTALLATION == 1 )
    {
        const portISR_t * const pxVectorTable = portSCB_VTOR_REG;

        /* Validate that the application has correctly installed the FreeRTOS
         * handlers for SVCall and PendSV interrupts. We do not check the
         * installation of the SysTick handler because the application may
         * choose to drive the RTOS tick using a timer other than the SysTick
         * timer by overriding the weak function vPortSetupTimerInterrupt().
         *
         * Assertion failures here indicate incorrect installation of the
         * FreeRTOS handlers. For help installing the FreeRTOS handlers, see
         * https://www.FreeRTOS.org/FAQHelp.html.
         *
         * Systems with a configurable address for the interrupt vector table
         * can also encounter assertion failures or even system faults here if
         * VTOR is not set correctly to point to the application's vector table. */
        configASSERT( pxVectorTable[ portVECTOR_INDEX_SVC ] == SVC_Handler );
        configASSERT( pxVectorTable[ portVECTOR_INDEX_PENDSV ] == PendSV_Handler );
    }
    #endif /* configCHECK_HANDLER_INSTALLATION */

    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
    {
        volatile uint32_t ulImplementedPrioBits = 0;
        volatile uint8_t ucMaxPriorityValue;

        /* Determine the maximum priority from which ISR safe FreeRTOS API
         * functions can be called. ISR safe functions are those that end in
         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
         * ensure interrupt entry is as fast and simple as possible.
         *
         * First, determine the number of priority bits available. Write to all
         * possible bits in the priority setting for SVCall. */
        portNVIC_SHPR2_REG = 0xFF000000;

        /* Read the value back to see how many bits stuck. */
        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );

        /* Use the same mask on the maximum system call priority. */
        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;

        /* Check that the maximum system call priority is nonzero after
         * accounting for the number of priority bits supported by the
         * hardware. A priority of 0 is invalid because setting the BASEPRI
         * register to 0 unmasks all interrupts, and interrupts with priority 0
         * cannot be masked using BASEPRI.
         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        configASSERT( ucMaxSysCallPriority );

        /* Check that the bits not implemented in hardware are zero in
         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( uint8_t ) ( ~( uint32_t ) ucMaxPriorityValue ) ) == 0U );

        /* Calculate the maximum acceptable priority group value for the number
         * of bits read back. */
        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
        {
            ulImplementedPrioBits++;
            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
        }

        if( ulImplementedPrioBits == 8 )
        {
            /* When the hardware implements 8 priority bits, there is no way for
             * the software to configure PRIGROUP to not have sub-priorities. As
             * a result, the least significant bit is always used for sub-priority
             * and there are 128 preemption priorities and 2 sub-priorities.
             *
             * This may cause some confusion in some cases - for example, if
             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
             * priority interrupts will be masked in Critical Sections as those
             * are at the same preemption priority. This may appear confusing as
             * 4 is higher (numerically lower) priority than
             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
             * to 4, this confusion does not happen and the behaviour remains the same.
             *
             * The following assert ensures that the sub-priority bit in the
             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
             * problem. */
            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
            ulMaxPRIGROUPValue = 0;
        }
        else
        {
            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
        }

        /* Shift the priority group value back to its position within the AIRCR
         * register. */
        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
    }
    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */

    /* Make PendSV and SysTick the lowest priority interrupts, and make SVCall
     * the highest priority. */
    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
    portNVIC_SHPR2_REG = 0;

    #if ( configENABLE_MPU == 1 )
    {
        /* Setup the Memory Protection Unit (MPU). */
        prvSetupMPU();
    }
    #endif /* configENABLE_MPU */

    /* Start the timer that generates the tick ISR. Interrupts are disabled
     * here already. */
    vPortSetupTimerInterrupt();

    /* Initialize the critical nesting count ready for the first task. */
    ulCriticalNesting = 0;

    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
    {
        xSchedulerRunning = pdTRUE;
    }
    #endif /* ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */

    /* Start the first task. */
    vStartFirstTask();

    /* Should never get here as the tasks will now be executing. Call the task
     * exit error function to prevent compiler warnings about a static function
     * not being called in the case that the application writer overrides this
     * functionality by defining configTASK_RETURN_ADDRESS. Call
     * vTaskSwitchContext() so link time optimization does not remove the
     * symbol. */
    vTaskSwitchContext();
    prvTaskExitError();

    /* Should not get here. */
    return 0;
}
/*-----------------------------------------------------------*/
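/* Example: Indirect Routing of the FreeRTOS handlers (illustrative only - not
 * compiled as part of this port). If the vector table cannot name the SVCall
 * and PendSV entries SVC_Handler and PendSV_Handler directly, the application
 * can route control to the FreeRTOS handlers and disable the check above. The
 * handler names below and the GCC-style naked attribute are assumptions for
 * this sketch; a tail branch is used so that the exception stack frame and the
 * EXC_RETURN value in LR reach the FreeRTOS handlers unmodified.
 *
 * @code{c}
 * // FreeRTOSConfig.h
 * #define configCHECK_HANDLER_INSTALLATION    0
 *
 * // Handlers referenced by the (hypothetical) vendor vector table.
 * void MyDevice_SVC_Handler( void ) __attribute__( ( naked ) );
 * void MyDevice_PendSV_Handler( void ) __attribute__( ( naked ) );
 *
 * void MyDevice_SVC_Handler( void )
 * {
 *     __asm volatile ( "b SVC_Handler" );
 * }
 *
 * void MyDevice_PendSV_Handler( void )
 * {
 *     __asm volatile ( "b PendSV_Handler" );
 * }
 * @endcode
 */
/*-----------------------------------------------------------*/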
void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
{
    /* Not implemented in ports where there is nothing to return to.
     * Artificially force an assert. */
    configASSERT( ulCriticalNesting == 1000UL );
}
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )

    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
                                    const struct xMEMORY_REGION * const xRegions,
                                    StackType_t * pxBottomOfStack,
                                    configSTACK_DEPTH_TYPE uxStackDepth )
    {
        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
        int32_t lIndex = 0;

        #if defined( __ARMCC_VERSION )
            /* Declaration when these variables are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __privileged_sram_start__;
            extern uint32_t * __privileged_sram_end__;
        #else
            /* Declaration when these variables are exported from linker scripts. */
            extern uint32_t __privileged_sram_start__[];
            extern uint32_t __privileged_sram_end__[];
        #endif /* defined( __ARMCC_VERSION ) */

        /* Setup MAIR0. */
        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );

        /* This function is called automatically when the task is created - in
         * which case the stack region parameters will be valid. At all other
         * times the stack parameters will not be valid and it is assumed that
         * the stack region has already been configured. */
        if( uxStackDepth > 0 )
        {
            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( uxStackDepth * ( configSTACK_DEPTH_TYPE ) sizeof( StackType_t ) ) - 1;

            /* If the stack is within the privileged SRAM, do not protect it
             * using a separate MPU region. This is needed because privileged
             * SRAM is already protected using an MPU region and ARMv8-M does
             * not allow overlapping MPU regions. */
            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
            {
                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
            }
            else
            {
                /* Define the region that allows access to the stack. */
                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;

                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
                                                             ( portMPU_REGION_NON_SHAREABLE ) |
                                                             ( portMPU_REGION_READ_WRITE ) |
                                                             ( portMPU_REGION_EXECUTE_NEVER );

                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
                                                             ( portMPU_RLAR_REGION_ENABLE );
            }
        }
        /* User supplied configurable regions. */
        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
        {
            /* If xRegions is NULL i.e. the task has not specified any MPU
             * region, the else part ensures that all the configurable MPU
             * regions are invalidated. */
            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
            {
                /* Translate the generic region definition contained in xRegions
                 * into the ARMv8 specific MPU settings that are then stored in
                 * xMPUSettings. */
                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;

                /* Start address. */
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
                                                                          ( portMPU_REGION_NON_SHAREABLE );

                /* RO/RW. */
                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
                {
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
                }
                else
                {
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
                }

                /* XN. */
                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
                {
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
                }

                /* End address. */
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
                                                                          ( portMPU_RLAR_REGION_ENABLE );

                /* PXN. */
                #if ( portARMV8M_MINOR_VERSION >= 1 )
                {
                    if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_PRIVILEGED_EXECUTE_NEVER ) != 0 )
                    {
                        xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= ( portMPU_RLAR_PRIVILEGED_EXECUTE_NEVER );
                    }
                }
                #endif /* portARMV8M_MINOR_VERSION >= 1 */

                /* Normal memory/ Device memory. */
                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
                {
                    /* Attr1 in MAIR0 is configured as device memory. */
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
                }
                else
                {
                    /* Attr0 in MAIR0 is configured as normal memory. */
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
                }
            }
            else
            {
                /* Invalidate the region. */
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
            }

            lIndex++;
        }
    }
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
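/* Example usage (illustrative only - not compiled as part of this port): the
 * per-task regions consumed above are supplied through the xRegions member of
 * TaskParameters_t when a task is created with xTaskCreateRestricted(). The
 * buffer names, sizes and priority below are placeholders; base addresses and
 * lengths should respect the 32-byte granularity implied by
 * portMPU_RBAR_ADDRESS_MASK / portMPU_RLAR_ADDRESS_MASK. Unused xRegions
 * entries are left zeroed so that the loop above invalidates them.
 *
 * @code{c}
 * static StackType_t xTaskStack[ 256 ] __attribute__( ( aligned( 32 ) ) );
 * static uint8_t ucSharedBuffer[ 128 ] __attribute__( ( aligned( 32 ) ) );
 *
 * // Placeholder unprivileged task function.
 * static void prvRestrictedTask( void * pvParameters )
 * {
 *     ( void ) pvParameters;
 *
 *     for( ; ; )
 *     {
 *         // Unprivileged application code.
 *     }
 * }
 *
 * static const TaskParameters_t xRestrictedTaskParameters =
 * {
 *     .pvTaskCode     = prvRestrictedTask,
 *     .pcName         = "Restricted",
 *     .usStackDepth   = 256,
 *     .pvParameters   = NULL,
 *     .uxPriority     = tskIDLE_PRIORITY + 1,
 *     .puxStackBuffer = xTaskStack,
 *     .xRegions       =
 *     {
 *         // The task may read, but not write or execute, the shared buffer.
 *         { ucSharedBuffer, sizeof( ucSharedBuffer ), tskMPU_REGION_READ_ONLY | tskMPU_REGION_EXECUTE_NEVER },
 *     }
 * };
 *
 * void vCreateRestrictedTask( void )
 * {
 *     ( void ) xTaskCreateRestricted( &xRestrictedTaskParameters, NULL );
 * }
 * @endcode
 */
/*-----------------------------------------------------------*/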
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
                                                uint32_t ulBufferLength,
                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
    {
        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
        BaseType_t xAccessGranted = pdFALSE;
        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */

        if( xSchedulerRunning == pdFALSE )
        {
            /* Grant access to all the kernel objects before the scheduler
             * is started. It is necessary because there is no task running
             * yet and therefore, we cannot use the permissions of any
             * task. */
            xAccessGranted = pdTRUE;
        }
        else if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
        {
            xAccessGranted = pdTRUE;
        }
        else
        {
            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
            {
                ulBufferStartAddress = ( uint32_t ) pvBuffer;
                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );

                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
                {
                    /* Is the MPU region enabled? */
                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
                    {
                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
                            portIS_AUTHORIZED( ulAccessRequested,
                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
                        {
                            xAccessGranted = pdTRUE;
                            break;
                        }
                    }
                }
            }
        }

        return xAccessGranted;
    }

#endif /* #if ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
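/* Example (illustrative only - not compiled as part of this port): the MPU
 * wrapper layer can use xPortIsAuthorizedToAccessBuffer() to validate a
 * caller-supplied buffer before the kernel writes to it on the caller's
 * behalf. The wrapper name and parameters below are placeholders; the
 * tskMPU_READ_PERMISSION / tskMPU_WRITE_PERMISSION request values are defined
 * in task.h for this check. Requires <string.h> for memcpy().
 *
 * @code{c}
 * BaseType_t xSafeCopyToUserBuffer( void * pvUserBuffer,
 *                                   const void * pvKernelData,
 *                                   uint32_t ulLength )
 * {
 *     BaseType_t xResult = pdFAIL;
 *
 *     // Only copy if every byte of the destination lies inside a region the
 *     // calling task is allowed to write.
 *     if( xPortIsAuthorizedToAccessBuffer( pvUserBuffer, ulLength, tskMPU_WRITE_PERMISSION ) == pdTRUE )
 *     {
 *         ( void ) memcpy( pvUserBuffer, pvKernelData, ulLength );
 *         xResult = pdPASS;
 *     }
 *
 *     return xResult;
 * }
 * @endcode
 */
/*-----------------------------------------------------------*/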
BaseType_t xPortIsInsideInterrupt( void )
{
    uint32_t ulCurrentInterrupt;

    /* Obtain the number of the currently executing interrupt. Interrupt Program
     * Status Register (IPSR) holds the exception number of the currently-executing
     * exception or zero for Thread mode. */
    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );

    /* The processor is inside an interrupt if the exception number is non-zero. */
    return ( ulCurrentInterrupt == 0 ) ? pdFALSE : pdTRUE;
}
/*-----------------------------------------------------------*/
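/* Example usage (illustrative only - not compiled as part of this port):
 * xPortIsInsideInterrupt() lets code that is shared between task and interrupt
 * context pick the correct API variant. The semaphore handle below is a
 * placeholder.
 *
 * @code{c}
 * void vSignalEvent( SemaphoreHandle_t xEventSemaphore )
 * {
 *     if( xPortIsInsideInterrupt() == pdTRUE )
 *     {
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *         ( void ) xSemaphoreGiveFromISR( xEventSemaphore, &xHigherPriorityTaskWoken );
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 *     else
 *     {
 *         ( void ) xSemaphoreGive( xEventSemaphore );
 *     }
 * }
 * @endcode
 */
/*-----------------------------------------------------------*/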
#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )

    void vPortValidateInterruptPriority( void )
    {
        uint32_t ulCurrentInterrupt;
        uint8_t ucCurrentPriority;

        /* Obtain the number of the currently executing interrupt. */
        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );

        /* Is the interrupt number a user defined interrupt? */
        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
        {
            /* Look up the interrupt's priority. */
            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];

            /* The following assertion will fail if a service routine (ISR) for
             * an interrupt that has been assigned a priority above
             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
             * function. ISR safe FreeRTOS API functions must *only* be called
             * from interrupts that have been assigned a priority at or below
             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
             *
             * Numerically low interrupt priority numbers represent logically high
             * interrupt priorities, therefore the priority of the interrupt must
             * be set to a value equal to or numerically *higher* than
             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
             *
             * Interrupts that use the FreeRTOS API must not be left at their
             * default priority of zero as that is the highest possible priority,
             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
             * and therefore also guaranteed to be invalid.
             *
             * FreeRTOS maintains separate thread and ISR API functions to ensure
             * interrupt entry is as fast and simple as possible.
             *
             * The following links provide detailed information:
             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
             * https://www.FreeRTOS.org/FAQHelp.html */
            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
        }

        /* Priority grouping: The interrupt controller (NVIC) allows the bits
         * that define each interrupt's priority to be split between bits that
         * define the interrupt's pre-emption priority bits and bits that define
         * the interrupt's sub-priority. For simplicity all bits must be defined
         * to be pre-emption priority bits. The following assertion will fail if
         * this is not the case (if some bits represent a sub-priority).
         *
         * If the application only uses CMSIS libraries for interrupt
         * configuration then the correct setting can be achieved on all Cortex-M
         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
         * scheduler. Note however that some vendor specific peripheral libraries
         * assume a non-zero priority group setting, in which cases using a value
         * of zero will result in unpredictable behaviour. */
        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
    }

#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
/*-----------------------------------------------------------*/
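/* Example (illustrative only - not compiled as part of this port): configuring
 * an interrupt that calls "FromISR" APIs so that the assertions above pass.
 * The IRQ number and priority value are placeholders - the chosen priority,
 * once shifted into the implemented priority bits, must be numerically greater
 * than or equal to configMAX_SYSCALL_INTERRUPT_PRIORITY.
 *
 * @code{c}
 * // Using CMSIS, before the scheduler is started:
 * NVIC_SetPriorityGrouping( 0 );               // All bits are pre-emption priority bits.
 * NVIC_SetPriority( ExampleDevice_IRQn, 5 );   // Placeholder IRQ and priority value.
 * NVIC_EnableIRQ( ExampleDevice_IRQn );
 * @endcode
 */
/*-----------------------------------------------------------*/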
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )

    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
    {
        uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
        xMPU_SETTINGS * xTaskMpuSettings;

        ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
        ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );

        xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );

        xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
    }

#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )

    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
    {
        uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
        xMPU_SETTINGS * xTaskMpuSettings;

        ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
        ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );

        xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );

        xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
    }

#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
/*-----------------------------------------------------------*/
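/* Example (illustrative only - not compiled as part of this port): the two
 * functions above are reached through the kernel's access control macros, not
 * called directly by application code. A privileged task typically creates a
 * kernel object and then grants an unprivileged task access to it. The
 * vGrantAccessToQueue() macro name used below is assumed to be the one the
 * kernel headers provide when configENABLE_ACCESS_CONTROL_LIST is 1 - check
 * the headers of the kernel version in use; the task and queue handles are
 * placeholders.
 *
 * @code{c}
 * // Running in a privileged task.
 * QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );
 *
 * // Allow the (already created) unprivileged task to use the queue.
 * vGrantAccessToQueue( xUnprivilegedTaskHandle, xQueue );
 * @endcode
 */
/*-----------------------------------------------------------*/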
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )

        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
        {
            uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
            BaseType_t xAccessGranted = pdFALSE;
            const xMPU_SETTINGS * xTaskMpuSettings;

            if( xSchedulerRunning == pdFALSE )
            {
                /* Grant access to all the kernel objects before the scheduler
                 * is started. It is necessary because there is no task running
                 * yet and therefore, we cannot use the permissions of any
                 * task. */
                xAccessGranted = pdTRUE;
            }
            else
            {
                xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */

                ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
                ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );

                if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
                {
                    xAccessGranted = pdTRUE;
                }
                else if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
                {
                    xAccessGranted = pdTRUE;
                }
            }

            return xAccessGranted;
        }

    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */

        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
        {
            ( void ) lInternalIndexOfKernelObject;

            /* If Access Control List feature is not used, all the tasks have
             * access to all the kernel objects. */
            return pdTRUE;
        }

    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */

#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
/*-----------------------------------------------------------*/