2 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
3 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 * SPDX-License-Identifier: MIT
7 * Permission is hereby granted, free of charge, to any person obtaining a copy of
8 * this software and associated documentation files (the "Software"), to deal in
9 * the Software without restriction, including without limitation the rights to
10 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11 * the Software, and to permit persons to whom the Software is furnished to do so,
12 * subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in all
15 * copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * https://www.FreeRTOS.org
25 * https://github.com/FreeRTOS
29 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
30 * all the API functions to use the MPU wrappers. That should only be done when
31 * task.h is included from an application file. */
32 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
34 /* Scheduler includes. */
38 /* MPU wrappers includes. */
39 #include "mpu_wrappers.h"
41 /* Portasm includes. */
44 #if ( configENABLE_TRUSTZONE == 1 )
45 /* Secure components includes. */
46 #include "secure_context.h"
47 #include "secure_init.h"
48 #endif /* configENABLE_TRUSTZONE */
50 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
53 * The FreeRTOS Cortex-M33 port can be configured to run on the Secure Side only,
54 * i.e. the processor boots as secure and never jumps to the non-secure side.
55 * The TrustZone support in the port must be disabled in order to run FreeRTOS
56 * on the secure side. The following are the valid configuration settings:
58 * 1. Run FreeRTOS on the Secure Side:
59 * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
61 * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
62 * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
64 * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
65 * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
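 *
 * For example, to build for option 1 above (FreeRTOS on the Secure Side only),
 * the application's FreeRTOSConfig.h would contain settings along these lines
 * (an illustrative sketch only - all other configuration options are omitted):
 *
 *   #define configRUN_FREERTOS_SECURE_ONLY    1
 *   #define configENABLE_TRUSTZONE            0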
67 #if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
68 #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
72 * Cortex-M23 does not have non-secure PSPLIM. We should use PSPLIM on Cortex-M23
73 * only when FreeRTOS runs on the secure side.
75 #if ( ( portHAS_ARMV8M_MAIN_EXTENSION == 0 ) && ( configRUN_FREERTOS_SECURE_ONLY == 0 ) )
76 #define portUSE_PSPLIM_REGISTER 0
78 #define portUSE_PSPLIM_REGISTER 1
80 /*-----------------------------------------------------------*/
83 * @brief Constants required to manipulate the NVIC.
85 #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
86 #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
87 #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
88 #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
89 #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
90 #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
91 #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
92 #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
93 #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
94 #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
95 #define portMIN_INTERRUPT_PRIORITY ( 255UL )
96 #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
97 #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
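/* Note: the PendSV and SysTick priorities occupy bytes 2 and 3 of the SHPR3
 * register (PRI_14 and PRI_15) respectively, which is why the lowest priority
 * value is shifted left by 16 and 24 above before being written to
 * portNVIC_SHPR3_REG. */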
98 /*-----------------------------------------------------------*/
101 * @brief Constants required to manipulate the SCB.
103 #define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
104 #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
105 /*-----------------------------------------------------------*/
108 * @brief Constants required to check the validity of an interrupt priority.
110 #define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
111 #define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
112 #define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
113 #define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
114 #define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
115 #define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
116 #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
117 #define portPRIGROUP_SHIFT ( 8UL )
118 /*-----------------------------------------------------------*/
121 * @brief Constants used during system call enter and exit.
123 #define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
124 #define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
125 /*-----------------------------------------------------------*/
128 * @brief Constants required to manipulate the FPU.
130 #define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
131 #define portCPACR_CP10_VALUE ( 3UL )
132 #define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
133 #define portCPACR_CP10_POS ( 20UL )
134 #define portCPACR_CP11_POS ( 22UL )
136 #define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
137 #define portFPCCR_ASPEN_POS ( 31UL )
138 #define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
139 #define portFPCCR_LSPEN_POS ( 30UL )
140 #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
141 /*-----------------------------------------------------------*/
144 * @brief Offsets in the stack to the parameters when inside the SVC handler.
146 #define portOFFSET_TO_LR ( 5 )
147 #define portOFFSET_TO_PC ( 6 )
148 #define portOFFSET_TO_PSR ( 7 )
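/* The offsets above index into the hardware saved exception stack frame, which
 * is laid out (lowest address first) as R0, R1, R2, R3, R12, LR, PC and xPSR.
 * For example, pulCallerStackAddress[ portOFFSET_TO_PC ] reads the stacked PC. */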
149 /*-----------------------------------------------------------*/
152 * @brief Constants required to manipulate the MPU.
154 #define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
155 #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
156 #define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
158 #define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
159 #define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
161 #define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
162 #define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
164 #define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
165 #define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
167 #define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
168 #define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
170 #define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
171 #define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
173 #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
174 #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
176 #define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
178 #define portMPU_MAIR_ATTR0_POS ( 0UL )
179 #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
181 #define portMPU_MAIR_ATTR1_POS ( 8UL )
182 #define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
184 #define portMPU_MAIR_ATTR2_POS ( 16UL )
185 #define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
187 #define portMPU_MAIR_ATTR3_POS ( 24UL )
188 #define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
190 #define portMPU_MAIR_ATTR4_POS ( 0UL )
191 #define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
193 #define portMPU_MAIR_ATTR5_POS ( 8UL )
194 #define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
196 #define portMPU_MAIR_ATTR6_POS ( 16UL )
197 #define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
199 #define portMPU_MAIR_ATTR7_POS ( 24UL )
200 #define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
202 #define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
203 #define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
204 #define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
205 #define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
206 #define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
207 #define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
208 #define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
209 #define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
211 #define portMPU_RLAR_REGION_ENABLE ( 1UL )
213 /* Enable privileged access to unmapped region. */
214 #define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
217 #define portMPU_ENABLE_BIT ( 1UL << 0UL )
219 /* Expected value of the portMPU_TYPE register. */
220 #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
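/* The DREGION field of the MPU_TYPE register (bits [15:8]) reports the number
 * of implemented MPU regions, hence configTOTAL_MPU_REGIONS is shifted left by
 * 8 bits to form the expected value above. */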
222 /* Extract first address of the MPU region as encoded in the
223 * RBAR (Region Base Address Register) value. */
224 #define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
225 ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
227 /* Extract last address of the MPU region as encoded in the
228 * RLAR (Region Limit Address Register) value. */
229 #define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
230 ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
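/* Worked example of the two extraction macros above: for a region spanning
 * 0x20000000 to 0x2000FFFF, the address bits programmed into RLAR are
 * 0x2000FFE0, and portEXTRACT_LAST_ADDRESS_FROM_RLAR( 0x2000FFE0 ) evaluates
 * to ( 0x2000FFE0 & 0xFFFFFFE0 ) | 0x0000001F = 0x2000FFFF, i.e. the address
 * of the last byte in the region. */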
232 /* Does addr lie within the [start, end] address range? */
233 #define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
234 ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
236 /* Is the access request satisfied by the available permissions? */
237 #define portIS_AUTHORIZED( accessRequest, permissions ) \
238 ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
240 /* Max value that fits in a uint32_t type. */
241 #define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
243 /* Check if adding a and b will result in overflow. */
244 #define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
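/* Illustrative use of the helpers above (hypothetical variable names): before
 * computing the last address of a buffer as ulBufferStart + ulBufferLength - 1,
 * a caller can test portADD_UINT32_WILL_OVERFLOW( ulBufferStart,
 * ulBufferLength - 1UL ) and reject the access if the macro evaluates to a
 * non-zero (true) value. */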
245 /*-----------------------------------------------------------*/
248 * @brief The maximum 24-bit number.
250 * It is needed because the SysTick is a 24-bit counter.
252 #define portMAX_24_BIT_NUMBER ( 0xffffffUL )
255 * @brief A fiddle factor to estimate the number of SysTick counts that would
256 * have occurred while the SysTick counter is stopped during tickless idle calculations.
259 #define portMISSED_COUNTS_FACTOR ( 94UL )
260 /*-----------------------------------------------------------*/
263 * @brief Constants required to set up the initial stack.
265 #define portINITIAL_XPSR ( 0x01000000 )
267 #if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
270 * @brief Initial EXC_RETURN value.
273 * 1111 1111 1111 1111 1111 1111 1111 1101
275 * Bit[6] - 1 --> The exception was taken from the Secure state.
276 * Bit[5] - 1 --> Do not skip stacking of additional state context.
277 * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
278 * Bit[3] - 1 --> Return to the Thread mode.
279 * Bit[2] - 1 --> Restore registers from the process stack.
280 * Bit[1] - 0 --> Reserved, 0.
281 * Bit[0] - 1 --> The exception was taken to the Secure state.
283 #define portINITIAL_EXC_RETURN ( 0xfffffffd )
287 * @brief Initial EXC_RETURN value.
290 * 1111 1111 1111 1111 1111 1111 1011 1100
292 * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
293 * Bit[5] - 1 --> Do not skip stacking of additional state context.
294 * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
295 * Bit[3] - 1 --> Return to the Thread mode.
296 * Bit[2] - 1 --> Restore registers from the process stack.
297 * Bit[1] - 0 --> Reserved, 0.
298 * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
300 #define portINITIAL_EXC_RETURN ( 0xffffffbc )
301 #endif /* configRUN_FREERTOS_SECURE_ONLY */
304 * @brief CONTROL register privileged bit mask.
306 * Bit[0] in the CONTROL register indicates the privilege level:
307 * Bit[0] = 0 ==> The task is privileged.
308 * Bit[0] = 1 ==> The task is not privileged.
310 #define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
313 * @brief Initial CONTROL register values.
315 #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
316 #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
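/* For reference, in the two initial CONTROL values above: Bit[1] (SPSEL) is 1
 * in both, so thread mode uses the process stack (PSP); Bit[0] (nPRIV) is 1 in
 * 0x3 (unprivileged) and 0 in 0x2 (privileged). */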
319 * @brief Let the user override the default SysTick clock rate. If defined by the
320 * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
321 * configuration register.
323 #ifndef configSYSTICK_CLOCK_HZ
324 #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
325 /* Ensure the SysTick is clocked at the same frequency as the core. */
326 #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
328 /* Select the option to clock SysTick not at the same frequency as the core. */
329 #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
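/* For example, if the SysTick is driven from a separate 32768 Hz reference
 * clock instead of the core clock, the application's FreeRTOSConfig.h could
 * contain the following (illustrative value only), in which case this branch
 * selects portNVIC_SYSTICK_CLK_BIT_CONFIG = 0, i.e. the external reference
 * clock:
 *
 *   #define configSYSTICK_CLOCK_HZ    32768UL
 */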
333 * @brief Let the user override the pre-loading of the initial LR with the
334 * address of prvTaskExitError() in case it messes up unwinding of the stack in the debugger.
337 #ifdef configTASK_RETURN_ADDRESS
338 #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
340 #define portTASK_RETURN_ADDRESS prvTaskExitError
344 * @brief If portPRELOAD_REGISTERS is 1, registers are given an initial value
345 * when a task is created. This helps debugging at the cost of code size.
347 #define portPRELOAD_REGISTERS 1
350 * @brief A task is created without a secure context, and must call
351 * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes any secure calls.
354 #define portNO_SECURE_CONTEXT 0
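/* A minimal usage sketch (hypothetical application task, not part of this
 * port): a task created without a secure context must allocate one before its
 * first call into the secure side, e.g.
 *
 *   void vSecureCallingTask( void * pvParameters )
 *   {
 *       portALLOCATE_SECURE_CONTEXT( 256 );  // Assumed 256-byte secure stack size.
 *
 *       for( ; ; )
 *       {
 *           vAssumedSecureGatewayFunction();  // Hypothetical NSC entry function.
 *       }
 *   }
 */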
355 /*-----------------------------------------------------------*/
358 * @brief Used to catch tasks that attempt to return from their implementing functions.
361 static void prvTaskExitError( void );
363 #if ( configENABLE_MPU == 1 )
366 * @brief Extract MPU region's access permissions from the Region Base Address
367 * Register (RBAR) value.
369 * @param ulRBARValue RBAR value for the MPU region.
371 * @return uint32_t Access permissions.
373 static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
374 #endif /* configENABLE_MPU */
376 #if ( configENABLE_MPU == 1 )
379 * @brief Setup the Memory Protection Unit (MPU).
381 static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
382 #endif /* configENABLE_MPU */
384 #if ( configENABLE_FPU == 1 )
387 * @brief Setup the Floating Point Unit (FPU).
389 static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
390 #endif /* configENABLE_FPU */
393 * @brief Setup the timer to generate the tick interrupts.
395 * The implementation in this file is weak to allow application writers to
396 * change the timer used to generate the tick interrupt.
398 void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
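/* A minimal override sketch (hypothetical application code): because the
 * definition below is weak, an application can supply its own strong
 * implementation to drive the tick from a different timer, for example:
 *
 *   void vPortSetupTimerInterrupt( void )
 *   {
 *       vAssumedAppTimerInit( configTICK_RATE_HZ );  // Hypothetical timer driver.
 *   }
 */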
401 * @brief Checks whether the current execution context is an interrupt.
403 * @return pdTRUE if the current execution context is an interrupt, pdFALSE otherwise.
406 BaseType_t xPortIsInsideInterrupt( void );
409 * @brief Yield the processor.
411 void vPortYield( void ) PRIVILEGED_FUNCTION;
414 * @brief Enter critical section.
416 void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
419 * @brief Exit from critical section.
421 void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
424 * @brief SysTick handler.
426 void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
429 * @brief C part of SVC handler.
431 portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
433 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
436 * @brief Sets up the system call stack so that upon returning from
437 * SVC, the system call stack is used.
439 * It is used for the system calls with up to 4 parameters.
441 * @param pulTaskStack The current SP when the SVC was raised.
442 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
444 void vSystemCallEnter( uint32_t * pulTaskStack,
445 uint32_t ulLR ) PRIVILEGED_FUNCTION;
447 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
449 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
452 * @brief Sets up the system call stack so that upon returning from
453 * SVC, the system call stack is used.
455 * It is used for the system calls with 5 parameters.
457 * @param pulTaskStack The current SP when the SVC was raised.
458 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
460 void vSystemCallEnter_1( uint32_t * pulTaskStack,
461 uint32_t ulLR ) PRIVILEGED_FUNCTION;
463 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
465 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
468 * @brief Sets up the task stack so that upon returning from
469 * SVC, the task stack is used again.
471 * @param pulSystemCallStack The current SP when the SVC was raised.
472 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
474 void vSystemCallExit( uint32_t * pulSystemCallStack,
475 uint32_t ulLR ) PRIVILEGED_FUNCTION;
477 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
479 #if ( configENABLE_MPU == 1 )
482 * @brief Checks whether or not the calling task is privileged.
484 * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
486 BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
488 #endif /* configENABLE_MPU == 1 */
489 /*-----------------------------------------------------------*/
492 * @brief Each task maintains its own interrupt status in the critical nesting variable.
495 PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
497 #if ( configENABLE_TRUSTZONE == 1 )
500 * @brief Saved as part of the task context to indicate which context the
501 * task is using on the secure side.
503 PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
504 #endif /* configENABLE_TRUSTZONE */
507 * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
508 * FreeRTOS API functions are not called from interrupts that have been assigned
509 * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
511 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
513 static uint8_t ucMaxSysCallPriority = 0;
514 static uint32_t ulMaxPRIGROUPValue = 0;
515 static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
517 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
519 #if ( configUSE_TICKLESS_IDLE == 1 )
522 * @brief The number of SysTick increments that make up one tick period.
524 PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
527 * @brief The maximum number of tick periods that can be suppressed is
528 * limited by the 24 bit resolution of the SysTick timer.
530 PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
533 * @brief Compensate for the CPU cycles that pass while the SysTick is
534 * stopped (low power functionality only).
536 PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
537 #endif /* configUSE_TICKLESS_IDLE */
538 /*-----------------------------------------------------------*/
540 #if ( configUSE_TICKLESS_IDLE == 1 )
541 __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
543 uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
544 TickType_t xModifiableIdleTime;
546 /* Make sure the SysTick reload value does not overflow the counter. */
547 if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
549 xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
552 /* Enter a critical section but don't use the taskENTER_CRITICAL()
553 * method as that will mask interrupts that should exit sleep mode. */
554 __asm volatile ( "cpsid i" ::: "memory" );
555 __asm volatile ( "dsb" );
556 __asm volatile ( "isb" );
558 /* If a context switch is pending or a task is waiting for the scheduler
559 * to be unsuspended then abandon the low power entry. */
560 if( eTaskConfirmSleepModeStatus() == eAbortSleep )
562 /* Re-enable interrupts - see the comments above the cpsid instruction earlier in this function. */
564 __asm volatile ( "cpsie i" ::: "memory" );
568 /* Stop the SysTick momentarily. The time the SysTick is stopped for
569 * is accounted for as best it can be, but using the tickless mode will
570 * inevitably result in some tiny drift of the time maintained by the
571 * kernel with respect to calendar time. */
572 portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
574 /* Use the SysTick current-value register to determine the number of
575 * SysTick decrements remaining until the next tick interrupt. If the
576 * current-value register is zero, then there are actually
577 * ulTimerCountsForOneTick decrements remaining, not zero, because the
578 * SysTick requests the interrupt when decrementing from 1 to 0. */
579 ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
581 if( ulSysTickDecrementsLeft == 0 )
583 ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
586 /* Calculate the reload value required to wait xExpectedIdleTime
587 * tick periods. -1 is used because this code normally executes part
588 * way through the first tick period. But if the SysTick IRQ is now
589 * pending, then clear the IRQ, suppressing the first tick, and correct
590 * the reload value to reflect that the second tick period is already
591 * underway. The expected idle time is always at least two ticks. */
592 ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
594 if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
596 portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
597 ulReloadValue -= ulTimerCountsForOneTick;
600 if( ulReloadValue > ulStoppedTimerCompensation )
602 ulReloadValue -= ulStoppedTimerCompensation;
605 /* Set the new reload value. */
606 portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
608 /* Clear the SysTick count flag and set the count value back to zero. */
610 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
612 /* Restart SysTick. */
613 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
615 /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
616 * set its parameter to 0 to indicate that its implementation contains
617 * its own wait for interrupt or wait for event instruction, and so wfi
618 * should not be executed again. However, the original expected idle
619 * time variable must remain unmodified, so a copy is taken. */
620 xModifiableIdleTime = xExpectedIdleTime;
621 configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
623 if( xModifiableIdleTime > 0 )
625 __asm volatile ( "dsb" ::: "memory" );
626 __asm volatile ( "wfi" );
627 __asm volatile ( "isb" );
630 configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
632 /* Re-enable interrupts to allow the interrupt that brought the MCU
633 * out of sleep mode to execute immediately. See comments above
634 * the cpsid instruction above. */
635 __asm volatile ( "cpsie i" ::: "memory" );
636 __asm volatile ( "dsb" );
637 __asm volatile ( "isb" );
639 /* Disable interrupts again because the clock is about to be stopped
640 * and interrupts that execute while the clock is stopped will increase
641 * any slippage between the time maintained by the RTOS and calendar time. */
643 __asm volatile ( "cpsid i" ::: "memory" );
644 __asm volatile ( "dsb" );
645 __asm volatile ( "isb" );
647 /* Disable the SysTick clock without reading the
648 * portNVIC_SYSTICK_CTRL_REG register to ensure the
649 * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
650 * the time the SysTick is stopped for is accounted for as best it can
651 * be, but using the tickless mode will inevitably result in some tiny
652 * drift of the time maintained by the kernel with respect to calendar time. */
654 portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
656 /* Determine whether the SysTick has already counted to zero. */
657 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
659 uint32_t ulCalculatedLoadValue;
661 /* The tick interrupt ended the sleep (or is now pending), and
662 * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
663 * with whatever remains of the new tick period. */
664 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
666 /* Don't allow a tiny value, or values that have somehow
667 * underflowed because the post sleep hook did something
668 * that took too long or because the SysTick current-value register is zero. */
670 if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
672 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
675 portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
677 /* As the pending tick will be processed as soon as this
678 * function exits, the tick count maintained by the kernel is stepped
679 * forward by one less than the time spent waiting. */
680 ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
684 /* Something other than the tick interrupt ended the sleep. */
686 /* Use the SysTick current-value register to determine the
687 * number of SysTick decrements remaining until the expected idle
688 * time would have ended. */
689 ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
690 #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
692 /* If the SysTick is not using the core clock, the current-
693 * value register might still be zero here. In that case, the
694 * SysTick didn't load from the reload register, and there are
695 * ulReloadValue decrements remaining in the expected idle time, not zero. */
697 if( ulSysTickDecrementsLeft == 0 )
699 ulSysTickDecrementsLeft = ulReloadValue;
702 #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
704 /* Work out how long the sleep lasted rounded to complete tick
705 * periods (not the ulReloadValue, which accounted for part ticks). */
707 ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
709 /* How many complete tick periods passed while the processor was in its low power state? */
711 ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
713 /* The reload value is set to whatever fraction of a single tick period remains. */
715 portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
718 /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
719 * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
720 * the SysTick is not using the core clock, temporarily configure it to
721 * use the core clock. This configuration forces the SysTick to load
722 * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
723 * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
724 * to receive the standard value immediately. */
725 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
726 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
727 #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
729 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
733 /* The temporary usage of the core clock has served its purpose,
734 * as described above. Resume usage of the other clock. */
735 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
737 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
739 /* The partial tick period already ended. Be sure the SysTick
740 * counts it only once. */
741 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
744 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
745 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
747 #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
749 /* Step the tick to account for any tick periods that elapsed. */
750 vTaskStepTick( ulCompleteTickPeriods );
752 /* Exit with interrupts enabled. */
753 __asm volatile ( "cpsie i" ::: "memory" );
756 #endif /* configUSE_TICKLESS_IDLE */
757 /*-----------------------------------------------------------*/
759 __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
761 /* Calculate the constants required to configure the tick interrupt. */
762 #if ( configUSE_TICKLESS_IDLE == 1 )
764 ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
765 xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
766 ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
768 #endif /* configUSE_TICKLESS_IDLE */
770 /* Stop and reset SysTick.
772 * QEMU versions older than 7.0.0 contain a bug which causes an error if we
773 * enable SysTick without first selecting a valid clock source. We trigger
774 * the bug if we change clock sources from a clock with a zero clock period
775 * to one with a nonzero clock period and enable SysTick at the same time.
776 * So we configure the CLKSOURCE bit here, prior to setting the ENABLE bit.
777 * This workaround avoids the bug in QEMU versions older than 7.0.0. */
778 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG;
779 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
781 /* Configure SysTick to interrupt at the requested rate. */
782 portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
783 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
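/* Worked example with illustrative numbers only: with configSYSTICK_CLOCK_HZ
 * at 25000000 and configTICK_RATE_HZ at 1000, the reload value programmed
 * above is ( 25000000 / 1000 ) - 1 = 24999, producing one SysTick interrupt
 * every millisecond. */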
785 /*-----------------------------------------------------------*/
787 static void prvTaskExitError( void )
789 volatile uint32_t ulDummy = 0UL;
791 /* A function that implements a task must not exit or attempt to return to
792 * its caller as there is nothing to return to. If a task wants to exit it
793 * should instead call vTaskDelete( NULL ). Artificially force an assert()
794 * to be triggered if configASSERT() is defined, then stop here so
795 * application writers can catch the error. */
796 configASSERT( ulCriticalNesting == ~0UL );
797 portDISABLE_INTERRUPTS();
799 while( ulDummy == 0 )
801 /* This file calls prvTaskExitError() after the scheduler has been
802 * started to remove a compiler warning about the function being
803 * defined but never called. ulDummy is used purely to quieten other
804 * warnings about code appearing after this function is called - making
805 * ulDummy volatile makes the compiler think the function could return
806 * and therefore not output an 'unreachable code' warning for code that
807 * appears after it. */
810 /*-----------------------------------------------------------*/
812 #if ( configENABLE_MPU == 1 )
813 static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
815 uint32_t ulAccessPermissions = 0;
817 if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
819 ulAccessPermissions = tskMPU_READ_PERMISSION;
822 if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
824 ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
827 return ulAccessPermissions;
829 #endif /* configENABLE_MPU */
830 /*-----------------------------------------------------------*/
832 #if ( configENABLE_MPU == 1 )
833 static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
835 #if defined( __ARMCC_VERSION )
837 /* Declaration when these variables are defined in code instead of being
838 * exported from linker scripts. */
839 extern uint32_t * __privileged_functions_start__;
840 extern uint32_t * __privileged_functions_end__;
841 extern uint32_t * __syscalls_flash_start__;
842 extern uint32_t * __syscalls_flash_end__;
843 extern uint32_t * __unprivileged_flash_start__;
844 extern uint32_t * __unprivileged_flash_end__;
845 extern uint32_t * __privileged_sram_start__;
846 extern uint32_t * __privileged_sram_end__;
847 #else /* if defined( __ARMCC_VERSION ) */
848 /* Declaration when these variables are exported from linker scripts. */
849 extern uint32_t __privileged_functions_start__[];
850 extern uint32_t __privileged_functions_end__[];
851 extern uint32_t __syscalls_flash_start__[];
852 extern uint32_t __syscalls_flash_end__[];
853 extern uint32_t __unprivileged_flash_start__[];
854 extern uint32_t __unprivileged_flash_end__[];
855 extern uint32_t __privileged_sram_start__[];
856 extern uint32_t __privileged_sram_end__[];
857 #endif /* defined( __ARMCC_VERSION ) */
859 /* The only permitted numbers of regions are 8 and 16. */
860 configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
862 /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
863 configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
865 /* Check that the MPU is present. */
866 if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
868 /* MAIR0 - Index 0. */
869 portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
870 /* MAIR0 - Index 1. */
871 portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
873 /* Setup privileged flash as Read Only so that privileged tasks can
874 * read it but not modify it. */
875 portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
876 portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
877 ( portMPU_REGION_NON_SHAREABLE ) |
878 ( portMPU_REGION_PRIVILEGED_READ_ONLY );
879 portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
880 ( portMPU_RLAR_ATTR_INDEX0 ) |
881 ( portMPU_RLAR_REGION_ENABLE );
883 /* Setup unprivileged flash as Read Only by both privileged and
884 * unprivileged tasks. All tasks can read it but no one can modify it. */
885 portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
886 portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
887 ( portMPU_REGION_NON_SHAREABLE ) |
888 ( portMPU_REGION_READ_ONLY );
889 portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
890 ( portMPU_RLAR_ATTR_INDEX0 ) |
891 ( portMPU_RLAR_REGION_ENABLE );
893 /* Setup unprivileged syscalls flash as Read Only by both privileged
894 * and unprivileged tasks. All tasks can read it but no one can modify it. */
895 portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
896 portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
897 ( portMPU_REGION_NON_SHAREABLE ) |
898 ( portMPU_REGION_READ_ONLY );
899 portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
900 ( portMPU_RLAR_ATTR_INDEX0 ) |
901 ( portMPU_RLAR_REGION_ENABLE );
903 /* Setup RAM containing kernel data for privileged access only. */
904 portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
905 portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
906 ( portMPU_REGION_NON_SHAREABLE ) |
907 ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
908 ( portMPU_REGION_EXECUTE_NEVER );
909 portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
910 ( portMPU_RLAR_ATTR_INDEX0 ) |
911 ( portMPU_RLAR_REGION_ENABLE );
913 /* Enable mem fault. */
914 portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
916 /* Enable MPU with privileged background access i.e. unmapped
917 * regions have privileged access. */
918 portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
921 #endif /* configENABLE_MPU */
922 /*-----------------------------------------------------------*/
924 #if ( configENABLE_FPU == 1 )
925 static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
927 #if ( configENABLE_TRUSTZONE == 1 )
929 /* Enable non-secure access to the FPU. */
930 SecureInit_EnableNSFPUAccess();
932 #endif /* configENABLE_TRUSTZONE */
934 /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
935 * unprivileged code should be able to access FPU. CP11 should be
936 * programmed to the same value as CP10. */
937 *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
938 ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
941 /* ASPEN = 1 ==> Hardware should automatically preserve floating point
942 * context on exception entry and restore on exception return.
943 * LSPEN = 1 ==> Enable lazy context save of FP state. */
944 *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
946 #endif /* configENABLE_FPU */
947 /*-----------------------------------------------------------*/
949 void vPortYield( void ) /* PRIVILEGED_FUNCTION */
951 /* Set a PendSV to request a context switch. */
952 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
954 /* Barriers are normally not required but do ensure the code is
955 * completely within the specified behaviour for the architecture. */
956 __asm volatile ( "dsb" ::: "memory" );
957 __asm volatile ( "isb" );
959 /*-----------------------------------------------------------*/
961 void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
963 portDISABLE_INTERRUPTS();
966 /* Barriers are normally not required but do ensure the code is
967 * completely within the specified behaviour for the architecture. */
968 __asm volatile ( "dsb" ::: "memory" );
969 __asm volatile ( "isb" );
971 /*-----------------------------------------------------------*/
973 void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
975 configASSERT( ulCriticalNesting );
978 if( ulCriticalNesting == 0 )
980 portENABLE_INTERRUPTS();
983 /*-----------------------------------------------------------*/
985 void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
987 uint32_t ulPreviousMask;
989 ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
992 /* Increment the RTOS tick. */
993 if( xTaskIncrementTick() != pdFALSE )
995 traceISR_EXIT_TO_SCHEDULER();
996 /* Pend a context switch. */
997 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
1004 portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
1006 /*-----------------------------------------------------------*/
1008 void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
1010 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1011 #if defined( __ARMCC_VERSION )
1013 /* Declaration when these variables are defined in code instead of being
1014 * exported from linker scripts. */
1015 extern uint32_t * __syscalls_flash_start__;
1016 extern uint32_t * __syscalls_flash_end__;
1018 /* Declaration when these variables are exported from linker scripts. */
1019 extern uint32_t __syscalls_flash_start__[];
1020 extern uint32_t __syscalls_flash_end__[];
1021 #endif /* defined( __ARMCC_VERSION ) */
1022 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1026 #if ( configENABLE_TRUSTZONE == 1 )
1027 uint32_t ulR0, ulR1;
1028 extern TaskHandle_t pxCurrentTCB;
1029 #if ( configENABLE_MPU == 1 )
1030 uint32_t ulControl, ulIsTaskPrivileged;
1031 #endif /* configENABLE_MPU */
1032 #endif /* configENABLE_TRUSTZONE */
1033 uint8_t ucSVCNumber;
1035 /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
1036 * R12, LR, PC, xPSR. */
1037 ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
1038 ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
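/* The stacked PC points to the instruction after the SVC, and the SVC number
 * is encoded in the low byte of the 16-bit SVC instruction, so it is read from
 * two bytes before the stacked PC. */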
1040 switch( ucSVCNumber )
1042 #if ( configENABLE_TRUSTZONE == 1 )
1043 case portSVC_ALLOCATE_SECURE_CONTEXT:
1045 /* R0 contains the stack size passed as a parameter to the
1046 * vPortAllocateSecureContext function. */
1047 ulR0 = pulCallerStackAddress[ 0 ];
1049 #if ( configENABLE_MPU == 1 )
1051 /* Read the CONTROL register value. */
1052 __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
1054 /* The task that raised the SVC is privileged if Bit[0]
1055 * in the CONTROL register is 0. */
1056 ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
1058 /* Allocate and load a context for the secure task. */
1059 xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
1061 #else /* if ( configENABLE_MPU == 1 ) */
1063 /* Allocate and load a context for the secure task. */
1064 xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
1066 #endif /* configENABLE_MPU */
1068 configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
1069 SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
1072 case portSVC_FREE_SECURE_CONTEXT:
1074 /* R0 contains the TCB being freed and R1 contains the secure
1075 * context handle to be freed. */
1076 ulR0 = pulCallerStackAddress[ 0 ];
1077 ulR1 = pulCallerStackAddress[ 1 ];
1079 /* Free the secure context. */
1080 SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
1082 #endif /* configENABLE_TRUSTZONE */
1084 case portSVC_START_SCHEDULER:
1085 #if ( configENABLE_TRUSTZONE == 1 )
1087 /* De-prioritize the non-secure exceptions so that the
1088 * non-secure pendSV runs at the lowest priority. */
1089 SecureInit_DePrioritizeNSExceptions();
1091 /* Initialize the secure context management system. */
1092 SecureContext_Init();
1094 #endif /* configENABLE_TRUSTZONE */
1096 #if ( configENABLE_FPU == 1 )
1098 /* Setup the Floating Point Unit (FPU). */
1101 #endif /* configENABLE_FPU */
1103 /* Setup the context of the first task so that the first task starts executing. */
1105 vRestoreContextOfFirstTask();
1108 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1109 case portSVC_RAISE_PRIVILEGE:
1111 /* Only raise the privilege if the SVC was raised from any of
1112 * the system calls. */
1113 if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
1114 ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
1119 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1122 /* Incorrect SVC call. */
1123 configASSERT( pdFALSE );
1126 /*-----------------------------------------------------------*/
1128 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1130 void vSystemCallEnter( uint32_t * pulTaskStack,
1131 uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
1133 extern TaskHandle_t pxCurrentTCB;
1134 xMPU_SETTINGS * pxMpuSettings;
1135 uint32_t * pulSystemCallStack;
1136 uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1138 #if defined( __ARMCC_VERSION )
1140 /* Declaration when these variables are defined in code instead of being
1141 * exported from linker scripts. */
1142 extern uint32_t * __syscalls_flash_start__;
1143 extern uint32_t * __syscalls_flash_end__;
1145 /* Declaration when these variables are exported from linker scripts. */
1146 extern uint32_t __syscalls_flash_start__[];
1147 extern uint32_t __syscalls_flash_end__[];
1148 #endif /* #if defined( __ARMCC_VERSION ) */
1150 ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
1152 /* If the request did not come from the system call section, do nothing. */
1153 if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
1154 ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
1156 pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1157 pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
1159 /* This is non-NULL only for the duration of a system call. */
1160 configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
1162 #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1164 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1166 /* Extended frame i.e. FPU in use. */
1167 ulStackFrameSize = 26;
1169 " vpush {s0} \n" /* Trigger lazy stacking. */
1170 " vpop {s0} \n" /* Nullify the affect of the above instruction. */
1176 /* Standard frame i.e. FPU not in use. */
1177 ulStackFrameSize = 8;
1180 #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1182 ulStackFrameSize = 8;
1184 #endif /* configENABLE_FPU || configENABLE_MVE */
1186 /* Make space on the system call stack for the stack frame. */
1187 pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
1189 /* Copy the stack frame. */
1190 for( i = 0; i < ulStackFrameSize; i++ )
1192 pulSystemCallStack[ i ] = pulTaskStack[ i ];
1195 /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
1196 * restore them when we exit from the system call. */
1197 pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
1198 #if ( portUSE_PSPLIM_REGISTER == 1 )
1200 __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1204 /* Use the pulSystemCallStack in thread mode. */
1205 __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
1206 #if ( portUSE_PSPLIM_REGISTER == 1 )
1208 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
1212 /* Remember the location where we should copy the stack frame when we exit from
1213 * the system call. */
1214 pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
1216 /* Record if the hardware used padding to force the stack pointer
1217 * to be double word aligned. */
1218 if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
1220 pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
1224 pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
1227 /* We ensure in pxPortInitialiseStack that the system call stack is
1228 * double word aligned and therefore, there is no need for padding.
1229 * Clear bit[9] of the stacked xPSR. */
1230 pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1232 /* Raise the privilege for the duration of the system call. */
1234 " mrs r0, control \n" /* Obtain current control value. */
1235 " movs r1, #1 \n" /* r1 = 1. */
1236 " bics r0, r1 \n" /* Clear nPRIV bit. */
1237 " msr control, r0 \n" /* Write back new control value. */
1238 ::: "r0", "r1", "memory"
1243 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1244 /*-----------------------------------------------------------*/
1246 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1248 void vSystemCallEnter_1( uint32_t * pulTaskStack,
1249 uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
1251 extern TaskHandle_t pxCurrentTCB;
1252 xMPU_SETTINGS * pxMpuSettings;
1253 uint32_t * pulSystemCallStack;
1254 uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1256 #if defined( __ARMCC_VERSION )
1258 /* Declaration when these variables are defined in code instead of being
1259 * exported from linker scripts. */
1260 extern uint32_t * __syscalls_flash_start__;
1261 extern uint32_t * __syscalls_flash_end__;
1263 /* Declaration when these variables are exported from linker scripts. */
1264 extern uint32_t __syscalls_flash_start__[];
1265 extern uint32_t __syscalls_flash_end__[];
1266 #endif /* #if defined( __ARMCC_VERSION ) */
1268 ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
1270 /* If the request did not come from the system call section, do nothing. */
1271 if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
1272 ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
1274 pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1275 pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
1277 /* This is non-NULL only for the duration of a system call. */
1278 configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
1280 #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1282 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1284 /* Extended frame i.e. FPU in use. */
1285 ulStackFrameSize = 26;
1287 " vpush {s0} \n" /* Trigger lazy stacking. */
1288 " vpop {s0} \n" /* Nullify the affect of the above instruction. */
1294 /* Standard frame i.e. FPU not in use. */
1295 ulStackFrameSize = 8;
1298 #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1300 ulStackFrameSize = 8;
1302 #endif /* configENABLE_FPU || configENABLE_MVE */
1304 /* Make space on the system call stack for the stack frame and
1305 * the parameter passed on the stack. We only need to copy one
1306 * parameter but we still reserve 2 spaces to keep the stack
1307 * double word aligned. */
1308 pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
1310 /* Copy the stack frame. */
1311 for( i = 0; i < ulStackFrameSize; i++ )
1313 pulSystemCallStack[ i ] = pulTaskStack[ i ];
1316 /* Copy the parameter which is passed on the stack. */
1317 if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
1319 pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
1321 /* Record if the hardware used padding to force the stack pointer
1322 * to be double word aligned. */
1323 pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
1327 pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
1329 /* Record if the hardware used padding to force the stack pointer
1330 * to be double word aligned. */
1331 pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
1334 /* Store the value of the LR and PSPLIM registers before the SVC was raised.
1335 * We need to restore them when we exit from the system call. */
1336 pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
1337 #if ( portUSE_PSPLIM_REGISTER == 1 )
1339 __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1343 /* Use the pulSystemCallStack in thread mode. */
1344 __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
1345 #if ( portUSE_PSPLIM_REGISTER == 1 )
1347 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
1351 /* Remember the location where we should copy the stack frame when we exit from
1352 * the system call. */
1353 pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
1355 /* We ensure in pxPortInitialiseStack that the system call stack is
1356 * double word aligned and therefore, there is no need for padding.
1357 * Clear bit[9] of the stacked xPSR. */
1358 pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1360 /* Raise the privilege for the duration of the system call. */
1362 " mrs r0, control \n" /* Obtain current control value. */
1363 " movs r1, #1 \n" /* r1 = 1. */
1364 " bics r0, r1 \n" /* Clear nPRIV bit. */
1365 " msr control, r0 \n" /* Write back new control value. */
1366 ::: "r0", "r1", "memory"
1371 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1372 /*-----------------------------------------------------------*/
1374 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1376 void vSystemCallExit( uint32_t * pulSystemCallStack,
1377 uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
1379 extern TaskHandle_t pxCurrentTCB;
1380 xMPU_SETTINGS * pxMpuSettings;
1381 uint32_t * pulTaskStack;
1382 uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1384 #if defined( __ARMCC_VERSION )
1386 /* Declaration when these variables are defined in code instead of being
1387 * exported from linker scripts. */
1388 extern uint32_t * __syscalls_flash_start__;
1389 extern uint32_t * __syscalls_flash_end__;
1391 /* Declaration when these variables are exported from linker scripts. */
1392 extern uint32_t __syscalls_flash_start__[];
1393 extern uint32_t __syscalls_flash_end__[];
1394 #endif /* #if defined( __ARMCC_VERSION ) */
1396 ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
1398 /* If the request did not come from the system call section, do nothing. */
1399 if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
1400 ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
1402 pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1403 pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
1405 #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1407 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1409 /* Extended frame i.e. FPU in use. */
1410 ulStackFrameSize = 26;
1412 " vpush {s0} \n" /* Trigger lazy stacking. */
1413 " vpop {s0} \n" /* Nullify the affect of the above instruction. */
1419 /* Standard frame i.e. FPU not in use. */
1420 ulStackFrameSize = 8;
1423 #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1425 ulStackFrameSize = 8;
1427 #endif /* configENABLE_FPU || configENABLE_MVE */
1429 /* Make space on the task stack for the stack frame. */
1430 pulTaskStack = pulTaskStack - ulStackFrameSize;
1432 /* Copy the stack frame. */
1433 for( i = 0; i < ulStackFrameSize; i++ )
1435 pulTaskStack[ i ] = pulSystemCallStack[ i ];
1438 /* Use the pulTaskStack in thread mode. */
1439 __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
1441 /* Restore the LR and PSPLIM to what they were at the time of
1442 * system call entry. */
1443 pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1444 #if ( portUSE_PSPLIM_REGISTER == 1 )
1446 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1450 /* If the hardware used padding to force the stack pointer
1451 * to be double word aligned, set the stacked xPSR bit[9],
1452 * otherwise clear it. */
1453 if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
1455 pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
1459 pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1462 /* This is non-NULL only for the duration of a system call. */
1463 pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1465 /* Drop the privilege before returning to the thread mode. */
1467 " mrs r0, control \n" /* Obtain current control value. */
1468 " movs r1, #1 \n" /* r1 = 1. */
1469 " orrs r0, r1 \n" /* Set nPRIV bit. */
1470 " msr control, r0 \n" /* Write back new control value. */
1471 ::: "r0", "r1", "memory"
1476 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1477 /*-----------------------------------------------------------*/
1479 #if ( configENABLE_MPU == 1 )
1481 BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
1483 BaseType_t xTaskIsPrivileged = pdFALSE;
1484 const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1486 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1488 xTaskIsPrivileged = pdTRUE;
1491 return xTaskIsPrivileged;
1494 #endif /* configENABLE_MPU == 1 */
1495 /*-----------------------------------------------------------*/
1497 #if ( configENABLE_MPU == 1 )
1499 StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
1500 StackType_t * pxEndOfStack,
1501 TaskFunction_t pxCode,
1502 void * pvParameters,
1503 BaseType_t xRunPrivileged,
1504 xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
1506 uint32_t ulIndex = 0;
1508 xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
1510 xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
1512 xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
1514 xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
1516 xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
1518 xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
1520 xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
1522 xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
1525 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
1527 xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
1529 xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
1531 xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
1533 xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
1535 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
1537 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
1539 xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
1542 #if ( configENABLE_TRUSTZONE == 1 )
1544 xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
1547 #endif /* configENABLE_TRUSTZONE */
1548 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
1550 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
1553 if( xRunPrivileged == pdTRUE )
1554 {
1555 xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
1556 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
1558 }
1559 else
1560 {
1561 xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
1562 xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
1564 }
1566 xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
1569 #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
1571 /* Ensure that the system call stack is double word aligned. */
1572 xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
1573 xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
1574 ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
1576 xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
1577 xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
1578 ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
1579 ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
1581 /* This is non-NULL only for the duration of a system call. */
1582 xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1584 #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
1586 return &( xMPUSettings->ulContext[ ulIndex ] );
1589 #else /* configENABLE_MPU */
1591 StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
1592 StackType_t * pxEndOfStack,
1593 TaskFunction_t pxCode,
1594 void * pvParameters ) /* PRIVILEGED_FUNCTION */
1596 /* Simulate the stack frame as it would be created by a context switch
1597 * interrupt. */
1598 #if ( portPRELOAD_REGISTERS == 0 )
1600 pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
1601 *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
1603 *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
1605 *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
1606 pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
1607 *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
1608 pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
1609 *pxTopOfStack = portINITIAL_EXC_RETURN;
1611 *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
1613 #if ( configENABLE_TRUSTZONE == 1 )
1616 *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
1618 #endif /* configENABLE_TRUSTZONE */
1620 #else /* portPRELOAD_REGISTERS */
1622 pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
1623 *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
1625 *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
1627 *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
1629 *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
1631 *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
1633 *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
1635 *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
1637 *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
1639 *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
1641 *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
1643 *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
1645 *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
1647 *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
1649 *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
1651 *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
1653 *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
1655 *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
1657 *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
1659 #if ( configENABLE_TRUSTZONE == 1 )
1662 *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
1664 #endif /* configENABLE_TRUSTZONE */
1666 #endif /* portPRELOAD_REGISTERS */
1668 return pxTopOfStack;
1671 #endif /* configENABLE_MPU */
1672 /*-----------------------------------------------------------*/
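/*
 * Summary of the initial context created above (non-MPU build,
 * portPRELOAD_REGISTERS == 1, configENABLE_TRUSTZONE == 0), from high to low
 * stack addresses: xPSR, PC, LR, R12, R3-R1, R0 (pvParameters), R11-R4,
 * EXC_RETURN, PSPLIM (pxEndOfStack). The returned pointer addresses the last
 * value pushed and is stored by the kernel as the task's top of stack until
 * the task runs for the first time. In the MPU build the same values are kept
 * in xMPUSettings->ulContext instead of on the task stack.
 */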
1674 BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
1676 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
1678 volatile uint32_t ulOriginalPriority;
1679 volatile uint32_t ulImplementedPrioBits = 0;
1680 volatile uint8_t ucMaxPriorityValue;
1682 /* Determine the maximum priority from which ISR safe FreeRTOS API
1683 * functions can be called. ISR safe functions are those that end in
1684 * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
1685 * ensure interrupt entry is as fast and simple as possible.
1687 * Save the interrupt priority value that is about to be clobbered. */
1688 ulOriginalPriority = portNVIC_SHPR2_REG;
1690 /* Determine the number of priority bits available. First write to all
1691 * possible bits. */
1692 portNVIC_SHPR2_REG = 0xFF000000;
1694 /* Read the value back to see how many bits stuck. */
1695 ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
1697 /* Use the same mask on the maximum system call priority. */
1698 ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
1700 /* Check that the maximum system call priority is nonzero after
1701 * accounting for the number of priority bits supported by the
1702 * hardware. A priority of 0 is invalid because setting the BASEPRI
1703 * register to 0 unmasks all interrupts, and interrupts with priority 0
1704 * cannot be masked using BASEPRI.
1705 * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1706 configASSERT( ucMaxSysCallPriority );
1708 /* Check that the bits not implemented in hardware are zero in
1709 * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
1710 configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( uint8_t ) ( ~( uint32_t ) ucMaxPriorityValue ) ) == 0U );
1712 /* Calculate the maximum acceptable priority group value for the number
1713 * of bits read back. */
1715 while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
1717 ulImplementedPrioBits++;
1718 ucMaxPriorityValue <<= ( uint8_t ) 0x01;
1721 if( ulImplementedPrioBits == 8 )
1723 /* When the hardware implements 8 priority bits, there is no way for
1724 * the software to configure PRIGROUP to not have sub-priorities. As
1725 * a result, the least significant bit is always used for sub-priority
1726 * and there are 128 preemption priorities and 2 sub-priorities.
1728 * This may cause confusion in some cases - for example, if
1729 * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
1730 * priority interrupts will be masked in Critical Sections as those
1731 * are at the same preemption priority. This may appear confusing as
1732 * 4 is higher (numerically lower) priority than
1733 * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
1734 * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
1735 * to 4, this confusion does not happen and the behaviour remains the same.
1737 * The following assert ensures that the sub-priority bit in the
1738 * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
1739 * problems. */
1740 configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
1741 ulMaxPRIGROUPValue = 0;
1742 }
1743 else
1744 {
1745 ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
1746 }
1748 /* Shift the priority group value back to its position within the AIRCR
1749 * register. */
1750 ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
1751 ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
1753 /* Restore the clobbered interrupt priority register to its original
1754 * value. */
1755 portNVIC_SHPR2_REG = ulOriginalPriority;
1757 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
1759 /* Make PendSV and SysTick the same priority as the kernel. */
1760 portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
1761 portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
1763 #if ( configENABLE_MPU == 1 )
1765 /* Setup the Memory Protection Unit (MPU). */
1766 prvSetupMPU();
1768 #endif /* configENABLE_MPU */
1770 /* Start the timer that generates the tick ISR. Interrupts are disabled
1771 * here already. */
1772 vPortSetupTimerInterrupt();
1774 /* Initialize the critical nesting count ready for the first task. */
1775 ulCriticalNesting = 0;
1777 /* Start the first task. */
1778 vStartFirstTask();
1780 /* Should never get here as the tasks will now be executing. Call the task
1781 * exit error function to prevent compiler warnings about a static function
1782 * not being called in the case that the application writer overrides this
1783 * functionality by defining configTASK_RETURN_ADDRESS. Call
1784 * vTaskSwitchContext() so link time optimization does not remove the
1785 * symbol. */
1786 vTaskSwitchContext();
1787 prvTaskExitError();
1789 /* Should not get here. */
1790 return 0;
1792 /*-----------------------------------------------------------*/
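/*
 * Illustrative application-side sketch (not part of this port): the scheduler
 * is normally started from main() via vTaskStartScheduler(), which in turn
 * calls xPortStartScheduler() above. vExampleTask is a hypothetical task
 * function.
 *
 *     int main( void )
 *     {
 *         xTaskCreate( vExampleTask, "EX", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
 *
 *         vTaskStartScheduler();  // Does not return unless there was insufficient
 *                                 // heap to create the idle task.
 *         for( ; ; )
 *         {
 *         }
 *     }
 */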
1794 void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
1796 /* Not implemented in ports where there is nothing to return to.
1797 * Artificially force an assert. */
1798 configASSERT( ulCriticalNesting == 1000UL );
1800 /*-----------------------------------------------------------*/
1802 #if ( configENABLE_MPU == 1 )
1803 void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
1804 const struct xMEMORY_REGION * const xRegions,
1805 StackType_t * pxBottomOfStack,
1806 uint32_t ulStackDepth )
1808 uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
1809 int32_t lIndex = 0;
1811 #if defined( __ARMCC_VERSION )
1813 /* Declaration when these variables are defined in code instead of being
1814 * exported from linker scripts. */
1815 extern uint32_t * __privileged_sram_start__;
1816 extern uint32_t * __privileged_sram_end__;
1817 #else
1818 /* Declaration when these variables are exported from linker scripts. */
1819 extern uint32_t __privileged_sram_start__[];
1820 extern uint32_t __privileged_sram_end__[];
1821 #endif /* defined( __ARMCC_VERSION ) */
1824 xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
1825 xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
1827 /* This function is called automatically when the task is created - in
1828 * which case the stack region parameters will be valid. At all other
1829 * times the stack parameters will not be valid and it is assumed that
1830 * the stack region has already been configured. */
1831 if( ulStackDepth > 0 )
1833 ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
1834 ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
1836 /* If the stack is within the privileged SRAM, do not protect it
1837 * using a separate MPU region. This is needed because privileged
1838 * SRAM is already protected using an MPU region and ARMv8-M does
1839 * not allow overlapping MPU regions. */
1840 if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
1841 ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
1843 xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
1844 xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
1845 }
1846 else
1847 {
1848 /* Define the region that allows access to the stack. */
1849 ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
1850 ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
1852 xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
1853 ( portMPU_REGION_NON_SHAREABLE ) |
1854 ( portMPU_REGION_READ_WRITE ) |
1855 ( portMPU_REGION_EXECUTE_NEVER );
1857 xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
1858 ( portMPU_RLAR_ATTR_INDEX0 ) |
1859 ( portMPU_RLAR_REGION_ENABLE );
1863 /* User supplied configurable regions. */
1864 for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
1866 /* If xRegions is NULL i.e. the task has not specified any MPU
1867 * region, the else part ensures that all the configurable MPU
1868 * regions are invalidated. */
1869 if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
1871 /* Translate the generic region definition contained in xRegions
1872 * into the ARMv8 specific MPU settings that are then stored in
1873 * xMPUSettings. */
1874 ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
1875 ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
1876 ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
1878 /* Start address. */
1879 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
1880 ( portMPU_REGION_NON_SHAREABLE );
1883 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
1885 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
1886 }
1887 else
1888 {
1889 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
1890 }
1893 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
1895 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
1899 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
1900 ( portMPU_RLAR_REGION_ENABLE );
1902 /* Normal memory / device memory. */
1903 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
1904 {
1905 /* Attr1 in MAIR0 is configured as device memory. */
1906 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
1907 }
1908 else
1909 {
1910 /* Attr0 in MAIR0 is configured as normal memory. */
1911 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
1912 }
1913 }
1914 else
1915 {
1916 /* Invalidate the region. */
1917 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
1918 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
1919 }
1921 lIndex++;
1924 #endif /* configENABLE_MPU */
1925 /*-----------------------------------------------------------*/
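/*
 * Illustrative application-side sketch (not part of this port): the xRegions
 * parameter above originates from the MemoryRegion_t array supplied to
 * xTaskCreateRestricted(). The buffer names and sizes below are hypothetical;
 * base addresses and lengths are assumed to respect the 32-byte ARMv8-M MPU
 * granularity.
 *
 *     static StackType_t xTaskStack[ 128 ] __attribute__( ( aligned( 32 ) ) );
 *     static uint8_t ucSharedBuffer[ 512 ] __attribute__( ( aligned( 32 ) ) );
 *
 *     static const TaskParameters_t xExampleTaskParameters =
 *     {
 *         .pvTaskCode     = vExampleTask,
 *         .pcName         = "EX",
 *         .usStackDepth   = 128,
 *         .pvParameters   = NULL,
 *         .uxPriority     = tskIDLE_PRIORITY + 1,  // OR with portPRIVILEGE_BIT for a privileged task.
 *         .puxStackBuffer = xTaskStack,
 *         .xRegions       =
 *         {
 *             // Unprivileged read/write, never executable - remaining regions left invalid.
 *             { ucSharedBuffer, sizeof( ucSharedBuffer ), tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER }
 *         }
 *     };
 *
 *     xTaskCreateRestricted( &xExampleTaskParameters, NULL );
 */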
1927 #if ( configENABLE_MPU == 1 )
1928 BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
1929 uint32_t ulBufferLength,
1930 uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
1933 uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
1934 BaseType_t xAccessGranted = pdFALSE;
1935 const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1937 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1939 xAccessGranted = pdTRUE;
1940 }
1941 else
1942 {
1943 if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
1945 ulBufferStartAddress = ( uint32_t ) pvBuffer;
1946 ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
1948 for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
1950 /* Is the MPU region enabled? */
1951 if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
1953 if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
1954 portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
1955 portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
1956 portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
1957 portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
1958 portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
1959 portIS_AUTHORIZED( ulAccessRequested,
1960 prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
1962 xAccessGranted = pdTRUE;
1963 break;
1970 return xAccessGranted;
1972 #endif /* configENABLE_MPU */
1973 /*-----------------------------------------------------------*/
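/*
 * Illustrative sketch (not part of this port): kernel-side code that is about
 * to write into a task supplied buffer can use the check above before touching
 * the memory. xExampleCopyToTaskBuffer is hypothetical; tskMPU_WRITE_PERMISSION
 * is assumed to be the write-access constant from task.h, and memcpy() comes
 * from <string.h>.
 *
 *     BaseType_t xExampleCopyToTaskBuffer( void * pvTaskBuffer,
 *                                          const void * pvSource,
 *                                          uint32_t ulLength )
 *     {
 *         BaseType_t xReturn = pdFAIL;
 *
 *         if( xPortIsAuthorizedToAccessBuffer( pvTaskBuffer, ulLength, tskMPU_WRITE_PERMISSION ) == pdTRUE )
 *         {
 *             memcpy( pvTaskBuffer, pvSource, ulLength );
 *             xReturn = pdPASS;
 *         }
 *
 *         return xReturn;
 *     }
 */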
1975 BaseType_t xPortIsInsideInterrupt( void )
1977 uint32_t ulCurrentInterrupt;
1978 BaseType_t xReturn;
1980 /* Obtain the number of the currently executing interrupt. Interrupt Program
1981 * Status Register (IPSR) holds the exception number of the currently-executing
1982 * exception or zero for Thread mode. */
1983 __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
1985 if( ulCurrentInterrupt == 0 )
1986 {
1987 xReturn = pdFALSE;
1988 }
1989 else
1990 {
1991 xReturn = pdTRUE;
1992 }
1994 return xReturn;
1996 /*-----------------------------------------------------------*/
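/*
 * Illustrative usage sketch (not part of this port): code shared between task
 * and interrupt context can use xPortIsInsideInterrupt() to pick the correct
 * API variant. vExampleSend is a hypothetical application helper.
 *
 *     void vExampleSend( QueueHandle_t xQueue,
 *                        const uint32_t * pulValue )
 *     {
 *         if( xPortIsInsideInterrupt() == pdTRUE )
 *         {
 *             BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *             xQueueSendFromISR( xQueue, pulValue, &xHigherPriorityTaskWoken );
 *             portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *         }
 *         else
 *         {
 *             xQueueSend( xQueue, pulValue, portMAX_DELAY );
 *         }
 *     }
 */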
1998 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
2000 void vPortValidateInterruptPriority( void )
2002 uint32_t ulCurrentInterrupt;
2003 uint8_t ucCurrentPriority;
2005 /* Obtain the number of the currently executing interrupt. */
2006 __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
2008 /* Is the interrupt number a user defined interrupt? */
2009 if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
2011 /* Look up the interrupt's priority. */
2012 ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
2014 /* The following assertion will fail if a service routine (ISR) for
2015 * an interrupt that has been assigned a priority above
2016 * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
2017 * function. ISR safe FreeRTOS API functions must *only* be called
2018 * from interrupts that have been assigned a priority at or below
2019 * configMAX_SYSCALL_INTERRUPT_PRIORITY.
2021 * Numerically low interrupt priority numbers represent logically high
2022 * interrupt priorities, therefore the priority of the interrupt must
2023 * be set to a value equal to or numerically *higher* than
2024 * configMAX_SYSCALL_INTERRUPT_PRIORITY.
2026 * Interrupts that use the FreeRTOS API must not be left at their
2027 * default priority of zero as that is the highest possible priority,
2028 * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
2029 * and therefore also guaranteed to be invalid.
2031 * FreeRTOS maintains separate thread and ISR API functions to ensure
2032 * interrupt entry is as fast and simple as possible.
2034 * The following links provide detailed information:
2035 * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
2036 * https://www.FreeRTOS.org/FAQHelp.html */
2037 configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
2040 /* Priority grouping: The interrupt controller (NVIC) allows the bits
2041 * that define each interrupt's priority to be split between bits that
2042 * define the interrupt's pre-emption priority and bits that define
2043 * the interrupt's sub-priority. For simplicity all bits must be defined
2044 * to be pre-emption priority bits. The following assertion will fail if
2045 * this is not the case (if some bits represent a sub-priority).
2047 * If the application only uses CMSIS libraries for interrupt
2048 * configuration then the correct setting can be achieved on all Cortex-M
2049 * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
2050 * scheduler. Note however that some vendor specific peripheral libraries
2051 * assume a non-zero priority group setting, in which case using a value
2052 * of zero will result in unpredictable behaviour. */
2053 configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
2056 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
2057 /*-----------------------------------------------------------*/
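/*
 * Illustrative CMSIS-based sketch (not part of this port): an interrupt that
 * calls "FromISR" APIs must be configured so the assertions above pass.
 * EXAMPLE_IRQn and the priority value 5 are hypothetical - the value must be
 * numerically greater than or equal to the priority encoded by
 * configMAX_SYSCALL_INTERRUPT_PRIORITY on the target device.
 *
 *     NVIC_SetPriorityGrouping( 0 );       // All priority bits are pre-emption priority bits.
 *     NVIC_SetPriority( EXAMPLE_IRQn, 5 ); // Logically at or below configMAX_SYSCALL_INTERRUPT_PRIORITY.
 *     NVIC_EnableIRQ( EXAMPLE_IRQn );
 */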
2059 #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
2061 void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
2062 int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2064 uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
2065 xMPU_SETTINGS * xTaskMpuSettings;
2067 ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2068 ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
2070 xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
2072 xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
2075 #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
2076 /*-----------------------------------------------------------*/
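/*
 * Worked example of the bitmap arithmetic above, assuming the typical
 * portACL_ENTRY_SIZE_BITS of 32: for lInternalIndexOfKernelObject = 37,
 * ulAccessControlListEntryIndex = 37 / 32 = 1 and
 * ulAccessControlListEntryBit = 37 % 32 = 5, so access to that kernel object
 * is recorded by setting bit 5 of ulAccessControlList[ 1 ].
 */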
2078 #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
2080 void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
2081 int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2083 uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
2084 xMPU_SETTINGS * xTaskMpuSettings;
2086 ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2087 ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
2089 xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
2091 xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
2094 #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
2095 /*-----------------------------------------------------------*/
2097 #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
2099 #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
2101 BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2103 uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
2104 BaseType_t xAccessGranted = pdFALSE;
2105 const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
2107 ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2108 ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
2110 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
2112 xAccessGranted = pdTRUE;
2113 }
2114 else
2115 {
2116 if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
2118 xAccessGranted = pdTRUE;
2122 return xAccessGranted;
2125 #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
2127 BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2129 ( void ) lInternalIndexOfKernelObject;
2131 /* If the Access Control List feature is not used, all tasks have
2132 * access to all the kernel objects. */
2133 return pdTRUE;
2136 #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
2138 #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
2139 /*-----------------------------------------------------------*/
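/*
 * Configuration sketch (illustrative, values are application choices): the
 * access control list path above is only compiled when the MPU, the MPU
 * wrappers v2 and the ACL feature are all enabled in FreeRTOSConfig.h, for
 * example:
 *
 *     #define configENABLE_MPU                           1
 *     #define configUSE_MPU_WRAPPERS_V1                  0
 *     #define configENABLE_ACCESS_CONTROL_LIST           1
 *     #define configSYSTEM_CALL_STACK_SIZE               128
 *     #define configPROTECTED_KERNEL_OBJECT_POOL_SIZE    150
 */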