Mirror of https://github.com/RIOT-OS/RIOT.git

cortex-m0: Update to CMSIS HAL 4.0

Signed-off-by: Joakim Gebart <joakim.gebart@eistec.se>
commit 8c00ee0d7b
parent 068dfdff4b
Author: Joakim Gebart
Date:   2015-01-29 09:29:13 +01:00
4 changed files with 1323 additions and 953 deletions

File diff suppressed because it is too large.

core_cm0plus.h

@@ -1,35 +1,51 @@
/**************************************************************************//**
* @file core_cm0plus.h
* @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File
* @version V3.01
* @date 22. March 2012
* @version V4.00
* @date 22. August 2014
*
* @note
* Copyright (C) 2009-2012 ARM Limited. All rights reserved.
*
* @par
* ARM Limited (ARM) is supplying this software for use with Cortex-M
* processor based microcontrollers. This file can be freely distributed
* within development tools that are supporting such ARM based processors.
*
* THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
* OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
* ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
* CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*
******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of ARM nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#endif
#ifndef __CORE_CM0PLUS_H_GENERIC
#define __CORE_CM0PLUS_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
#ifndef __CORE_CM0PLUS_H_GENERIC
#define __CORE_CM0PLUS_H_GENERIC
/** \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
CMSIS violates the following MISRA-C:2004 rules:
@@ -52,8 +68,8 @@
*/
/* CMSIS CM0P definitions */
#define __CM0PLUS_CMSIS_VERSION_MAIN (0x03) /*!< [31:16] CMSIS HAL main version */
#define __CM0PLUS_CMSIS_VERSION_SUB (0x01) /*!< [15:0] CMSIS HAL sub version */
#define __CM0PLUS_CMSIS_VERSION_MAIN (0x04) /*!< [31:16] CMSIS HAL main version */
#define __CM0PLUS_CMSIS_VERSION_SUB (0x00) /*!< [15:0] CMSIS HAL sub version */
#define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16) | \
__CM0PLUS_CMSIS_VERSION_SUB) /*!< CMSIS HAL version number */
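
The hunk above bumps the packed version word: the main version lives in bits [31:16] and the sub version in bits [15:0], so CMSIS 4.00 is encoded as 0x00040000. A minimal sketch (not part of the diff) of gating code on that packed value:

#if (__CM0PLUS_CMSIS_VERSION >= ((0x04 << 16) | 0x00))
/* code that may rely on CMSIS 4.x behaviour */
#endif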
@@ -65,14 +81,18 @@
#define __INLINE __inline /*!< inline keyword for ARM Compiler */
#define __STATIC_INLINE static __inline
#elif defined ( __GNUC__ )
#define __ASM __asm /*!< asm keyword for GNU Compiler */
#define __INLINE inline /*!< inline keyword for GNU Compiler */
#define __STATIC_INLINE static inline
#elif defined ( __ICCARM__ )
#define __ASM __asm /*!< asm keyword for IAR Compiler */
#define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */
#define __STATIC_INLINE static inline
#elif defined ( __GNUC__ )
#define __ASM __asm /*!< asm keyword for GNU Compiler */
#define __INLINE inline /*!< inline keyword for GNU Compiler */
#elif defined ( __TMS470__ )
#define __ASM __asm /*!< asm keyword for TI CCS Compiler */
#define __STATIC_INLINE static inline
#elif defined ( __TASKING__ )
@@ -80,9 +100,16 @@
#define __INLINE inline /*!< inline keyword for TASKING Compiler */
#define __STATIC_INLINE static inline
#elif defined ( __CSMC__ )
#define __packed
#define __ASM _asm /*!< asm keyword for COSMIC Compiler */
#define __INLINE inline /*use -pc99 on compile line !< inline keyword for COSMIC Compiler */
#define __STATIC_INLINE static inline
#endif
/** __FPU_USED indicates whether an FPU is used or not. This core does not support an FPU at all
/** __FPU_USED indicates whether an FPU is used or not.
This core does not support an FPU at all
*/
#define __FPU_USED 0
@@ -91,13 +118,18 @@
#warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __GNUC__ )
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __ICCARM__ )
#if defined __ARMVFP__
#warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __GNUC__ )
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#elif defined ( __TMS470__ )
#if defined __TI__VFP_SUPPORT____
#warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
@@ -105,10 +137,11 @@
#if defined __FPU_VFP__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#endif
#ifdef __cplusplus
}
#elif defined ( __CSMC__ ) /* Cosmic */
#if ( __CSMC__ & 0x400) // FPU present for parser
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#endif
#include <stdint.h> /* standard types definitions */
@@ -116,7 +149,7 @@
#include <core_cmFunc.h> /* Core Function Access */
#ifdef __cplusplus
extern "C" {
}
#endif
#endif /* __CORE_CM0PLUS_H_GENERIC */
@@ -126,6 +159,10 @@ extern "C" {
#ifndef __CORE_CM0PLUS_H_DEPENDANT
#define __CORE_CM0PLUS_H_DEPENDANT
#ifdef __cplusplus
extern "C" {
#endif
/* check device defines and use defaults */
#if defined __CHECK_DEVICE_DEFINES
#ifndef __CM0PLUS_REV
@@ -368,8 +405,8 @@ typedef struct
#if (__VTOR_PRESENT == 1)
/* SCB Interrupt Control State Register Definitions */
#define SCB_VTOR_TBLOFF_Pos 7 /*!< SCB VTOR: TBLOFF Position */
#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */
#define SCB_VTOR_TBLOFF_Pos 8 /*!< SCB VTOR: TBLOFF Position */
#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */
#endif
/* SCB Application Interrupt and Reset Control Register Definitions */
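
The VTOR hunk above narrows TBLOFF to bits [31:8]: with Pos = 8 and a 24-bit mask, the register now holds a byte address with 256-byte granularity instead of the old bit-7 encoding. A hedged sketch of relocating the vector table on a device with __VTOR_PRESENT == 1, using hypothetical symbols and an assumed 48-entry table (the real alignment requirement grows with the table size):

static uint32_t ram_vectors[48] __attribute__((aligned(256))); /* hypothetical */

void relocate_vector_table(void)
{
    extern const uint32_t flash_vectors[48];    /* assumed linker-provided table */
    for (unsigned i = 0; i < 48; i++) {
        ram_vectors[i] = flash_vectors[i];      /* copy vectors to SRAM */
    }
    SCB->VTOR = ((uint32_t)ram_vectors) & SCB_VTOR_TBLOFF_Msk;
    __DSB();                                    /* complete the write before use */
}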
@@ -457,7 +494,7 @@ typedef struct
#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
#define SysTick_CALIB_TENMS_Pos 0 /*!< SysTick CALIB: TENMS Position */
#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL << SysTick_VAL_CURRENT_Pos) /*!< SysTick CALIB: TENMS Mask */
#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL << SysTick_CALIB_TENMS_Pos) /*!< SysTick CALIB: TENMS Mask */
/*@} end of group CMSIS_SysTick */
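
The CALIB hunk is a typo fix: the TENMS mask was shifted by SysTick_VAL_CURRENT_Pos instead of SysTick_CALIB_TENMS_Pos. Both positions are 0, so the mask value 0x00FFFFFF is unchanged and the fix is purely cosmetic. For illustration, a small sketch of reading the field:

static inline uint32_t systick_10ms_reload(void)
{
    /* TENMS holds the reload value for a 10 ms period at the calibration
     * clock; it reads as 0 when the device does not provide the value. */
    return SysTick->CALIB & SysTick_CALIB_TENMS_Msk;
}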
@@ -708,9 +745,9 @@ __STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)
{
if(IRQn < 0) {
return((uint32_t)((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for Cortex-M0+ system interrupts */
return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for Cortex-M0 system interrupts */
else {
return((uint32_t)((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for device specific interrupts */
return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for device specific interrupts */
}
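
The NVIC_GetPriority fix masks the selected byte with 0xFF before the final shift. SHP and IP pack four 8-bit priority fields per 32-bit word, so without the mask the bits of higher-numbered fields leak into the result. A worked example with made-up register contents:

/* Assume __NVIC_PRIO_BITS = 2 and SHP[1] = 0xC0400000, i.e. the PendSV
 * byte (shift 16) is 0x40 and the SysTick byte (shift 24) is 0xC0.
 * Reading PendSV's priority:
 *   old: (0xC0400000 >> 16) >> 6          == 0xC040 >> 6 == 0x301  (wrong)
 *   new: ((0xC0400000 >> 16) & 0xFF) >> 6 == 0x40 >> 6   == 0x1    (correct)
 */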
@@ -758,9 +795,9 @@ __STATIC_INLINE void NVIC_SystemReset(void)
*/
__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
{
if (ticks > SysTick_LOAD_RELOAD_Msk) return (1); /* Reload value impossible */
if ((ticks - 1) > SysTick_LOAD_RELOAD_Msk) return (1); /* Reload value impossible */
SysTick->LOAD = (ticks & SysTick_LOAD_RELOAD_Msk) - 1; /* set reload register */
SysTick->LOAD = ticks - 1; /* set reload register */
NVIC_SetPriority (SysTick_IRQn, (1<<__NVIC_PRIO_BITS) - 1); /* set Priority for Systick Interrupt */
SysTick->VAL = 0; /* Load the SysTick Counter Value */
SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
@@ -776,10 +813,10 @@ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
#endif /* __CORE_CM0PLUS_H_DEPENDANT */
#endif /* __CMSIS_GENERIC */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CM0PLUS_H_DEPENDANT */
#endif /* __CMSIS_GENERIC */
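
The SysTick_Config hunk above fixes an off-by-one pair: a request of exactly 0x1000000 ticks is representable (LOAD = 0xFFFFFF) but the old guard rejected it, and the old assignment (ticks & Msk) - 1 would have underflowed to 0xFFFFFFFF for that input. A worked sketch, assuming a 48 MHz core clock:

/* Hedged sketch, assumed 48 MHz core clock. */
if (SysTick_Config(48000U) != 0U) {     /* 1 ms tick; LOAD becomes 47999 */
    /* reload value out of range: handle the error */
}
SysTick_Config(0x1000000U);             /* boundary case: old code rejected
                                           this; new code sets LOAD = 0xFFFFFF */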

core_cmFunc.h

@@ -1,34 +1,46 @@
/**************************************************************************//**
* @file core_cmFunc.h
* @brief CMSIS Cortex-M Core Function Access Header File
* @version V2.10
* @date 26. July 2011
* @version V4.00
* @date 28. August 2014
*
* @note
* Copyright (C) 2009-2011 ARM Limited. All rights reserved.
*
* @par
* ARM Limited (ARM) is supplying this software for use with Cortex-M
* processor based microcontrollers. This file can be freely distributed
* within development tools that are supporting such ARM based processors.
*
* THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
* OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
* ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
* CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*
******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of ARM nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#ifndef __CORE_CMFUNC_H
#define __CORE_CMFUNC_H
#ifdef __cplusplus
extern "C" {
#endif
/* ########################### Core Function Access ########################### */
/** \ingroup CMSIS_Core_FunctionInterface
/** \ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
@{
*/
@@ -49,9 +61,9 @@ extern "C" {
\return Control Register value
*/
static __INLINE uint32_t __get_CONTROL(void)
__STATIC_INLINE uint32_t __get_CONTROL(void)
{
register uint32_t __regControl asm("control");
register uint32_t __regControl __ASM("control");
return(__regControl);
}
@@ -62,22 +74,22 @@ static __INLINE uint32_t __get_CONTROL(void)
\param [in] control Control Register value to set
*/
static __INLINE void __set_CONTROL(uint32_t control)
__STATIC_INLINE void __set_CONTROL(uint32_t control)
{
register uint32_t __regControl asm("control");
register uint32_t __regControl __ASM("control");
__regControl = control;
}
/** \brief Get ISPR Register
/** \brief Get IPSR Register
This function returns the content of the ISPR Register.
This function returns the content of the IPSR Register.
\return ISPR Register value
\return IPSR Register value
*/
static __INLINE uint32_t __get_IPSR(void)
__STATIC_INLINE uint32_t __get_IPSR(void)
{
register uint32_t __regIPSR asm("ipsr");
register uint32_t __regIPSR __ASM("ipsr");
return(__regIPSR);
}
@@ -88,9 +100,9 @@ static __INLINE uint32_t __get_IPSR(void)
\return APSR Register value
*/
static __INLINE uint32_t __get_APSR(void)
__STATIC_INLINE uint32_t __get_APSR(void)
{
register uint32_t __regAPSR asm("apsr");
register uint32_t __regAPSR __ASM("apsr");
return(__regAPSR);
}
@@ -101,9 +113,9 @@ static __INLINE uint32_t __get_APSR(void)
\return xPSR Register value
*/
static __INLINE uint32_t __get_xPSR(void)
__STATIC_INLINE uint32_t __get_xPSR(void)
{
register uint32_t __regXPSR asm("xpsr");
register uint32_t __regXPSR __ASM("xpsr");
return(__regXPSR);
}
@@ -114,9 +126,9 @@ static __INLINE uint32_t __get_xPSR(void)
\return PSP Register value
*/
static __INLINE uint32_t __get_PSP(void)
__STATIC_INLINE uint32_t __get_PSP(void)
{
register uint32_t __regProcessStackPointer asm("psp");
register uint32_t __regProcessStackPointer __ASM("psp");
return(__regProcessStackPointer);
}
@@ -127,9 +139,9 @@ static __INLINE uint32_t __get_PSP(void)
\param [in] topOfProcStack Process Stack Pointer value to set
*/
static __INLINE void __set_PSP(uint32_t topOfProcStack)
__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
register uint32_t __regProcessStackPointer asm("psp");
register uint32_t __regProcessStackPointer __ASM("psp");
__regProcessStackPointer = topOfProcStack;
}
@@ -140,9 +152,9 @@ static __INLINE void __set_PSP(uint32_t topOfProcStack)
\return MSP Register value
*/
static __INLINE uint32_t __get_MSP(void)
__STATIC_INLINE uint32_t __get_MSP(void)
{
register uint32_t __regMainStackPointer asm("msp");
register uint32_t __regMainStackPointer __ASM("msp");
return(__regMainStackPointer);
}
@@ -153,9 +165,9 @@ static __INLINE uint32_t __get_MSP(void)
\param [in] topOfMainStack Main Stack Pointer value to set
*/
static __INLINE void __set_MSP(uint32_t topOfMainStack)
__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
register uint32_t __regMainStackPointer asm("msp");
register uint32_t __regMainStackPointer __ASM("msp");
__regMainStackPointer = topOfMainStack;
}
@@ -166,9 +178,9 @@ static __INLINE void __set_MSP(uint32_t topOfMainStack)
\return Priority Mask value
*/
static __INLINE uint32_t __get_PRIMASK(void)
__STATIC_INLINE uint32_t __get_PRIMASK(void)
{
register uint32_t __regPriMask asm("primask");
register uint32_t __regPriMask __ASM("primask");
return(__regPriMask);
}
@@ -179,14 +191,14 @@ static __INLINE uint32_t __get_PRIMASK(void)
\param [in] priMask Priority Mask
*/
static __INLINE void __set_PRIMASK(uint32_t priMask)
__STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
register uint32_t __regPriMask asm("primask");
register uint32_t __regPriMask __ASM("primask");
__regPriMask = (priMask);
}
#if (__CORTEX_M >= 0x03)
#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
/** \brief Enable FIQ
@@ -210,9 +222,9 @@ static __INLINE void __set_PRIMASK(uint32_t priMask)
\return Base Priority register value
*/
static __INLINE uint32_t __get_BASEPRI(void)
__STATIC_INLINE uint32_t __get_BASEPRI(void)
{
register uint32_t __regBasePri asm("basepri");
register uint32_t __regBasePri __ASM("basepri");
return(__regBasePri);
}
@@ -223,12 +235,12 @@ static __INLINE uint32_t __get_BASEPRI(void)
\param [in] basePri Base Priority value to set
*/
static __INLINE void __set_BASEPRI(uint32_t basePri)
__STATIC_INLINE void __set_BASEPRI(uint32_t basePri)
{
register uint32_t __regBasePri asm("basepri");
register uint32_t __regBasePri __ASM("basepri");
__regBasePri = (basePri & 0xff);
}
/** \brief Get Fault Mask
@@ -236,9 +248,9 @@ static __INLINE void __set_BASEPRI(uint32_t basePri)
\return Fault Mask register value
*/
static __INLINE uint32_t __get_FAULTMASK(void)
__STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
register uint32_t __regFaultMask asm("faultmask");
register uint32_t __regFaultMask __ASM("faultmask");
return(__regFaultMask);
}
@@ -249,16 +261,16 @@ static __INLINE uint32_t __get_FAULTMASK(void)
\param [in] faultMask Fault Mask value to set
*/
static __INLINE void __set_FAULTMASK(uint32_t faultMask)
__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
register uint32_t __regFaultMask asm("faultmask");
register uint32_t __regFaultMask __ASM("faultmask");
__regFaultMask = (faultMask & (uint32_t)1);
}
#endif /* (__CORTEX_M >= 0x03) */
#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */
#if (__CORTEX_M == 0x04)
#if (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07)
/** \brief Get FPSCR
@@ -266,10 +278,10 @@ static __INLINE void __set_FAULTMASK(uint32_t faultMask)
\return Floating Point Status/Control register value
*/
static __INLINE uint32_t __get_FPSCR(void)
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
register uint32_t __regfpscr asm("fpscr");
register uint32_t __regfpscr __ASM("fpscr");
return(__regfpscr);
#else
return(0);
@@ -283,22 +295,17 @@ static __INLINE uint32_t __get_FPSCR(void)
\param [in] fpscr Floating Point Status/Control value to set
*/
static __INLINE void __set_FPSCR(uint32_t fpscr)
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
register uint32_t __regfpscr asm("fpscr");
register uint32_t __regfpscr __ASM("fpscr");
__regfpscr = (fpscr);
#endif
}
#endif /* (__CORTEX_M == 0x04) */
#endif /* (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) */
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>
#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
@@ -307,9 +314,9 @@ static __INLINE void __set_FPSCR(uint32_t fpscr)
This function enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__attribute__( ( always_inline ) ) static __INLINE void __enable_irq(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
asm volatile ("cpsie i");
__ASM volatile ("cpsie i" : : : "memory");
}
@@ -318,9 +325,9 @@ __attribute__( ( always_inline ) ) static __INLINE void __enable_irq(void)
This function disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__attribute__( ( always_inline ) ) static __INLINE void __disable_irq(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
{
asm volatile ("cpsid i");
__ASM volatile ("cpsid i" : : : "memory");
}
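
The GCC hunks in this region add a "memory" clobber to the cpsie/cpsid instructions (and to the MSR-based setters below), making each intrinsic a compiler barrier: shared data can no longer be cached in registers across the interrupt-state change. A hedged sketch of the save/disable/restore pattern this protects, with a hypothetical shared variable:

extern uint32_t shared_counter;             /* hypothetical shared data */

static inline void counter_bump(void)
{
    uint32_t primask = __get_PRIMASK();     /* remember current mask state */
    __disable_irq();                        /* barrier: accesses stay inside */
    shared_counter++;
    __set_PRIMASK(primask);                 /* restore; also a barrier now */
}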
@@ -330,11 +337,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __disable_irq(void)
\return Control Register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_CONTROL(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
uint32_t result;
asm volatile ("MRS %0, control" : "=r" (result) );
__ASM volatile ("MRS %0, control" : "=r" (result) );
return(result);
}
@@ -345,23 +352,23 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_CONTROL(void)
\param [in] control Control Register value to set
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_CONTROL(uint32_t control)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
asm volatile ("MSR control, %0" : : "r" (control) );
__ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}
/** \brief Get ISPR Register
/** \brief Get IPSR Register
This function returns the content of the ISPR Register.
This function returns the content of the IPSR Register.
\return ISPR Register value
\return IPSR Register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_IPSR(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
{
uint32_t result;
asm volatile ("MRS %0, ipsr" : "=r" (result) );
__ASM volatile ("MRS %0, ipsr" : "=r" (result) );
return(result);
}
@@ -372,11 +379,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_IPSR(void)
\return APSR Register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_APSR(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
uint32_t result;
asm volatile ("MRS %0, apsr" : "=r" (result) );
__ASM volatile ("MRS %0, apsr" : "=r" (result) );
return(result);
}
@@ -387,11 +394,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_APSR(void)
\return xPSR Register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_xPSR(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
{
uint32_t result;
asm volatile ("MRS %0, xpsr" : "=r" (result) );
__ASM volatile ("MRS %0, xpsr" : "=r" (result) );
return(result);
}
@@ -402,14 +409,14 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_xPSR(void)
\return PSP Register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PSP(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
{
register uint32_t result;
asm volatile ("MRS %0, psp\n" : "=r" (result) );
__ASM volatile ("MRS %0, psp\n" : "=r" (result) );
return(result);
}
/** \brief Set Process Stack Pointer
@@ -417,9 +424,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PSP(void)
\param [in] topOfProcStack Process Stack Pointer value to set
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_PSP(uint32_t topOfProcStack)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
asm volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) );
__ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
}
@@ -429,14 +436,14 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_PSP(uint32_t topOf
\return MSP Register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_MSP(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
{
register uint32_t result;
asm volatile ("MRS %0, msp\n" : "=r" (result) );
__ASM volatile ("MRS %0, msp\n" : "=r" (result) );
return(result);
}
/** \brief Set Main Stack Pointer
@@ -444,9 +451,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_MSP(void)
\param [in] topOfMainStack Main Stack Pointer value to set
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_MSP(uint32_t topOfMainStack)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
asm volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) );
__ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
}
@@ -456,11 +463,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_MSP(uint32_t topOf
\return Priority Mask value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PRIMASK(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
uint32_t result;
asm volatile ("MRS %0, primask" : "=r" (result) );
__ASM volatile ("MRS %0, primask" : "=r" (result) );
return(result);
}
@@ -471,11 +478,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PRIMASK(void)
\param [in] priMask Priority Mask
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_PRIMASK(uint32_t priMask)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
asm volatile ("MSR primask, %0" : : "r" (priMask) );
__ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
#if (__CORTEX_M >= 0x03)
@@ -484,9 +491,9 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_PRIMASK(uint32_t p
This function enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__attribute__( ( always_inline ) ) static __INLINE void __enable_fault_irq(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
{
asm volatile ("cpsie f");
__ASM volatile ("cpsie f" : : : "memory");
}
@@ -495,9 +502,9 @@ __attribute__( ( always_inline ) ) static __INLINE void __enable_fault_irq(void)
This function disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__attribute__( ( always_inline ) ) static __INLINE void __disable_fault_irq(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
{
asm volatile ("cpsid f");
__ASM volatile ("cpsid f" : : : "memory");
}
@@ -507,11 +514,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __disable_fault_irq(void
\return Base Priority register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_BASEPRI(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
uint32_t result;
asm volatile ("MRS %0, basepri_max" : "=r" (result) );
__ASM volatile ("MRS %0, basepri_max" : "=r" (result) );
return(result);
}
@@ -522,9 +529,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_BASEPRI(void)
\param [in] basePri Base Priority value to set
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_BASEPRI(uint32_t value)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
asm volatile ("MSR basepri, %0" : : "r" (value) );
__ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}
@@ -534,11 +541,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_BASEPRI(uint32_t v
\return Fault Mask register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FAULTMASK(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
uint32_t result;
asm volatile ("MRS %0, faultmask" : "=r" (result) );
__ASM volatile ("MRS %0, faultmask" : "=r" (result) );
return(result);
}
@@ -549,15 +556,15 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FAULTMASK(void
\param [in] faultMask Fault Mask value to set
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_FAULTMASK(uint32_t faultMask)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
asm volatile ("MSR faultmask, %0" : : "r" (faultMask) );
__ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}
#endif /* (__CORTEX_M >= 0x03) */
#if (__CORTEX_M == 0x04)
#if (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07)
/** \brief Get FPSCR
@@ -565,12 +572,15 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_FAULTMASK(uint32_t
\return Floating Point Status/Control register value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FPSCR(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
uint32_t result;
asm volatile ("VMRS %0, fpscr" : "=r" (result) );
/* Empty asm statement works as a scheduling barrier */
__ASM volatile ("");
__ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
__ASM volatile ("");
return(result);
#else
return(0);
@@ -584,31 +594,44 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FPSCR(void)
\param [in] fpscr Floating Point Status/Control value to set
*/
__attribute__( ( always_inline ) ) static __INLINE void __set_FPSCR(uint32_t fpscr)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
asm volatile ("VMSR fpscr, %0" : : "r" (fpscr) );
/* Empty asm statement works as a scheduling barrier */
__ASM volatile ("");
__ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
__ASM volatile ("");
#endif
}
#endif /* (__CORTEX_M == 0x04) */
#endif /* (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) */
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>
#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>
#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/*
* The CMSIS functions have been implemented as intrinsics in the compiler.
* Please use "carm -?i" to get an up to date list of all instrinsics,
* Please use "carm -?i" to get an up to date list of all intrinsics,
* Including the CMSIS ones.
*/
#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>
#endif
/*@} end of CMSIS_Core_RegAccFunctions */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CMFUNC_H */
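
One further change in this file worth a note: the GCC __get_FPSCR/__set_FPSCR pair now brackets VMRS/VMSR with empty __ASM volatile ("") statements, which act as scheduling barriers so the compiler cannot move floating-point code across the FPSCR access, and VMSR now clobbers "vfpcc". A hedged sketch of why the ordering matters, assuming the architectural FPSCR layout with RMode in bits [23:22] (0b01 = round toward plus infinity):

static inline float add_rounded_up(float x, float y)
{
    uint32_t fpscr = __get_FPSCR();
    __set_FPSCR((fpscr & ~(3u << 22)) | (1u << 22)); /* select round-to-+inf */
    float r = x + y;        /* must be scheduled after the VMSR above */
    __set_FPSCR(fpscr);     /* restore previous mode after the add */
    return r;
}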

core_cmInstr.h

@@ -1,31 +1,43 @@
/**************************************************************************//**
* @file core_cmInstr.h
* @brief CMSIS Cortex-M Core Instruction Access Header File
* @version V2.10
* @date 19. July 2011
* @version V4.00
* @date 28. August 2014
*
* @note
* Copyright (C) 2009-2011 ARM Limited. All rights reserved.
*
* @par
* ARM Limited (ARM) is supplying this software for use with Cortex-M
* processor based microcontrollers. This file can be freely distributed
* within development tools that are supporting such ARM based processors.
*
* THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
* OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
* ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
* CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*
******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of ARM nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#ifndef __CORE_CMINSTR_H
#define __CORE_CMINSTR_H
#ifdef __cplusplus
extern "C" {
#endif
/* ########################## Core Instruction Access ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
@@ -73,8 +85,8 @@ extern "C" {
/** \brief Instruction Synchronization Barrier
Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or
Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or
memory, after the instruction has been completed.
*/
#define __ISB() __isb(0xF)
@@ -82,7 +94,7 @@ extern "C" {
/** \brief Data Synchronization Barrier
This function acts as a special kind of Data Memory Barrier.
This function acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
#define __DSB() __dsb(0xF)
@@ -90,7 +102,7 @@ extern "C" {
/** \brief Data Memory Barrier
This function ensures the apparent order of the explicit memory operations before
This function ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
#define __DMB() __dmb(0xF)
@@ -113,12 +125,13 @@ extern "C" {
\param [in] value Value to reverse
\return Reversed value
*/
static __INLINE __ASM uint32_t __REV16(uint32_t value)
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
{
rev16 r0, r0
bx lr
}
#endif
/** \brief Reverse byte order in signed short value
@@ -127,14 +140,38 @@ static __INLINE __ASM uint32_t __REV16(uint32_t value)
\param [in] value Value to reverse
\return Reversed value
*/
static __INLINE __ASM int32_t __REVSH(int32_t value)
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
{
revsh r0, r0
bx lr
}
#endif
#if (__CORTEX_M >= 0x03)
/** \brief Rotate Right in unsigned value (32 bit)
This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] value Value to rotate
\param [in] value Number of Bits to rotate
\return Rotated value
*/
#define __ROR __ror
/** \brief Breakpoint
This function causes the processor to enter Debug state.
Debug tools can use this to investigate system state when the instruction at a particular address is reached.
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __breakpoint(value)
#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
/** \brief Reverse bit order of value
@@ -148,7 +185,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
/** \brief LDR Exclusive (8 bit)
This function performs a exclusive LDR command for 8 bit value.
This function executes a exclusive LDR instruction for 8 bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
@@ -158,7 +195,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
/** \brief LDR Exclusive (16 bit)
This function performs a exclusive LDR command for 16 bit values.
This function executes a exclusive LDR instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
@@ -168,7 +205,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
/** \brief LDR Exclusive (32 bit)
This function performs a exclusive LDR command for 32 bit values.
This function executes a exclusive LDR instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
@@ -178,7 +215,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
/** \brief STR Exclusive (8 bit)
This function performs a exclusive STR command for 8 bit values.
This function executes a exclusive STR instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
@@ -190,7 +227,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
/** \brief STR Exclusive (16 bit)
This function performs a exclusive STR command for 16 bit values.
This function executes a exclusive STR instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
@@ -202,7 +239,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
/** \brief STR Exclusive (32 bit)
This function performs a exclusive STR command for 32 bit values.
This function executes a exclusive STR instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
@@ -249,26 +286,106 @@ static __INLINE __ASM int32_t __REVSH(int32_t value)
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ __clz
#endif /* (__CORTEX_M >= 0x03) */
#define __CLZ __clz
/** \brief Rotate Right with Extend (32 bit)
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
This function moves each bit of a bitstring right by one bit. The carry input is shifted in at the left end of the bitstring.
#include <cmsis_iar.h>
\param [in] value Value to rotate
\return Rotated value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value)
{
rrx r0, r0
bx lr
}
#endif
/** \brief LDRT Unprivileged (8 bit)
This function executes a Unprivileged LDRT instruction for 8 bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr))
/** \brief LDRT Unprivileged (16 bit)
This function executes a Unprivileged LDRT instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr))
/** \brief LDRT Unprivileged (32 bit)
This function executes a Unprivileged LDRT instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr))
/** \brief STRT Unprivileged (8 bit)
This function executes a Unprivileged STRT instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRBT(value, ptr) __strt(value, ptr)
/** \brief STRT Unprivileged (16 bit)
This function executes a Unprivileged STRT instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRHT(value, ptr) __strt(value, ptr)
/** \brief STRT Unprivileged (32 bit)
This function executes a Unprivileged STRT instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRT(value, ptr) __strt(value, ptr)
#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */
#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
/* Define macros for porting to both thumb1 and thumb2.
* For thumb1, use low register (r0-r7), specified by constrant "l"
* Otherwise, use general registers, specified by constrant "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif
/** \brief No Operation
No Operation does nothing. This instruction can be used for code alignment purposes.
*/
__attribute__( ( always_inline ) ) static __INLINE void __NOP(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __NOP(void)
{
__ASM volatile ("nop");
}
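
The __CMSIS_GCC_OUT_REG/__CMSIS_GCC_USE_REG macros introduced above select the "l" operand constraint when compiling for Thumb-1 (e.g. Cortex-M0/M0+), because the 16-bit encodings of rev, rev16 and revsh can only name the low registers r0-r7; the plain "r" constraint could hand the assembler a high register and fail to assemble. Illustrative expansion only:

/* Thumb-1 build (__thumb__ && !__thumb2__):
 *   __ASM volatile ("rev %0, %1" : "=l" (result) : "l" (value));
 * Thumb-2 build: the same source expands with "=r"/"r", so the
 * compiler may also allocate r8-r12. */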
@@ -279,7 +396,7 @@ __attribute__( ( always_inline ) ) static __INLINE void __NOP(void)
Wait For Interrupt is a hint instruction that suspends execution
until one of a number of events occurs.
*/
__attribute__( ( always_inline ) ) static __INLINE void __WFI(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __WFI(void)
{
__ASM volatile ("wfi");
}
@@ -290,7 +407,7 @@ __attribute__( ( always_inline ) ) static __INLINE void __WFI(void)
Wait For Event is a hint instruction that permits the processor to enter
a low-power state until one of a number of events occurs.
*/
__attribute__( ( always_inline ) ) static __INLINE void __WFE(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __WFE(void)
{
__ASM volatile ("wfe");
}
@@ -300,7 +417,7 @@ __attribute__( ( always_inline ) ) static __INLINE void __WFE(void)
Send Event is a hint instruction. It causes an event to be signaled to the CPU.
*/
__attribute__( ( always_inline ) ) static __INLINE void __SEV(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __SEV(void)
{
__ASM volatile ("sev");
}
@@ -308,11 +425,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __SEV(void)
/** \brief Instruction Synchronization Barrier
Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or
Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or
memory, after the instruction has been completed.
*/
__attribute__( ( always_inline ) ) static __INLINE void __ISB(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __ISB(void)
{
__ASM volatile ("isb");
}
@@ -320,10 +437,10 @@ __attribute__( ( always_inline ) ) static __INLINE void __ISB(void)
/** \brief Data Synchronization Barrier
This function acts as a special kind of Data Memory Barrier.
This function acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
__attribute__( ( always_inline ) ) static __INLINE void __DSB(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __DSB(void)
{
__ASM volatile ("dsb");
}
@@ -331,10 +448,10 @@ __attribute__( ( always_inline ) ) static __INLINE void __DSB(void)
/** \brief Data Memory Barrier
This function ensures the apparent order of the explicit memory operations before
This function ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
__attribute__( ( always_inline ) ) static __INLINE void __DMB(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __DMB(void)
{
__ASM volatile ("dmb");
}
@@ -347,12 +464,16 @@ __attribute__( ( always_inline ) ) static __INLINE void __DMB(void)
\param [in] value Value to reverse
\return Reversed value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __REV(uint32_t value)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
return __builtin_bswap32(value);
#else
uint32_t result;
__ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
__ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return(result);
#endif
}
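
From GCC 4.5 on, __REV now defers to __builtin_bswap32, letting the compiler fold constants and pick the best instruction sequence; __REVSH below does the same with __builtin_bswap16 from GCC 4.8. Worked values for the three byte-reversal intrinsics:

/* Illustrative values:
 *   __REV(0x12345678)   == 0x78563412   (whole word reversed)
 *   __REV16(0x12345678) == 0x34127856   (bytes swapped per halfword)
 *   __REVSH(0x00000080) == 0xFFFF8000   (low halfword reversed and
 *                                        sign-extended: -32768 as int32_t) */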
@@ -363,11 +484,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __REV(uint32_t value
\param [in] value Value to reverse
\return Reversed value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __REV16(uint32_t value)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
uint32_t result;
__ASM volatile ("rev16 %0, %1" : "=r" (result) : "r" (value) );
__ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return(result);
}
@@ -379,16 +500,45 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __REV16(uint32_t val
\param [in] value Value to reverse
\return Reversed value
*/
__attribute__( ( always_inline ) ) static __INLINE int32_t __REVSH(int32_t value)
__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
return (short)__builtin_bswap16(value);
#else
uint32_t result;
__ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
__ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return(result);
#endif
}
#if (__CORTEX_M >= 0x03)
/** \brief Rotate Right in unsigned value (32 bit)
This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] value Value to rotate
\param [in] value Number of Bits to rotate
\return Rotated value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
return (op1 >> op2) | (op1 << (32 - op2));
}
/** \brief Breakpoint
This function causes the processor to enter Debug state.
Debug tools can use this to investigate system state when the instruction at a particular address is reached.
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __ASM volatile ("bkpt "#value)
#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
/** \brief Reverse bit order of value
@@ -397,10 +547,10 @@ __attribute__( ( always_inline ) ) static __INLINE int32_t __REVSH(int32_t value
\param [in] value Value to reverse
\return Reversed value
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __RBIT(uint32_t value)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
uint32_t result;
__ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
return(result);
}
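
Two notes on the additions just above: the GCC __ROR is now a plain C expression, (op1 >> op2) | (op1 << (32 - op2)), which matches the hardware rotate for 0 < op2 < 32 but shifts by 32 (undefined behaviour in C) when op2 == 0, so callers should keep the rotate count in range; and __BKPT pastes its argument into a literal bkpt instruction. Worked values:

/* Illustrative values:
 *   __ROR(0x80000001, 1) == 0xC0000000
 *   __RBIT(0x00000001)   == 0x80000000
 */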
@@ -408,102 +558,116 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __RBIT(uint32_t valu
/** \brief LDR Exclusive (8 bit)
This function performs a exclusive LDR command for 8 bit value.
This function executes a exclusive LDR instruction for 8 bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
__attribute__( ( always_inline ) ) static __INLINE uint8_t __LDREXB(volatile uint8_t *addr)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
uint8_t result;
__ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) );
return(result);
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
accepted by assembler. So has to use following less efficient pattern.
*/
__ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint8_t) result); /* Add explicit type cast here */
}
/** \brief LDR Exclusive (16 bit)
This function performs a exclusive LDR command for 16 bit values.
This function executes a exclusive LDR instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
__attribute__( ( always_inline ) ) static __INLINE uint16_t __LDREXH(volatile uint16_t *addr)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
uint16_t result;
__ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) );
return(result);
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
accepted by assembler. So has to use following less efficient pattern.
*/
__ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint16_t) result); /* Add explicit type cast here */
}
/** \brief LDR Exclusive (32 bit)
This function performs a exclusive LDR command for 32 bit values.
This function executes a exclusive LDR instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __LDREXW(volatile uint32_t *addr)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("ldrex %0, [%1]" : "=r" (result) : "r" (addr) );
__ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
return(result);
}
/** \brief STR Exclusive (8 bit)
This function performs a exclusive STR command for 8 bit values.
This function executes a exclusive STR instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
uint32_t result;
__ASM volatile ("strexb %0, %2, [%1]" : "=r" (result) : "r" (addr), "r" (value) );
__ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return(result);
}
/** \brief STR Exclusive (16 bit)
This function performs a exclusive STR command for 16 bit values.
This function executes a exclusive STR instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
uint32_t result;
__ASM volatile ("strexh %0, %2, [%1]" : "=r" (result) : "r" (addr), "r" (value) );
__ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return(result);
}
/** \brief STR Exclusive (32 bit)
This function performs a exclusive STR command for 32 bit values.
This function executes a exclusive STR instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("strex %0, %2, [%1]" : "=r" (result) : "r" (addr), "r" (value) );
__ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
return(result);
}
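
The exclusive-access rewrites above switch from passing a raw pointer in a register to the "Q" memory-operand constraint (on GCC >= 4.8), which tells the compiler exactly which location the instruction touches, and the store-exclusives gain an early-clobber "=&r" on the status result. The canonical use of this pair is a retry loop; a hedged sketch (valid only inside this (__CORTEX_M >= 0x03) block, since M0/M0+ lack the exclusives):

static inline uint32_t atomic_increment(volatile uint32_t *addr)
{
    uint32_t val;
    do {
        val = __LDREXW(addr) + 1U;       /* load-exclusive and compute */
    } while (__STREXW(val, addr) != 0U); /* 0 = stored; retry if lost */
    return val;
}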
@@ -513,9 +677,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXW(uint32_t va
This function removes the exclusive lock which is created by LDREX.
*/
__attribute__( ( always_inline ) ) static __INLINE void __CLREX(void)
__attribute__( ( always_inline ) ) __STATIC_INLINE void __CLREX(void)
{
__ASM volatile ("clrex");
__ASM volatile ("clrex" ::: "memory");
}
@@ -558,34 +722,159 @@ __attribute__( ( always_inline ) ) static __INLINE void __CLREX(void)
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
__attribute__( ( always_inline ) ) static __INLINE uint8_t __CLZ(uint32_t value)
__attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __CLZ(uint32_t value)
{
uint8_t result;
uint32_t result;
__ASM volatile ("clz %0, %1" : "=r" (result) : "r" (value) );
return ((uint8_t) result); /* Add explicit type cast here */
}
/** \brief Rotate Right with Extend (32 bit)
This function moves each bit of a bitstring right by one bit. The carry input is shifted in at the left end of the bitstring.
\param [in] value Value to rotate
\return Rotated value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RRX(uint32_t value)
{
uint32_t result;
__ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return(result);
}
#endif /* (__CORTEX_M >= 0x03) */
/** \brief LDRT Unprivileged (8 bit)
This function executes a Unprivileged LDRT instruction for 8 bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
accepted by assembler. So has to use following less efficient pattern.
*/
__ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint8_t) result); /* Add explicit type cast here */
}
/** \brief LDRT Unprivileged (16 bit)
This function executes a Unprivileged LDRT instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
accepted by assembler. So has to use following less efficient pattern.
*/
__ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint16_t) result); /* Add explicit type cast here */
}
/** \brief LDRT Unprivileged (32 bit)
This function executes a Unprivileged LDRT instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
return(result);
}
/** \brief STRT Unprivileged (8 bit)
This function executes a Unprivileged STRT instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
{
__ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}
/** \brief STRT Unprivileged (16 bit)
This function executes a Unprivileged STRT instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
{
__ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}
/** \brief STRT Unprivileged (32 bit)
This function executes a Unprivileged STRT instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
{
__ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
}
#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>
#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>
#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/*
* The CMSIS functions have been implemented as intrinsics in the compiler.
* Please use "carm -?i" to get an up to date list of all intrinsics,
* Including the CMSIS ones.
*/
#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>
#endif
/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CMINSTR_H */
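
Finally, the GCC __CLZ in the last hunk widens its temporary to uint32_t and casts the result back to uint8_t, silencing implicit-truncation warnings without changing the returned value. Since __CLZ counts leading zero bits, it gives a cheap integer log2; a closing sketch:

static inline uint32_t ilog2(uint32_t x)
{
    /* Precondition: x != 0. Example: x = 0x00010000 -> __CLZ(x) = 15,
     * so ilog2(x) = 31 - 15 = 16. */
    return 31U - __CLZ(x);
}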