From 8c00ee0d7ba01e2ba9d71eb1ed14bf61840203dc Mon Sep 17 00:00:00 2001 From: Joakim Gebart Date: Thu, 29 Jan 2015 09:29:13 +0100 Subject: [PATCH] cortex-m0: Update to CMSIS HAL 4.0 Signed-off-by: Joakim Gebart --- cpu/cortex-m0_common/include/core_cm0.h | 1401 ++++++++++--------- cpu/cortex-m0_common/include/core_cm0plus.h | 117 +- cpu/cortex-m0_common/include/core_cmFunc.h | 271 ++-- cpu/cortex-m0_common/include/core_cmInstr.h | 487 +++++-- 4 files changed, 1323 insertions(+), 953 deletions(-) diff --git a/cpu/cortex-m0_common/include/core_cm0.h b/cpu/cortex-m0_common/include/core_cm0.h index cb809423ac..5186cb4838 100644 --- a/cpu/cortex-m0_common/include/core_cm0.h +++ b/cpu/cortex-m0_common/include/core_cm0.h @@ -1,690 +1,711 @@ -/**************************************************************************//** - * @file core_cm0.h - * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File - * @version V3.20 - * @date 25. February 2013 - * - * @note - * - ******************************************************************************/ -/* Copyright (c) 2009 - 2013 ARM LIMITED - - All rights reserved. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - Neither the name of ARM nor the names of its contributors may be used - to endorse or promote products derived from this software without - specific prior written permission. - * - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------------------------*/ - - -#if defined ( __ICCARM__ ) - #pragma system_include /* treat file as system include file for MISRA check */ -#endif - -#ifdef __cplusplus - extern "C" { -#endif - -#ifndef __CORE_CM0_H_GENERIC -#define __CORE_CM0_H_GENERIC - -/** \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions - CMSIS violates the following MISRA-C:2004 rules: - - \li Required Rule 8.5, object/function definition in header file.
- Function definitions in header files are used to allow 'inlining'. - - \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
- Unions are used for effective representation of core registers. - - \li Advisory Rule 19.7, Function-like macro defined.
- Function-like macros are used to allow more efficient code. - */ - - -/******************************************************************************* - * CMSIS definitions - ******************************************************************************/ -/** \ingroup Cortex_M0 - @{ - */ - -/* CMSIS CM0 definitions */ -#define __CM0_CMSIS_VERSION_MAIN (0x03) /*!< [31:16] CMSIS HAL main version */ -#define __CM0_CMSIS_VERSION_SUB (0x20) /*!< [15:0] CMSIS HAL sub version */ -#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16) | \ - __CM0_CMSIS_VERSION_SUB ) /*!< CMSIS HAL version number */ - -#define __CORTEX_M (0x00) /*!< Cortex-M Core */ - - -#if defined ( __CC_ARM ) - #define __ASM __asm /*!< asm keyword for ARM Compiler */ - #define __INLINE __inline /*!< inline keyword for ARM Compiler */ - #define __STATIC_INLINE static __inline - -#elif defined ( __ICCARM__ ) - #define __ASM __asm /*!< asm keyword for IAR Compiler */ - #define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */ - #define __STATIC_INLINE static inline - -#elif defined ( __GNUC__ ) - #define __ASM __asm /*!< asm keyword for GNU Compiler */ - #define __INLINE inline /*!< inline keyword for GNU Compiler */ - #define __STATIC_INLINE static inline - -#elif defined ( __TASKING__ ) - #define __ASM __asm /*!< asm keyword for TASKING Compiler */ - #define __INLINE inline /*!< inline keyword for TASKING Compiler */ - #define __STATIC_INLINE static inline - -#endif - -/** __FPU_USED indicates whether an FPU is used or not. This core does not support an FPU at all -*/ -#define __FPU_USED 0 - -#if defined ( __CC_ARM ) - #if defined __TARGET_FPU_VFP - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #endif - -#elif defined ( __ICCARM__ ) - #if defined __ARMVFP__ - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #endif - -#elif defined ( __GNUC__ ) - #if defined (__VFP_FP__) && !defined(__SOFTFP__) - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #endif - -#elif defined ( __TASKING__ ) - #if defined __FPU_VFP__ - #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #endif -#endif - -#ifdef __cplusplus -} -#endif - -#include /* standard types definitions */ -#include /* Core Instruction Access */ -#include /* Core Function Access */ - -#ifdef __cplusplus -extern "C" { -#endif - -#endif /* __CORE_CM0_H_GENERIC */ - -#ifndef __CMSIS_GENERIC - -#ifndef __CORE_CM0_H_DEPENDANT -#define __CORE_CM0_H_DEPENDANT - -/* check device defines and use defaults */ -#if defined __CHECK_DEVICE_DEFINES - #ifndef __CM0_REV - #define __CM0_REV 0x0000 - #warning "__CM0_REV not defined in device header file; using default!" - #endif - - #ifndef __NVIC_PRIO_BITS - #define __NVIC_PRIO_BITS 2 - #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" - #endif - - #ifndef __Vendor_SysTickConfig - #define __Vendor_SysTickConfig 0 - #warning "__Vendor_SysTickConfig not defined in device header file; using default!" - #endif -#endif - -/* IO definitions (access restrictions to peripheral registers) */ -/** - \defgroup CMSIS_glob_defs CMSIS Global Defines - - IO Type Qualifiers are used - \li to specify the access to peripheral variables. - \li for automatic generation of peripheral register debug information. 
-*/ -#ifdef __cplusplus - #define __I volatile /*!< Defines 'read only' permissions */ -#else - #define __I volatile const /*!< Defines 'read only' permissions */ -#endif -#define __O volatile /*!< Defines 'write only' permissions */ -#define __IO volatile /*!< Defines 'read / write' permissions */ - -/*@} end of group Cortex_M0 */ - - - -/******************************************************************************* - * Register Abstraction - Core Register contain: - - Core Register - - Core NVIC Register - - Core SCB Register - - Core SysTick Register - ******************************************************************************/ -/** \defgroup CMSIS_core_register Defines and Type Definitions - \brief Type definitions and defines for Cortex-M processor based devices. -*/ - -/** \ingroup CMSIS_core_register - \defgroup CMSIS_CORE Status and Control Registers - \brief Core Register type definitions. - @{ - */ - -/** \brief Union type to access the Application Program Status Register (APSR). - */ -typedef union -{ - struct - { -#if (__CORTEX_M != 0x04) - uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ -#else - uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ - uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ - uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ -#endif - uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ - uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ - uint32_t C:1; /*!< bit: 29 Carry condition code flag */ - uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ - uint32_t N:1; /*!< bit: 31 Negative condition code flag */ - } b; /*!< Structure used for bit access */ - uint32_t w; /*!< Type used for word access */ -} APSR_Type; - - -/** \brief Union type to access the Interrupt Program Status Register (IPSR). - */ -typedef union -{ - struct - { - uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ - uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ - } b; /*!< Structure used for bit access */ - uint32_t w; /*!< Type used for word access */ -} IPSR_Type; - - -/** \brief Union type to access the Special-Purpose Program Status Registers (xPSR). - */ -typedef union -{ - struct - { - uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ -#if (__CORTEX_M != 0x04) - uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ -#else - uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ - uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ - uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ -#endif - uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ - uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ - uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ - uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ - uint32_t C:1; /*!< bit: 29 Carry condition code flag */ - uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ - uint32_t N:1; /*!< bit: 31 Negative condition code flag */ - } b; /*!< Structure used for bit access */ - uint32_t w; /*!< Type used for word access */ -} xPSR_Type; - - -/** \brief Union type to access the Control Registers (CONTROL). 
- */ -typedef union -{ - struct - { - uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ - uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ - uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ - uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ - } b; /*!< Structure used for bit access */ - uint32_t w; /*!< Type used for word access */ -} CONTROL_Type; - -/*@} end of group CMSIS_CORE */ - - -/** \ingroup CMSIS_core_register - \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) - \brief Type definitions for the NVIC Registers - @{ - */ - -/** \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). - */ -typedef struct -{ - __IO uint32_t ISER[1]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ - uint32_t RESERVED0[31]; - __IO uint32_t ICER[1]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[31]; - __IO uint32_t ISPR[1]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ - uint32_t RESERVED2[31]; - __IO uint32_t ICPR[1]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ - uint32_t RESERVED3[31]; - uint32_t RESERVED4[64]; - __IO uint32_t IP[8]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ -} NVIC_Type; - -/*@} end of group CMSIS_NVIC */ - - -/** \ingroup CMSIS_core_register - \defgroup CMSIS_SCB System Control Block (SCB) - \brief Type definitions for the System Control Block Registers - @{ - */ - -/** \brief Structure type to access the System Control Block (SCB). - */ -typedef struct -{ - __I uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ - __IO uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ - uint32_t RESERVED0; - __IO uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ - __IO uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ - __IO uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ - uint32_t RESERVED1; - __IO uint32_t SHP[2]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ - __IO uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ -} SCB_Type; - -/* SCB CPUID Register Definitions */ -#define SCB_CPUID_IMPLEMENTER_Pos 24 /*!< SCB CPUID: IMPLEMENTER Position */ -#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ - -#define SCB_CPUID_VARIANT_Pos 20 /*!< SCB CPUID: VARIANT Position */ -#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ - -#define SCB_CPUID_ARCHITECTURE_Pos 16 /*!< SCB CPUID: ARCHITECTURE Position */ -#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ - -#define SCB_CPUID_PARTNO_Pos 4 /*!< SCB CPUID: PARTNO Position */ -#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ - -#define SCB_CPUID_REVISION_Pos 0 /*!< SCB CPUID: REVISION Position */ -#define SCB_CPUID_REVISION_Msk (0xFUL << SCB_CPUID_REVISION_Pos) /*!< SCB CPUID: REVISION Mask */ - -/* SCB Interrupt Control State Register Definitions */ -#define SCB_ICSR_NMIPENDSET_Pos 31 /*!< SCB ICSR: NMIPENDSET Position */ -#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ - -#define SCB_ICSR_PENDSVSET_Pos 28 /*!< SCB ICSR: PENDSVSET Position */ -#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ - -#define SCB_ICSR_PENDSVCLR_Pos 27 /*!< SCB ICSR: PENDSVCLR Position */ -#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ - -#define SCB_ICSR_PENDSTSET_Pos 26 /*!< SCB ICSR: PENDSTSET Position */ -#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ - -#define SCB_ICSR_PENDSTCLR_Pos 25 /*!< SCB ICSR: PENDSTCLR Position */ -#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ - -#define SCB_ICSR_ISRPREEMPT_Pos 23 /*!< SCB ICSR: ISRPREEMPT Position */ -#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ - -#define SCB_ICSR_ISRPENDING_Pos 22 /*!< SCB ICSR: ISRPENDING Position */ -#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ - -#define SCB_ICSR_VECTPENDING_Pos 12 /*!< SCB ICSR: VECTPENDING Position */ -#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ - -#define SCB_ICSR_VECTACTIVE_Pos 0 /*!< SCB ICSR: VECTACTIVE Position */ -#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL << SCB_ICSR_VECTACTIVE_Pos) /*!< SCB ICSR: VECTACTIVE Mask */ - -/* SCB Application Interrupt and Reset Control Register Definitions */ -#define SCB_AIRCR_VECTKEY_Pos 16 /*!< SCB AIRCR: VECTKEY Position */ -#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ - -#define SCB_AIRCR_VECTKEYSTAT_Pos 16 /*!< SCB AIRCR: VECTKEYSTAT Position */ -#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ - -#define SCB_AIRCR_ENDIANESS_Pos 15 /*!< SCB AIRCR: ENDIANESS Position */ -#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ - -#define SCB_AIRCR_SYSRESETREQ_Pos 2 /*!< SCB AIRCR: SYSRESETREQ Position */ -#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ - -#define SCB_AIRCR_VECTCLRACTIVE_Pos 1 /*!< SCB AIRCR: VECTCLRACTIVE 
Position */ -#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ - -/* SCB System Control Register Definitions */ -#define SCB_SCR_SEVONPEND_Pos 4 /*!< SCB SCR: SEVONPEND Position */ -#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ - -#define SCB_SCR_SLEEPDEEP_Pos 2 /*!< SCB SCR: SLEEPDEEP Position */ -#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ - -#define SCB_SCR_SLEEPONEXIT_Pos 1 /*!< SCB SCR: SLEEPONEXIT Position */ -#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ - -/* SCB Configuration Control Register Definitions */ -#define SCB_CCR_STKALIGN_Pos 9 /*!< SCB CCR: STKALIGN Position */ -#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ - -#define SCB_CCR_UNALIGN_TRP_Pos 3 /*!< SCB CCR: UNALIGN_TRP Position */ -#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ - -/* SCB System Handler Control and State Register Definitions */ -#define SCB_SHCSR_SVCALLPENDED_Pos 15 /*!< SCB SHCSR: SVCALLPENDED Position */ -#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ - -/*@} end of group CMSIS_SCB */ - - -/** \ingroup CMSIS_core_register - \defgroup CMSIS_SysTick System Tick Timer (SysTick) - \brief Type definitions for the System Timer Registers. - @{ - */ - -/** \brief Structure type to access the System Timer (SysTick). - */ -typedef struct -{ - __IO uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ - __IO uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ - __IO uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ - __I uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ -} SysTick_Type; - -/* SysTick Control / Status Register Definitions */ -#define SysTick_CTRL_COUNTFLAG_Pos 16 /*!< SysTick CTRL: COUNTFLAG Position */ -#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ - -#define SysTick_CTRL_CLKSOURCE_Pos 2 /*!< SysTick CTRL: CLKSOURCE Position */ -#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ - -#define SysTick_CTRL_TICKINT_Pos 1 /*!< SysTick CTRL: TICKINT Position */ -#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ - -#define SysTick_CTRL_ENABLE_Pos 0 /*!< SysTick CTRL: ENABLE Position */ -#define SysTick_CTRL_ENABLE_Msk (1UL << SysTick_CTRL_ENABLE_Pos) /*!< SysTick CTRL: ENABLE Mask */ - -/* SysTick Reload Register Definitions */ -#define SysTick_LOAD_RELOAD_Pos 0 /*!< SysTick LOAD: RELOAD Position */ -#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL << SysTick_LOAD_RELOAD_Pos) /*!< SysTick LOAD: RELOAD Mask */ - -/* SysTick Current Register Definitions */ -#define SysTick_VAL_CURRENT_Pos 0 /*!< SysTick VAL: CURRENT Position */ -#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL << SysTick_VAL_CURRENT_Pos) /*!< SysTick VAL: CURRENT Mask */ - -/* SysTick Calibration Register Definitions */ -#define SysTick_CALIB_NOREF_Pos 31 /*!< SysTick CALIB: NOREF Position */ -#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ - -#define SysTick_CALIB_SKEW_Pos 30 /*!< SysTick CALIB: SKEW Position */ -#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< 
SysTick CALIB: SKEW Mask */ - -#define SysTick_CALIB_TENMS_Pos 0 /*!< SysTick CALIB: TENMS Position */ -#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL << SysTick_VAL_CURRENT_Pos) /*!< SysTick CALIB: TENMS Mask */ - -/*@} end of group CMSIS_SysTick */ - - -/** \ingroup CMSIS_core_register - \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) - \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) - are only accessible over DAP and not via processor. Therefore - they are not covered by the Cortex-M0 header file. - @{ - */ -/*@} end of group CMSIS_CoreDebug */ - - -/** \ingroup CMSIS_core_register - \defgroup CMSIS_core_base Core Definitions - \brief Definitions for base addresses, unions, and structures. - @{ - */ - -/* Memory mapping of Cortex-M0 Hardware */ -#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ -#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ -#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ -#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ - -#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ -#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ -#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ - - -/*@} */ - - - -/******************************************************************************* - * Hardware Abstraction Layer - Core Function Interface contains: - - Core NVIC Functions - - Core SysTick Functions - - Core Register Access Functions - ******************************************************************************/ -/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference -*/ - - - -/* ########################## NVIC functions #################################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_NVICFunctions NVIC Functions - \brief Functions that manage interrupts and exceptions via the NVIC. - @{ - */ - -/* Interrupt Priorities are WORD accessible only under ARMv6M */ -/* The following MACROS handle generation of the register offset and byte masks */ -#define _BIT_SHIFT(IRQn) ( (((uint32_t)(IRQn) ) & 0x03) * 8 ) -#define _SHP_IDX(IRQn) ( ((((uint32_t)(IRQn) & 0x0F)-8) >> 2) ) -#define _IP_IDX(IRQn) ( ((uint32_t)(IRQn) >> 2) ) - - -/** \brief Enable External Interrupt - - The function enables a device-specific interrupt in the NVIC interrupt controller. - - \param [in] IRQn External interrupt number. Value cannot be negative. - */ -__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn) -{ - NVIC->ISER[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); -} - - -/** \brief Disable External Interrupt - - The function disables a device-specific interrupt in the NVIC interrupt controller. - - \param [in] IRQn External interrupt number. Value cannot be negative. - */ -__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn) -{ - NVIC->ICER[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); -} - - -/** \brief Get Pending Interrupt - - The function reads the pending register in the NVIC and returns the pending bit - for the specified interrupt. - - \param [in] IRQn Interrupt number. - - \return 0 Interrupt status is not pending. - \return 1 Interrupt status is pending. - */ -__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn) -{ - return((uint32_t) ((NVIC->ISPR[0] & (1 << ((uint32_t)(IRQn) & 0x1F)))?1:0)); -} - - -/** \brief Set Pending Interrupt - - The function sets the pending bit of an external interrupt. - - \param [in] IRQn Interrupt number. 
Value cannot be negative. - */ -__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn) -{ - NVIC->ISPR[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); -} - - -/** \brief Clear Pending Interrupt - - The function clears the pending bit of an external interrupt. - - \param [in] IRQn External interrupt number. Value cannot be negative. - */ -__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn) -{ - NVIC->ICPR[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); /* Clear pending interrupt */ -} - - -/** \brief Set Interrupt Priority - - The function sets the priority of an interrupt. - - \note The priority cannot be set for every core interrupt. - - \param [in] IRQn Interrupt number. - \param [in] priority Priority to set. - */ -__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) -{ - if(IRQn < 0) { - SCB->SHP[_SHP_IDX(IRQn)] = (SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFF << _BIT_SHIFT(IRQn))) | - (((priority << (8 - __NVIC_PRIO_BITS)) & 0xFF) << _BIT_SHIFT(IRQn)); } - else { - NVIC->IP[_IP_IDX(IRQn)] = (NVIC->IP[_IP_IDX(IRQn)] & ~(0xFF << _BIT_SHIFT(IRQn))) | - (((priority << (8 - __NVIC_PRIO_BITS)) & 0xFF) << _BIT_SHIFT(IRQn)); } -} - - -/** \brief Get Interrupt Priority - - The function reads the priority of an interrupt. The interrupt - number can be positive to specify an external (device specific) - interrupt, or negative to specify an internal (core) interrupt. - - - \param [in] IRQn Interrupt number. - \return Interrupt Priority. Value is aligned automatically to the implemented - priority bits of the microcontroller. - */ -__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn) -{ - - if(IRQn < 0) { - return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for Cortex-M0 system interrupts */ - else { - return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for device specific interrupts */ -} - - -/** \brief System Reset - - The function initiates a system reset request to reset the MCU. - */ -__STATIC_INLINE void NVIC_SystemReset(void) -{ - __DSB(); /* Ensure all outstanding memory accesses included - buffered write are completed before reset */ - SCB->AIRCR = ((0x5FA << SCB_AIRCR_VECTKEY_Pos) | - SCB_AIRCR_SYSRESETREQ_Msk); - __DSB(); /* Ensure completion of memory access */ - while(1); /* wait until reset */ -} - -/*@} end of CMSIS_Core_NVICFunctions */ - - - -/* ################################## SysTick function ############################################ */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_SysTickFunctions SysTick Functions - \brief Functions that configure the System. - @{ - */ - -#if (__Vendor_SysTickConfig == 0) - -/** \brief System Tick Configuration - - The function initializes the System Timer and its interrupt, and starts the System Tick Timer. - Counter is in free running mode to generate periodic interrupts. - - \param [in] ticks Number of ticks between two interrupts. - - \return 0 Function succeeded. - \return 1 Function failed. - - \note When the variable __Vendor_SysTickConfig is set to 1, then the - function SysTick_Config is not included. In this case, the file device.h - must contain a vendor-specific implementation of this function. 
- - */ -__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) -{ - if ((ticks - 1) > SysTick_LOAD_RELOAD_Msk) return (1); /* Reload value impossible */ - - SysTick->LOAD = ticks - 1; /* set reload register */ - NVIC_SetPriority (SysTick_IRQn, (1<<__NVIC_PRIO_BITS) - 1); /* set Priority for Systick Interrupt */ - SysTick->VAL = 0; /* Load the SysTick Counter Value */ - SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | - SysTick_CTRL_TICKINT_Msk | - SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ - return (0); /* Function successful */ -} - -#endif - -/*@} end of CMSIS_Core_SysTickFunctions */ - - - - -#endif /* __CORE_CM0_H_DEPENDANT */ - -#endif /* __CMSIS_GENERIC */ - -#ifdef __cplusplus -} -#endif +/**************************************************************************//** + * @file core_cm0.h + * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File + * @version V4.00 + * @date 22. August 2014 + * + * @note + * + ******************************************************************************/ +/* Copyright (c) 2009 - 2014 ARM LIMITED + + All rights reserved. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + - Neither the name of ARM nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + * + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + ---------------------------------------------------------------------------*/ + + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#endif + +#ifndef __CORE_CM0_H_GENERIC +#define __CORE_CM0_H_GENERIC + +#ifdef __cplusplus + extern "C" { +#endif + +/** \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** \ingroup Cortex_M0 + @{ + */ + +/* CMSIS CM0 definitions */ +#define __CM0_CMSIS_VERSION_MAIN (0x04) /*!< [31:16] CMSIS HAL main version */ +#define __CM0_CMSIS_VERSION_SUB (0x00) /*!< [15:0] CMSIS HAL sub version */ +#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16) | \ + __CM0_CMSIS_VERSION_SUB ) /*!< CMSIS HAL version number */ + +#define __CORTEX_M (0x00) /*!< Cortex-M Core */ + + +#if defined ( __CC_ARM ) + #define __ASM __asm /*!< asm keyword for ARM Compiler */ + #define __INLINE __inline /*!< inline keyword for ARM Compiler */ + #define __STATIC_INLINE static __inline + +#elif defined ( __GNUC__ ) + #define __ASM __asm /*!< asm keyword for GNU Compiler */ + #define __INLINE inline /*!< inline keyword for GNU Compiler */ + #define __STATIC_INLINE static inline + +#elif defined ( __ICCARM__ ) + #define __ASM __asm /*!< asm keyword for IAR Compiler */ + #define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */ + #define __STATIC_INLINE static inline + +#elif defined ( __TMS470__ ) + #define __ASM __asm /*!< asm keyword for TI CCS Compiler */ + #define __STATIC_INLINE static inline + +#elif defined ( __TASKING__ ) + #define __ASM __asm /*!< asm keyword for TASKING Compiler */ + #define __INLINE inline /*!< inline keyword for TASKING Compiler */ + #define __STATIC_INLINE static inline + +#elif defined ( __CSMC__ ) + #define __packed + #define __ASM _asm /*!< asm keyword for COSMIC Compiler */ + #define __INLINE inline /*use -pc99 on compile line !< inline keyword for COSMIC Compiler */ + #define __STATIC_INLINE static inline + +#endif + +/** __FPU_USED indicates whether an FPU is used or not. 
+ This core does not support an FPU at all +*/ +#define __FPU_USED 0 + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TMS470__ ) + #if defined __TI__VFP_SUPPORT____ + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) /* Cosmic */ + #if ( __CSMC__ & 0x400) // FPU present for parser + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif +#endif + +#include /* standard types definitions */ +#include /* Core Instruction Access */ +#include /* Core Function Access */ + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0_H_DEPENDANT +#define __CORE_CM0_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0_REV + #define __CM0_REV 0x0000 + #warning "__CM0_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2 + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0 + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/*@} end of group Cortex_M0 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { +#if (__CORTEX_M != 0x04) + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ +#else + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ +#endif + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + + +/** \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + + +/** \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ +#if (__CORTEX_M != 0x04) + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ +#else + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ +#endif + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + + +/** \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/*@} end of group CMSIS_CORE */ + + +/** \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IO uint32_t ISER[1]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31]; + __IO uint32_t ICER[1]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31]; + __IO uint32_t ISPR[1]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31]; + __IO uint32_t ICPR[1]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31]; + uint32_t RESERVED4[64]; + __IO uint32_t IP[8]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __I uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IO uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IO uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IO uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IO uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IO uint32_t SHP[2]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IO uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24 /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20 /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16 /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4 /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0 /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL << SCB_CPUID_REVISION_Pos) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31 /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28 /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27 /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26 /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25 /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23 /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask 
*/ + +#define SCB_ICSR_ISRPENDING_Pos 22 /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12 /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0 /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL << SCB_ICSR_VECTACTIVE_Pos) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16 /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16 /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15 /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2 /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1 /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4 /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2 /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1 /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9 /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3 /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15 /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IO uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IO uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IO uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __I uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16 /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2 /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1 /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0 /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL << SysTick_CTRL_ENABLE_Pos) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0 /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL << SysTick_LOAD_RELOAD_Pos) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0 /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL << SysTick_VAL_CURRENT_Pos) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31 /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30 /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0 /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL << SysTick_CALIB_TENMS_Pos) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) + are only accessible over DAP and not via processor. Therefore + they are not covered by the Cortex-M0 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Cortex-M0 Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +/* Interrupt Priorities are WORD accessible only under ARMv6M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( (((uint32_t)(IRQn) ) & 0x03) * 8 ) +#define _SHP_IDX(IRQn) ( ((((uint32_t)(IRQn) & 0x0F)-8) >> 2) ) +#define _IP_IDX(IRQn) ( ((uint32_t)(IRQn) >> 2) ) + + +/** \brief Enable External Interrupt + + The function enables a device-specific interrupt in the NVIC interrupt controller. + + \param [in] IRQn External interrupt number. Value cannot be negative. + */ +__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn) +{ + NVIC->ISER[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); +} + + +/** \brief Disable External Interrupt + + The function disables a device-specific interrupt in the NVIC interrupt controller. + + \param [in] IRQn External interrupt number. Value cannot be negative. + */ +__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn) +{ + NVIC->ICER[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); +} + + +/** \brief Get Pending Interrupt + + The function reads the pending register in the NVIC and returns the pending bit + for the specified interrupt. + + \param [in] IRQn Interrupt number. + + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + */ +__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + return((uint32_t) ((NVIC->ISPR[0] & (1 << ((uint32_t)(IRQn) & 0x1F)))?1:0)); +} + + +/** \brief Set Pending Interrupt + + The function sets the pending bit of an external interrupt. + + \param [in] IRQn Interrupt number. Value cannot be negative. + */ +__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + NVIC->ISPR[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); +} + + +/** \brief Clear Pending Interrupt + + The function clears the pending bit of an external interrupt. + + \param [in] IRQn External interrupt number. Value cannot be negative. + */ +__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + NVIC->ICPR[0] = (1 << ((uint32_t)(IRQn) & 0x1F)); /* Clear pending interrupt */ +} + + +/** \brief Set Interrupt Priority + + The function sets the priority of an interrupt. + + \note The priority cannot be set for every core interrupt. + + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. 
+ */ +__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if(IRQn < 0) { + SCB->SHP[_SHP_IDX(IRQn)] = (SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFF << _BIT_SHIFT(IRQn))) | + (((priority << (8 - __NVIC_PRIO_BITS)) & 0xFF) << _BIT_SHIFT(IRQn)); } + else { + NVIC->IP[_IP_IDX(IRQn)] = (NVIC->IP[_IP_IDX(IRQn)] & ~(0xFF << _BIT_SHIFT(IRQn))) | + (((priority << (8 - __NVIC_PRIO_BITS)) & 0xFF) << _BIT_SHIFT(IRQn)); } +} + + +/** \brief Get Interrupt Priority + + The function reads the priority of an interrupt. The interrupt + number can be positive to specify an external (device specific) + interrupt, or negative to specify an internal (core) interrupt. + + + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented + priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn) +{ + + if(IRQn < 0) { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for Cortex-M0 system interrupts */ + else { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for device specific interrupts */ +} + + +/** \brief System Reset + + The function initiates a system reset request to reset the MCU. + */ +__STATIC_INLINE void NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FA << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + while(1); /* wait until reset */ +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if (__Vendor_SysTickConfig == 0) + +/** \brief System Tick Configuration + + The function initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + + \param [in] ticks Number of ticks between two interrupts. + + \return 0 Function succeeded. + \return 1 Function failed. + + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1) > SysTick_LOAD_RELOAD_Msk) return (1); /* Reload value impossible */ + + SysTick->LOAD = ticks - 1; /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1<<__NVIC_PRIO_BITS) - 1); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/cpu/cortex-m0_common/include/core_cm0plus.h b/cpu/cortex-m0_common/include/core_cm0plus.h index d948786fba..17e43984fc 100644 --- a/cpu/cortex-m0_common/include/core_cm0plus.h +++ b/cpu/cortex-m0_common/include/core_cm0plus.h @@ -1,35 +1,51 @@ /**************************************************************************//** * @file core_cm0plus.h * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File - * @version V3.01 - * @date 22. March 2012 + * @version V4.00 + * @date 22. August 2014 * * @note - * Copyright (C) 2009-2012 ARM Limited. All rights reserved. - * - * @par - * ARM Limited (ARM) is supplying this software for use with Cortex-M - * processor based microcontrollers. This file can be freely distributed - * within development tools that are supporting such ARM based processors. - * - * THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED - * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. - * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR - * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER. * ******************************************************************************/ +/* Copyright (c) 2009 - 2014 ARM LIMITED + + All rights reserved. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + - Neither the name of ARM nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + * + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ ---------------------------------------------------------------------------*/ + + #if defined ( __ICCARM__ ) #pragma system_include /* treat file as system include file for MISRA check */ #endif +#ifndef __CORE_CM0PLUS_H_GENERIC +#define __CORE_CM0PLUS_H_GENERIC + #ifdef __cplusplus extern "C" { #endif -#ifndef __CORE_CM0PLUS_H_GENERIC -#define __CORE_CM0PLUS_H_GENERIC - /** \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions CMSIS violates the following MISRA-C:2004 rules: @@ -52,8 +68,8 @@ */ /* CMSIS CM0P definitions */ -#define __CM0PLUS_CMSIS_VERSION_MAIN (0x03) /*!< [31:16] CMSIS HAL main version */ -#define __CM0PLUS_CMSIS_VERSION_SUB (0x01) /*!< [15:0] CMSIS HAL sub version */ +#define __CM0PLUS_CMSIS_VERSION_MAIN (0x04) /*!< [31:16] CMSIS HAL main version */ +#define __CM0PLUS_CMSIS_VERSION_SUB (0x00) /*!< [15:0] CMSIS HAL sub version */ #define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16) | \ __CM0PLUS_CMSIS_VERSION_SUB) /*!< CMSIS HAL version number */ @@ -65,14 +81,18 @@ #define __INLINE __inline /*!< inline keyword for ARM Compiler */ #define __STATIC_INLINE static __inline +#elif defined ( __GNUC__ ) + #define __ASM __asm /*!< asm keyword for GNU Compiler */ + #define __INLINE inline /*!< inline keyword for GNU Compiler */ + #define __STATIC_INLINE static inline + #elif defined ( __ICCARM__ ) #define __ASM __asm /*!< asm keyword for IAR Compiler */ #define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */ #define __STATIC_INLINE static inline -#elif defined ( __GNUC__ ) - #define __ASM __asm /*!< asm keyword for GNU Compiler */ - #define __INLINE inline /*!< inline keyword for GNU Compiler */ +#elif defined ( __TMS470__ ) + #define __ASM __asm /*!< asm keyword for TI CCS Compiler */ #define __STATIC_INLINE static inline #elif defined ( __TASKING__ ) @@ -80,9 +100,16 @@ #define __INLINE inline /*!< inline keyword for TASKING Compiler */ #define __STATIC_INLINE static inline +#elif defined ( __CSMC__ ) + #define __packed + #define __ASM _asm /*!< asm keyword for COSMIC Compiler */ + #define __INLINE inline /*use -pc99 on compile line !< inline keyword for COSMIC Compiler */ + #define __STATIC_INLINE static inline + #endif -/** __FPU_USED indicates whether an FPU is used or not. This core does not support an FPU at all +/** __FPU_USED indicates whether an FPU is used or not. 
+ This core does not support an FPU at all */ #define __FPU_USED 0 @@ -91,13 +118,18 @@ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + #elif defined ( __ICCARM__ ) #if defined __ARMVFP__ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif -#elif defined ( __GNUC__ ) - #if defined (__VFP_FP__) && !defined(__SOFTFP__) +#elif defined ( __TMS470__ ) + #if defined __TI__VFP_SUPPORT____ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -105,10 +137,11 @@ #if defined __FPU_VFP__ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif -#endif -#ifdef __cplusplus -} +#elif defined ( __CSMC__ ) /* Cosmic */ + #if ( __CSMC__ & 0x400) // FPU present for parser + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif #endif #include /* standard types definitions */ @@ -116,7 +149,7 @@ #include /* Core Function Access */ #ifdef __cplusplus -extern "C" { +} #endif #endif /* __CORE_CM0PLUS_H_GENERIC */ @@ -126,6 +159,10 @@ extern "C" { #ifndef __CORE_CM0PLUS_H_DEPENDANT #define __CORE_CM0PLUS_H_DEPENDANT +#ifdef __cplusplus + extern "C" { +#endif + /* check device defines and use defaults */ #if defined __CHECK_DEVICE_DEFINES #ifndef __CM0PLUS_REV @@ -368,8 +405,8 @@ typedef struct #if (__VTOR_PRESENT == 1) /* SCB Interrupt Control State Register Definitions */ -#define SCB_VTOR_TBLOFF_Pos 7 /*!< SCB VTOR: TBLOFF Position */ -#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#define SCB_VTOR_TBLOFF_Pos 8 /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ #endif /* SCB Application Interrupt and Reset Control Register Definitions */ @@ -457,7 +494,7 @@ typedef struct #define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ #define SysTick_CALIB_TENMS_Pos 0 /*!< SysTick CALIB: TENMS Position */ -#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL << SysTick_VAL_CURRENT_Pos) /*!< SysTick CALIB: TENMS Mask */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL << SysTick_CALIB_TENMS_Pos) /*!< SysTick CALIB: TENMS Mask */ /*@} end of group CMSIS_SysTick */ @@ -708,9 +745,9 @@ __STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn) { if(IRQn < 0) { - return((uint32_t)((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for Cortex-M0+ system interrupts */ + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for Cortex-M0 system interrupts */ else { - return((uint32_t)((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for device specific interrupts */ + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & 0xFF) >> (8 - __NVIC_PRIO_BITS))); } /* get priority for device specific interrupts */ } @@ -758,9 +795,9 @@ __STATIC_INLINE void NVIC_SystemReset(void) */ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) { - if (ticks > SysTick_LOAD_RELOAD_Msk) return (1); /* Reload value impossible */ + if ((ticks - 1) > SysTick_LOAD_RELOAD_Msk) return (1); /* 
Reload value impossible */ - SysTick->LOAD = (ticks & SysTick_LOAD_RELOAD_Msk) - 1; /* set reload register */ + SysTick->LOAD = ticks - 1; /* set reload register */ NVIC_SetPriority (SysTick_IRQn, (1<<__NVIC_PRIO_BITS) - 1); /* set Priority for Systick Interrupt */ SysTick->VAL = 0; /* Load the SysTick Counter Value */ SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | @@ -776,10 +813,10 @@ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) -#endif /* __CORE_CM0PLUS_H_DEPENDANT */ - -#endif /* __CMSIS_GENERIC */ - #ifdef __cplusplus } #endif + +#endif /* __CORE_CM0PLUS_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/cpu/cortex-m0_common/include/core_cmFunc.h b/cpu/cortex-m0_common/include/core_cmFunc.h index 4da273e71a..01089f1333 100644 --- a/cpu/cortex-m0_common/include/core_cmFunc.h +++ b/cpu/cortex-m0_common/include/core_cmFunc.h @@ -1,34 +1,46 @@ /**************************************************************************//** * @file core_cmFunc.h * @brief CMSIS Cortex-M Core Function Access Header File - * @version V2.10 - * @date 26. July 2011 + * @version V4.00 + * @date 28. August 2014 * * @note - * Copyright (C) 2009-2011 ARM Limited. All rights reserved. - * - * @par - * ARM Limited (ARM) is supplying this software for use with Cortex-M - * processor based microcontrollers. This file can be freely distributed - * within development tools that are supporting such ARM based processors. - * - * THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED - * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. - * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR - * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER. * ******************************************************************************/ +/* Copyright (c) 2009 - 2014 ARM LIMITED + + All rights reserved. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + - Neither the name of ARM nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + * + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
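A sketch of the priority encoding that the NVIC_GetPriority() fix above deals with: only the upper __NVIC_PRIO_BITS bits of each 8-bit priority field are implemented, so with the 2-bit default valid priorities are 0..3. The device header providing the NVIC functions is assumed to be included; the function name is made up.

#include <stdint.h>

void systick_lowest_priority(void)
{
    /* lowest priority, i.e. 3 when __NVIC_PRIO_BITS == 2 */
    NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1);

    uint32_t prio = NVIC_GetPriority(SysTick_IRQn);   /* reads back the same value */
    (void)prio;
}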
+ ---------------------------------------------------------------------------*/ + #ifndef __CORE_CMFUNC_H #define __CORE_CMFUNC_H -#ifdef __cplusplus -extern "C" { -#endif /* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface +/** \ingroup CMSIS_Core_FunctionInterface \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ */ @@ -49,9 +61,9 @@ extern "C" { \return Control Register value */ -static __INLINE uint32_t __get_CONTROL(void) +__STATIC_INLINE uint32_t __get_CONTROL(void) { - register uint32_t __regControl asm("control"); + register uint32_t __regControl __ASM("control"); return(__regControl); } @@ -62,22 +74,22 @@ static __INLINE uint32_t __get_CONTROL(void) \param [in] control Control Register value to set */ -static __INLINE void __set_CONTROL(uint32_t control) +__STATIC_INLINE void __set_CONTROL(uint32_t control) { - register uint32_t __regControl asm("control"); + register uint32_t __regControl __ASM("control"); __regControl = control; } -/** \brief Get ISPR Register +/** \brief Get IPSR Register - This function returns the content of the ISPR Register. + This function returns the content of the IPSR Register. - \return ISPR Register value + \return IPSR Register value */ -static __INLINE uint32_t __get_IPSR(void) +__STATIC_INLINE uint32_t __get_IPSR(void) { - register uint32_t __regIPSR asm("ipsr"); + register uint32_t __regIPSR __ASM("ipsr"); return(__regIPSR); } @@ -88,9 +100,9 @@ static __INLINE uint32_t __get_IPSR(void) \return APSR Register value */ -static __INLINE uint32_t __get_APSR(void) +__STATIC_INLINE uint32_t __get_APSR(void) { - register uint32_t __regAPSR asm("apsr"); + register uint32_t __regAPSR __ASM("apsr"); return(__regAPSR); } @@ -101,9 +113,9 @@ static __INLINE uint32_t __get_APSR(void) \return xPSR Register value */ -static __INLINE uint32_t __get_xPSR(void) +__STATIC_INLINE uint32_t __get_xPSR(void) { - register uint32_t __regXPSR asm("xpsr"); + register uint32_t __regXPSR __ASM("xpsr"); return(__regXPSR); } @@ -114,9 +126,9 @@ static __INLINE uint32_t __get_xPSR(void) \return PSP Register value */ -static __INLINE uint32_t __get_PSP(void) +__STATIC_INLINE uint32_t __get_PSP(void) { - register uint32_t __regProcessStackPointer asm("psp"); + register uint32_t __regProcessStackPointer __ASM("psp"); return(__regProcessStackPointer); } @@ -127,9 +139,9 @@ static __INLINE uint32_t __get_PSP(void) \param [in] topOfProcStack Process Stack Pointer value to set */ -static __INLINE void __set_PSP(uint32_t topOfProcStack) +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) { - register uint32_t __regProcessStackPointer asm("psp"); + register uint32_t __regProcessStackPointer __ASM("psp"); __regProcessStackPointer = topOfProcStack; } @@ -140,9 +152,9 @@ static __INLINE void __set_PSP(uint32_t topOfProcStack) \return MSP Register value */ -static __INLINE uint32_t __get_MSP(void) +__STATIC_INLINE uint32_t __get_MSP(void) { - register uint32_t __regMainStackPointer asm("msp"); + register uint32_t __regMainStackPointer __ASM("msp"); return(__regMainStackPointer); } @@ -153,9 +165,9 @@ static __INLINE uint32_t __get_MSP(void) \param [in] topOfMainStack Main Stack Pointer value to set */ -static __INLINE void __set_MSP(uint32_t topOfMainStack) +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) { - register uint32_t __regMainStackPointer asm("msp"); + register uint32_t __regMainStackPointer __ASM("msp"); __regMainStackPointer = topOfMainStack; } @@ -166,9 +178,9 @@ 
static __INLINE void __set_MSP(uint32_t topOfMainStack) \return Priority Mask value */ -static __INLINE uint32_t __get_PRIMASK(void) +__STATIC_INLINE uint32_t __get_PRIMASK(void) { - register uint32_t __regPriMask asm("primask"); + register uint32_t __regPriMask __ASM("primask"); return(__regPriMask); } @@ -179,14 +191,14 @@ static __INLINE uint32_t __get_PRIMASK(void) \param [in] priMask Priority Mask */ -static __INLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) { - register uint32_t __regPriMask asm("primask"); + register uint32_t __regPriMask __ASM("primask"); __regPriMask = (priMask); } - -#if (__CORTEX_M >= 0x03) + +#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) /** \brief Enable FIQ @@ -210,9 +222,9 @@ static __INLINE void __set_PRIMASK(uint32_t priMask) \return Base Priority register value */ -static __INLINE uint32_t __get_BASEPRI(void) +__STATIC_INLINE uint32_t __get_BASEPRI(void) { - register uint32_t __regBasePri asm("basepri"); + register uint32_t __regBasePri __ASM("basepri"); return(__regBasePri); } @@ -223,12 +235,12 @@ static __INLINE uint32_t __get_BASEPRI(void) \param [in] basePri Base Priority value to set */ -static __INLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) { - register uint32_t __regBasePri asm("basepri"); + register uint32_t __regBasePri __ASM("basepri"); __regBasePri = (basePri & 0xff); } - + /** \brief Get Fault Mask @@ -236,9 +248,9 @@ static __INLINE void __set_BASEPRI(uint32_t basePri) \return Fault Mask register value */ -static __INLINE uint32_t __get_FAULTMASK(void) +__STATIC_INLINE uint32_t __get_FAULTMASK(void) { - register uint32_t __regFaultMask asm("faultmask"); + register uint32_t __regFaultMask __ASM("faultmask"); return(__regFaultMask); } @@ -249,16 +261,16 @@ static __INLINE uint32_t __get_FAULTMASK(void) \param [in] faultMask Fault Mask value to set */ -static __INLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) { - register uint32_t __regFaultMask asm("faultmask"); + register uint32_t __regFaultMask __ASM("faultmask"); __regFaultMask = (faultMask & (uint32_t)1); } -#endif /* (__CORTEX_M >= 0x03) */ +#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */ -#if (__CORTEX_M == 0x04) +#if (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) /** \brief Get FPSCR @@ -266,10 +278,10 @@ static __INLINE void __set_FAULTMASK(uint32_t faultMask) \return Floating Point Status/Control register value */ -static __INLINE uint32_t __get_FPSCR(void) +__STATIC_INLINE uint32_t __get_FPSCR(void) { #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) - register uint32_t __regfpscr asm("fpscr"); + register uint32_t __regfpscr __ASM("fpscr"); return(__regfpscr); #else return(0); @@ -283,22 +295,17 @@ static __INLINE uint32_t __get_FPSCR(void) \param [in] fpscr Floating Point Status/Control value to set */ -static __INLINE void __set_FPSCR(uint32_t fpscr) +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) { #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) - register uint32_t __regfpscr asm("fpscr"); + register uint32_t __regfpscr __ASM("fpscr"); __regfpscr = (fpscr); #endif } -#endif /* (__CORTEX_M == 0x04) */ +#endif /* (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) */ -#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/ -/* IAR iccarm specific functions */ - -#include - #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/ /* GNU gcc specific functions */ @@ -307,9 
+314,9 @@ static __INLINE void __set_FPSCR(uint32_t fpscr) This function enables IRQ interrupts by clearing the I-bit in the CPSR. Can only be executed in Privileged modes. */ -__attribute__( ( always_inline ) ) static __INLINE void __enable_irq(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void) { - asm volatile ("cpsie i"); + __ASM volatile ("cpsie i" : : : "memory"); } @@ -318,9 +325,9 @@ __attribute__( ( always_inline ) ) static __INLINE void __enable_irq(void) This function disables IRQ interrupts by setting the I-bit in the CPSR. Can only be executed in Privileged modes. */ -__attribute__( ( always_inline ) ) static __INLINE void __disable_irq(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void) { - asm volatile ("cpsid i"); + __ASM volatile ("cpsid i" : : : "memory"); } @@ -330,11 +337,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __disable_irq(void) \return Control Register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_CONTROL(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void) { uint32_t result; - asm volatile ("MRS %0, control" : "=r" (result) ); + __ASM volatile ("MRS %0, control" : "=r" (result) ); return(result); } @@ -345,23 +352,23 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_CONTROL(void) \param [in] control Control Register value to set */ -__attribute__( ( always_inline ) ) static __INLINE void __set_CONTROL(uint32_t control) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control) { - asm volatile ("MSR control, %0" : : "r" (control) ); + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); } -/** \brief Get ISPR Register +/** \brief Get IPSR Register - This function returns the content of the ISPR Register. + This function returns the content of the IPSR Register. 
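Illustrative sketch: the classic PRIMASK save/restore critical section built from the __get_PRIMASK()/__disable_irq()/__set_PRIMASK() accessors above. The function names are chosen for the example only (RIOT ships its own irq API).

#include <stdint.h>

static inline uint32_t critical_enter(void)
{
    uint32_t mask = __get_PRIMASK();    /* remember whether IRQs were already masked */
    __disable_irq();                    /* "cpsid i": set PRIMASK */
    return mask;
}

static inline void critical_exit(uint32_t mask)
{
    __set_PRIMASK(mask);                /* restore, re-enabling only if enabled before */
}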
- \return ISPR Register value + \return IPSR Register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_IPSR(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void) { uint32_t result; - asm volatile ("MRS %0, ipsr" : "=r" (result) ); + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); return(result); } @@ -372,11 +379,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_IPSR(void) \return APSR Register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_APSR(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void) { uint32_t result; - asm volatile ("MRS %0, apsr" : "=r" (result) ); + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); return(result); } @@ -387,11 +394,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_APSR(void) \return xPSR Register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_xPSR(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void) { uint32_t result; - asm volatile ("MRS %0, xpsr" : "=r" (result) ); + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); return(result); } @@ -402,14 +409,14 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_xPSR(void) \return PSP Register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PSP(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void) { register uint32_t result; - asm volatile ("MRS %0, psp\n" : "=r" (result) ); + __ASM volatile ("MRS %0, psp\n" : "=r" (result) ); return(result); } - + /** \brief Set Process Stack Pointer @@ -417,9 +424,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PSP(void) \param [in] topOfProcStack Process Stack Pointer value to set */ -__attribute__( ( always_inline ) ) static __INLINE void __set_PSP(uint32_t topOfProcStack) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) { - asm volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) ); + __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp"); } @@ -429,14 +436,14 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_PSP(uint32_t topOf \return MSP Register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_MSP(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void) { register uint32_t result; - asm volatile ("MRS %0, msp\n" : "=r" (result) ); + __ASM volatile ("MRS %0, msp\n" : "=r" (result) ); return(result); } - + /** \brief Set Main Stack Pointer @@ -444,9 +451,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_MSP(void) \param [in] topOfMainStack Main Stack Pointer value to set */ -__attribute__( ( always_inline ) ) static __INLINE void __set_MSP(uint32_t topOfMainStack) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) { - asm volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) ); + __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp"); } @@ -456,11 +463,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_MSP(uint32_t topOf \return Priority Mask value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PRIMASK(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void) { uint32_t result; - asm volatile ("MRS %0, primask" : "=r" (result) ); + __ASM volatile ("MRS %0, primask" : "=r" (result) ); return(result); } @@ 
-471,11 +478,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_PRIMASK(void) \param [in] priMask Priority Mask */ -__attribute__( ( always_inline ) ) static __INLINE void __set_PRIMASK(uint32_t priMask) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask) { - asm volatile ("MSR primask, %0" : : "r" (priMask) ); + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } - + #if (__CORTEX_M >= 0x03) @@ -484,9 +491,9 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_PRIMASK(uint32_t p This function enables FIQ interrupts by clearing the F-bit in the CPSR. Can only be executed in Privileged modes. */ -__attribute__( ( always_inline ) ) static __INLINE void __enable_fault_irq(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void) { - asm volatile ("cpsie f"); + __ASM volatile ("cpsie f" : : : "memory"); } @@ -495,9 +502,9 @@ __attribute__( ( always_inline ) ) static __INLINE void __enable_fault_irq(void) This function disables FIQ interrupts by setting the F-bit in the CPSR. Can only be executed in Privileged modes. */ -__attribute__( ( always_inline ) ) static __INLINE void __disable_fault_irq(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void) { - asm volatile ("cpsid f"); + __ASM volatile ("cpsid f" : : : "memory"); } @@ -507,11 +514,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __disable_fault_irq(void \return Base Priority register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_BASEPRI(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void) { uint32_t result; - - asm volatile ("MRS %0, basepri_max" : "=r" (result) ); + + __ASM volatile ("MRS %0, basepri_max" : "=r" (result) ); return(result); } @@ -522,9 +529,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_BASEPRI(void) \param [in] basePri Base Priority value to set */ -__attribute__( ( always_inline ) ) static __INLINE void __set_BASEPRI(uint32_t value) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value) { - asm volatile ("MSR basepri, %0" : : "r" (value) ); + __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory"); } @@ -534,11 +541,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_BASEPRI(uint32_t v \return Fault Mask register value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FAULTMASK(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void) { uint32_t result; - - asm volatile ("MRS %0, faultmask" : "=r" (result) ); + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); return(result); } @@ -549,15 +556,15 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FAULTMASK(void \param [in] faultMask Fault Mask value to set */ -__attribute__( ( always_inline ) ) static __INLINE void __set_FAULTMASK(uint32_t faultMask) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) { - asm volatile ("MSR faultmask, %0" : : "r" (faultMask) ); + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } #endif /* (__CORTEX_M >= 0x03) */ -#if (__CORTEX_M == 0x04) +#if (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) /** \brief Get FPSCR @@ -565,12 +572,15 @@ __attribute__( ( always_inline ) ) static __INLINE void __set_FAULTMASK(uint32_t \return Floating Point Status/Control register value */ -__attribute__( ( always_inline ) ) 
static __INLINE uint32_t __get_FPSCR(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void) { #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) uint32_t result; - asm volatile ("VMRS %0, fpscr" : "=r" (result) ); + /* Empty asm statement works as a scheduling barrier */ + __ASM volatile (""); + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + __ASM volatile (""); return(result); #else return(0); @@ -584,31 +594,44 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __get_FPSCR(void) \param [in] fpscr Floating Point Status/Control value to set */ -__attribute__( ( always_inline ) ) static __INLINE void __set_FPSCR(uint32_t fpscr) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr) { #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) - asm volatile ("VMSR fpscr, %0" : : "r" (fpscr) ); + /* Empty asm statement works as a scheduling barrier */ + __ASM volatile (""); + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc"); + __ASM volatile (""); #endif } -#endif /* (__CORTEX_M == 0x04) */ +#endif /* (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) */ + + +#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/ +/* IAR iccarm specific functions */ +#include + + +#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/ +/* TI CCS specific functions */ +#include #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/ /* TASKING carm specific functions */ - /* * The CMSIS functions have been implemented as intrinsics in the compiler. - * Please use "carm -?i" to get an up to date list of all instrinsics, + * Please use "carm -?i" to get an up to date list of all intrinsics, * Including the CMSIS ones. */ + +#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/ +/* Cosmic specific functions */ +#include + #endif /*@} end of CMSIS_Core_RegAccFunctions */ -#ifdef __cplusplus -} -#endif - #endif /* __CORE_CMFUNC_H */ diff --git a/cpu/cortex-m0_common/include/core_cmInstr.h b/cpu/cortex-m0_common/include/core_cmInstr.h index 267b43f89e..d14110b2ab 100644 --- a/cpu/cortex-m0_common/include/core_cmInstr.h +++ b/cpu/cortex-m0_common/include/core_cmInstr.h @@ -1,31 +1,43 @@ /**************************************************************************//** * @file core_cmInstr.h * @brief CMSIS Cortex-M Core Instruction Access Header File - * @version V2.10 - * @date 19. July 2011 + * @version V4.00 + * @date 28. August 2014 * * @note - * Copyright (C) 2009-2011 ARM Limited. All rights reserved. - * - * @par - * ARM Limited (ARM) is supplying this software for use with Cortex-M - * processor based microcontrollers. This file can be freely distributed - * within development tools that are supporting such ARM based processors. - * - * THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED - * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. - * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR - * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER. * ******************************************************************************/ +/* Copyright (c) 2009 - 2014 ARM LIMITED + + All rights reserved. 
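Illustrative only: the PSP/CONTROL accessors from core_cmFunc.h above are what an RTOS uses to move thread mode onto a separate process stack. The sketch below shows that first step under made-up names; GCC attribute syntax is assumed.

#include <stdint.h>

#define THREAD_STACK_WORDS 256
static uint32_t thread_stack[THREAD_STACK_WORDS] __attribute__((aligned(8)));

void switch_to_psp(void)
{
    /* full-descending stack: PSP starts just past the top of the array */
    __set_PSP((uint32_t)&thread_stack[THREAD_STACK_WORDS]);
    __set_CONTROL(__get_CONTROL() | 0x2u);   /* CONTROL.SPSEL = 1: thread mode uses PSP */
    __ISB();                                 /* required after writing CONTROL */
}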
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + - Neither the name of ARM nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + * + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + ---------------------------------------------------------------------------*/ + #ifndef __CORE_CMINSTR_H #define __CORE_CMINSTR_H -#ifdef __cplusplus -extern "C" { -#endif /* ########################## Core Instruction Access ######################### */ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface @@ -73,8 +85,8 @@ extern "C" { /** \brief Instruction Synchronization Barrier - Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or + Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ #define __ISB() __isb(0xF) @@ -82,7 +94,7 @@ extern "C" { /** \brief Data Synchronization Barrier - This function acts as a special kind of Data Memory Barrier. + This function acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ #define __DSB() __dsb(0xF) @@ -90,7 +102,7 @@ extern "C" { /** \brief Data Memory Barrier - This function ensures the apparent order of the explicit memory operations before + This function ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. 
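A small sketch of the kind of sequence the __DMB() description above is about: two device writes where the second must not be observed before the first. The register addresses and names are hypothetical.

#include <stdint.h>

#define DEV_DATA  (*(volatile uint32_t *)0x40000000u)   /* hypothetical data register */
#define DEV_START (*(volatile uint32_t *)0x40000004u)   /* hypothetical control register */

void dev_kick(uint32_t value)
{
    DEV_DATA  = value;   /* program the transfer */
    __DMB();             /* order the data write before the start bit */
    DEV_START = 1u;      /* trigger */
}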
*/ #define __DMB() __dmb(0xF) @@ -113,12 +125,13 @@ extern "C" { \param [in] value Value to reverse \return Reversed value */ -static __INLINE __ASM uint32_t __REV16(uint32_t value) +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) { rev16 r0, r0 bx lr } - +#endif /** \brief Reverse byte order in signed short value @@ -127,14 +140,38 @@ static __INLINE __ASM uint32_t __REV16(uint32_t value) \param [in] value Value to reverse \return Reversed value */ -static __INLINE __ASM int32_t __REVSH(int32_t value) +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value) { revsh r0, r0 bx lr } +#endif -#if (__CORTEX_M >= 0x03) +/** \brief Rotate Right in unsigned value (32 bit) + + This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + + \param [in] value Value to rotate + \param [in] value Number of Bits to rotate + \return Rotated value + */ +#define __ROR __ror + + +/** \brief Breakpoint + + This function causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __breakpoint(value) + + +#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) /** \brief Reverse bit order of value @@ -148,7 +185,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) /** \brief LDR Exclusive (8 bit) - This function performs a exclusive LDR command for 8 bit value. + This function executes a exclusive LDR instruction for 8 bit value. \param [in] ptr Pointer to data \return value of type uint8_t at (*ptr) @@ -158,7 +195,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) /** \brief LDR Exclusive (16 bit) - This function performs a exclusive LDR command for 16 bit values. + This function executes a exclusive LDR instruction for 16 bit values. \param [in] ptr Pointer to data \return value of type uint16_t at (*ptr) @@ -168,7 +205,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) /** \brief LDR Exclusive (32 bit) - This function performs a exclusive LDR command for 32 bit values. + This function executes a exclusive LDR instruction for 32 bit values. \param [in] ptr Pointer to data \return value of type uint32_t at (*ptr) @@ -178,7 +215,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) /** \brief STR Exclusive (8 bit) - This function performs a exclusive STR command for 8 bit values. + This function executes a exclusive STR instruction for 8 bit values. \param [in] value Value to store \param [in] ptr Pointer to location @@ -190,7 +227,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) /** \brief STR Exclusive (16 bit) - This function performs a exclusive STR command for 16 bit values. + This function executes a exclusive STR instruction for 16 bit values. \param [in] value Value to store \param [in] ptr Pointer to location @@ -202,7 +239,7 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) /** \brief STR Exclusive (32 bit) - This function performs a exclusive STR command for 32 bit values. + This function executes a exclusive STR instruction for 32 bit values. 
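Illustrative sketch of the retry loop the exclusive-access intrinsics above are designed for. The header only provides them for __CORTEX_M >= 0x03, so they are not available on the Cortex-M0/M0+ targets of this port.

#include <stdint.h>

uint32_t atomic_add_u32(volatile uint32_t *addr, uint32_t val)
{
    uint32_t newval;

    do {
        newval = __LDREXW(addr) + val;      /* load-exclusive and compute */
    } while (__STREXW(newval, addr) != 0);  /* 1 means the exclusive store failed: retry */

    return newval;
}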
\param [in] value Value to store \param [in] ptr Pointer to location @@ -249,26 +286,106 @@ static __INLINE __ASM int32_t __REVSH(int32_t value) \param [in] value Value to count the leading zeros \return number of leading zeros in value */ -#define __CLZ __clz - -#endif /* (__CORTEX_M >= 0x03) */ +#define __CLZ __clz +/** \brief Rotate Right with Extend (32 bit) -#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/ -/* IAR iccarm specific functions */ + This function moves each bit of a bitstring right by one bit. The carry input is shifted in at the left end of the bitstring. -#include + \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** \brief LDRT Unprivileged (8 bit) + + This function executes a Unprivileged LDRT instruction for 8 bit value. + + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** \brief LDRT Unprivileged (16 bit) + + This function executes a Unprivileged LDRT instruction for 16 bit values. + + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** \brief LDRT Unprivileged (32 bit) + + This function executes a Unprivileged LDRT instruction for 32 bit values. + + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** \brief STRT Unprivileged (8 bit) + + This function executes a Unprivileged STRT instruction for 8 bit values. + + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** \brief STRT Unprivileged (16 bit) + + This function executes a Unprivileged STRT instruction for 16 bit values. + + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** \brief STRT Unprivileged (32 bit) + + This function executes a Unprivileged STRT instruction for 32 bit values. + + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */ #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/ /* GNU gcc specific functions */ +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constrant "l" + * Otherwise, use general registers, specified by constrant "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + /** \brief No Operation No Operation does nothing. This instruction can be used for code alignment purposes. */ -__attribute__( ( always_inline ) ) static __INLINE void __NOP(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __NOP(void) { __ASM volatile ("nop"); } @@ -279,7 +396,7 @@ __attribute__( ( always_inline ) ) static __INLINE void __NOP(void) Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. 
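Illustrative only: the usual idle loop built on the __WFI() hint described above. The flag is assumed to be set from an interrupt handler.

#include <stdint.h>

static volatile uint32_t work_pending;   /* set from ISRs (assumption) */

void idle_until_work(void)
{
    while (work_pending == 0) {
        __WFI();    /* sleep; any enabled interrupt wakes the core and the loop re-checks */
    }
}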
*/ -__attribute__( ( always_inline ) ) static __INLINE void __WFI(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __WFI(void) { __ASM volatile ("wfi"); } @@ -290,7 +407,7 @@ __attribute__( ( always_inline ) ) static __INLINE void __WFI(void) Wait For Event is a hint instruction that permits the processor to enter a low-power state until one of a number of events occurs. */ -__attribute__( ( always_inline ) ) static __INLINE void __WFE(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __WFE(void) { __ASM volatile ("wfe"); } @@ -300,7 +417,7 @@ __attribute__( ( always_inline ) ) static __INLINE void __WFE(void) Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__attribute__( ( always_inline ) ) static __INLINE void __SEV(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __SEV(void) { __ASM volatile ("sev"); } @@ -308,11 +425,11 @@ __attribute__( ( always_inline ) ) static __INLINE void __SEV(void) /** \brief Instruction Synchronization Barrier - Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or + Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ -__attribute__( ( always_inline ) ) static __INLINE void __ISB(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __ISB(void) { __ASM volatile ("isb"); } @@ -320,10 +437,10 @@ __attribute__( ( always_inline ) ) static __INLINE void __ISB(void) /** \brief Data Synchronization Barrier - This function acts as a special kind of Data Memory Barrier. + This function acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ -__attribute__( ( always_inline ) ) static __INLINE void __DSB(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __DSB(void) { __ASM volatile ("dsb"); } @@ -331,10 +448,10 @@ __attribute__( ( always_inline ) ) static __INLINE void __DSB(void) /** \brief Data Memory Barrier - This function ensures the apparent order of the explicit memory operations before + This function ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. 
*/ -__attribute__( ( always_inline ) ) static __INLINE void __DMB(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __DMB(void) { __ASM volatile ("dmb"); } @@ -347,12 +464,16 @@ __attribute__( ( always_inline ) ) static __INLINE void __DMB(void) \param [in] value Value to reverse \return Reversed value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __REV(uint32_t value) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV(uint32_t value) { +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else uint32_t result; - - __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) ); + + __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); +#endif } @@ -363,11 +484,11 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __REV(uint32_t value \param [in] value Value to reverse \return Reversed value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __REV16(uint32_t value) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV16(uint32_t value) { uint32_t result; - - __ASM volatile ("rev16 %0, %1" : "=r" (result) : "r" (value) ); + + __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } @@ -379,16 +500,45 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __REV16(uint32_t val \param [in] value Value to reverse \return Reversed value */ -__attribute__( ( always_inline ) ) static __INLINE int32_t __REVSH(int32_t value) +__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __REVSH(int32_t value) { +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (short)__builtin_bswap16(value); +#else uint32_t result; - - __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) ); + + __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); +#endif } -#if (__CORTEX_M >= 0x03) +/** \brief Rotate Right in unsigned value (32 bit) + + This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + + \param [in] value Value to rotate + \param [in] value Number of Bits to rotate + \return Rotated value + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + return (op1 >> op2) | (op1 << (32 - op2)); +} + + +/** \brief Breakpoint + + This function causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. 
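For illustration: the usual application of the __REV()/__REV16() intrinsics above on a little-endian Cortex-M is byte-order conversion. The function names are made up (they are not the POSIX htonl/htons).

#include <stdint.h>

uint32_t to_be32(uint32_t host32)
{
    return __REV(host32);                    /* 0x11223344 -> 0x44332211 */
}

uint16_t to_be16(uint16_t host16)
{
    return (uint16_t)__REV16(host16);        /* swaps the bytes of the low halfword */
}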
+ */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) /** \brief Reverse bit order of value @@ -397,10 +547,10 @@ __attribute__( ( always_inline ) ) static __INLINE int32_t __REVSH(int32_t value \param [in] value Value to reverse \return Reversed value */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __RBIT(uint32_t value) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RBIT(uint32_t value) { uint32_t result; - + __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) ); return(result); } @@ -408,102 +558,116 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __RBIT(uint32_t valu /** \brief LDR Exclusive (8 bit) - This function performs a exclusive LDR command for 8 bit value. + This function executes a exclusive LDR instruction for 8 bit value. \param [in] ptr Pointer to data \return value of type uint8_t at (*ptr) */ -__attribute__( ( always_inline ) ) static __INLINE uint8_t __LDREXB(volatile uint8_t *addr) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr) { - uint8_t result; - - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) ); - return(result); + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ } /** \brief LDR Exclusive (16 bit) - This function performs a exclusive LDR command for 16 bit values. + This function executes a exclusive LDR instruction for 16 bit values. \param [in] ptr Pointer to data \return value of type uint16_t at (*ptr) */ -__attribute__( ( always_inline ) ) static __INLINE uint16_t __LDREXH(volatile uint16_t *addr) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr) { - uint16_t result; - - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) ); - return(result); + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ } /** \brief LDR Exclusive (32 bit) - This function performs a exclusive LDR command for 32 bit values. + This function executes a exclusive LDR instruction for 32 bit values. \param [in] ptr Pointer to data \return value of type uint32_t at (*ptr) */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __LDREXW(volatile uint32_t *addr) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr) { uint32_t result; - - __ASM volatile ("ldrex %0, [%1]" : "=r" (result) : "r" (addr) ); + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); return(result); } /** \brief STR Exclusive (8 bit) - This function performs a exclusive STR command for 8 bit values. + This function executes a exclusive STR instruction for 8 bit values. 
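Illustrative sketch of the __BKPT() macro defined above used in a debug assert; without a debugger attached the instruction escalates to a fault, so this is a debug-build construct. The helper name is made up.

static inline void debug_assert(int cond)
{
    if (!cond) {
        __BKPT(0);   /* halt here under a debugger */
    }
}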
\param [in] value Value to store \param [in] ptr Pointer to location \return 0 Function succeeded \return 1 Function failed */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) { uint32_t result; - - __ASM volatile ("strexb %0, %2, [%1]" : "=r" (result) : "r" (addr), "r" (value) ); + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); return(result); } /** \brief STR Exclusive (16 bit) - This function performs a exclusive STR command for 16 bit values. + This function executes a exclusive STR instruction for 16 bit values. \param [in] value Value to store \param [in] ptr Pointer to location \return 0 Function succeeded \return 1 Function failed */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) { uint32_t result; - - __ASM volatile ("strexh %0, %2, [%1]" : "=r" (result) : "r" (addr), "r" (value) ); + + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); return(result); } /** \brief STR Exclusive (32 bit) - This function performs a exclusive STR command for 32 bit values. + This function executes a exclusive STR instruction for 32 bit values. \param [in] value Value to store \param [in] ptr Pointer to location \return 0 Function succeeded \return 1 Function failed */ -__attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) { uint32_t result; - - __ASM volatile ("strex %0, %2, [%1]" : "=r" (result) : "r" (addr), "r" (value) ); + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); return(result); } @@ -513,9 +677,9 @@ __attribute__( ( always_inline ) ) static __INLINE uint32_t __STREXW(uint32_t va This function removes the exclusive lock which is created by LDREX. */ -__attribute__( ( always_inline ) ) static __INLINE void __CLREX(void) +__attribute__( ( always_inline ) ) __STATIC_INLINE void __CLREX(void) { - __ASM volatile ("clrex"); + __ASM volatile ("clrex" ::: "memory"); } @@ -558,34 +722,159 @@ __attribute__( ( always_inline ) ) static __INLINE void __CLREX(void) \param [in] value Value to count the leading zeros \return number of leading zeros in value */ -__attribute__( ( always_inline ) ) static __INLINE uint8_t __CLZ(uint32_t value) +__attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __CLZ(uint32_t value) { - uint8_t result; - + uint32_t result; + __ASM volatile ("clz %0, %1" : "=r" (result) : "r" (value) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** \brief Rotate Right with Extend (32 bit) + + This function moves each bit of a bitstring right by one bit. The carry input is shifted in at the left end of the bitstring. 
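Illustrative only: __CLZ() above (ARMv7-M and up, per the surrounding guard) gives a constant-time highest-set-bit lookup, e.g. for a priority bitmap. The function name is made up.

#include <stdint.h>

int highest_bit(uint32_t bitmap)
{
    if (bitmap == 0u) {
        return -1;                       /* no bit set */
    }
    return 31 - (int)__CLZ(bitmap);      /* __CLZ(1) == 31, __CLZ(0x80000000) == 0 */
}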
+ + \param [in] value Value to rotate + \return Rotated value + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#endif /* (__CORTEX_M >= 0x03) */ + +/** \brief LDRT Unprivileged (8 bit) + + This function executes a Unprivileged LDRT instruction for 8 bit value. + + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} +/** \brief LDRT Unprivileged (16 bit) + + This function executes a Unprivileged LDRT instruction for 16 bit values. + + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** \brief LDRT Unprivileged (32 bit) + + This function executes a Unprivileged LDRT instruction for 32 bit values. + + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); +} + + +/** \brief STRT Unprivileged (8 bit) + + This function executes a Unprivileged STRT instruction for 8 bit values. + + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) ); +} + + +/** \brief STRT Unprivileged (16 bit) + + This function executes a Unprivileged STRT instruction for 16 bit values. + + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) ); +} + + +/** \brief STRT Unprivileged (32 bit) + + This function executes a Unprivileged STRT instruction for 32 bit values. 
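A sketch of what the unprivileged load/store intrinsics above are for (again ARMv7-M and up only): a privileged service reading a caller-supplied buffer with LDRBT so the MPU checks the unprivileged caller's access rights. Names are made up.

#include <stddef.h>
#include <stdint.h>

void copy_from_user(uint8_t *dst, const uint8_t *usr_src, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        dst[i] = __LDRBT((volatile uint8_t *)&usr_src[i]);   /* unprivileged byte load */
    }
}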
+ + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__attribute__( ( always_inline ) ) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) ); +} + +#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */ + + +#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/ +/* IAR iccarm specific functions */ +#include + + +#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/ +/* TI CCS specific functions */ +#include #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/ /* TASKING carm specific functions */ - /* * The CMSIS functions have been implemented as intrinsics in the compiler. * Please use "carm -?i" to get an up to date list of all intrinsics, * Including the CMSIS ones. */ + +#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/ +/* Cosmic specific functions */ +#include + #endif /*@}*/ /* end of group CMSIS_Core_InstructionInterface */ -#ifdef __cplusplus -} -#endif - #endif /* __CORE_CMINSTR_H */