mirror of
https://github.com/RIOT-OS/RIOT.git
synced 2024-12-29 04:50:03 +01:00
5f355e7210
Previously the compiler was allowed to reorder access to the interrupt control registers in regard to memory access not marked as `volatile` (at least some people - most notably some compiler developers - read the C standard this way). In practice this did not happen as irq_disable(), irq_restore(), irq_enable() are part of a separate compilation unit: Calls to external functions unknown to the compiler are treated as if they were memory barriers. But if link time optimization (LTO) is enabled, this no longer would work: The compiler could inline the code accessing the interrupt control registers and reorder the memory accesses wrapped in irq_disable() and irq_restore() outside of their protection. This commit adds the "memory" clobber to the inline assembly accessing the interrupt control registers. This makes those accesses explicit compiler memory barriers. The machine code generated without LTO enabled should not differ in any way by this commit. But the use of irq_*() should now be safe with LTO.
94 lines
1.8 KiB
C
94 lines
1.8 KiB
C
/* Copyright (C) 2005, 2006, 2007, 2008 by Thomas Hillebrandt and Heiko Will
|
|
* This file is subject to the terms and conditions of the GNU Lesser
|
|
* General Public License v2.1. See the file LICENSE in the top level
|
|
* directory for more details.
|
|
*/
|
|
|
|
#include "VIC.h"
|
|
#include <stdbool.h>
|
|
|
|
#define IRQ_MASK 0x00000080             /**< CPSR I bit (bit 7): set = normal interrupts (IRQ) disabled */
#define FIQ_MASK 0x00000040             /**< CPSR F bit (bit 6): set = fast interrupts (FIQ) disabled */
#define INT_MASK (IRQ_MASK | FIQ_MASK)  /**< both interrupt-disable bits */
|
|
|
|
/**
 * @brief   Read the ARM Current Program Status Register (CPSR).
 *
 * The "memory" clobber turns this access into an explicit compiler memory
 * barrier, so surrounding memory accesses are not reordered across it even
 * when this function is inlined (e.g. under LTO).
 *
 * @return  the current CPSR value
 */
static inline unsigned __get_cpsr(void)
{
    /* was `unsigned long`: use `unsigned` so the local matches the
     * function's return type and the 32-bit result of `mrs` */
    unsigned retval;

    __asm__ volatile(" mrs %0, cpsr" : "=r"(retval) : /* no inputs */ : "memory");
    return retval;
}
|
|
|
|
/**
 * @brief   Check whether the CPU is currently executing in IRQ mode.
 *
 * @return  non-zero (true) if the processor mode field equals IRQ mode
 */
int irq_is_in(void)
{
    int retval;

    /* Read CPSR; the "memory" clobber makes this a compiler barrier (LTO-safe). */
    __asm__ volatile(" mrs %0, cpsr" : "=r"(retval) : /* no inputs */ : "memory");
    /* 18 == 0x12 is the ARM CPSR mode-field encoding for IRQ mode.
     * NOTE(review): INTMode is presumably the mode-bit mask (0x1F) provided
     * by VIC.h — confirm against that header. */
    return (retval & INTMode) == 18;
}
|
|
|
|
/**
 * @brief   Write @p val to the ARM Current Program Status Register (CPSR).
 *
 * The "memory" clobber turns this access into an explicit compiler memory
 * barrier, so surrounding memory accesses are not reordered across it even
 * when this function is inlined (e.g. under LTO).
 *
 * @param   val     new CPSR value
 */
static inline void __set_cpsr(unsigned val)
{
    __asm__ volatile(" msr cpsr, %0" : /* no outputs */ : "r"(val) : "memory");
}
|
|
|
|
unsigned irq_disable(void)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
__set_cpsr(_cpsr | IRQ_MASK);
|
|
return _cpsr;
|
|
}
|
|
|
|
unsigned irq_restore(unsigned oldCPSR)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
__set_cpsr((_cpsr & ~IRQ_MASK) | (oldCPSR & IRQ_MASK));
|
|
return _cpsr;
|
|
}
|
|
|
|
unsigned IRQenabled(void)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
return (_cpsr & IRQ_MASK);
|
|
}
|
|
|
|
unsigned irq_enable(void)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
__set_cpsr(_cpsr & ~IRQ_MASK);
|
|
return _cpsr;
|
|
}
|
|
|
|
unsigned disableFIQ(void)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
__set_cpsr(_cpsr | FIQ_MASK);
|
|
return _cpsr;
|
|
}
|
|
|
|
unsigned restoreFIQ(unsigned oldCPSR)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
__set_cpsr((_cpsr & ~FIQ_MASK) | (oldCPSR & FIQ_MASK));
|
|
return _cpsr;
|
|
}
|
|
|
|
unsigned enableFIQ(void)
|
|
{
|
|
unsigned _cpsr;
|
|
|
|
_cpsr = __get_cpsr();
|
|
__set_cpsr(_cpsr & ~FIQ_MASK);
|
|
return _cpsr;
|
|
}
|