1
0
mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00

core/c11_atomic.c: Add remaining C11 atomic library helpers for GCC

This commit is contained in:
Joakim Nohlgård 2016-02-07 22:15:48 +01:00
parent 7376419350
commit 6d20ac084b

View File

@ -28,11 +28,17 @@
* @note This implementation completely ignores the memory model parameter
*
* @see https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary
* @see https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
*
* @author Joakim Nohlgård <joakim.nohlgard@eistec.se>
*/
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <string.h>
#include "irq.h"
/* GCC documentation refers to the types as I1, I2, I4, I8, I16 */
typedef uint8_t I1;
typedef uint16_t I2;
typedef uint32_t I4;
typedef uint64_t I8;
/* typedef __uint128_t I16; */ /* No 128 bit integer support yet */
/**
 * @brief Template for generating a function named __atomic_load_<em>n</em>
 *
 * The generated function performs the load with interrupts disabled so the
 * access cannot be interleaved with an ISR.
 *
 * @param[in] n width of the data, in bytes
 */
#define TEMPLATE_ATOMIC_LOAD_N(n) \
    I##n __atomic_load_##n (I##n *ptr, int memorder) \
    { \
        (void) memorder; \
        unsigned int state = irq_disable(); \
        I##n tmp = *ptr; \
        irq_restore(state); \
        return tmp; \
    }
/**
 * @brief Template for generating a function named __atomic_store_<em>n</em>
 *
 * The generated function performs the store with interrupts disabled so the
 * access cannot be interleaved with an ISR.
 *
 * @param[in] n width of the data, in bytes
 */
#define TEMPLATE_ATOMIC_STORE_N(n) \
    void __atomic_store_##n (I##n *ptr, I##n val, int memorder) \
    { \
        (void) memorder; \
        unsigned int state = irq_disable(); \
        *ptr = val; \
        irq_restore(state); \
    }
/**
 * @brief Template for generating a function named __atomic_exchange_<em>n</em>
 *
 * The generated function swaps in the new value and returns the previous one,
 * with interrupts disabled around the read-modify-write.
 *
 * @param[in] n width of the data, in bytes
 */
#define TEMPLATE_ATOMIC_EXCHANGE_N(n) \
    I##n __atomic_exchange_##n (I##n *ptr, I##n desired, int memorder) \
    { \
        (void) memorder; \
        unsigned int state = irq_disable(); \
        I##n prev = *ptr; \
        *ptr = desired; \
        irq_restore(state); \
        return prev; \
    }
/**
 * @brief Template for generating a function named __atomic_compare_exchange_<em>n</em>
 *
 * The generated function writes @p desired to @p ptr only if @p ptr currently
 * holds the value in @p expected; otherwise the observed value is written back
 * into @p expected. The whole operation runs with interrupts disabled.
 *
 * @param[in] n width of the data, in bytes
 */
#define TEMPLATE_ATOMIC_COMPARE_EXCHANGE_N(n) \
    bool __atomic_compare_exchange_##n (volatile void *ptr, void *expected, I##n desired, \
        bool weak, int success_memorder, int failure_memorder) \
    { \
        (void) weak; \
        (void) success_memorder; \
        (void) failure_memorder; \
        unsigned int state = irq_disable(); \
        I##n observed = *((I##n*)ptr); \
        bool matched = (observed == *((I##n*)expected)); \
        if (matched) { \
            *((volatile I##n*)ptr) = desired; \
        } \
        else { \
            *((I##n*)expected) = observed; \
        } \
        irq_restore(state); \
        return matched; \
    }
/**
 * @brief This is a macro that defines a function named __atomic_fetch_<em>opname</em>_<em>n</em>
 *
 * The generated function applies @p op (optionally prefixed by @p prefixop) to
 * the stored value and returns the value seen BEFORE the update, matching the
 * GCC libatomic __atomic_fetch_OP_N contract. Interrupts are disabled around
 * the read-modify-write.
 *
 * @param[in] opname   operator name that will be used in the function name
 * @param[in] op       actual C language operator
 * @param[in] n        width of the data, in bytes
 * @param[in] prefixop optional prefix unary operator (use ~ for inverting, NAND, NOR etc)
 */
#define TEMPLATE_ATOMIC_FETCH_OP_N(opname, op, n, prefixop) \
    I##n __atomic_fetch_##opname##_##n(volatile void *ptr, I##n val, int memmodel) \
    { \
        (void)memmodel; \
        unsigned int mask = irq_disable(); \
        I##n tmp = *((I##n*)ptr); \
        *((volatile I##n*)ptr) = prefixop(tmp op val); \
        irq_restore(mask); \
        return tmp; \
    }
/**
 * @brief This is a macro that defines a function named __atomic_<em>opname</em>_fetch_<em>n</em>
 *
 * The generated function applies @p op (optionally prefixed by @p prefixop) to
 * the stored value and returns the value AFTER the update, matching the GCC
 * libatomic __atomic_OP_fetch_N contract. Interrupts are disabled around the
 * read-modify-write.
 *
 * @param[in] opname   operator name that will be used in the function name
 * @param[in] op       actual C language operator
 * @param[in] n        width of the data, in bytes
 * @param[in] prefixop optional prefix unary operator (use ~ for inverting, NAND, NOR etc)
 */
#define TEMPLATE_ATOMIC_OP_FETCH_N(opname, op, n, prefixop) \
    I##n __atomic_##opname##_fetch_##n(volatile void *ptr, I##n val, int memmodel) \
    { \
        (void)memmodel; \
        unsigned int mask = irq_disable(); \
        /* compute once, store through a volatile-qualified pointer (keeps the
         * access consistent with the compare_exchange template) and return the
         * new value without re-reading *ptr */ \
        I##n tmp = prefixop(*((I##n*)ptr) op val); \
        *((volatile I##n*)ptr) = tmp; \
        irq_restore(mask); \
        return tmp; \
    }
/* Template instantiations below */
TEMPLATE_ATOMIC_LOAD_N(1) /* __atomic_load_1 */
TEMPLATE_ATOMIC_LOAD_N(2) /* __atomic_load_2 */
TEMPLATE_ATOMIC_LOAD_N(4) /* __atomic_load_4 */
TEMPLATE_ATOMIC_LOAD_N(8) /* __atomic_load_8 */

TEMPLATE_ATOMIC_STORE_N(1) /* __atomic_store_1 */
TEMPLATE_ATOMIC_STORE_N(2) /* __atomic_store_2 */
TEMPLATE_ATOMIC_STORE_N(4) /* __atomic_store_4 */
TEMPLATE_ATOMIC_STORE_N(8) /* __atomic_store_8 */

TEMPLATE_ATOMIC_EXCHANGE_N(1) /* __atomic_exchange_1 */
TEMPLATE_ATOMIC_EXCHANGE_N(2) /* __atomic_exchange_2 */
TEMPLATE_ATOMIC_EXCHANGE_N(4) /* __atomic_exchange_4 */
TEMPLATE_ATOMIC_EXCHANGE_N(8) /* __atomic_exchange_8 */

TEMPLATE_ATOMIC_COMPARE_EXCHANGE_N(1) /* __atomic_compare_exchange_1 */
TEMPLATE_ATOMIC_COMPARE_EXCHANGE_N(2) /* __atomic_compare_exchange_2 */
TEMPLATE_ATOMIC_COMPARE_EXCHANGE_N(4) /* __atomic_compare_exchange_4 */
TEMPLATE_ATOMIC_COMPARE_EXCHANGE_N(8) /* __atomic_compare_exchange_8 */

TEMPLATE_ATOMIC_FETCH_OP_N( add, +, 1, ) /* __atomic_fetch_add_1 */
TEMPLATE_ATOMIC_FETCH_OP_N( add, +, 2, ) /* __atomic_fetch_add_2 */
TEMPLATE_ATOMIC_FETCH_OP_N( add, +, 4, ) /* __atomic_fetch_add_4 */
TEMPLATE_ATOMIC_FETCH_OP_N( add, +, 8, ) /* __atomic_fetch_add_8 */
TEMPLATE_ATOMIC_FETCH_OP_N( sub, -, 1, ) /* __atomic_fetch_sub_1 */
TEMPLATE_ATOMIC_FETCH_OP_N( sub, -, 2, ) /* __atomic_fetch_sub_2 */
TEMPLATE_ATOMIC_FETCH_OP_N( sub, -, 4, ) /* __atomic_fetch_sub_4 */
TEMPLATE_ATOMIC_FETCH_OP_N( sub, -, 8, ) /* __atomic_fetch_sub_8 */
TEMPLATE_ATOMIC_FETCH_OP_N( and, &, 1, ) /* __atomic_fetch_and_1 */
TEMPLATE_ATOMIC_FETCH_OP_N( and, &, 2, ) /* __atomic_fetch_and_2 */
TEMPLATE_ATOMIC_FETCH_OP_N( and, &, 4, ) /* __atomic_fetch_and_4 */
TEMPLATE_ATOMIC_FETCH_OP_N( and, &, 8, ) /* __atomic_fetch_and_8 */
TEMPLATE_ATOMIC_FETCH_OP_N(  or, |, 1, ) /* __atomic_fetch_or_1 */
TEMPLATE_ATOMIC_FETCH_OP_N(  or, |, 2, ) /* __atomic_fetch_or_2 */
TEMPLATE_ATOMIC_FETCH_OP_N(  or, |, 4, ) /* __atomic_fetch_or_4 */
TEMPLATE_ATOMIC_FETCH_OP_N(  or, |, 8, ) /* __atomic_fetch_or_8 */
TEMPLATE_ATOMIC_FETCH_OP_N( xor, ^, 1, ) /* __atomic_fetch_xor_1 */
TEMPLATE_ATOMIC_FETCH_OP_N( xor, ^, 2, ) /* __atomic_fetch_xor_2 */
TEMPLATE_ATOMIC_FETCH_OP_N( xor, ^, 4, ) /* __atomic_fetch_xor_4 */
TEMPLATE_ATOMIC_FETCH_OP_N( xor, ^, 8, ) /* __atomic_fetch_xor_8 */
TEMPLATE_ATOMIC_FETCH_OP_N(nand, &, 1, ~) /* __atomic_fetch_nand_1 */
TEMPLATE_ATOMIC_FETCH_OP_N(nand, &, 2, ~) /* __atomic_fetch_nand_2 */
TEMPLATE_ATOMIC_FETCH_OP_N(nand, &, 4, ~) /* __atomic_fetch_nand_4 */
TEMPLATE_ATOMIC_FETCH_OP_N(nand, &, 8, ~) /* __atomic_fetch_nand_8 */

TEMPLATE_ATOMIC_OP_FETCH_N( add, +, 1, ) /* __atomic_add_fetch_1 */
TEMPLATE_ATOMIC_OP_FETCH_N( add, +, 2, ) /* __atomic_add_fetch_2 */
TEMPLATE_ATOMIC_OP_FETCH_N( add, +, 4, ) /* __atomic_add_fetch_4 */
TEMPLATE_ATOMIC_OP_FETCH_N( add, +, 8, ) /* __atomic_add_fetch_8 */
TEMPLATE_ATOMIC_OP_FETCH_N( sub, -, 1, ) /* __atomic_sub_fetch_1 */
TEMPLATE_ATOMIC_OP_FETCH_N( sub, -, 2, ) /* __atomic_sub_fetch_2 */
TEMPLATE_ATOMIC_OP_FETCH_N( sub, -, 4, ) /* __atomic_sub_fetch_4 */
TEMPLATE_ATOMIC_OP_FETCH_N( sub, -, 8, ) /* __atomic_sub_fetch_8 */
TEMPLATE_ATOMIC_OP_FETCH_N( and, &, 1, ) /* __atomic_and_fetch_1 */
TEMPLATE_ATOMIC_OP_FETCH_N( and, &, 2, ) /* __atomic_and_fetch_2 */
TEMPLATE_ATOMIC_OP_FETCH_N( and, &, 4, ) /* __atomic_and_fetch_4 */
TEMPLATE_ATOMIC_OP_FETCH_N( and, &, 8, ) /* __atomic_and_fetch_8 */
TEMPLATE_ATOMIC_OP_FETCH_N(  or, |, 1, ) /* __atomic_or_fetch_1 */
TEMPLATE_ATOMIC_OP_FETCH_N(  or, |, 2, ) /* __atomic_or_fetch_2 */
TEMPLATE_ATOMIC_OP_FETCH_N(  or, |, 4, ) /* __atomic_or_fetch_4 */
TEMPLATE_ATOMIC_OP_FETCH_N(  or, |, 8, ) /* __atomic_or_fetch_8 */
TEMPLATE_ATOMIC_OP_FETCH_N( xor, ^, 1, ) /* __atomic_xor_fetch_1 */
TEMPLATE_ATOMIC_OP_FETCH_N( xor, ^, 2, ) /* __atomic_xor_fetch_2 */
TEMPLATE_ATOMIC_OP_FETCH_N( xor, ^, 4, ) /* __atomic_xor_fetch_4 */
TEMPLATE_ATOMIC_OP_FETCH_N( xor, ^, 8, ) /* __atomic_xor_fetch_8 */
TEMPLATE_ATOMIC_OP_FETCH_N(nand, &, 1, ~) /* __atomic_nand_fetch_1 */
TEMPLATE_ATOMIC_OP_FETCH_N(nand, &, 2, ~) /* __atomic_nand_fetch_2 */
TEMPLATE_ATOMIC_OP_FETCH_N(nand, &, 4, ~) /* __atomic_nand_fetch_4 */
TEMPLATE_ATOMIC_OP_FETCH_N(nand, &, 8, ~) /* __atomic_nand_fetch_8 */
/* ***** Generic versions below ***** */

/* Clang objects if you redefine a builtin. This little hack allows us to
 * define a function with the same name as an intrinsic. */
/* Hack origin: http://llvm.org/svn/llvm-project/compiler-rt/trunk/lib/builtins/atomic.c */
/* Each pragma below makes the _c-suffixed C-level function be emitted under
 * the builtin's external symbol name, so the linker resolves compiler-generated
 * calls to __atomic_load etc. against these implementations. */
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
/**
 * @brief Atomic generic load
 *
 * Copies @p size bytes from @p src to @p dest with interrupts disabled.
 *
 * @param[in]  size     width of the data, in bytes
 * @param[in]  src      source address to load from
 * @param[out] dest     destination address the loaded value is copied to
 * @param[in]  memorder memory ordering, ignored in this implementation
 */
void __atomic_load_c(size_t size, const void *src, void *dest, int memorder)
{
    (void) memorder;
    unsigned int state = irq_disable();
    memcpy(dest, src, size);
    irq_restore(state);
}
/**
 * @brief Atomic generic store
 *
 * Copies @p size bytes from @p src to @p dest with interrupts disabled.
 *
 * @param[in]  size     width of the data, in bytes
 * @param[out] dest     destination address to store to
 * @param[in]  src      source address of the value being stored
 * @param[in]  memorder memory ordering, ignored in this implementation
 */
void __atomic_store_c(size_t size, void *dest, const void *src, int memorder)
{
    (void) memorder;
    unsigned int state = irq_disable();
    memcpy(dest, src, size);
    irq_restore(state);
}
/**
 * @brief Atomic generic exchange
 *
 * Saves the current contents of @p ptr into @p ret, then overwrites @p ptr
 * with the contents of @p val, all with interrupts disabled.
 *
 * @param[in]     size     width of the data, in bytes
 * @param[in,out] ptr      object to swap
 * @param[in]     val      value to swap in
 * @param[out]    ret      receives the previous value of @p ptr
 * @param[in]     memorder memory ordering, ignored in this implementation
 */
void __atomic_exchange_c(size_t size, void *ptr, void *val, void *ret, int memorder)
{
    (void) memorder;
    unsigned int state = irq_disable();
    /* order matters: capture the old value before overwriting */
    memcpy(ret, ptr, size);
    memcpy(ptr, val, size);
    irq_restore(state);
}
/**
 * @brief Atomic generic compare-and-swap operation
 *
 * Compares the contents of *ptr with the contents of *expected. If they are
 * equal, *ptr is overwritten with *desired (read-modify-write). If they
 * differ, the current contents of *ptr are copied into *expected instead.
 * The whole operation runs with interrupts disabled.
 *
 * The weak/strong distinction and the memory orderings are ignored in this
 * implementation; the operation always behaves like the strong variant.
 *
 * @param[in]     len               width of the data, in bytes
 * @param[in,out] ptr               object to operate on
 * @param[in,out] expected          expected value; updated on mismatch
 * @param[in]     desired           value written to *ptr on match
 * @param[in]     weak              ignored in this implementation
 * @param[in]     success_memorder  ignored in this implementation
 * @param[in]     failure_memorder  ignored in this implementation
 *
 * @return true  if *ptr had the expected value and was updated
 * @return false otherwise (*expected now holds the observed value)
 */
bool __atomic_compare_exchange_c(size_t len, void *ptr, void *expected,
    void *desired, bool weak, int success_memorder, int failure_memorder)
{
    (void)weak;
    (void)success_memorder;
    (void)failure_memorder;
    unsigned int state = irq_disable();
    bool matched = (memcmp(ptr, expected, len) == 0);
    if (matched) {
        memcpy(ptr, desired, len);
    }
    else {
        memcpy(expected, ptr, len);
    }
    irq_restore(state);
    return matched;
}
/* Memory barrier helper function, for platforms without barrier instructions.
 * Declared weak so that a platform with real barrier support can override it. */
void __sync_synchronize(void) __attribute__((__weak__));
void __sync_synchronize(void)
{
    /* ARMv4, ARMv5 do not have any hardware support for memory barriers.
     * This is a compiler-only barrier (no instruction emitted) and will likely
     * break on SMP systems, but we don't support any multi-CPU ARMv5 or ARMv4
     * boards in RIOT so I don't care. /JN
     */
    __asm__ volatile ("" : : : "memory");
}
/** @} */