mirror of https://github.com/RIOT-OS/RIOT.git
Merge pull request #2933 from gebart/pr/atomic-set-return-deprecated

core: remove atomic_set_return

This commit is contained in commit e857ca7f51.
@@ -26,19 +26,6 @@
 #include "cpu.h"
 #include "atomic.h"
 
-#if (ARCH_HAS_ATOMIC_SET_RETURN == 0)
-
-unsigned int atomic_set_return(unsigned int *val, unsigned int set)
-{
-    unsigned int mask = disableIRQ();
-    unsigned int old_val = *val;
-    *val = set;
-    restoreIRQ(mask);
-    return old_val;
-}
-
-#endif
-
 /* Set ARCH_HAS_ATOMIC_COMPARE_AND_SWAP within cpu.h to override this function */
 #if (ARCH_HAS_ATOMIC_COMPARE_AND_SWAP == 0)
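The hunk above (apparently the core fallback file, given its includes) drops the IRQ-guarded atomic_set_return fallback; what remains is the compare-and-swap fallback. The helpers that replace atomic_set_return throughout this commit, atomic_set_to_one() and atomic_set_to_zero(), can be expressed on top of CAS. A minimal sketch, assuming atomic_cas() returns nonzero on success (the helper name here is illustrative, not RIOT's implementation):

    #include "atomic.h"

    /* Lock-style flag set via compare-and-swap: succeeds only for the
     * exact 0 -> 1 transition, so exactly one concurrent caller wins. */
    static int set_to_one_via_cas(atomic_int_t *var)
    {
        return atomic_cas(var, 0, 1);
    }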
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2014 Freie Universität Berlin
- *
- * This file is subject to the terms and conditions of the GNU Lesser
- * General Public License v2.1. See the file LICENSE in the top level
- * directory for more details.
- */
-
-/**
- * @ingroup core_arch
- * @{
- *
- * @file
- * @brief Architecture dependent interface for an atomic set operation
- *
- * @author Hauke Petersen <hauke.petersen@fu-berlin.de>
- */
-
-#ifndef ATOMIC_ARCH_H
-#define ATOMIC_ARCH_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @brief Define mappings between arch and internal interfaces
- *
- * This mapping is done for compatibility of existing platforms,
- * new platforms should always use the *_arch_* interfaces.
- * @{
- */
-#ifdef COREIF_NG
-#define atomic_set_return atomic_arch_set_return
-#endif
-/** @} */
-
-/**
- * @brief Set a value atomically without interruption from interrupts etc.
- *
- * @param[out] to_set variable to set
- * @param[in]  value  value to set to_set to
- *
- * @return the value that was set
- */
-unsigned int atomic_arch_set_return(unsigned int *to_set, unsigned int value);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ATOMIC_ARCH_H */
-/** @} */
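This deleted header (apparently arch/atomic_arch.h, given its include guard) defined the per-architecture contract: write the new value, return the previous one. That contract is exactly what a test-and-set lock needs. A sketch under the header's names, with try_lock_via_set_return and lock_word as purely illustrative identifiers, and assuming the function returns the old value as documented in atomic.h below:

    /* Test-and-set built on the removed contract: the returned previous
     * value tells the caller whether the lock was free. */
    static unsigned int lock_word = 0;

    static int try_lock_via_set_return(void)
    {
        /* previous value 0 -> this caller acquired the lock */
        return atomic_arch_set_return(&lock_word, 1) == 0;
    }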
@@ -21,8 +21,6 @@
 #ifndef ATOMIC_H_
 #define ATOMIC_H_
 
-#include "arch/atomic_arch.h"
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -34,18 +32,6 @@ typedef struct atomic_int {
     volatile int value;
 } atomic_int_t;
 
-
-
-/**
- * @brief Sets a new and returns the old value of a variable atomically
- *
- * @param[in] val The variable to be set
- * @param[in] set The value to be written
- *
- * @return The old value of *val*
- */
-unsigned int atomic_set_return(unsigned int *val, unsigned int set);
-
 /**
  * @brief Initializer for atomic variables
  *
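With the declaration gone, callers move to the atomic_int_t wrapper and its helpers, which the rest of this commit uses: ATOMIC_INIT for initialization, ATOMIC_VALUE for raw access, atomic_set_to_one/atomic_set_to_zero for flag transitions. A usage sketch with the signatures assumed from the call sites shown in this diff:

    #include "atomic.h"

    static atomic_int_t flag = ATOMIC_INIT(0);

    static void example(void)
    {
        if (atomic_set_to_one(&flag)) {
            /* we won the 0 -> 1 transition: flag was free, now taken */
            /* ... critical work ... */
            atomic_set_to_zero(&flag);     /* release */
        }
        (void) ATOMIC_VALUE(flag);         /* raw access, e.g. for debug output */
    }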
@@ -22,6 +22,7 @@
 #define MUTEX_H_
 
 #include "priority_queue.h"
+#include "atomic.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -37,7 +38,7 @@ typedef struct mutex_t {
      * never be changed by the user.**
      * @internal
      */
-    unsigned int val;
+    atomic_int_t val;
    /**
     * @brief The process waiting queue of the mutex. **Must never be changed
     * by the user.**
@@ -50,7 +51,7 @@ typedef struct mutex_t {
  * @brief Static initializer for mutex_t.
  * @details This initializer is preferable to mutex_init().
  */
-#define MUTEX_INIT { 0, PRIORITY_QUEUE_INIT }
+#define MUTEX_INIT { ATOMIC_INIT(0), PRIORITY_QUEUE_INIT }
 
 /**
  * @brief Initializes a mutex object.
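The mutex's lock word becomes an atomic_int_t, and MUTEX_INIT wraps its zero in ATOMIC_INIT, so user code is unaffected as long as it sticks to the initializer. A short usage sketch:

    #include "mutex.h"

    /* A mutex declared with the initializer starts unlocked; the atomic
     * wrapper type stays an implementation detail. */
    static mutex_t lock = MUTEX_INIT;

    static void with_lock(void)
    {
        mutex_lock(&lock);
        /* ... access shared state ... */
        mutex_unlock(&lock);
    }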
core/mutex.c (28 changed lines)
@@ -14,6 +14,7 @@
  * @brief Kernel mutex implementation
  *
  * @author Kaspar Schleiser <kaspar@schleiser.de>
+ * @author Joakim Gebart <joakim.gebart@eistec.se>
  *
  * @}
  */
@@ -37,15 +38,15 @@ static void mutex_wait(struct mutex_t *mutex);
 
 int mutex_trylock(struct mutex_t *mutex)
 {
-    DEBUG("%s: trylocking to get mutex. val: %u\n", sched_active_thread->name, mutex->val);
-    return (atomic_set_return(&mutex->val, 1) == 0);
+    DEBUG("%s: trylocking to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
+    return atomic_set_to_one(&mutex->val);
 }
 
 void mutex_lock(struct mutex_t *mutex)
 {
-    DEBUG("%s: trying to get mutex. val: %u\n", sched_active_thread->name, mutex->val);
+    DEBUG("%s: trying to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
 
-    if (atomic_set_return(&mutex->val, 1) != 0) {
+    if (atomic_set_to_one(&mutex->val) == 0) {
         /* mutex was locked. */
         mutex_wait(mutex);
     }
@@ -54,12 +55,11 @@ void mutex_lock(struct mutex_t *mutex)
 static void mutex_wait(struct mutex_t *mutex)
 {
     unsigned irqstate = disableIRQ();
-    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, mutex->val);
+    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
 
-    if (mutex->val == 0) {
+    if (atomic_set_to_one(&mutex->val)) {
         /* somebody released the mutex. return. */
-        mutex->val = 1;
-        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, mutex->val);
+        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
         restoreIRQ(irqstate);
         return;
     }
@@ -84,10 +84,10 @@ static void mutex_wait(struct mutex_t *mutex)
 
 void mutex_unlock(struct mutex_t *mutex)
 {
-    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid "\n", sched_active_thread->name, mutex->val, sched_active_pid);
+    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid "\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
     unsigned irqstate = disableIRQ();
 
-    if (mutex->val != 0) {
+    if (ATOMIC_VALUE(mutex->val) != 0) {
         priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
         if (next) {
             tcb_t *process = (tcb_t *) next->data;
@@ -97,7 +97,7 @@ void mutex_unlock(struct mutex_t *mutex)
             sched_switch(process->priority);
         }
         else {
-            mutex->val = 0;
+            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
         }
     }
 
@@ -106,10 +106,10 @@ void mutex_unlock(struct mutex_t *mutex)
 
 void mutex_unlock_and_sleep(struct mutex_t *mutex)
 {
-    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, mutex->val, sched_active_pid);
+    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
     unsigned irqstate = disableIRQ();
 
-    if (mutex->val != 0) {
+    if (ATOMIC_VALUE(mutex->val) != 0) {
         priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
         if (next) {
             tcb_t *process = (tcb_t *) next->data;
@@ -117,7 +117,7 @@ void mutex_unlock_and_sleep(struct mutex_t *mutex)
             sched_set_status(process, STATUS_PENDING);
         }
         else {
-            mutex->val = 0;
+            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
         }
     }
     DEBUG("%s: going to sleep.\n", sched_active_thread->name);
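Note the polarity flip in the acquire paths: atomic_set_return(&val, 1) == 0 tested the previous value, while atomic_set_to_one(&val) reports whether the 0 -> 1 transition succeeded. A pair of illustrative helpers (not from the codebase) contrasting the two idioms:

    /* Illustrative only -- the success condition inverts because the new
     * helper reports success rather than returning the previous value. */
    static int try_acquire_old(unsigned int *val)
    {
        return atomic_set_return(val, 1) == 0;  /* previous value was 0 */
    }

    static int try_acquire_new(atomic_int_t *val)
    {
        return atomic_set_to_one(val) != 0;     /* the 0 -> 1 transition won */
    }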
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2014 Freie Universität Berlin
- *
- * This file is subject to the terms and conditions of the GNU Lesser
- * General Public License v2.1. See the file LICENSE in the top level
- * directory for more details.
- */
-
-/* GCC ARM assembler */
-
-.text
-.code 32
-.align 4 /* 0 */
-
-/* .extern sched_run*/
-
-/* Public functions declared in this file */
-.global atomic_set_return
-
-.func
-atomic_set_return:
-    SWP r2,r1,[r0]
-    MOV r0, r2
-    mov pc, lr
-.endfunc
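This deleted assembly file (an ARM7 implementation, per the .code 32 directive) relied on the SWP instruction, which atomically exchanges a register with a memory word — and is deprecated on later ARM cores, one reason to retire the interface. A rough C rendering of what the routine did, as GCC inline assembly, purely for illustration:

    /* Sketch only: SWP Rd, Rm, [Rn] loads the old word into Rd and
     * stores Rm to [Rn] in one atomic bus operation (ARMv4/ARM7). */
    static unsigned int swp_exchange(unsigned int *addr, unsigned int newval)
    {
        unsigned int old;
        __asm__ __volatile__("swp %0, %1, [%2]"
                             : "=&r"(old)              /* early-clobber: Rd must differ from Rn */
                             : "r"(newval), "r"(addr)
                             : "memory");
        return old;
    }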
@@ -17,11 +17,6 @@
 extern "C" {
 #endif
 
-/**
- * @brief ARM has architecture specific atomic_set_return in atomic.s
- */
-#define ARCH_HAS_ATOMIC_SET_RETURN 1
-
 #define NEW_TASK_CPSR 0x1F
 #define WORDSIZE 32
 
@@ -44,9 +44,8 @@ extern "C" {
 #endif
 
 /**
- * @brief x86 has architecture specific atomic operations in x86_atomic.c.
+ * @brief x86 has architecture specific atomic_cas in x86_atomic.c
  */
-#define ARCH_HAS_ATOMIC_SET_RETURN 1
 #define ARCH_HAS_ATOMIC_COMPARE_AND_SWAP 1
 
 static inline void __attribute__((always_inline)) dINT(void)
@@ -31,12 +31,6 @@
 #include <stdint.h>
 #include "atomic.h"
 
-unsigned int atomic_set_return(unsigned int *val, unsigned int set)
-{
-    asm volatile ("lock xchg %0, %1" : "+m"(*val), "+r"(set));
-    return set;
-}
-
 int atomic_cas(atomic_int_t *dest, int known_value, int new_value)
 {
     uint8_t successful;
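The removed function swapped a value in a single `lock xchg`; the surviving atomic_cas() (its body is truncated above) is the compare-and-swap the header now advertises. As a hedged sketch only — not this file's actual code — a cmpxchg-based CAS on x86 commonly looks like this:

    /* Compare *dest with `expected`; if equal, store `desired` and set ZF.
     * The lock prefix makes the whole compare-and-exchange atomic. */
    static int cas_sketch(volatile int *dest, int expected, int desired)
    {
        unsigned char ok;
        __asm__ __volatile__("lock cmpxchg %3, %1\n\t"
                             "sete %0"                 /* capture ZF as success flag */
                             : "=q"(ok), "+m"(*dest), "+a"(expected)
                             : "r"(desired)
                             : "memory", "cc");
        return ok;
    }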
@@ -134,7 +134,7 @@ ng_pktsnip_t *ng_pktbuf_start_write(ng_pktsnip_t *pkt)
 
     res = _pktbuf_duplicate(pkt);
 
-    atomic_set_return(&pkt->users, pkt->users - 1);
+    atomic_dec(&pkt->users);
 
     mutex_unlock(&_pktbuf_mutex);
 
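This is more than a rename: in the old form the decrement was computed outside the atomic operation (read pkt->users, subtract one, then store atomically), so an update between the read and the store could be lost. atomic_dec() folds the whole read-modify-write into one indivisible step. A sketch, with the counter type assumed to be atomic_int_t:

    #include "atomic.h"

    static atomic_int_t users = ATOMIC_INIT(2);

    static void release_reference(void)
    {
        /* Old idiom: atomic_set_return(&users, users - 1) computed the
         * new value before the atomic store -- a racy two-step sequence.
         * atomic_dec() performs the read-decrement-write indivisibly. */
        atomic_dec(&users);
    }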
@@ -13,6 +13,7 @@
  * @brief Spin locks.
  * @author Christian Mehlis <mehlis@inf.fu-berlin.de>
  * @author René Kijewski <kijewski@inf.fu-berlin.de>
+ * @author Joakim Gebart <joakim.gebart@eistec.se>
  * @}
  */
 
@@ -46,9 +47,10 @@ int pthread_spin_lock(pthread_spinlock_t *lock)
         return EINVAL;
     }
 
-    while (atomic_set_return((unsigned *) lock, 1) != 0) {
+    while (atomic_set_to_one((int *)lock) == 0) {
         /* spin */
     }
 
     return 0;
 }
 
@@ -58,7 +60,11 @@ int pthread_spin_trylock(pthread_spinlock_t *lock)
         return EINVAL;
     }
 
-    return atomic_set_return((unsigned *) lock, 1) == 0 ? 0 : EBUSY;
+    if (atomic_set_to_one((int *)lock) == 0) {
+        return EBUSY;
+    }
+
+    return 0;
 }
 
 int pthread_spin_unlock(pthread_spinlock_t *lock)
@@ -67,5 +73,9 @@ int pthread_spin_unlock(pthread_spinlock_t *lock)
         return EINVAL;
     }
 
-    return atomic_set_return((unsigned *) lock, 0) != 0 ? 0 : EPERM;
+    if (atomic_set_to_zero((int *)lock) == 0) {
+        return EPERM;
+    }
+
+    return 0;
 }
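The spinlock keeps the same POSIX-facing behavior — lock spins until the 0 -> 1 transition wins, trylock returns EBUSY if it loses, unlock returns EPERM if the lock was not held. A usage sketch, assuming the standard POSIX signatures for init/destroy (not shown in this hunk):

    #include <pthread.h>

    static pthread_spinlock_t sl;

    static void spin_guarded_section(void)
    {
        pthread_spin_init(&sl, 0);   /* second argument: POSIX pshared flag */

        pthread_spin_lock(&sl);      /* busy-waits until available */
        /* ... keep this short: waiters burn CPU while spinning ... */
        pthread_spin_unlock(&sl);

        pthread_spin_destroy(&sl);
    }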
@@ -15,55 +15,6 @@
 
 #include "tests-core.h"
 
-static void test_atomic_set_return_null_null(void)
-{
-    unsigned int res = 0;
-
-    TEST_ASSERT_EQUAL_INT(0, atomic_set_return(&res, 0));
-    TEST_ASSERT_EQUAL_INT(0, res);
-}
-
-static void test_atomic_set_return_one_null(void)
-{
-    unsigned int res = 1;
-
-    TEST_ASSERT_EQUAL_INT(1, atomic_set_return(&res, 0));
-    TEST_ASSERT_EQUAL_INT(0, res);
-}
-
-static void test_atomic_set_return_null_one(void)
-{
-    unsigned int res = 0;
-
-    TEST_ASSERT_EQUAL_INT(0, atomic_set_return(&res, 1));
-    TEST_ASSERT_EQUAL_INT(1, res);
-}
-
-static void test_atomic_set_return_limit_null(void)
-{
-    unsigned int res = UINT_MAX;
-
-    TEST_ASSERT_EQUAL_INT(UINT_MAX, atomic_set_return(&res, 0));
-    TEST_ASSERT_EQUAL_INT(0, res);
-}
-
-static void test_atomic_set_return_null_limit(void)
-{
-    unsigned int res = 0;
-
-    TEST_ASSERT_EQUAL_INT(0, atomic_set_return(&res, UINT_MAX));
-    TEST_ASSERT_EQUAL_INT(UINT_MAX, res);
-}
-
-static void test_atomic_set_return_null_random(void)
-{
-    unsigned int res = 0;
-    unsigned int r = 45; /* XXX: decided by fair dice-roll ;-) */
-
-    TEST_ASSERT_EQUAL_INT(0, atomic_set_return(&res, r));
-    TEST_ASSERT_EQUAL_INT(r, res);
-}
-
 /* Test atomic_set_to_one on a variable set to 0 */
 static void test_atomic_set_to_one_zero(void)
 {
@@ -257,12 +208,6 @@ static void test_atomic_value(void)
 Test *tests_core_atomic_tests(void)
 {
     EMB_UNIT_TESTFIXTURES(fixtures) {
-        new_TestFixture(test_atomic_set_return_null_null),
-        new_TestFixture(test_atomic_set_return_one_null),
-        new_TestFixture(test_atomic_set_return_null_one),
-        new_TestFixture(test_atomic_set_return_limit_null),
-        new_TestFixture(test_atomic_set_return_null_limit),
-        new_TestFixture(test_atomic_set_return_null_random),
         new_TestFixture(test_atomic_set_to_one_one),
         new_TestFixture(test_atomic_set_to_one_zero),
         new_TestFixture(test_atomic_set_to_one_twice),
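The six deleted tests exercised the swap-and-return-old-value contract; the fixtures that remain cover the new helpers instead. The actual replacement test bodies are truncated above; a sketch of their likely shape, under the assumption that atomic_set_to_one() reports success (sketch_test_atomic_set_to_one is an illustrative name):

    /* The 0 -> 1 transition must succeed exactly once. */
    static void sketch_test_atomic_set_to_one(void)
    {
        atomic_int_t res = ATOMIC_INIT(0);

        TEST_ASSERT(atomic_set_to_one(&res) != 0);    /* first call wins */
        TEST_ASSERT_EQUAL_INT(1, ATOMIC_VALUE(res));
        TEST_ASSERT(atomic_set_to_one(&res) == 0);    /* already one */
    }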
@@ -81,11 +81,11 @@ void test_ubjson_test(void (*sender_fun)(void), void (*receiver_fun)(void))
     test_ubjson_receiver_data_t data = {
         .run = receiver_fun,
         .main_thread = (tcb_t *) sched_active_thread,
-        .mutexes = {
-            { 1, PRIORITY_QUEUE_INIT },
-            { 1, PRIORITY_QUEUE_INIT },
-        },
+        .mutexes = { MUTEX_INIT, MUTEX_INIT },
     };
+    mutex_lock(&data.mutexes[0]);
+    mutex_lock(&data.mutexes[1]);
 
     kernel_pid_t receiver_pid = thread_create(receiver_stack, sizeof(receiver_stack),
                                               THREAD_PRIORITY_MAIN, CREATE_WOUT_YIELD,
                                               test_ubjson_receiver_trampoline, &data, "receiver");
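The old initializer baked the mutex's struct layout (and a pre-locked val of 1) into the test; with the layout change it would no longer compile. Using the opaque MUTEX_INIT and then locking explicitly reaches the same state without depending on internals. In miniature:

    #include "mutex.h"

    static mutex_t m = MUTEX_INIT;   /* starts unlocked, layout-independent */

    static void start_held(void)
    {
        /* reach the state the old { 1, PRIORITY_QUEUE_INIT } initializer
         * encoded directly in the struct layout */
        mutex_lock(&m);
    }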