1
0
mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00

Merge pull request #16919 from maribu/core/rmutex

core/rmutex: use atomic utils
This commit is contained in:
benpicco 2022-02-04 11:56:05 +01:00 committed by GitHub
commit 4aa90d3dc6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 60 additions and 49 deletions

View File

@ -24,11 +24,6 @@
#define RMUTEX_H
#include <stdint.h>
#ifdef __cplusplus
#include "c11_atomics_compat.hpp"
#else
#include <stdatomic.h>
#endif
#include "mutex.h"
#include "sched.h"
@ -62,14 +57,14 @@ typedef struct rmutex_t {
* atomic_int_least16_t is used. Note @ref kernel_pid_t is an int16
* @internal
*/
atomic_int_least16_t owner;
kernel_pid_t owner;
} rmutex_t;
/**
* @brief Static initializer for rmutex_t.
* @details This initializer is preferable to rmutex_init().
*/
#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
#define RMUTEX_INIT { MUTEX_INIT, 0, KERNEL_PID_UNDEF }
/**
* @brief Initializes a recursive mutex object.

View File

@ -24,9 +24,10 @@
#include <stdio.h>
#include <inttypes.h>
#include "assert.h"
#include "atomic_utils.h"
#include "rmutex.h"
#include "thread.h"
#include "assert.h"
#define ENABLE_DEBUG 0
#include "debug.h"
@ -78,7 +79,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
*/
/* ensure that owner is read atomically, since I need a consistent value */
owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
owner = atomic_load_kernel_pid(&rmutex->owner);
DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n",
thread_getpid(), owner);
@ -104,8 +105,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());
/* ensure that owner is written atomically, since others need a consistent value */
atomic_store_explicit(&rmutex->owner, thread_getpid(),
memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());
@ -127,8 +127,8 @@ int rmutex_trylock(rmutex_t *rmutex)
void rmutex_unlock(rmutex_t *rmutex)
{
assert(atomic_load_explicit(&rmutex->owner,
memory_order_relaxed) == thread_getpid());
/* ensure that owner is read atomically, since I need a consistent value */
assert(atomic_load_kernel_pid(&rmutex->owner) == thread_getpid());
assert(rmutex->refcount > 0);
DEBUG("rmutex %" PRIi16 " : decrementing refs refs\n", thread_getpid());
@ -143,8 +143,7 @@ void rmutex_unlock(rmutex_t *rmutex)
DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());
/* ensure that owner is written only once */
atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF,
memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, KERNEL_PID_UNDEF);
DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());

View File

@ -139,6 +139,8 @@
#include <stdint.h>
#include "irq.h"
#include "sched.h"
#include "atomic_utils_arch.h"
#ifdef __cplusplus
@ -263,6 +265,16 @@ static inline uintptr_t atomic_load_uintptr(const volatile uintptr_t *var) {
static inline void * atomic_load_ptr(void **ptr_addr) {
return (void *)atomic_load_uintptr((const volatile uintptr_t *)ptr_addr);
}
/**
 * @brief   Load a `kernel_pid_t` atomically
 *
 * @param[in]   var     Variable to load atomically
 *
 * @return  The value stored in @p var
 *
 * @note    Implemented by forwarding to @ref atomic_load_u16; this relies on
 *          `kernel_pid_t` being a 16-bit integer type of the same size as
 *          `uint16_t` — presumably `int16_t`, TODO confirm against sched.h.
 */
static inline kernel_pid_t atomic_load_kernel_pid(const volatile kernel_pid_t *var)
{
    /* Cast to uint16_t * so the generic 16-bit atomic load can be reused;
     * the result converts back to the (signed) kernel_pid_t return type. */
    return atomic_load_u16((const volatile uint16_t *)var);
}
/** @} */
/**
@ -321,6 +333,17 @@ static inline void atomic_store_uintptr(volatile uintptr_t *dest, uintptr_t val)
static inline void atomic_store_ptr(void **dest, const void *val) {
atomic_store_uintptr((volatile uintptr_t *)dest, (uintptr_t)val);
}
/**
 * @brief   Store a `kernel_pid_t` atomically
 *
 * @param[out]  dest    Location to atomically write the new value to
 * @param[in]   val     Value to write
 *
 * @note    Implemented by forwarding to @ref atomic_store_u16; this relies on
 *          `kernel_pid_t` being a 16-bit integer type of the same size as
 *          `uint16_t` — presumably `int16_t`, TODO confirm against sched.h.
 */
static inline void atomic_store_kernel_pid(volatile kernel_pid_t *dest,
                                           kernel_pid_t val)
{
    /* Cast through uint16_t so the generic 16-bit atomic store can be reused;
     * the bit pattern of the (signed) PID value is preserved. */
    atomic_store_u16((volatile uint16_t *)dest, (uint16_t)val);
}
/** @} */
/**

View File

@ -23,6 +23,7 @@
#include <string.h>
#include "xtimer.h"
#include "atomic_utils.h"
#include "msg.h"
#include "mutex.h"
#include "rmutex.h"
@ -224,8 +225,7 @@ int xtimer_rmutex_lock_timeout(rmutex_t *rmutex, uint64_t timeout)
return 0;
}
if (xtimer_mutex_lock_timeout(&rmutex->mutex, timeout) == 0) {
atomic_store_explicit(&rmutex->owner,
thread_getpid(), memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
rmutex->refcount++;
return 0;
}

View File

@ -23,6 +23,7 @@
#include <assert.h>
#include <errno.h>
#include "atomic_utils.h"
#include "irq.h"
#include "mutex.h"
#include "rmutex.h"
@ -190,8 +191,7 @@ int ztimer_rmutex_lock_timeout(ztimer_clock_t *clock, rmutex_t *rmutex,
return 0;
}
if (ztimer_mutex_lock_timeout(clock, &rmutex->mutex, timeout) == 0) {
atomic_store_explicit(&rmutex->owner,
thread_getpid(), memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
rmutex->refcount++;
return 0;
}

View File

@ -23,6 +23,7 @@
#include <assert.h>
#include <errno.h>
#include "atomic_utils.h"
#include "irq.h"
#include "mutex.h"
#include "rmutex.h"
@ -183,8 +184,7 @@ int ztimer64_rmutex_lock_until(ztimer64_clock_t *clock, rmutex_t *rmutex,
return 0;
}
if (ztimer64_mutex_lock_until(clock, &rmutex->mutex, target) == 0) {
atomic_store_explicit(&rmutex->owner,
thread_getpid(), memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
rmutex->refcount++;
return 0;
}

View File

@ -20,11 +20,13 @@
#include <stdio.h>
#include <stdlib.h>
#include "shell.h"
#include "xtimer.h"
#include "thread.h"
#include "msg.h"
#include "atomic_utils.h"
#include "irq.h"
#include "msg.h"
#include "shell.h"
#include "thread.h"
#include "xtimer.h"
/* XTIMER_SHIFT can be undefined when using xtimer_on_ztimer on boards
* incompatible with xtimers tick conversion, e.g. the waspmote-pro
@ -155,8 +157,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_unlocked(int argc,
if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -209,8 +210,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -261,8 +261,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_low_prio_thread(
if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -317,8 +316,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -355,8 +353,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_unlocked(int argc,
if (xtimer_rmutex_lock_timeout(&test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");

View File

@ -20,11 +20,13 @@
#include <stdio.h>
#include <stdlib.h>
#include "shell.h"
#include "ztimer.h"
#include "thread.h"
#include "msg.h"
#include "atomic_utils.h"
#include "irq.h"
#include "msg.h"
#include "shell.h"
#include "thread.h"
#include "ztimer.h"
/* timeout at one millisecond (1000 us) to make sure it does not spin. */
#define LONG_RMUTEX_TIMEOUT 1000
@ -148,8 +150,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_unlocked(int argc,
if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -202,8 +203,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -254,8 +254,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_low_prio_thread(
if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -310,8 +309,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
@ -348,8 +346,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_unlocked(int argc,
if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");