diff --git a/core/include/rmutex.h b/core/include/rmutex.h
new file mode 100644
index 0000000000..81a3c33f09
--- /dev/null
+++ b/core/include/rmutex.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
+ *
+ * This file is subject to the terms and conditions of the GNU Lesser
+ * General Public License v2.1. See the file LICENSE in the top level
+ * directory for more details.
+ */
+
+/**
+ * @ingroup  core_sync Synchronization
+ * @brief    Recursive Mutex for thread synchronization
+ * @{
+ *
+ * @file
+ * @brief    RIOT synchronization API
+ *
+ * @author   Martin Elshuber <martin.elshuber@theobroma-systems.com>
+ *
+ */
+
+#ifndef RMUTEX_H_
+#define RMUTEX_H_
+
+/* <stdint.h> for uint16_t, <stdatomic.h> for atomic_int_least16_t —
+ * restored; the include targets were stripped in transit */
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include "mutex.h"
+#include "kernel_types.h"
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ * @brief Mutex structure. Must never be modified by the user.
+ */
+typedef struct rmutex_t {
+    /* fields are managed by mutex functions, don't touch */
+    /**
+     * @brief The mutex used for locking. **Must never be changed by
+     *        the user.**
+     * @internal
+     */
+    mutex_t mutex;
+
+    /**
+     * @brief Number of locks owned by the thread owner
+     * @internal
+     */
+    uint16_t refcount;
+
+    /**
+     * @brief Owner thread of the mutex.
+     * @details Owner is written by the mutex holder, and read
+     *          concurrently. To ensure consistency,
+     *          atomic_int_least16_t is used. Note @ref kernel_pid_t is an int16
+     * @internal
+     */
+    atomic_int_least16_t owner;
+} rmutex_t;
+
+/**
+ * @brief Static initializer for rmutex_t.
+ * @details This initializer is preferable to rmutex_init().
+ */
+#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
+
+/**
+ * @brief Initializes a recursive mutex object.
+ * @details For initialization of variables use RMUTEX_INIT instead.
+ *          Only use the function call for dynamically allocated mutexes.
+ * @param[out] rmutex    pre-allocated mutex structure, must not be NULL.
+ */
+static inline void rmutex_init(rmutex_t *rmutex)
+{
+    rmutex_t empty_rmutex = RMUTEX_INIT;
+    *rmutex = empty_rmutex;
+}
+
+/**
+ * @brief Tries to get a recursive mutex, non-blocking.
+ *
+ * @param[in] rmutex Recursive mutex object to lock. Has to be
+ *                   initialized first. Must not be NULL.
+ *
+ * @return 1 if mutex was unlocked, now it is locked.
+ * @return 0 if the mutex was locked.
+ */
+int rmutex_trylock(rmutex_t *rmutex);
+
+/**
+ * @brief Locks a recursive mutex, blocking.
+ *
+ * @param[in] rmutex Recursive mutex object to lock. Has to be
+ *                   initialized first. Must not be NULL.
+ */
+void rmutex_lock(rmutex_t *rmutex);
+
+/**
+ * @brief Unlocks the recursive mutex.
+ *
+ * @param[in] rmutex Recursive mutex object to unlock, must not be NULL.
+ */
+void rmutex_unlock(rmutex_t *rmutex);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RMUTEX_H_ */
+/** @} */
diff --git a/core/rmutex.c b/core/rmutex.c
new file mode 100644
index 0000000000..b6c96bbdab
--- /dev/null
+++ b/core/rmutex.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
+ *
+ * This file is subject to the terms and conditions of the GNU Lesser
+ * General Public License v2.1. See the file LICENSE in the top level
+ * directory for more details.
+ */
+
+/**
+ * @ingroup  core_sync Synchronization
+ * @brief    Recursive Mutex for thread synchronization
+ * @{
+ *
+ * @file
+ * @brief    RIOT synchronization API
+ *
+ * @author   Martin Elshuber <martin.elshuber@theobroma-systems.com>
+ *
+ * The recursive mutex implementation is inspired by the implementation of
+ * Nick v. IJzendoorn
+ * @see https://github.com/RIOT-OS/RIOT/pull/4529/files#diff-8f48e1b9ed7a0a48d0c686a87cc5084eR35
+ *
+ */
+
+/* <stdio.h>/<inttypes.h> restored (stripped in transit): DEBUG() uses
+ * printf-style output and the PRIi16 format macro */
+#include <stdio.h>
+#include <inttypes.h>
+
+#include "rmutex.h"
+#include "thread.h"
+#include "assert.h"
+
+#define ENABLE_DEBUG (0)
+#include "debug.h"
+
+void rmutex_lock(rmutex_t *rmutex)
+{
+    kernel_pid_t owner;
+
+    /* try to lock the mutex */
+    DEBUG("rmutex %" PRIi16 " : trylock\n", thread_getpid());
+    if (mutex_trylock(&rmutex->mutex) == 0) {
+        DEBUG("rmutex %" PRIi16 " : mutex already held\n", thread_getpid());
+        /* Mutex is already held
+         *
+         * Case 1: Mutex is not held by me
+         *     Condition 1: holds
+         *         rmutex->owner != thread_getpid()
+         *
+         * Note for Case 1:
+         *
+         * As a consequence it is necessary to call
+         * mutex_lock(). However the read access to owner is not
+         * locked, and owner can be changed by a thread that is
+         * holding the lock (e.g.: holder unlocks the mutex, new
+         * holder acquired the lock). The atomic access strategy
+         * 'relaxed' ensures, that the value of rmutex->owner is read
+         * consistently.
+         *
+         * It is not necessary to synchronize (make written values
+         * visible) read/write with other threads, because every
+         * write by other threads lets Condition 1 evaluate to
+         * false. They all write either KERNEL_PID_UNDEF or the
+         * pid of the other thread.
+         *
+         * Other threads never set rmutex->owner to the pid of the
+         * current thread. Hence, it is guaranteed that mutex_lock
+         * is eventually called.
+         *
+         * Case 2: Mutex is held by me (relock)
+         *     Condition 2: holds
+         *         rmutex->owner == thread_getpid()
+         *
+         * Note for Case 2:
+         *
+         * Because rmutex->owner is only written by the
+         * owner (me), rmutex->owner stays constant throughout the
+         * complete call and rmutex->refcount is protected
+         * (read/write) by the mutex.
+         */
+
+        /* ensure that owner is read atomically, since I need a consistent value */
+        owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
+        DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n", thread_getpid(), owner);
+
+        /* Case 1: Mutex is not held by me */
+        if (owner != thread_getpid()) {
+            /* wait for the mutex */
+            DEBUG("rmutex %" PRIi16 " : locking mutex\n", thread_getpid());
+
+            mutex_lock(&rmutex->mutex);
+        }
+        /* Case 2: Mutex is held by me (relock) */
+        /* Note: There is nothing to do for Case 2; refcount is incremented below */
+    }
+
+    DEBUG("rmutex %" PRIi16 " : I am now holding the mutex\n", thread_getpid());
+
+    /* I am holding the recursive mutex */
+    DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());
+
+    /* ensure that owner is written atomically, since others need a consistent value */
+    atomic_store_explicit(&rmutex->owner, thread_getpid(), memory_order_relaxed);
+
+    DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());
+
+    /* increase the refcount */
+    rmutex->refcount++;
+}
+
+int rmutex_trylock(rmutex_t *rmutex)
+{
+    kernel_pid_t owner;
+
+    /* try to lock the mutex */
+    if (mutex_trylock(&rmutex->mutex) == 0) {
+        /* ensure that owner is read atomically, since I need a consistent value */
+        owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
+
+        /* Case 1: Mutex is not held by me */
+        if (owner != thread_getpid()) {
+            /* mutex is held by another thread */
+            return 0;
+        }
+        /* Case 2: Mutex is held by me (relock) */
+        /* Note: There is nothing to do for Case 2; refcount is incremented below */
+    }
+
+    /* I am holding the recursive mutex */
+
+    /* ensure that owner is written atomically, since others need a consistent value */
+    atomic_store_explicit(&rmutex->owner, thread_getpid(), memory_order_relaxed);
+
+    /* increase the refcount */
+    rmutex->refcount++;
+    return 1;
+}
+
+void rmutex_unlock(rmutex_t *rmutex)
+{
+    assert(atomic_load_explicit(&rmutex->owner, memory_order_relaxed) == thread_getpid());
+    assert(rmutex->refcount > 0);
+
+    DEBUG("rmutex %" PRIi16 " : decrementing refs\n", thread_getpid());
+
+    /* decrement refcount */
+    rmutex->refcount--;
+
+    /* check if I should still hold the mutex */
+    if (rmutex->refcount == 0) {
+        /* if not, release the mutex */
+
+        DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());
+
+        /* ensure that owner is written only once */
+        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF, memory_order_relaxed);
+
+        DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());
+
+        mutex_unlock(&rmutex->mutex);
+    }
+}