Mirror of https://github.com/RIOT-OS/RIOT.git
core/mutex: Add mutex_cancel() & mutex_lock_cancelable()
Add a cancelable version of `mutex_lock()` with the obvious name `mutex_lock_cancelable()`. This function returns `0` on success, and `-ECANCELED` when the calling thread was unblocked via a call to `mutex_cancel()` (and hence without obtaining the mutex). This is intended to simplify the implementation of `xtimer_mutex_lock_timeout()` and to enable the implementation of `ztimer_mutex_lock_timeout()`.
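Below is a minimal sketch (not part of this commit) of how the new API is meant to be combined with a timer to obtain a lock-with-timeout. It mirrors the canonical ztimer example documented at `mutex_cancel()` further down, but uses the existing xtimer module mentioned above; the helper name `mutex_lock_timeout_us` and the callback name `_cancel_cb` are illustrative only.

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.c}
#include <errno.h>

#include "mutex.h"
#include "xtimer.h"

static void _cancel_cb(void *arg)
{
    /* mutex_cancel() is explicitly safe to call from IRQ (timer) context */
    mutex_cancel(arg);
}

/* Lock `mutex`, giving up after `timeout_us` microseconds.
 * Returns 0 on success, -ECANCELED on timeout. */
int mutex_lock_timeout_us(mutex_t *mutex, uint32_t timeout_us)
{
    mutex_cancel_t mc = mutex_cancel_init(mutex);
    xtimer_t timer = { .callback = _cancel_cb, .arg = &mc };

    xtimer_set(&timer, timeout_us);
    int res = mutex_lock_cancelable(&mc);   /* 0 or -ECANCELED */
    xtimer_remove(&timer);                  /* harmless if it already fired */
    return res;
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~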
parent ded50b2494
commit e348407888
core/include/mutex.h

@@ -126,6 +126,18 @@ typedef struct {
    list_node_t queue;
} mutex_t;

/**
 * @brief   A cancellation structure for use with @ref mutex_lock_cancelable
 *          and @ref mutex_cancel
 *
 * @note    The contents of this structure are internal.
 */
typedef struct {
    mutex_t *mutex;     /**< The mutex to lock */
    thread_t *thread;   /**< The thread trying to lock the mutex */
    uint8_t cancelled;  /**< Flag whether the lock operation has been cancelled */
} mutex_cancel_t;

/**
 * @brief Static initializer for mutex_t.
 * @details This initializer is preferable to mutex_init().
@@ -158,6 +170,22 @@ static inline void mutex_init(mutex_t *mutex)
    mutex->queue.next = NULL;
}

/**
 * @brief   Initialize a mutex cancellation structure
 * @param   mutex   The mutex that the calling thread wants to lock
 * @return  The cancellation structure for use with @ref mutex_lock_cancelable
 *          and @ref mutex_cancel
 *
 * @note    This function is considered internal. Out-of-tree users should be
 *          aware that breaking API changes or removal of this API without
 *          a deprecation period might happen.
 */
static inline mutex_cancel_t mutex_cancel_init(mutex_t *mutex)
{
    mutex_cancel_t result = { mutex, thread_get_active(), 0 };
    return result;
}

/**
 * @brief   Tries to get a mutex, non-blocking.
 *
@@ -195,6 +223,30 @@ static inline int mutex_trylock(mutex_t *mutex)
 */
void mutex_lock(mutex_t *mutex);

/**
 * @brief   Locks a mutex, blocking. This function can be canceled.
 *
 * @param[in,out]   mc  Mutex cancellation structure to work on
 *
 * @retval  0           The mutex was locked by the caller
 * @retval  -ECANCELED  The mutex was ***NOT*** locked, the operation was
 *                      canceled. See @ref mutex_cancel
 *
 * @note    This function is considered internal. Out-of-tree users should be
 *          aware that breaking API changes or removal of this API without
 *          a deprecation period might happen.
 *
 * @pre     Must be called in thread context
 * @pre     @p mc has been initialized with @ref mutex_cancel_init by the
 *          calling thread.
 * @pre     @p mc has ***NOT*** been used for previous calls to
 *          this function. (Reinitialize before reusing!)
 *
 * @post    The mutex referred to by @p mc is locked and held by the calling
 *          thread, unless `-ECANCELED` was returned.
 */
int mutex_lock_cancelable(mutex_cancel_t *mc);

/**
 * @brief   Unlocks the mutex.
 *
@@ -215,6 +267,71 @@ void mutex_unlock(mutex_t *mutex);
 */
void mutex_unlock_and_sleep(mutex_t *mutex);

/**
 * @brief   Cancels a call to @ref mutex_lock_cancelable
 *
 * @param[in,out]   mc  Mutex cancellation structure referring to the
 *                      thread calling @ref mutex_lock_cancelable and to
 *                      the mutex to cancel the operation on
 *
 * @note    This function is considered internal. Out-of-tree users should be
 *          aware that breaking API changes or removal of this API without
 *          a deprecation period might happen.
 *
 * @pre     @p mc is used to cancel at most one call to
 *          @ref mutex_lock_cancelable. (You can reinitialize the same memory
 *          to safely reuse it.)
 * @warning You ***MUST NOT*** call this function once the thread referred to
 *          by @p mc reuses the mutex object referred to by @p mc (not counting
 *          the call to @ref mutex_lock_cancelable that @p mc was used in).
 * @note    It is safe to call this function from IRQ context, e.g. from a
 *          timer interrupt.
 * @note    It is safe to call this function more than once on the same @p mc
 *          while it is still valid (see the warning above). The first call
 *          will cancel the operation and subsequent calls will have no effect.
 *
 * @details If the thread referred to by @p mc is currently running (or
 *          pending), a subsequent call from that thread to
 *          @ref mutex_lock_cancelable will also fail.
 *
 * Canonical use:
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.c}
 * static void timeout_cb(void *arg) {
 *     mutex_cancel(arg);
 * }
 *
 * int ztimer_mutex_lock_timeout(ztimer_clock_t *clock, mutex_t *mutex,
 *                               uint32_t timeout)
 * {
 *     mutex_cancel_t mc = mutex_cancel_init(mutex);
 *     ztimer_t t;
 *     t.callback = timeout_cb;
 *     t.arg = &mc;
 *     ztimer_set(clock, &t, timeout);
 *     if (0 == mutex_lock_cancelable(&mc)) {
 *         ztimer_remove(clock, &t);
 *         return 0;
 *     }
 *     return -ECANCELED;
 * }
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The example above gives a simple implementation of mutex locking with a
 * timeout. There are two corner cases:
 *
 * 1. The call to @ref mutex_cancel could occur *before* the call to
 *    @ref mutex_lock_cancelable. (E.g. for `timeout == 0`.)
 * 2. The call to @ref mutex_cancel could occur right after the mutex was
 *    *successfully* obtained, but before `ztimer_remove()` was executed.
 *
 * In the first corner case the cancellation is stored in @p mc. Hence, the
 * subsequent call to @ref mutex_lock_cancelable is indeed canceled. In the
 * second corner case the cancellation is also stored in @p mc but never acted
 * upon: the mutex cancellation structure @p mc is not allowed to be reused
 * without reinitialization.
 */
void mutex_cancel(mutex_cancel_t *mc);

#ifdef __cplusplus
}
#endif
core/mutex.c (63 additions)
@@ -82,6 +82,39 @@ void mutex_lock(mutex_t *mutex)
    }
}

int mutex_lock_cancelable(mutex_cancel_t *mc)
{
    unsigned irq_state = irq_disable();

    DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable()\n",
          thread_getpid());

    if (mc->cancelled) {
        DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable cancelled "
              "early.\n", thread_getpid());
        irq_restore(irq_state);
        return -ECANCELED;
    }

    mutex_t *mutex = mc->mutex;
    if (mutex->queue.next == NULL) {
        /* mutex is unlocked. */
        mutex->queue.next = MUTEX_LOCKED;
        DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() early out.\n",
              thread_getpid());
        irq_restore(irq_state);
        return 0;
    }
    else {
        _block(mutex, irq_state);
        if (mc->cancelled) {
            DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() "
                  "cancelled.\n", thread_getpid());
        }
        return (mc->cancelled) ? -ECANCELED : 0;
    }
}

void mutex_unlock(mutex_t *mutex)
{
    unsigned irqstate = irq_disable();
@@ -148,3 +181,33 @@ void mutex_unlock_and_sleep(mutex_t *mutex)
    irq_restore(irqstate);
    thread_yield_higher();
}

void mutex_cancel(mutex_cancel_t *mc)
{
    unsigned irq_state = irq_disable();
    mc->cancelled = 1;

    mutex_t *mutex = mc->mutex;
    thread_t *thread = mc->thread;
    if (thread_is_active(thread)) {
        /* thread is still running or about to run, so it will check
         * `mc->cancelled` in time */
        irq_restore(irq_state);
        return;
    }

    if ((mutex->queue.next != MUTEX_LOCKED)
        && (mutex->queue.next != NULL)
        && list_remove(&mutex->queue, (list_node_t *)&thread->rq_entry)) {
        /* Thread was queued and removed from the list, wake it up */
        if (mutex->queue.next == NULL) {
            mutex->queue.next = MUTEX_LOCKED;
        }
        sched_set_status(thread, STATUS_PENDING);
        irq_restore(irq_state);
        sched_switch(thread->priority);
        return;
    }

    irq_restore(irq_state);
}