2013-11-27 16:28:31 +01:00
|
|
|
/*
|
2015-12-25 20:59:33 +01:00
|
|
|
* Copyright (C) 2015 Kaspar Schleiser <kaspar@schleiser.de>
|
|
|
|
* 2013 Freie Universität Berlin
|
2010-09-22 15:10:42 +02:00
|
|
|
*
|
2014-07-31 19:45:27 +02:00
|
|
|
* This file is subject to the terms and conditions of the GNU Lesser
|
|
|
|
* General Public License v2.1. See the file LICENSE in the top level
|
|
|
|
* directory for more details.
|
2013-11-27 16:28:31 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @ingroup core_sync
|
2010-09-22 15:10:42 +02:00
|
|
|
* @{
|
2013-11-27 16:28:31 +01:00
|
|
|
*
|
2015-05-22 07:34:41 +02:00
|
|
|
* @file
|
2013-11-27 16:28:31 +01:00
|
|
|
* @brief Kernel mutex implementation
|
|
|
|
*
|
2014-01-28 11:50:12 +01:00
|
|
|
* @author Kaspar Schleiser <kaspar@schleiser.de>
|
2015-09-20 13:47:39 +02:00
|
|
|
* @author Joakim Nohlgård <joakim.nohlgard@eistec.se>
|
2013-11-27 16:28:31 +01:00
|
|
|
*
|
2010-09-22 15:10:42 +02:00
|
|
|
* @}
|
|
|
|
*/
|
|
|
|
|
2020-10-28 14:46:58 +01:00
|
|
|
#include <errno.h>
|
2013-07-23 13:39:50 +02:00
|
|
|
#include <inttypes.h>
|
2020-10-28 14:46:58 +01:00
|
|
|
#include <stdio.h>
|
2013-07-23 13:39:50 +02:00
|
|
|
|
2010-09-22 15:10:42 +02:00
|
|
|
#include "mutex.h"
|
2016-01-04 22:29:34 +01:00
|
|
|
#include "thread.h"
|
2010-10-28 11:22:57 +02:00
|
|
|
#include "sched.h"
|
2013-07-23 13:39:50 +02:00
|
|
|
#include "irq.h"
|
2015-12-25 20:59:33 +01:00
|
|
|
#include "list.h"
|
2010-09-22 15:10:42 +02:00
|
|
|
|
2020-10-22 11:32:06 +02:00
|
|
|
#define ENABLE_DEBUG 0
|
2013-07-23 13:39:50 +02:00
|
|
|
#include "debug.h"
|
2010-09-22 15:10:42 +02:00
|
|
|
|
2021-12-10 15:43:51 +01:00
|
|
|
#if MAXTHREADS > 1
|
|
|
|
|
2020-11-18 09:18:27 +01:00
|
|
|
/**
|
|
|
|
* @brief Block waiting for a locked mutex
|
|
|
|
* @pre IRQs are disabled
|
|
|
|
* @post IRQs are restored to @p irq_state
|
|
|
|
* @post The calling thread is no longer waiting for the mutex, either
|
|
|
|
* because it got the mutex, or because the operation was cancelled
|
|
|
|
* (only possible for @ref mutex_lock_cancelable)
|
|
|
|
*
|
|
|
|
* Most applications don't use @ref mutex_lock_cancelable. Inlining this
|
|
|
|
* function into both @ref mutex_lock and @ref mutex_lock_cancelable is,
|
|
|
|
* therefore, beneficial for the majority of applications.
|
|
|
|
*/
|
2022-09-21 13:37:04 +02:00
|
|
|
static inline __attribute__((always_inline))
void _block(mutex_t *mutex,
            unsigned irq_state,
            uinttxtptr_t pc)
{
    /* pc is only used when MODULE_CORE_MUTEX_DEBUG */
    (void)pc;
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
    /* Report who currently holds the mutex (owner pid and the PC recorded
     * when the owner acquired it) to aid deadlock debugging. */
    printf("[mutex] waiting for thread %" PRIkernel_pid " (pc = 0x%" PRIxTXTPTR
           ")\n",
           mutex->owner, mutex->owner_calling_pc);
#endif
    thread_t *me = thread_get_active();

    /* Fail visibly even if a blocking action is called from somewhere where
     * it's subtly not allowed, eg. board_init */
    assert(me != NULL);

    DEBUG("PID[%" PRIkernel_pid "] mutex_lock() Adding node to mutex queue: "
          "prio: %" PRIu32 "\n", thread_getpid(), (uint32_t)me->priority);
    sched_set_status(me, STATUS_MUTEX_BLOCKED);
    if (mutex->queue.next == MUTEX_LOCKED) {
        /* Fast path: we are the first waiter; link ourselves directly
         * instead of going through the sorted-insert helper. */
        mutex->queue.next = (list_node_t *)&me->rq_entry;
        mutex->queue.next->next = NULL;
    }
    else {
        /* Other waiters already queued: insert by thread priority. */
        thread_add_to_list(&mutex->queue, me);
    }

#ifdef MODULE_CORE_MUTEX_PRIORITY_INHERITANCE
    /* Priority inheritance: if the current owner runs at a lower priority
     * (numerically higher) than us, boost it to ours so it can release the
     * mutex without being starved by mid-priority threads. */
    thread_t *owner = thread_get(mutex->owner);
    if ((owner) && (owner->priority > me->priority)) {
        DEBUG("PID[%" PRIkernel_pid "] prio of %" PRIkernel_pid
              ": %u --> %u\n",
              thread_getpid(), mutex->owner,
              (unsigned)owner->priority, (unsigned)me->priority);
        sched_change_priority(owner, me->priority);
    }
#endif

    /* Re-enable IRQs only after we are safely on the wait queue, then let
     * the scheduler run whoever is ready. */
    irq_restore(irq_state);
    thread_yield_higher();
    /* We were woken up by scheduler. Waker removed us from queue. */
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
    /* We now own the mutex: record the caller PC for future debug output. */
    mutex->owner_calling_pc = pc;
#endif
}
|
|
|
|
|
2022-09-21 13:17:22 +02:00
|
|
|
/**
 * @brief   Common implementation of blocking and non-blocking mutex locking
 *
 * @param   mutex   Mutex to lock
 * @param   block   If true, block until the mutex is obtained; if false,
 *                  fail immediately when the mutex is already locked
 *
 * @retval  true    Mutex obtained
 * @retval  false   Mutex already locked and @p block was false
 */
bool mutex_lock_internal(mutex_t *mutex, bool block)
{
    uinttxtptr_t pc = 0;
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
    /* Record the caller's PC so debug output can show where the lock
     * was taken. */
    pc = cpu_get_caller_pc();
#endif
    unsigned irq_state = irq_disable();

    DEBUG("PID[%" PRIkernel_pid "] mutex_lock_internal(block=%u).\n",
          thread_getpid(), (unsigned)block);

    if (mutex->queue.next == NULL) {
        /* mutex is unlocked. */
        mutex->queue.next = MUTEX_LOCKED;
#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE) \
    || IS_USED(MODULE_CORE_MUTEX_DEBUG)
        /* Track ownership: needed to boost the owner's priority and/or
         * to print diagnostics. */
        thread_t *me = thread_get_active();
        mutex->owner = me->pid;
#endif
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
        mutex->owner_calling_pc = pc;
#endif
#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
        /* Remember the unboosted priority so unlock can restore it. */
        mutex->owner_original_priority = me->priority;
#endif
        DEBUG("PID[%" PRIkernel_pid "] mutex_lock(): early out.\n",
              thread_getpid());
        irq_restore(irq_state);
    }
    else {
        if (!block) {
            /* Locked and caller asked not to wait: bail out. */
            irq_restore(irq_state);
            return false;
        }
        /* _block() restores IRQs and returns once we own the mutex. */
        _block(mutex, irq_state, pc);
    }

    return true;
}
|
|
|
|
|
2020-11-13 20:17:12 +01:00
|
|
|
/**
 * @brief   Lock a mutex, allowing the operation to be cancelled via
 *          mutex_cancel()
 *
 * @param   mc  Cancellation context referencing the mutex to lock
 *
 * @retval  0           Mutex obtained
 * @retval  -ECANCELED  Operation was cancelled (before or while blocking)
 */
int mutex_lock_cancelable(mutex_cancel_t *mc)
{
    uinttxtptr_t pc = 0;
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
    pc = cpu_get_caller_pc();
#endif
    unsigned irq_state = irq_disable();

    DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable()\n",
          thread_getpid());

    /* Cancellation may have happened before we even tried to lock. */
    if (mc->cancelled) {
        DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable cancelled "
              "early.\n", thread_getpid());
        irq_restore(irq_state);
        return -ECANCELED;
    }

    mutex_t *mutex = mc->mutex;

    if (mutex->queue.next == NULL) {
        /* mutex is unlocked. */
        mutex->queue.next = MUTEX_LOCKED;
#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE) \
    || IS_USED(MODULE_CORE_MUTEX_DEBUG)
        /* Track ownership for priority inheritance / debug output. */
        thread_t *me = thread_get_active();
        mutex->owner = me->pid;
#endif
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
        mutex->owner_calling_pc = pc;
#endif
#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
        mutex->owner_original_priority = me->priority;
#endif
        DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() early out.\n",
              thread_getpid());
        irq_restore(irq_state);
        return 0;
    }
    else {
        /* _block() restores IRQs; we may have been woken either because we
         * got the mutex or because mutex_cancel() removed us. */
        _block(mutex, irq_state, pc);
        if (mc->cancelled) {
            DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() "
                  "cancelled.\n", thread_getpid());
        }
        return (mc->cancelled) ? -ECANCELED : 0;
    }
}
|
|
|
|
|
2015-12-25 20:59:33 +01:00
|
|
|
/**
 * @brief   Unlock a mutex, waking the highest-priority waiter (if any)
 *
 * If no thread is waiting, the mutex simply becomes unlocked. Otherwise the
 * head of the (priority-sorted) wait queue is made runnable and ownership
 * passes to it; if that empties the queue the mutex stays in the
 * MUTEX_LOCKED state. With priority inheritance enabled, the previous
 * owner's priority is restored to its pre-boost value.
 *
 * @param   mutex   Mutex to unlock (unlocking an unlocked mutex is a no-op)
 */
void mutex_unlock(mutex_t *mutex)
{
    unsigned irqstate = irq_disable();

    DEBUG("PID[%" PRIkernel_pid "] mutex_unlock(): queue.next: %p\n",
          thread_getpid(), (void *)mutex->queue.next);

    if (mutex->queue.next == NULL) {
        /* the mutex was not locked */
        irq_restore(irqstate);
        return;
    }

    if (mutex->queue.next == MUTEX_LOCKED) {
        mutex->queue.next = NULL;
        /* the mutex was locked and no thread was waiting for it */
        irq_restore(irqstate);
        return;
    }

    /* Wake the first (highest-priority) waiter. */
    list_node_t *next = list_remove_head(&mutex->queue);

    thread_t *process = container_of((clist_node_t *)next, thread_t, rq_entry);

    DEBUG("PID[%" PRIkernel_pid "] mutex_unlock(): waking up waiting thread %"
          PRIkernel_pid "\n", thread_getpid(), process->pid);
    sched_set_status(process, STATUS_PENDING);

    /* Ownership transfers to the woken thread: if it was the last waiter,
     * the mutex remains locked (by it) with an empty queue. */
    if (!mutex->queue.next) {
        mutex->queue.next = MUTEX_LOCKED;
    }

    uint16_t process_priority = process->priority;

#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
    /* Undo any priority boost the (old) owner received while holding
     * this mutex. */
    thread_t *owner = thread_get(mutex->owner);
    if ((owner) && (owner->priority != mutex->owner_original_priority)) {
        /* Fix: print the priority we are restoring to, not the current
         * (boosted) priority twice. */
        DEBUG("PID[%" PRIkernel_pid "] prio %u --> %u\n",
              owner->pid,
              (unsigned)owner->priority,
              (unsigned)mutex->owner_original_priority);
        sched_change_priority(owner, mutex->owner_original_priority);
    }
#endif
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
    /* No meaningful caller PC until the new owner records one. */
    mutex->owner_calling_pc = 0;
#endif

    irq_restore(irqstate);
    /* Yield if the woken thread outranks us. */
    sched_switch(process_priority);
}
|
2014-02-18 08:25:16 +01:00
|
|
|
|
2015-12-25 20:59:33 +01:00
|
|
|
/**
 * @brief   Atomically unlock a mutex and put the calling thread to sleep
 *
 * Equivalent to mutex_unlock() followed by thread_sleep(), but performed
 * with IRQs disabled so no wakeup can be lost in between.
 *
 * @param   mutex   Mutex to unlock before sleeping
 */
void mutex_unlock_and_sleep(mutex_t *mutex)
{
    DEBUG("PID[%" PRIkernel_pid "] mutex_unlock_and_sleep(): queue.next: %p\n",
          thread_getpid(), (void *)mutex->queue.next);
    unsigned irqstate = irq_disable();

    if (mutex->queue.next) {
        if (mutex->queue.next == MUTEX_LOCKED) {
            /* locked, but no waiters: simply mark unlocked */
            mutex->queue.next = NULL;
        }
        else {
            /* wake the first (highest-priority) waiter; it becomes
             * the new owner */
            list_node_t *next = list_remove_head(&mutex->queue);
            thread_t *process = container_of((clist_node_t *)next, thread_t,
                                             rq_entry);
            /* NOTE(review): unlike the other DEBUG lines here, this prints
             * the woken thread's pid in the PID[] slot, not the caller's —
             * looks inconsistent; confirm intent. */
            DEBUG("PID[%" PRIkernel_pid "] mutex_unlock_and_sleep(): waking up "
                  "waiter.\n", process->pid);
            sched_set_status(process, STATUS_PENDING);
            /* last waiter removed: mutex stays locked with empty queue */
            if (!mutex->queue.next) {
                mutex->queue.next = MUTEX_LOCKED;
            }
        }
    }

    DEBUG("PID[%" PRIkernel_pid "] mutex_unlock_and_sleep(): going to sleep.\n",
          thread_getpid());
    /* Sleep before re-enabling IRQs so the unlock+sleep pair is atomic. */
    sched_set_status(thread_get_active(), STATUS_SLEEPING);
    irq_restore(irqstate);
    thread_yield_higher();
}
|
2020-11-13 20:17:12 +01:00
|
|
|
|
|
|
|
void mutex_cancel(mutex_cancel_t *mc)
|
|
|
|
{
|
|
|
|
unsigned irq_state = irq_disable();
|
2021-01-19 17:47:23 +01:00
|
|
|
|
2020-11-13 20:17:12 +01:00
|
|
|
mc->cancelled = 1;
|
|
|
|
|
|
|
|
mutex_t *mutex = mc->mutex;
|
|
|
|
thread_t *thread = mc->thread;
|
2021-01-19 17:47:23 +01:00
|
|
|
|
2020-11-13 20:17:12 +01:00
|
|
|
if (thread_is_active(thread)) {
|
|
|
|
/* thread is still running or about to run, so it will check
|
|
|
|
* `mc-cancelled` in time */
|
|
|
|
irq_restore(irq_state);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((mutex->queue.next != MUTEX_LOCKED)
|
2021-01-19 17:47:23 +01:00
|
|
|
&& (mutex->queue.next != NULL)
|
|
|
|
&& list_remove(&mutex->queue, (list_node_t *)&thread->rq_entry)) {
|
2020-11-13 20:17:12 +01:00
|
|
|
/* Thread was queued and removed from list, wake it up */
|
|
|
|
if (mutex->queue.next == NULL) {
|
|
|
|
mutex->queue.next = MUTEX_LOCKED;
|
|
|
|
}
|
|
|
|
sched_set_status(thread, STATUS_PENDING);
|
|
|
|
irq_restore(irq_state);
|
|
|
|
sched_switch(thread->priority);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
irq_restore(irq_state);
|
|
|
|
}
|
2020-12-09 11:03:41 +01:00
|
|
|
|
2021-12-10 15:43:51 +01:00
|
|
|
#else /* MAXTHREADS < 2 */
/* With a single thread there is nothing to synchronize and the mutex
 * implementation above is compiled out. ISO C forbids an empty translation
 * unit, so provide a dummy typedef to keep pedantic compilers happy. */
typedef int dont_be_pedantic;
#endif
|