Merge pull request #4557 from kaspar030/introduce_intrusive_singly_linked_list

core: mutex: several optimizations

commit f626ee5969

core/include/list.h (new file, 77 lines)
@@ -0,0 +1,77 @@
/*
 * Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */

/**
 * @addtogroup  core_util
 * @{
 *
 * @file
 * @brief       Intrusive linked list
 *
 * Lists are represented by a list node pointing to the first actual list element.
 *
 * @author      Kaspar Schleiser <kaspar@schleiser.de>
 */

#ifndef LIST_H
#define LIST_H

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief   List node structure
 *
 * Used as-is as reference to a list, or as member of any data structure that
 * should be a member of a list.
 *
 * Actual list objects should have a @c list_node_t as member and then use
 * the container_of() macro in list operations.
 * See @ref thread_add_to_list() as an example.
 */
typedef struct list_node {
    struct list_node *next;     /**< pointer to next list entry */
} list_node_t;

/**
 * @brief   Insert object into list
 *
 * If called with a list reference as @p node, the new node will become the
 * new list head.
 *
 * @param[in] node      list node before the new entry
 * @param[in] new_node  list node to insert
 */
static inline void list_add(list_node_t *node, list_node_t *new_node) {
    new_node->next = node->next;
    node->next = new_node;
}

/**
 * @brief   Removes the head of the list and returns it
 *
 * @param[in] list  Pointer to the list itself, where list->next points
 *                  to the root node
 *
 * @return  the removed old list head, or NULL if the list was empty
 */
static inline list_node_t *list_remove_head(list_node_t *list) {
    list_node_t *head = list->next;
    if (head) {
        list->next = head->next;
    }
    return head;
}

#ifdef __cplusplus
}
#endif

#endif /* LIST_H */
/** @} */
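Usage note: the node above is meant to be embedded in a larger structure, and the enclosing object is recovered with container_of(), as the header's comment describes. Below is a minimal standalone sketch of that pattern; the item_t type, the local container_of fallback and main() are illustrative only and not part of the PR, and the example assumes RIOT's core/include/list.h is on the include path.

#include <stdio.h>
#include <stddef.h>
#include "list.h"   /* core/include/list.h introduced by this PR */

/* RIOT provides container_of(); define a fallback so the sketch stands alone. */
#ifndef container_of
#define container_of(PTR, TYPE, MEMBER) \
    ((TYPE *)((char *)(PTR) - offsetof(TYPE, MEMBER)))
#endif

/* hypothetical payload type embedding the intrusive node */
typedef struct {
    list_node_t node;   /* must be a member, not a pointer */
    int value;
} item_t;

int main(void)
{
    list_node_t head = { .next = NULL };    /* empty list: head.next == NULL */
    item_t a = { .node = { NULL }, .value = 1 };
    item_t b = { .node = { NULL }, .value = 2 };

    list_add(&head, &a.node);   /* a becomes the first element */
    list_add(&head, &b.node);   /* b is inserted in front of a */

    for (list_node_t *n = list_remove_head(&head); n; n = list_remove_head(&head)) {
        item_t *it = container_of(n, item_t, node);
        printf("%d\n", it->value);          /* prints 2, then 1 */
    }
    return 0;
}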
core/include/mutex.h

@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2013, 2014 Freie Universität Berlin
+ * Copyright (C) 2015 Kaspar Schleiser <kaspar@schleiser.de>
+ *               2013, 2014 Freie Universität Berlin
  *
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License v2.1. See the file LICENSE in the top level
@@ -21,7 +22,9 @@
 #ifndef MUTEX_H_
 #define MUTEX_H_
 
-#include "priority_queue.h"
+#include <stddef.h>
+
+#include "list.h"
 #include "atomic.h"
 
 #ifdef __cplusplus
@@ -31,27 +34,20 @@
 /**
  * @brief Mutex structure. Must never be modified by the user.
  */
-typedef struct mutex_t {
-    /* fields are managed by mutex functions, don't touch */
-    /**
-     * @brief The value of the mutex; 0 if unlocked, 1 if locked. **Must
-     *        never be changed by the user.**
-     * @internal
-     */
-    atomic_int_t val;
+typedef struct {
     /**
      * @brief The process waiting queue of the mutex. **Must never be changed
      *        by the user.**
      * @internal
      */
-    priority_queue_t queue;
+    list_node_t queue;
 } mutex_t;
 
 /**
  * @brief Static initializer for mutex_t.
  * @details This initializer is preferable to mutex_init().
  */
-#define MUTEX_INIT { ATOMIC_INIT(0), PRIORITY_QUEUE_INIT }
+#define MUTEX_INIT { { NULL } }
 
 /**
  * @brief Initializes a mutex object.
@@ -61,10 +57,24 @@ typedef struct mutex_t {
  */
 static inline void mutex_init(mutex_t *mutex)
 {
-    mutex_t empty_mutex = MUTEX_INIT;
-    *mutex = empty_mutex;
+    mutex->queue.next = NULL;
 }
 
+/**
+ * @brief Lock a mutex, blocking or non-blocking.
+ *
+ * @details For commit purposes you should probably use mutex_trylock() and
+ *          mutex_lock() instead.
+ *
+ * @param[in] mutex     Mutex object to lock. Has to be initialized first.
+ *                      Must not be NULL.
+ * @param[in] blocking  if true, block until mutex is available.
+ *
+ * @return 1 if mutex was unlocked, now it is locked.
+ * @return 0 if the mutex was locked.
+ */
+int _mutex_lock(mutex_t *mutex, int blocking);
+
 /**
  * @brief Tries to get a mutex, non-blocking.
  *
@@ -74,14 +84,20 @@ static inline void mutex_init(mutex_t *mutex)
  * @return 1 if mutex was unlocked, now it is locked.
  * @return 0 if the mutex was locked.
  */
-int mutex_trylock(mutex_t *mutex);
+static inline int mutex_trylock(mutex_t *mutex)
+{
+    return _mutex_lock(mutex, 0);
+}
 
 /**
  * @brief Locks a mutex, blocking.
  *
 * @param[in] mutex Mutex object to lock. Has to be initialized first. Must not be NULL.
  */
-void mutex_lock(mutex_t *mutex);
+static inline void mutex_lock(mutex_t *mutex)
+{
+    _mutex_lock(mutex, 1);
+}
 
 /**
  * @brief Unlocks the mutex.
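For callers nothing changes: a mutex is still set up with MUTEX_INIT or mutex_init() and used through mutex_lock(), mutex_trylock() and mutex_unlock(); only the internals shrink from an atomic flag plus priority queue to a single list_node_t. A hedged usage sketch follows; shared_counter and worker() are made up for illustration, and this assumes a regular RIOT build with core/include/mutex.h on the include path.

#include "mutex.h"

static mutex_t lock = MUTEX_INIT;   /* statically initialized: queue.next == NULL */
static unsigned shared_counter;

static void worker(void)
{
    if (mutex_trylock(&lock)) {     /* returns 1 if the lock was taken, 0 otherwise */
        shared_counter++;
        mutex_unlock(&lock);
    }

    mutex_lock(&lock);              /* blocks until the mutex is available */
    shared_counter++;
    mutex_unlock(&lock);
}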
core/include/thread.h

@@ -29,6 +29,7 @@
 #include "arch/thread_arch.h"
 #include "cpu_conf.h"
 #include "sched.h"
+#include "list.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -315,6 +316,21 @@ char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_sta
  */
 void thread_print_msg_queue(void);
 
+/**
+ * @brief Add thread to list, sorted by priority (internal)
+ *
+ * This will add @p thread to @p list sorted by the thread priority.
+ * It reuses the thread's rq_entry field.
+ * Used internally by msg and mutex implementations.
+ *
+ * @note Only use for threads *not on any runqueue* and with interrupts
+ *       disabled.
+ *
+ * @param[in] list    ptr to list root node
+ * @param[in] thread  thread to add
+ */
+void thread_add_to_list(list_node_t *list, thread_t *thread);
+
 #ifdef DEVELHELP
 /**
  * @brief Returns the name of a process
core/mutex.c (114 lines changed)

@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2013 Freie Universität Berlin
+ * Copyright (C) 2015 Kaspar Schleiser <kaspar@schleiser.de>
+ *               2013 Freie Universität Berlin
  *
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License v2.1. See the file LICENSE in the top level
@@ -29,102 +30,101 @@
 #include "thread.h"
 #include "irq.h"
+#include "thread.h"
+#include "list.h"
 
 #define ENABLE_DEBUG (0)
 #include "debug.h"
 
-static void mutex_wait(struct mutex_t *mutex);
+#define MUTEX_LOCKED ((void*)-1)
 
-int mutex_trylock(struct mutex_t *mutex)
-{
-    DEBUG("%s: trylocking to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
-    return atomic_set_to_one(&mutex->val);
-}
-
-void mutex_lock(struct mutex_t *mutex)
-{
-    DEBUG("%s: trying to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
-
-    if (atomic_set_to_one(&mutex->val) == 0) {
-        /* mutex was locked. */
-        mutex_wait(mutex);
-    }
-}
-
-static void mutex_wait(struct mutex_t *mutex)
+int _mutex_lock(mutex_t *mutex, int blocking)
 {
     unsigned irqstate = irq_disable();
-    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
+    DEBUG("%s: Mutex in use.\n", sched_active_thread->name);
 
-    if (atomic_set_to_one(&mutex->val)) {
-        /* somebody released the mutex. return. */
-        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
+    if (mutex->queue.next == NULL) {
+        /* mutex is unlocked. */
+        mutex->queue.next = MUTEX_LOCKED;
+        DEBUG("%s: mutex_wait early out.\n", sched_active_thread->name);
         irq_restore(irqstate);
-        return;
+        return 1;
     }
+    else if (blocking) {
+        thread_t *me = (thread_t*) sched_active_thread;
+        DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", me->name, (uint32_t)me->priority);
+        sched_set_status(me, STATUS_MUTEX_BLOCKED);
+        if (mutex->queue.next == MUTEX_LOCKED) {
+            mutex->queue.next = (list_node_t*)&me->rq_entry;
+            mutex->queue.next->next = NULL;
+        }
+        else {
+            thread_add_to_list(&mutex->queue, me);
+        }
+        irq_restore(irqstate);
+        thread_yield_higher();
+        /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
+        return 1;
+    }
+    else {
+        irq_restore(irqstate);
+        return 0;
+    }
-
-    sched_set_status((thread_t*) sched_active_thread, STATUS_MUTEX_BLOCKED);
-
-    priority_queue_node_t n;
-    n.priority = (unsigned int) sched_active_thread->priority;
-    n.data = (unsigned int) sched_active_thread;
-    n.next = NULL;
-
-    DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", sched_active_thread->name, n.priority);
-
-    priority_queue_add(&(mutex->queue), &n);
-
-    irq_restore(irqstate);
-
-    thread_yield_higher();
-
-    /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
 }
 
-void mutex_unlock(struct mutex_t *mutex)
+void mutex_unlock(mutex_t *mutex)
 {
     unsigned irqstate = irq_disable();
-    DEBUG("mutex_unlock(): val: %u pid: %" PRIkernel_pid "\n", ATOMIC_VALUE(mutex->val), sched_active_pid);
+    DEBUG("mutex_unlock(): queue.next: 0x%08x pid: %" PRIkernel_pid "\n", (unsigned)mutex->queue.next, sched_active_pid);
 
-    if (ATOMIC_VALUE(mutex->val) == 0) {
+    if (mutex->queue.next == NULL) {
        /* the mutex was not locked */
        irq_restore(irqstate);
        return;
     }
 
-    priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
-    if (!next) {
+    if (mutex->queue.next == MUTEX_LOCKED) {
+        mutex->queue.next = NULL;
        /* the mutex was locked and no thread was waiting for it */
-        ATOMIC_VALUE(mutex->val) = 0;
        irq_restore(irqstate);
        return;
     }
 
-    thread_t *process = (thread_t *) next->data;
+    list_node_t *next = (list_node_t*) list_remove_head(&mutex->queue);
+
+    thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry);
 
     DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n", process->pid);
     sched_set_status(process, STATUS_PENDING);
 
+    if (!mutex->queue.next) {
+        mutex->queue.next = MUTEX_LOCKED;
+    }
+
     uint16_t process_priority = process->priority;
     irq_restore(irqstate);
     sched_switch(process_priority);
 }
 
-void mutex_unlock_and_sleep(struct mutex_t *mutex)
+void mutex_unlock_and_sleep(mutex_t *mutex)
 {
-    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
+    DEBUG("%s: unlocking mutex. queue.next: 0x%08x pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, (unsigned)mutex->queue.next, sched_active_pid);
     unsigned irqstate = irq_disable();
 
-    if (ATOMIC_VALUE(mutex->val) != 0) {
-        priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
-        if (next) {
-            thread_t *process = (thread_t *) next->data;
-            DEBUG("%s: waking up waiter.\n", process->name);
-            sched_set_status(process, STATUS_PENDING);
+    if (mutex->queue.next) {
+        if (mutex->queue.next == MUTEX_LOCKED) {
+            mutex->queue.next = NULL;
         }
         else {
-            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
+            list_node_t *next = list_remove_head(&mutex->queue);
+            thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry);
+            DEBUG("%s: waking up waiter.\n", process->name);
+            sched_set_status(process, STATUS_PENDING);
+            if (!mutex->queue.next) {
+                mutex->queue.next = MUTEX_LOCKED;
+            }
         }
     }
 
     DEBUG("%s: going to sleep.\n", sched_active_thread->name);
     sched_set_status((thread_t*) sched_active_thread, STATUS_SLEEPING);
     irq_restore(irqstate);
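With the atomic val field gone, the whole mutex state lives in queue.next: NULL means unlocked, the MUTEX_LOCKED sentinel ((void*)-1) means locked with no waiters, and anything else is the head of the priority-sorted waiter list. The following standalone model only illustrates that three-state encoding; mutex_model_t, describe() and main() are stand-ins for illustration and not RIOT code.

#include <stdio.h>

typedef struct node { struct node *next; } node_t;   /* stand-in for list_node_t */
typedef struct { node_t queue; } mutex_model_t;       /* stand-in for mutex_t */

#define MUTEX_LOCKED ((node_t *)-1)                   /* same sentinel value as in mutex.c */

static const char *describe(const mutex_model_t *m)
{
    if (m->queue.next == NULL) {
        return "unlocked";
    }
    if (m->queue.next == MUTEX_LOCKED) {
        return "locked, no waiters";
    }
    return "locked, threads queued";
}

int main(void)
{
    mutex_model_t m = { { NULL } };
    printf("%s\n", describe(&m));          /* unlocked */

    m.queue.next = MUTEX_LOCKED;           /* what _mutex_lock() does on the fast path */
    printf("%s\n", describe(&m));          /* locked, no waiters */

    node_t waiter = { NULL };
    m.queue.next = &waiter;                /* a blocked thread's rq_entry gets linked in */
    printf("%s\n", describe(&m));          /* locked, threads queued */
    return 0;
}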
core/thread.c

@@ -21,6 +21,7 @@
 #include <errno.h>
 #include <stdio.h>
 
+#include "assert.h"
 #include "thread.h"
 #include "irq.h"
 
@@ -104,6 +105,25 @@ void thread_yield(void)
     thread_yield_higher();
 }
 
+void thread_add_to_list(list_node_t *list, thread_t *thread)
+{
+    assert (thread->status < STATUS_ON_RUNQUEUE);
+
+    uint16_t my_prio = thread->priority;
+    list_node_t *new_node = (list_node_t*)&thread->rq_entry;
+
+    while (list->next) {
+        thread_t *list_entry = container_of((clist_node_t*)list->next, thread_t, rq_entry);
+        if (list_entry->priority > my_prio) {
+            break;
+        }
+        list = list->next;
+    }
+
+    new_node->next = list->next;
+    list->next = new_node;
+}
+
 #ifdef DEVELHELP
 uintptr_t thread_measure_stack_free(char *stack)
 {
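thread_add_to_list() walks the list until it finds the first entry whose priority value is larger (that is, a lower priority, since RIOT treats smaller values as higher priority) and inserts the new node in front of it, so equally-prioritized threads stay in FIFO order. The same walk works for any intrusive list; here is a standalone sketch with a made-up entry_t type, not RIOT code.

#include <stdio.h>
#include <stddef.h>

typedef struct node { struct node *next; } node_t;

typedef struct {
    node_t rq_entry;        /* intrusive link, like thread_t::rq_entry */
    unsigned priority;      /* lower value = higher priority */
} entry_t;

#define entry_of(n) ((entry_t *)((char *)(n) - offsetof(entry_t, rq_entry)))

/* Same insertion logic as thread_add_to_list(), applied to the stand-in types. */
static void add_sorted(node_t *list, entry_t *e)
{
    while (list->next) {
        if (entry_of(list->next)->priority > e->priority) {
            break;
        }
        list = list->next;
    }
    e->rq_entry.next = list->next;
    list->next = &e->rq_entry;
}

int main(void)
{
    node_t head = { NULL };
    entry_t a = { { NULL }, 3 }, b = { { NULL }, 1 }, c = { { NULL }, 3 };

    add_sorted(&head, &a);
    add_sorted(&head, &b);
    add_sorted(&head, &c);   /* same priority as a: queued after it (FIFO) */

    for (node_t *n = head.next; n; n = n->next) {
        printf("%u\n", entry_of(n)->priority);   /* prints 1, 3, 3 */
    }
    return 0;
}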
@@ -44,7 +44,7 @@ class mutex {
 public:
     using native_handle_type = mutex_t*;
 
-    inline constexpr mutex() noexcept : m_mtx{0, PRIORITY_QUEUE_INIT} {}
+    inline constexpr mutex() noexcept : m_mtx{0} {}
     ~mutex();
 
     void lock();
@@ -19,6 +19,7 @@
 
 #include <time.h>
 #include "mutex.h"
+#include "priority_queue.h"
 
 #if defined(CPU_CC430) || defined(CPU_MSP430FXYZ)
 #   include "msp430_types.h"
@@ -120,7 +121,7 @@ int pthread_cond_destroy(struct pthread_cond_t *cond);
  * @param[in, out] mutex pre-allocated mutex variable structure.
  * @return returns 0 on success, an errorcode otherwise.
  */
-int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex);
+int pthread_cond_wait(struct pthread_cond_t *cond, mutex_t *mutex);
 
 /**
  * @brief blocks the calling thread until the specified condition cond is signalled
@@ -129,7 +130,7 @@ int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex);
  * @param[in] abstime pre-allocated timeout.
  * @return returns 0 on success, an errorcode otherwise.
  */
-int pthread_cond_timedwait(struct pthread_cond_t *cond, struct mutex_t *mutex, const struct timespec *abstime);
+int pthread_cond_timedwait(struct pthread_cond_t *cond, mutex_t *mutex, const struct timespec *abstime);
 
 /**
  * @brief unblock at least one of the threads that are blocked on the specified condition variable cond
@@ -71,7 +71,7 @@ typedef struct pthread_thread {
 } pthread_thread_t;
 
 static pthread_thread_t *volatile pthread_sched_threads[MAXTHREADS];
-static struct mutex_t pthread_mutex;
+static mutex_t pthread_mutex;
 
 static volatile kernel_pid_t pthread_reaper_pid = KERNEL_PID_UNDEF;
 
@@ -92,7 +92,7 @@ int pthread_cond_destroy(struct pthread_cond_t *cond)
     return 0;
 }
 
-int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex)
+int pthread_cond_wait(struct pthread_cond_t *cond, mutex_t *mutex)
 {
     priority_queue_node_t n;
     n.priority = sched_active_thread->priority;
@@ -118,7 +118,7 @@ int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex)
     return 0;
 }
 
-int pthread_cond_timedwait(struct pthread_cond_t *cond, struct mutex_t *mutex, const struct timespec *abstime)
+int pthread_cond_timedwait(struct pthread_cond_t *cond, mutex_t *mutex, const struct timespec *abstime)
 {
     timex_t now, then, reltime;
 
@@ -36,7 +36,7 @@ struct __pthread_tls_key {
 /**
  * @brief Used while manipulating the TLS of a pthread.
  */
-static struct mutex_t tls_mutex;
+static mutex_t tls_mutex;
 
 /**
  * @brief Find a thread-specific datum.