1
0
mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00

* mutex changes

This commit is contained in:
Kaspar Schleiser 2010-11-11 09:55:08 +01:00
parent fb784c6628
commit 8b242c74a8
3 changed files with 33 additions and 53 deletions

View File

@ -25,6 +25,7 @@ void sched_init();
void sched_run();
void sched_set_status(tcb *process, unsigned int status);
void sched_switch_if_higher(uint16_t current_prio, uint16_t other_prio, int in_isr);
volatile unsigned int sched_context_switch_request;

View File

@ -20,6 +20,7 @@
#include "tcb.h"
#include "kernel.h"
#include "sched.h"
#include <irq.h>
//#define ENABLE_DEBUG
#include <debug.h>
@ -36,48 +37,31 @@ int mutex_init(struct mutex_t* mutex) {
/* Try to acquire the mutex without blocking; non-zero on success.
 * NOTE(review): this span is rendered diff output — both the pre-change
 * return (stores thread_pid) and the post-change return (stores 1) appear;
 * the second return is unreachable as written. */
int mutex_trylock(struct mutex_t* mutex) {
DEBUG("%s: trylocking to get mutex. val: %u\n", active_thread->name, mutex->val);
return (atomic_set_return(&mutex->val, thread_pid ) == 0);
/* post-change variant: marks the mutex as taken with the constant 1
 * instead of the caller's pid */
return atomic_set_return(&mutex->val, 1 ) == 0;
}
/* Return the running thread's priority.
 * NOTE(review): diff artifact — both the old external signature and the
 * new `static` signature are present; as rendered this is not valid C. */
int prio() {
static int prio() {
return active_thread->priority;
}
/* Acquire the mutex, blocking in mutex_wait() when it is already held;
 * always returns 1.
 * NOTE(review): diff artifact — both the old condition (stores thread_pid)
 * and the new condition (stores 1) appear back to back, leaving the braces
 * unbalanced as rendered. */
int mutex_lock(struct mutex_t* mutex) {
DEBUG("%s: trying to get mutex. val: %u\n", active_thread->name, mutex->val);
if (atomic_set_return(&mutex->val,thread_pid) != 0) {
/* post-change variant of the line above */
if (atomic_set_return(&mutex->val,1) != 0) {
// mutex was locked.
mutex_wait(mutex);
}
return 1;
}
/* Release the mutex and wake waiters when the swapped-out value shows
 * contention.
 * NOTE(review): this is the pre-change version that the commit removes;
 * its replacement appears further below in this diff. */
void mutex_unlock(struct mutex_t* mutex, int yield) {
DEBUG("%s: unlocking mutex. val: %u pid: %u\n", active_thread->name, mutex->val, thread_pid);
int me_value;
/* From ISR context the expected owner marker is 0 and the wake path is
 * forced to the ISR flavour via MUTEX_INISR. */
if (inISR()) {
me_value = 0;
yield = MUTEX_INISR;
} else {
me_value = thread_pid;
}
/* Atomically clear the mutex; any value other than our own marker means
 * waiters queued up while we held it. */
if (atomic_set_return(&mutex->val,0) != me_value ) {
// there were waiters.
mutex_wake_waiters(mutex, yield);
}
}
/* Block the calling thread until the mutex becomes available, enqueueing
 * it on the mutex's priority queue and yielding to the scheduler.
 * NOTE(review): diff artifact — the old dINT()/eINT() IRQ handling and the
 * new disableIRQ()/restoreIRQ() handling are both present, and a diff hunk
 * header ("@ -92,45 ...") splits the body; the middle of the function
 * (node setup for `n`) is not visible in this view. */
void mutex_wait(struct mutex_t *mutex) {
dINT();
int irqstate = disableIRQ();
DEBUG("%s: Mutex in use. %u\n", active_thread->name, mutex->val);
/* Fast path: the mutex was released between the failed lock attempt and
 * entering here — claim it and return without queueing. */
if (mutex->val == 0) {
// somebody released the mutex. return.
mutex->val = thread_pid;
DEBUG("%s: mutex_wait early out. %u\n", active_thread->name, mutex->val);
eINT();
restoreIRQ(irqstate);
return;
}
@ -92,45 +76,30 @@ void mutex_wait(struct mutex_t *mutex) {
queue_priority_add(&(mutex->queue), &n);
eINT();
restoreIRQ(irqstate);
thread_yield();
/* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
}
/* NOTE(review): this span is rendered diff output that fuses two functions:
 * the pre-change mutex_wake_waiters() (removed by the commit) and the
 * post-change mutex_unlock() (added by the commit). Their lines are
 * interleaved without +/- markers, so the span is not valid C as written.
 * Read the old version as: dequeue the head waiter, mark it PENDING, set
 * mutex->val to -1 (more waiters) or the new owner's pid, then yield or
 * flag a context switch depending on MUTEX_INISR/MUTEX_YIELD. Read the new
 * version as: under disableIRQ(), if val != 0 either hand off to the head
 * waiter via sched_set_status/sched_switch_if_higher or clear val to 0,
 * then restoreIRQ(). */
void mutex_wake_waiters(struct mutex_t *mutex, int flags) {
if ( ! (flags & MUTEX_INISR)) dINT();
DEBUG("%s: waking up waiters.\n", active_thread->name);
void mutex_unlock(struct mutex_t* mutex, int yield) {
DEBUG("%s: unlocking mutex. val: %u pid: %u\n", active_thread->name, mutex->val, thread_pid);
int irqstate = disableIRQ();
if (mutex->val != 0) {
if (mutex->queue.next) {
queue_node_t *next = queue_remove_head(&(mutex->queue));
tcb* process = (tcb*)next->data;
DEBUG("%s: waking up waiter %s.\n", process->name);
sched_set_status(process, STATUS_PENDING);
queue_node_t *next = queue_remove_head(&(mutex->queue));
/* queue is empty */
if (!next) {
DEBUG("%s: no waiters?\n", active_thread->name);
mutex->val = 0;
if ( ! (flags & MUTEX_INISR)) eINT();
return;
sched_switch_if_higher(active_thread->priority, process->priority, inISR());
} else {
mutex->val = 0;
}
}
tcb* process = (tcb*)next->data;
sched_set_status(process, STATUS_PENDING);
/* -1 marks "still contended"; otherwise record the new owner's pid */
if ( mutex->queue.next != NULL) {
mutex->val = -1;
} else {
mutex->val = process->pid;
}
DEBUG("%s: waiters woken up.\n", active_thread->name);
/* If called from process, reenable interrupts, yield if requested */
if (! (flags & MUTEX_INISR)) {
eINT();
if (flags & MUTEX_YIELD) thread_yield();
} else {
sched_context_switch_request = 1;
}
restoreIRQ(irqstate);
}

View File

@ -143,6 +143,16 @@ void sched_set_status(tcb *process, unsigned int status) {
process->status = status;
}
/* Request a context switch when other_prio outranks current_prio.
 * NOTE(review): "outranks" here means other_prio is numerically larger —
 * confirm against the scheduler's priority ordering convention.
 * In ISR context only flag the request so the interrupt epilogue performs
 * the switch; in thread context yield immediately. */
void sched_switch_if_higher(uint16_t current_prio, uint16_t other_prio, int in_isr) {
    /* Guard clause: nothing to do unless the other thread ranks higher. */
    if (current_prio >= other_prio) {
        return;
    }
    if (in_isr) {
        sched_context_switch_request = 1;
        return;
    }
    thread_yield();
}
extern void cpu_switch_context_exit(void);
void sched_task_exit(void) {