mirror of
https://github.com/RIOT-OS/RIOT.git
synced 2025-01-17 05:12:57 +01:00
core: introduce thread_yield_higher(), yield less
Fixes #1708.

Currently, involuntary preemption causes the current thread to yield not only to higher prioritized threads, but also to all other threads of its own priority class.

This PR adds the function `thread_yield_higher()`, which yields the current thread in favor of higher prioritized threads only, not of threads in its own priority class. Boards now need to implement `thread_yield_higher()` instead of `thread_yield()`; `COREIF_NG` boards are not affected in any way.

`thread_yield()` retains its old meaning: yield to every thread that has the same or a higher priority.

This PR does not touch the occurrences of `thread_yield()` in the periph drivers, because the author of this PR did not look into the logic of the various driver implementations.
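To illustrate the difference between the two functions, here is a minimal sketch (not part of this commit) in the same style as the test application added below; the thread name "other", the helper other_thread() and the printed strings are made up for illustration:

/* Sketch (hypothetical, not from this commit): contrast the two yields.
 * "other" has the same priority as main, so thread_yield_higher() does
 * not schedule it, while thread_yield() does. */
#include <stdio.h>
#include "thread.h"

static char other_stack[KERNEL_CONF_STACKSIZE_MAIN];

static void *other_thread(void *arg)
{
    (void) arg;
    puts("other_thread runs");
    return NULL;
}

int main(void)
{
    thread_create(other_stack, sizeof(other_stack), PRIORITY_MAIN,
                  CREATE_WOUT_YIELD, other_thread, NULL, "other");

    /* no ready thread with a strictly higher priority exists,
     * so main resumes immediately */
    thread_yield_higher();
    puts("after thread_yield_higher()");

    /* yields to same-or-higher priority: "other" runs before main
     * gets its next turn */
    thread_yield();
    puts("after thread_yield()");

    return 0;
}

Under these assumptions the output should be "after thread_yield_higher()", then "other_thread runs", then "after thread_yield()", because only thread_yield() rotates the run queue of the current priority class.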
This commit is contained in:
parent
aa086158ee
commit
677d690e2b
@@ -37,7 +37,7 @@
 #define thread_stack_init thread_arch_stack_init
 #define thread_print_stack thread_arch_print_stack
 #define cpu_switch_context_exit thread_arch_start_threading
-#define thread_yield thread_arch_yield
+#define thread_yield_higher thread_arch_yield
 #endif
 /** @} */
 
@@ -152,6 +152,20 @@ extern volatile int sched_num_threads;
  */
 extern volatile kernel_pid_t sched_active_pid;
 
+/**
+ * @brief Lets current thread yield in favor of a higher prioritized thread.
+ *
+ * @details The current thread will resume operation immediately,
+ *          if there is no other ready thread with a higher priority.
+ *
+ *          Differently from thread_yield() the current thread will be scheduled next
+ *          in its own priority class, i.e. it stays the first thread in its
+ *          priority class.
+ *
+ * @see     thread_yield()
+ */
+void thread_yield_higher(void);
+
 #if SCHEDSTATISTICS
 /**
  * Scheduler statistics
@@ -118,10 +118,15 @@ const char *thread_getname(kernel_pid_t pid);
 void thread_sleep(void);
 
 /**
- * @brief The current thread yields and let the scheduler run
+ * @brief Lets current thread yield.
  *
- * The current thread will resume operation immediately if there is no other thread with the same
- * or a higher priority.
+ * @details The current thread will resume operation immediately,
+ *          if there is no other ready thread with the same or a higher priority.
+ *
+ *          Differently from thread_yield_higher() the current thread will be put to the
+ *          end of the threads in its priority class.
+ *
+ * @see     thread_yield_higher()
  */
 void thread_yield(void);
 
core/msg.c (17 changed lines)
@@ -96,7 +96,7 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block)
         DEBUG("msg_send() %s:%i: Target %" PRIkernel_pid " has a msg_queue. Queueing message.\n", __FILE__, __LINE__, target_pid);
         eINT();
         if (sched_active_thread->status == STATUS_REPLY_BLOCKED) {
-            thread_yield();
+            thread_yield_higher();
         }
         return 1;
     }
@@ -130,6 +130,9 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block)
         sched_set_status((tcb_t*) sched_active_thread, newstatus);
 
         DEBUG("msg_send: %s: Back from send block.\n", sched_active_thread->name);
+
+        eINT();
+        thread_yield_higher();
     }
     else {
         DEBUG("msg_send: %s: Direct msg copy from %" PRIkernel_pid " to %" PRIkernel_pid ".\n", sched_active_thread->name, thread_getpid(), target_pid);
@@ -137,10 +140,11 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block)
             msg_t *target_message = (msg_t*) target->wait_data;
             *target_message = *m;
             sched_set_status(target, STATUS_PENDING);
         }
 
-        eINT();
-        thread_yield();
+        uint16_t target_prio = target->priority;
+        eINT();
+        sched_switch(target_prio);
     }
 
     return 1;
 }
@@ -224,8 +228,9 @@ int msg_reply(msg_t *m, msg_t *reply)
     msg_t *target_message = (msg_t*) target->wait_data;
     *target_message = *reply;
     sched_set_status(target, STATUS_PENDING);
+    uint16_t target_prio = target->priority;
     restoreIRQ(state);
-    thread_yield();
+    sched_switch(target_prio);
 
     return 1;
 }
@@ -293,7 +298,7 @@ static int _msg_receive(msg_t *m, int block)
         sched_set_status(me, STATUS_RECEIVE_BLOCKED);
 
         eINT();
-        thread_yield();
+        thread_yield_higher();
 
         /* sender copied message */
     }
@@ -77,7 +77,7 @@ static void mutex_wait(struct mutex_t *mutex)
 
    restoreIRQ(irqstate);
 
-   thread_yield();
+   thread_yield_higher();
 
    /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
 }
@@ -123,5 +123,5 @@ void mutex_unlock_and_sleep(struct mutex_t *mutex)
    DEBUG("%s: going to sleep.\n", sched_active_thread->name);
    sched_set_status((tcb_t*) sched_active_thread, STATUS_SLEEPING);
    restoreIRQ(irqstate);
-   thread_yield();
+   thread_yield_higher();
 }
core/sched.c (15 changed lines)
@@ -88,7 +88,6 @@ void sched_run(void)
     */
    int nextrq = bitarithm_lsb(runqueue_bitcache);
    my_active_thread = clist_get_container(sched_runqueues[nextrq], tcb_t, rq_entry);
-   clist_advance(&(sched_runqueues[nextrq]));
    DEBUG("scheduler: first in queue: %s\n", my_active_thread->name);
 
    kernel_pid_t my_next_pid = my_active_thread->pid;
@@ -162,7 +161,7 @@ void sched_switch(uint16_t other_prio)
            sched_context_switch_request = 1;
        }
        else {
-           thread_yield();
+           thread_yield_higher();
        }
    }
 }
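The msg.c and thread.c hunks in this commit also replace plain thread_yield() calls with sched_switch(<priority of the thread just woken>), whose changed else branch is shown above. The decision this implies can be sketched as follows, assuming (as the bitarithm_lsb() run-queue lookup above suggests) that numerically lower priority values mean higher priority; this is an illustration, not the literal sched_switch() body:

/* Sketch only: what sched_switch(other_prio) has to decide for the
 * call sites in this commit. Lower numeric value = higher priority. */
static void sched_switch_sketch(uint16_t other_prio)
{
    uint16_t my_prio = sched_active_thread->priority;

    if (other_prio < my_prio) {
        /* the thread we just woke outranks us; per the hunk above this
         * path ends in thread_yield_higher(), or, when called from an
         * ISR, in sched_context_switch_request = 1 */
        thread_yield_higher();
    }
    /* otherwise keep running; unlike the old thread_yield() call,
     * the current thread's run-queue position is left untouched */
}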
@@ -180,3 +179,15 @@ NORETURN void sched_task_exit(void)
    sched_active_thread = NULL;
    cpu_switch_context_exit();
 }
+
+void thread_yield(void)
+{
+    unsigned old_state = disableIRQ();
+    tcb_t *me = (tcb_t *)sched_active_thread;
+    if (me->status >= STATUS_ON_RUNQUEUE) {
+        clist_advance(&sched_runqueues[me->priority]);
+    }
+    restoreIRQ(old_state);
+
+    thread_yield_higher();
+}
@@ -61,7 +61,7 @@ void thread_sleep(void)
    dINT();
    sched_set_status((tcb_t *)sched_active_thread, STATUS_SLEEPING);
    eINT();
-   thread_yield();
+   thread_yield_higher();
 }
 
 int thread_wakeup(kernel_pid_t pid)
@@ -208,7 +208,7 @@ kernel_pid_t thread_create(char *stack, int stacksize, char priority, int flags,
    if (!(flags & CREATE_WOUT_YIELD)) {
        if (!inISR()) {
            eINT();
-           thread_yield();
+           sched_switch(priority);
        }
        else {
            sched_context_switch_request = 1;
@@ -25,7 +25,7 @@
 #define STACK_MARKER (0x77777777)
 #define REGISTER_CNT (12)
 
-void thread_yield(void)
+void thread_yield_higher(void)
 {
     asm("svc 0\n");
 }
@@ -38,7 +38,7 @@ NORETURN void cpu_switch_context_exit(void){
 }
 
 
-void thread_yield(void) {
+void thread_yield_higher(void) {
     asm("svc 0x01\n");
 }
 
@@ -35,9 +35,9 @@ void save_context(void);
 void restore_context(void);
 
 /**
- * @brief Let the thread yield
+ * @brief Let the scheduler yield
  */
-void thread_yield(void);
+void thread_yield_higher(void);
 
 /** @} */
 
@@ -20,10 +20,10 @@ char __isr_stack[MSP430_ISR_STACK_SIZE];
 
 /*
  * we must prevent the compiler to generate a prologue or an epilogue
- * for thread_yield(), since we rely on the RETI instruction at the end
+ * for thread_yield_higher(), since we rely on the RETI instruction at the end
  * of its execution, in the inlined __restore_context() sub-function
  */
-__attribute__((naked)) void thread_yield(void)
+__attribute__((naked)) void thread_yield_higher(void)
 {
     /*
      * disable IRQ, remembering if they are
@@ -198,7 +198,7 @@ void isr_thread_yield(void)
    }
 }
 
-void thread_yield(void)
+void thread_yield_higher(void)
 {
    ucontext_t *ctx = (ucontext_t *)(sched_active_thread->sp);
    if (_native_in_isr == 0) {
@@ -209,7 +209,7 @@ void thread_yield(void)
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_thread_yield, 0);
        if (swapcontext(ctx, &native_isr_context) == -1) {
-           err(EXIT_FAILURE, "thread_yield: swapcontext");
+           err(EXIT_FAILURE, "thread_yield_higher: swapcontext");
        }
        eINT();
    }
@@ -208,7 +208,7 @@ void x86_int_handler(void)
    ctx->__intr.ip = sp[0];
    ctx->__intr.flags = sp[2];
 
-   thread_yield();
+   thread_yield_higher();
    __builtin_unreachable();
 }
 
@@ -99,7 +99,7 @@ static void __attribute__((noreturn)) isr_thread_yield(void)
    setcontext(ctx);
 }
 
-void thread_yield(void)
+void thread_yield_higher(void)
 {
    if (x86_in_isr) {
        isr_thread_yield();
@@ -73,7 +73,7 @@ static ssize_t pipe_rw(ringbuffer_t *rb,
 
            sched_set_status((tcb_t *) sched_active_thread, STATUS_SLEEPING);
            restoreIRQ(old_state);
-           thread_yield();
+           thread_yield_higher();
        }
    }
 }
@@ -91,7 +91,7 @@ static void sem_thread_blocked(sem_t *sem)
    /* scheduler should schedule an other thread, that unlocks the
     * mutex in the future, when this happens I get scheduled again
     */
-   thread_yield();
+   thread_yield_higher();
 }
 
 int sem_wait(sem_t *sem)
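All the blocking call sites above (msg, mutex, thread_sleep, pipe, sem) share one pattern: the current thread marks itself as blocked, re-enables interrupts, then switches away. Since a blocked thread is no longer on its run queue, there is no queue position to give up, so the cheaper thread_yield_higher() is sufficient. A condensed sketch of that pattern (the function name and the use of STATUS_SLEEPING are illustrative):

/* Condensed sketch of the blocking pattern used at the call sites above;
 * block_current_thread() is a hypothetical name, not RIOT API */
static void block_current_thread(void)
{
    unsigned state = disableIRQ();
    /* take ourselves off the run queue */
    sched_set_status((tcb_t *) sched_active_thread, STATUS_SLEEPING);
    restoreIRQ(state);
    /* nothing to rotate anymore: yielding to higher priorities only
     * is enough, and cheaper than the old thread_yield() */
    thread_yield_higher();
}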
tests/sched_testing/Makefile (4 lines, new file)
@@ -0,0 +1,4 @@
+APPLICATION = sched_testing
+include ../Makefile.tests_common
+
+include $(RIOTBASE)/Makefile.include
tests/sched_testing/main.c (46 lines, new file)
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Oliver Hahm <oliver.hahm@inria.fr>
+ *
+ * This file is subject to the terms and conditions of the GNU Lesser
+ * General Public License v2.1. See the file LICENSE in the top level
+ * directory for more details.
+ */
+
+/**
+ * @ingroup tests
+ * @{
+ * @file
+ * @brief   Test thread_yield()
+ * @author  Oliver Hahm <oliver.hahm@inria.fr>
+ * @author  René Kijewski <rene.kijewski@fu-berlin.de>
+ * @}
+ */
+
+#include <stdio.h>
+#include "thread.h"
+
+char snd_thread_stack[KERNEL_CONF_STACKSIZE_MAIN];
+
+void *snd_thread(void *unused)
+{
+    (void) unused;
+    puts("snd_thread running");
+    return NULL;
+}
+
+int main(void)
+{
+    puts("The output should be: yield 1, snd_thread running, yield 2, done");
+    puts("----------------------------------------------------------------");
+
+    thread_create(snd_thread_stack, sizeof(snd_thread_stack), PRIORITY_MAIN,
+                  CREATE_WOUT_YIELD, snd_thread, NULL, "snd");
+
+    puts("yield 1");
+    thread_yield();
+    puts("yield 2");
+    thread_yield();
+    puts("done");
+
+    return 0;
+}