/* core/sched.c — RIOT OS scheduler implementation
 * (mirror of https://github.com/RIOT-OS/RIOT.git) */
/*
* Copyright (C) 2014 Freie Universität Berlin
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup core_sched
* @{
*
* @file
* @brief Scheduler implementation
*
* @author Kaspar Schleiser <kaspar@schleiser.de>
* @author René Kijewski <rene.kijewski@fu-berlin.de>
*
* @}
*/
#include <stdint.h>

#include "sched.h"
#include "clist.h"
#include "bitarithm.h"
#include "irq.h"
#include "thread.h"
#include "log.h"

#ifdef MODULE_MPU_STACK_GUARD
#include "mpu.h"
#endif

#define ENABLE_DEBUG (0)
#include "debug.h"

#if ENABLE_DEBUG
/* For PRIu16 etc. */
#include <inttypes.h>
#endif
volatile int sched_num_threads = 0;
volatile unsigned int sched_context_switch_request;
volatile thread_t *sched_threads[KERNEL_PID_LAST + 1];
volatile thread_t *sched_active_thread;
volatile kernel_pid_t sched_active_pid = KERNEL_PID_UNDEF;
2016-02-29 01:37:26 +01:00
clist_node_t sched_runqueues[SCHED_PRIO_LEVELS];
static uint32_t runqueue_bitcache = 0;
/* Needed by OpenOCD to read sched_threads */
2017-02-01 09:06:48 +01:00
#if defined(__APPLE__) && defined(__MACH__)
2020-03-30 17:02:08 +02:00
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section( \
"__OPENOCD,__openocd")))
2017-02-01 09:06:48 +01:00
#else
2020-03-30 17:02:08 +02:00
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section( \
".openocd")))
2017-02-01 09:06:48 +01:00
#endif
FORCE_USED_SECTION
2019-07-22 23:13:28 +02:00
const uint8_t max_threads = ARRAY_SIZE(sched_threads);
#ifdef DEVELHELP
/* OpenOCD can't determine struct offsets and additionally this member is only
* available if compiled with DEVELHELP */
2017-02-01 09:06:48 +01:00
FORCE_USED_SECTION
const uint8_t _tcb_name_offset = offsetof(thread_t, name);
#endif
#ifdef MODULE_SCHED_CB
2020-03-30 17:02:08 +02:00
static void (*sched_cb) (kernel_pid_t active_thread,
kernel_pid_t next_thread) = NULL;
#endif
/**
 * @brief   Take @p active_thread off the CPU: demote a running thread to
 *          pending and run the per-switch bookkeeping (stack canary check,
 *          scheduler callback).
 */
static void _unschedule(thread_t *active_thread)
{
    if (active_thread->status == STATUS_RUNNING) {
        active_thread->status = STATUS_PENDING;
    }

#ifdef SCHED_TEST_STACK
    /* The lowest word of the stack is expected to hold its own address;
     * if it has been overwritten, the stack overflowed at some point. */
    if (*((uintptr_t *)active_thread->stack_start) !=
        (uintptr_t)active_thread->stack_start) {
        LOG_WARNING(
            "scheduler(): stack overflow detected, pid=%" PRIkernel_pid "\n",
            active_thread->pid);
    }
#endif
#ifdef MODULE_SCHED_CB
    /* KERNEL_PID_UNDEF as "next" signals that no successor is known yet */
    if (sched_cb) {
        sched_cb(active_thread->pid, KERNEL_PID_UNDEF);
    }
#endif
}
2020-06-04 21:52:57 +02:00
int __attribute__((used)) sched_run(void)
{
sched_context_switch_request = 0;
thread_t *active_thread = (thread_t *)sched_active_thread;
if (!IS_USED(MODULE_CORE_IDLE_THREAD)) {
if (!runqueue_bitcache) {
if (active_thread) {
_unschedule(active_thread);
active_thread = NULL;
}
do {
sched_arch_idle();
} while (!runqueue_bitcache);
}
}
int nextrq = bitarithm_lsb(runqueue_bitcache);
2020-03-30 17:02:08 +02:00
thread_t *next_thread = container_of(sched_runqueues[nextrq].next->next,
thread_t, rq_entry);
DEBUG(
"sched_run: active thread: %" PRIkernel_pid ", next thread: %" PRIkernel_pid "\n",
(kernel_pid_t)((active_thread == NULL)
? KERNEL_PID_UNDEF
: active_thread->pid),
next_thread->pid);
if (active_thread == next_thread) {
DEBUG("sched_run: done, sched_active_thread was not changed.\n");
return 0;
}
if (active_thread) {
_unschedule(active_thread);
}
#ifdef MODULE_SCHED_CB
if (sched_cb) {
sched_cb(KERNEL_PID_UNDEF, next_thread->pid);
}
#endif
next_thread->status = STATUS_RUNNING;
sched_active_pid = next_thread->pid;
2020-03-30 17:02:08 +02:00
sched_active_thread = (volatile thread_t *)next_thread;
#ifdef MODULE_MPU_STACK_GUARD
mpu_configure(
2020-03-30 17:02:08 +02:00
2, /* MPU region 2 */
(uintptr_t)sched_active_thread->stack_start + 31, /* Base Address (rounded up) */
MPU_ATTR(1, AP_RO_RO, 0, 1, 0, 1, MPU_SIZE_32B) /* Attributes and Size */
);
mpu_enable();
#endif
DEBUG("sched_run: done, changed sched_active_thread.\n");
return 1;
}
/**
 * @brief   Set the status of @p process and keep the run queues in sync.
 *
 * Transitioning into a status >= STATUS_ON_RUNQUEUE enqueues the thread on
 * the run queue of its priority; transitioning out of that range dequeues it
 * and clears the bitcache bit if the queue became empty.
 */
void sched_set_status(thread_t *process, thread_status_t status)
{
    if (status >= STATUS_ON_RUNQUEUE) {
        if (!(process->status >= STATUS_ON_RUNQUEUE)) {
            DEBUG(
                "sched_set_status: adding thread %" PRIkernel_pid " to runqueue %" PRIu8 ".\n",
                process->pid, process->priority);
            clist_rpush(&sched_runqueues[process->priority],
                        &(process->rq_entry));
            runqueue_bitcache |= 1 << process->priority;
        }
    }
    else {
        if (process->status >= STATUS_ON_RUNQUEUE) {
            DEBUG(
                "sched_set_status: removing thread %" PRIkernel_pid " from runqueue %" PRIu8 ".\n",
                process->pid, process->priority);
            /* NOTE(review): lpop removes the queue head — this assumes
             * @p process is the thread at the head of its run queue */
            clist_lpop(&sched_runqueues[process->priority]);

            if (!sched_runqueues[process->priority].next) {
                runqueue_bitcache &= ~(1 << process->priority);
            }
        }
    }

    process->status = status;
}
void sched_switch(uint16_t other_prio)
{
2020-03-30 17:02:08 +02:00
thread_t *active_thread = (thread_t *)sched_active_thread;
uint16_t current_prio = active_thread->priority;
int on_runqueue = (active_thread->status >= STATUS_ON_RUNQUEUE);
2020-03-30 17:02:08 +02:00
DEBUG("sched_switch: active pid=%" PRIkernel_pid " prio=%" PRIu16 " on_runqueue=%i "
2014-11-10 07:01:23 +01:00
", other_prio=%" PRIu16 "\n",
2020-03-30 17:02:08 +02:00
active_thread->pid, current_prio, on_runqueue,
other_prio);
if (!on_runqueue || (current_prio > other_prio)) {
if (irq_is_in()) {
DEBUG("sched_switch: setting sched_context_switch_request.\n");
2010-11-11 09:55:08 +01:00
sched_context_switch_request = 1;
}
else {
DEBUG("sched_switch: yielding immediately.\n");
thread_yield_higher();
2010-11-11 09:55:08 +01:00
}
}
else {
DEBUG("sched_switch: continuing without yield.\n");
}
2010-11-11 09:55:08 +01:00
}
NORETURN void sched_task_exit(void)
{
2020-03-30 17:02:08 +02:00
DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
sched_active_thread->pid);
2020-03-30 17:02:08 +02:00
(void)irq_disable();
sched_threads[sched_active_pid] = NULL;
sched_num_threads--;
sched_set_status((thread_t *)sched_active_thread, STATUS_STOPPED);
sched_active_thread = NULL;
2010-10-28 11:22:57 +02:00
cpu_switch_context_exit();
}
#ifdef MODULE_SCHED_CB
/**
 * @brief   Install @p callback to be invoked on scheduling events.
 *
 * The callback receives the pid of the thread being switched away from and
 * the pid of the thread being switched to. Passing NULL removes the hook.
 */
void sched_register_cb(void (*callback)(kernel_pid_t, kernel_pid_t))
{
    sched_cb = callback;
}
#endif