/*
 * Copyright (C) 2014-2017 Freie Universität Berlin
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */

/**
 * @ingroup core_sched
 * @{
 *
 * @file
 * @brief Scheduler implementation
 *
 * @author Kaspar Schleiser <kaspar@schleiser.de>
 * @author René Kijewski <rene.kijewski@fu-berlin.de>
 * @author Hauke Petersen <hauke.petersen@fu-berlin.de>
 *
 * @}
 */

#include <stdint.h>
#include <inttypes.h>

#include "assert.h"
#include "bitarithm.h"
#include "clist.h"
#include "irq.h"
#include "log.h"
#include "sched.h"
#include "thread.h"
#include "panic.h"

#ifdef MODULE_MPU_STACK_GUARD
#include "mpu.h"
#endif

#define ENABLE_DEBUG 0
#include "debug.h"

#ifdef PICOLIBC_TLS
#include <picotls.h>
#endif

/* Needed by OpenOCD to read sched_threads */
#if defined(__APPLE__) && defined(__MACH__)
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section( \
            "__OPENOCD,__openocd")))
#else
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section( \
            ".openocd")))
#endif

/**
 * @brief Symbols also used by OpenOCD, keep in sync with src/rtos/riot.c
 * @{
 */
volatile kernel_pid_t sched_active_pid = KERNEL_PID_UNDEF;
volatile thread_t *sched_threads[KERNEL_PID_LAST + 1];
volatile int sched_num_threads = 0;

static_assert(SCHED_PRIO_LEVELS <= 32, "SCHED_PRIO_LEVELS may at most be 32");

FORCE_USED_SECTION
const uint8_t max_threads = ARRAY_SIZE(sched_threads);

#ifdef DEVELHELP
/* OpenOCD can't determine struct offsets and additionally this member is only
 * available if compiled with DEVELHELP */
FORCE_USED_SECTION
const uint8_t _tcb_name_offset = offsetof(thread_t, name);
#endif
/** @} */

volatile thread_t *sched_active_thread;
volatile unsigned int sched_context_switch_request;

clist_node_t sched_runqueues[SCHED_PRIO_LEVELS];
static uint32_t runqueue_bitcache = 0;

#ifdef MODULE_SCHED_CB
static void (*sched_cb)(kernel_pid_t active_thread,
                        kernel_pid_t next_thread) = NULL;
#endif

/* Depending on whether the CLZ instruction is available, the order of the
 * runqueue_bitcache is reversed. When the instruction is available, it is
 * faster to determine the most significant bit set; when it is not, it is
 * faster to determine the least significant bit set. These functions abstract
 * the runqueue modifications and readout away, switching between the two
 * orders depending on CLZ instruction availability.
 */
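/* Example: with BITARITHM_HAS_CLZ, priority 0 (the highest priority) is
 * stored in bit 31 and the readout uses 31 - bitarithm_msb(); without it,
 * priority 0 is stored in bit 0 and the readout uses bitarithm_lsb(). Either
 * way, the readout returns the numerically lowest, i.e. highest, priority
 * that has a ready thread. */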
static inline void _set_runqueue_bit(uint8_t priority)
{
#if defined(BITARITHM_HAS_CLZ)
    runqueue_bitcache |= BIT31 >> priority;
#else
    runqueue_bitcache |= 1UL << priority;
#endif
}

static inline void _clear_runqueue_bit(uint8_t priority)
{
#if defined(BITARITHM_HAS_CLZ)
    runqueue_bitcache &= ~(BIT31 >> priority);
#else
    runqueue_bitcache &= ~(1UL << priority);
#endif
}

static inline unsigned _get_prio_queue_from_runqueue(void)
{
#if defined(BITARITHM_HAS_CLZ)
    return 31 - bitarithm_msb(runqueue_bitcache);
#else
    return bitarithm_lsb(runqueue_bitcache);
#endif
}
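
/* Take the previously running thread off the CPU: set it back to pending (if
 * it was still running), check its stack canary when SCHED_TEST_STACK is
 * active, and notify the sched callback that it stopped running. */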
static void _unschedule(thread_t *active_thread)
{
    if (active_thread->status == STATUS_RUNNING) {
        active_thread->status = STATUS_PENDING;
    }

#if IS_ACTIVE(SCHED_TEST_STACK)
    /* All platforms align the stack to word boundaries (possibly wasting one
     * word of RAM), so this access is not unaligned. Using an intermediate
     * cast to uintptr_t to silence -Wcast-align
     */
    if (*((uintptr_t *)(uintptr_t)active_thread->stack_start) !=
        (uintptr_t)active_thread->stack_start) {
        LOG_ERROR(
            "scheduler(): stack overflow detected, pid=%" PRIkernel_pid "\n",
            active_thread->pid);
        core_panic(PANIC_STACK_OVERFLOW, "STACK OVERFLOW");
    }
#endif
#ifdef MODULE_SCHED_CB
    if (sched_cb) {
        sched_cb(active_thread->pid, KERNEL_PID_UNDEF);
    }
#endif
}
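
/* Select the next thread to run: take the head of the highest-priority
 * non-empty runqueue. If no thread is ready and the idle thread module is not
 * used, idle in place until one becomes ready. Returns the thread to switch
 * to. */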
thread_t *__attribute__((used)) sched_run(void)
{
    thread_t *active_thread = thread_get_active();
    thread_t *previous_thread = active_thread;

    if (!IS_USED(MODULE_CORE_IDLE_THREAD) && !runqueue_bitcache) {
        if (active_thread) {
            _unschedule(active_thread);
            active_thread = NULL;
        }

        do {
            sched_arch_idle();
        } while (!runqueue_bitcache);
    }

    sched_context_switch_request = 0;

    unsigned nextrq = _get_prio_queue_from_runqueue();
    thread_t *next_thread = container_of(sched_runqueues[nextrq].next->next,
                                         thread_t, rq_entry);

#if (IS_USED(MODULE_SCHED_RUNQ_CALLBACK))
    sched_runq_callback(nextrq);
#endif

    DEBUG(
        "sched_run: active thread: %" PRIkernel_pid ", next thread: %" PRIkernel_pid "\n",
        (kernel_pid_t)((active_thread == NULL)
                       ? KERNEL_PID_UNDEF
                       : active_thread->pid),
        next_thread->pid);

    next_thread->status = STATUS_RUNNING;

    if (previous_thread == next_thread) {
#ifdef MODULE_SCHED_CB
        /* Call the sched callback again only if the active thread is NULL.
         * When active_thread is NULL, there was a sleep in between
         * descheduling the previous thread and scheduling the new thread.
         * Calling the callback here again ensures that the time spent
         * sleeping doesn't count as running the previous thread.
         */
        if (sched_cb && !active_thread) {
            sched_cb(KERNEL_PID_UNDEF, next_thread->pid);
        }
#endif
        DEBUG("sched_run: done, sched_active_thread was not changed.\n");
    }
    else {
        if (active_thread) {
            _unschedule(active_thread);
        }

        sched_active_pid = next_thread->pid;
        sched_active_thread = next_thread;

#ifdef MODULE_SCHED_CB
        if (sched_cb) {
            sched_cb(KERNEL_PID_UNDEF, next_thread->pid);
        }
#endif

#ifdef PICOLIBC_TLS
        _set_tls(next_thread->tls);
#endif

#ifdef MODULE_MPU_STACK_GUARD
        mpu_configure(
            2, /* MPU region 2 */
            (uintptr_t)next_thread->stack_start + 31, /* Base Address (rounded up) */
            MPU_ATTR(1, AP_RO_RO, 0, 1, 0, 1, MPU_SIZE_32B) /* Attributes and Size */
            );
#endif
        DEBUG("sched_run: done, changed sched_active_thread.\n");
    }

    return next_thread;
}

/* Note: Forcing the compiler to inline this function will reduce .text for
 * applications not linking in sched_change_priority(), which benefits the
 * vast majority of apps.
 */
static inline __attribute__((always_inline)) void _runqueue_push(thread_t *thread, uint8_t priority)
{
    DEBUG("sched_set_status: adding thread %" PRIkernel_pid " to runqueue %" PRIu8 ".\n",
          thread->pid, priority);
    clist_rpush(&sched_runqueues[priority], &(thread->rq_entry));
    _set_runqueue_bit(priority);

    /* A thread entered a runqueue; if it is the runqueue of the active
     * thread's priority, inform the runqueue_change callback. */
#if (IS_USED(MODULE_SCHED_RUNQ_CALLBACK))
    thread_t *active_thread = thread_get_active();
    if (active_thread && active_thread->priority == priority) {
        sched_runq_callback(priority);
    }
#endif
}

/* Note: Forcing the compiler to inline this function will reduce .text for
 * applications not linking in sched_change_priority(), which benefits the
 * vast majority of apps.
 */
static inline __attribute__((always_inline)) void _runqueue_pop(thread_t *thread)
{
    DEBUG("sched_set_status: removing thread %" PRIkernel_pid " from runqueue %" PRIu8 ".\n",
          thread->pid, thread->priority);
    clist_lpop(&sched_runqueues[thread->priority]);

    if (!sched_runqueues[thread->priority].next) {
        _clear_runqueue_bit(thread->priority);
#if (IS_USED(MODULE_SCHED_RUNQ_CALLBACK))
        sched_runq_callback(thread->priority);
#endif
    }
}
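
/* Move a thread onto or off its runqueue as needed when its status changes,
 * then record the new status. */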
void sched_set_status(thread_t *process, thread_status_t status)
{
    if (status >= STATUS_ON_RUNQUEUE) {
        if (!(process->status >= STATUS_ON_RUNQUEUE)) {
            _runqueue_push(process, process->priority);
        }
    }
    else {
        if (process->status >= STATUS_ON_RUNQUEUE) {
            _runqueue_pop(process);
        }
    }

    process->status = status;
}
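
/* Called when a thread of priority other_prio becomes runnable: yield (or,
 * from IRQ context, set sched_context_switch_request) if the active thread is
 * no longer on a runqueue or other_prio denotes a higher priority (lower
 * numeric value) than the active thread's. */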
void sched_switch(uint16_t other_prio)
{
    thread_t *active_thread = thread_get_active();
    uint16_t current_prio = active_thread->priority;
    int on_runqueue = (active_thread->status >= STATUS_ON_RUNQUEUE);

    DEBUG("sched_switch: active pid=%" PRIkernel_pid " prio=%" PRIu16 " on_runqueue=%i "
          ", other_prio=%" PRIu16 "\n",
          active_thread->pid, current_prio, on_runqueue,
          other_prio);

    if (!on_runqueue || (current_prio > other_prio)) {
        if (irq_is_in()) {
            DEBUG("sched_switch: setting sched_context_switch_request.\n");
            sched_context_switch_request = 1;
        }
        else {
            DEBUG("sched_switch: yielding immediately.\n");
            thread_yield_higher();
        }
    }
    else {
        DEBUG("sched_switch: continuing without yield.\n");
    }
}
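
/* Terminate the calling thread: drop it from the thread table, mark it
 * stopped, and switch away for good. */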
NORETURN void sched_task_exit(void)
{
    DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
          thread_getpid());

#if defined(MODULE_TEST_UTILS_PRINT_STACK_USAGE) && defined(DEVELHELP)
    void print_stack_usage_metric(const char *name, void *stack, unsigned max_size);
    thread_t *me = thread_get_active();
    print_stack_usage_metric(me->name, me->stack_start, me->stack_size);
#endif

    (void)irq_disable();
    sched_threads[thread_getpid()] = NULL;
    sched_num_threads--;

    sched_set_status(thread_get_active(), STATUS_STOPPED);

    sched_active_thread = NULL;
    cpu_switch_context_exit();
}

#ifdef MODULE_SCHED_CB
void sched_register_cb(void (*callback)(kernel_pid_t, kernel_pid_t))
{
    sched_cb = callback;
}
#endif
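
/* Usage sketch (illustrative, not part of this file): a build with
 * MODULE_SCHED_CB can register a callback to trace context switches. The
 * first argument is the PID of a thread that stops running (KERNEL_PID_UNDEF
 * if none), the second the PID of a thread that starts running
 * (KERNEL_PID_UNDEF if none):
 *
 *     static void _trace_switch(kernel_pid_t from, kernel_pid_t to)
 *     {
 *         (void)from;
 *         (void)to;
 *     }
 *
 *     sched_register_cb(_trace_switch);
 *
 * The callback name above is hypothetical. */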

void sched_change_priority(thread_t *thread, uint8_t priority)
{
    assert(thread && (priority < SCHED_PRIO_LEVELS));

    if (thread->priority == priority) {
        return;
    }

    unsigned irq_state = irq_disable();

    if (thread_is_active(thread)) {
        _runqueue_pop(thread);
        _runqueue_push(thread, priority);
    }
    thread->priority = priority;

    irq_restore(irq_state);

    thread_t *active = thread_get_active();

    if ((active == thread)
        || ((active != NULL) && (active->priority > priority) && thread_is_active(thread))
        ) {
        /* If the change in priority would result in a different decision of
         * the scheduler, we need to yield to make sure the change in priority
         * takes effect immediately. This can be due to one of the following:
         *
         * 1) The priority of the thread currently running has been reduced
         *    (higher numeric value), so that other threads now have priority
         *    over the currently running one.
         * 2) The priority of a pending thread has been increased (lower
         *    numeric value), so that it now has priority over the running
         *    thread.
         */
        thread_yield_higher();
    }
}