Merge pull request #14224 from kaspar030/cortexm_remove_idle_thread
core: make idle thread optional
commit 279f2aebed
@@ -1074,6 +1074,9 @@ FEATURES_REQUIRED += $(filter arch_%,$(FEATURES_PROVIDED))
 # always select CPU core features
 FEATURES_REQUIRED += $(filter cpu_core_%,$(FEATURES_PROVIDED))
 
+# don't use idle thread if architecture has needed support
+FEATURES_OPTIONAL += no_idle_thread
+
 ifneq (,$(filter ecc_%,$(USEMODULE)))
   USEMODULE += ecc
 endif
@@ -203,8 +203,22 @@ extern clist_node_t sched_runqueues[SCHED_PRIO_LEVELS];
  */
 NORETURN void sched_task_exit(void);
 
+/**
+ * @brief Set CPU to idle mode (CPU dependent)
+ *
+ * Only used when there's no idle thread.
+ *
+ * This function will be called by the scheduler when there's no runnable thread.
+ * It will be called from ISR context, and *must* allow other ISR handlers to be run.
+ * E.g., on Cortex-M, the PendSV priority is temporarily lowered (set to higher
+ * value) in order to enable other exceptions to be run.
+ *
+ * This function should also invoke setting a low power mode, e.g., by calling
+ * 'pm_set_lowest()'.
+ */
+void sched_arch_idle(void);
+
 #if IS_USED(MODULE_SCHED_CB) || defined(DOXYGEN)
 /**
  * @brief  Scheduler run callback
  *
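
Not part of the diff: the contract documented above is easier to picture next to an implementation. Below is a minimal, hypothetical sketch of a sched_arch_idle() for a port whose scheduler runs with interrupts masked; the Cortex-M implementation actually added by this PR appears further down. irq_enable()/irq_disable() are RIOT's IRQ primitives from irq.h and pm_set_lowest() is the hook named in the doc comment, but the overall structure here is illustrative only, not how any real port does it.

#include "irq.h"

/* forward declaration, mirroring what the Cortex-M code below does */
void pm_set_lowest(void);

void sched_arch_idle(void)
{
    /* let pending ISRs run so one of them can make a thread runnable */
    irq_enable();
    /* enter the lowest allowed power mode until the next interrupt fires */
    pm_set_lowest();
    /* mask interrupts again before returning to the scheduler */
    irq_disable();
}
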
core/init.c
@@ -53,6 +53,9 @@ static void *main_trampoline(void *arg)
     return NULL;
 }
 
+static char main_stack[THREAD_STACKSIZE_MAIN];
+static char idle_stack[THREAD_STACKSIZE_IDLE];
+
 static void *idle_thread(void *arg)
 {
     (void)arg;
@@ -64,17 +67,17 @@ static void *idle_thread(void *arg)
     return NULL;
 }
 
-static char main_stack[THREAD_STACKSIZE_MAIN];
-static char idle_stack[THREAD_STACKSIZE_IDLE];
-
 void kernel_init(void)
 {
     irq_disable();
 
-    thread_create(idle_stack, sizeof(idle_stack),
-                  THREAD_PRIORITY_IDLE,
-                  THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
-                  idle_thread, NULL, "idle");
+    if (IS_USED(MODULE_CORE_IDLE_THREAD)) {
+        thread_create(idle_stack, sizeof(idle_stack),
+                      THREAD_PRIORITY_IDLE,
+                      THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
+                      idle_thread, NULL, "idle");
+    }
 
     thread_create(main_stack, sizeof(main_stack),
                   THREAD_PRIORITY_MAIN,
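
Not part of the diff: the new guard uses RIOT's IS_USED() macro (from kernel_defines.h) rather than #ifdef, so the condition is an ordinary C expression that folds to 0 or 1 at compile time. When core_idle_thread is not selected, the whole branch, including the references to idle_thread() and idle_stack, is discarded as dead code while still being compile-checked. A minimal sketch of the pattern; MODULE_EXAMPLE_FEATURE and do_optional_work() are made-up names.

#include "kernel_defines.h"   /* IS_USED() */

static void do_optional_work(void)
{
    /* only ends up in the image when the guarded call below survives */
}

void example(void)
{
    /* reads like a runtime check, but compiles to if (1) or if (0)
     * depending on whether USEMODULE += example_feature was selected */
    if (IS_USED(MODULE_EXAMPLE_FEATURE)) {
        do_optional_work();
    }
}
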
core/sched.c
@@ -76,15 +76,45 @@ static void (*sched_cb) (kernel_pid_t active_thread,
                          kernel_pid_t next_thread) = NULL;
 #endif
 
+static void _unschedule(thread_t *active_thread)
+{
+    if (active_thread->status == STATUS_RUNNING) {
+        active_thread->status = STATUS_PENDING;
+    }
+
+#ifdef SCHED_TEST_STACK
+    if (*((uintptr_t *)active_thread->stack_start) !=
+        (uintptr_t)active_thread->stack_start) {
+        LOG_WARNING(
+            "scheduler(): stack overflow detected, pid=%" PRIkernel_pid "\n",
+            active_thread->pid);
+    }
+#endif
+#ifdef MODULE_SCHED_CB
+    if (sched_cb) {
+        sched_cb(active_thread->pid, KERNEL_PID_UNDEF);
+    }
+#endif
+}
+
 int __attribute__((used)) sched_run(void)
 {
     sched_context_switch_request = 0;
 
     thread_t *active_thread = (thread_t *)sched_active_thread;
 
     /* The bitmask in runqueue_bitcache is never empty,
      * since the threading should not be started before at least the idle thread was started.
      */
+    if (!IS_USED(MODULE_CORE_IDLE_THREAD)) {
+        if (!runqueue_bitcache) {
+            if (active_thread) {
+                _unschedule(active_thread);
+                active_thread = NULL;
+            }
+
+            while (!runqueue_bitcache) {
+                sched_arch_idle();
+            }
+        }
+    }
+
     int nextrq = bitarithm_lsb(runqueue_bitcache);
     thread_t *next_thread = container_of(sched_runqueues[nextrq].next->next,
                                          thread_t, rq_entry);
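
Not part of the diff: on an image built without core_idle_thread, the block above becomes the only place the CPU waits when nothing is runnable. A small application sketch of the situation that exercises it, assuming the standard xtimer module: while the only application thread sleeps, runqueue_bitcache is empty, so sched_run() keeps calling sched_arch_idle() until the timer interrupt makes the thread pending again.

#include "xtimer.h"

int main(void)
{
    while (1) {
        /* during the sleep there is no runnable thread; without an idle
         * thread the scheduler parks the CPU in sched_arch_idle() */
        xtimer_sleep(1);
    }
    return 0;
}
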
@@ -102,26 +132,12 @@ int __attribute__((used)) sched_run(void)
     }
 
     if (active_thread) {
-        if (active_thread->status == STATUS_RUNNING) {
-            active_thread->status = STATUS_PENDING;
-        }
-
-#ifdef SCHED_TEST_STACK
-        if (*((uintptr_t *)active_thread->stack_start) !=
-            (uintptr_t)active_thread->stack_start) {
-            LOG_WARNING(
-                "scheduler(): stack overflow detected, pid=%" PRIkernel_pid "\n",
-                active_thread->pid);
-        }
-#endif
+        _unschedule(active_thread);
     }
 
 #ifdef MODULE_SCHED_CB
     if (sched_cb) {
-        /* Use `sched_active_pid` instead of `active_thread` since after `sched_task_exit()` is
-           called `active_thread` is set to NULL while `sched_active_thread` isn't updated until
-           `next_thread` is scheduled*/
-        sched_cb(sched_active_pid, next_thread->pid);
+        sched_cb(KERNEL_PID_UNDEF, next_thread->pid);
     }
 #endif
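
Not part of the diff: with _unschedule() reporting the outgoing thread as (pid, KERNEL_PID_UNDEF) and sched_run() reporting the incoming one as (KERNEL_PID_UNDEF, pid), a scheduler callback now sees two half-transitions instead of one pid pair. A minimal sketch of a callback registered via the sched_cb module's sched_register_cb() hook (the same hook schedstatistics uses below); the printf body is illustrative only.

#include <stdio.h>
#include "sched.h"

static void trace_cb(kernel_pid_t active, kernel_pid_t next)
{
    /* active == KERNEL_PID_UNDEF: a thread is about to be scheduled in;
     * next   == KERNEL_PID_UNDEF: the running thread is being descheduled */
    printf("sched: %d -> %d\n", (int)active, (int)next);
}

void trace_init(void)
{
    sched_register_cb(trace_cb);
}
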
@@ -13,11 +13,13 @@ config CPU_ARCH_ARMV7M
     bool
     select HAS_ARCH_ARM
     select HAS_ARCH_32BIT
+    select HAS_NO_IDLE_THREAD
 
 config CPU_ARCH_ARMV8M
     bool
     select HAS_ARCH_ARM
     select HAS_ARCH_32BIT
+    select HAS_NO_IDLE_THREAD
 
 config CPU_ARCH
     default "armv6m" if CPU_ARCH_ARMV6M
@@ -30,3 +30,8 @@ else ifeq ($(CPU_CORE),cortex-m23)
 else
   $(error Unkwnown cortexm core: $(CPU_CORE))
 endif
+
+# cortex-m3 and higher don't need the idle thread
+ifneq (,$(filter armv7m armv8m,$(CPU_ARCH)))
+  FEATURES_PROVIDED += no_idle_thread
+endif
@@ -446,3 +446,24 @@ void __attribute__((used)) isr_svc(void)
     SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
 }
 #endif /* MODULE_CORTEXM_SVC */
+
+void sched_arch_idle(void)
+{
+    /* by default, PendSV has the same priority as other ISRs.
+     * In this function, we temporarily lower the priority (set higher value),
+     * allowing other ISRs to interrupt.
+     *
+     * According to [this](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHJICIE.html),
+     * dynamically changing the priority is not supported on CortexM0(+).
+     */
+    NVIC_SetPriority(PendSV_IRQn, CPU_CORTEXM_PENDSV_IRQ_PRIO + 1);
+    __DSB();
+    __ISB();
+#ifdef MODULE_PM_LAYERED
+    void pm_set_lowest(void);
+    pm_set_lowest();
+#else
+    __WFI();
+#endif
+    NVIC_SetPriority(PendSV_IRQn, CPU_CORTEXM_PENDSV_IRQ_PRIO);
+}
@@ -67,6 +67,11 @@ config HAS_ETHERNET
     help
         Indicates that Ethernet connectivity is present.
 
+config HAS_NO_IDLE_THREAD
+    bool
+    help
+        Indicates that this MCU doesn't need the idle thread
+
 config HAS_MOTOR_DRIVER
     bool
     help
@@ -28,3 +28,8 @@ endif
 
 # select cortexm_svc pseudomodule if the corresponding feature is used
 USEMODULE += $(filter cortexm_svc, $(FEATURES_USED))
+
+# select core_idle_thread if the feature no_idle_thread is *not* used
+ifeq (, $(filter no_idle_thread, $(FEATURES_USED)))
+  USEMODULE += core_idle_thread
+endif
@@ -31,15 +31,18 @@ void sched_statistics_cb(kernel_pid_t active_thread, kernel_pid_t next_thread)
 {
     uint32_t now = xtimer_now().ticks32;
 
-    /* Update active thread runtime, there is always an active thread since
-       first sched_run happens when main_trampoline gets scheduled */
-    schedstat_t *active_stat = &sched_pidlist[active_thread];
-    active_stat->runtime_ticks += now - active_stat->laststart;
+    /* Update active thread stats */
+    if (active_thread != KERNEL_PID_UNDEF) {
+        schedstat_t *active_stat = &sched_pidlist[active_thread];
+        active_stat->runtime_ticks += now - active_stat->laststart;
+    }
 
     /* Update next_thread stats */
-    schedstat_t *next_stat = &sched_pidlist[next_thread];
-    next_stat->laststart = now;
-    next_stat->schedules++;
+    if (next_thread != KERNEL_PID_UNDEF) {
+        schedstat_t *next_stat = &sched_pidlist[next_thread];
+        next_stat->laststart = now;
+        next_stat->schedules++;
+    }
 }
 
 void init_schedstatistics(void)
@@ -24,8 +24,7 @@ EXPECTED_HELP = (
 
 EXPECTED_PS = (
     '\tpid | state Q | pri',
-    '\t 1 | pending Q | 15',
-    '\t 2 | running Q | 7'
+    '\t \d | running Q | 7'
 )
 
 # In native we are directly executing the binary (no terminal program). We must
@@ -102,6 +101,8 @@ CMDS = (
     ('end_test', '[TEST_END]'),
 )
 
+CMDS_REGEX = {'ps'}
+
 BOARD = os.environ['BOARD']
 
 
@@ -112,10 +113,14 @@ def print_error(message):
 
 
 def check_cmd(child, cmd, expected):
+    regex = cmd in CMDS_REGEX
     child.expect(PROMPT)
     child.sendline(cmd)
     for line in expected:
-        child.expect_exact(line)
+        if regex:
+            child.expect(line)
+        else:
+            child.expect_exact(line)
 
 
 def check_startup(child):
@@ -28,6 +28,9 @@ BASELIBS += $(UNIT_TESTS:%=$(BINDIR)/%.a)
 
 INCLUDES += -I$(RIOTBASE)/tests/unittests/common
 
+# some tests need more stack
+CFLAGS += -DTHREAD_STACKSIZE_MAIN=THREAD_STACKSIZE_LARGE
+
 include $(RIOTBASE)/Makefile.include
 
 .PHONY: $(UNIT_TESTS)