2015-09-29 13:41:33 +02:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
|
|
|
|
* 2013 Ludwig Knüpfer <ludwig.knuepfer@fu-berlin.de>
|
2013-03-06 10:29:49 +01:00
|
|
|
*
|
2014-07-31 19:45:27 +02:00
|
|
|
* This file is subject to the terms and conditions of the GNU Lesser
|
|
|
|
* General Public License v2.1. See the file LICENSE in the top level
|
|
|
|
* directory for more details.
|
2015-09-29 13:41:33 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
2018-06-01 12:17:51 +02:00
|
|
|
* @ingroup cpu_native
|
2013-03-06 10:29:49 +01:00
|
|
|
* @{
|
2015-09-29 13:41:33 +02:00
|
|
|
*
|
2013-03-06 10:29:49 +01:00
|
|
|
* @file
|
2015-09-29 13:41:33 +02:00
|
|
|
* @brief Native CPU kernel_intern.h and sched.h implementation
|
|
|
|
*
|
|
|
|
* in-process preemptive context switching utilizes POSIX ucontexts.
|
|
|
|
* (ucontext provides for architecture independent stack handling)
|
|
|
|
*
|
2015-09-27 18:58:30 +02:00
|
|
|
* @author Ludwig Knüpfer <ludwig.knuepfer@fu-berlin.de>
|
2015-09-29 13:41:33 +02:00
|
|
|
* @author Kaspar Schleiser <kaspar@schleiser.de>
|
2013-03-06 10:29:49 +01:00
|
|
|
*/
|
2014-02-14 16:18:40 +01:00
|
|
|
|
2022-11-21 12:24:58 +01:00
|
|
|
/* __USE_GNU for gregs[REG_EIP] access under glibc
|
|
|
|
* _GNU_SOURCE for REG_EIP and strsignal() under musl */
|
2014-11-28 12:14:54 +01:00
|
|
|
#define __USE_GNU
|
2022-11-21 12:24:58 +01:00
|
|
|
#define _GNU_SOURCE
|
|
|
|
|
|
|
|
#include <err.h>
|
2014-11-28 12:14:54 +01:00
|
|
|
#include <signal.h>
|
2022-11-21 12:24:58 +01:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <unistd.h>
|
2014-11-28 12:14:54 +01:00
|
|
|
|
2022-11-21 12:24:58 +01:00
|
|
|
#if USE_LIBUCONTEXT
|
|
|
|
#include <libucontext/libucontext.h>
|
|
|
|
#else
|
2013-03-06 01:08:15 +01:00
|
|
|
#include <ucontext.h>
|
2022-11-21 12:24:58 +01:00
|
|
|
#endif
|
2013-03-06 01:08:15 +01:00
|
|
|
|
2013-09-30 14:07:10 +02:00
|
|
|
#ifdef HAVE_VALGRIND_H
|
|
|
|
#include <valgrind.h>
|
|
|
|
#define VALGRIND_DEBUG DEBUG
|
|
|
|
#elif defined(HAVE_VALGRIND_VALGRIND_H)
|
|
|
|
#include <valgrind/valgrind.h>
|
|
|
|
#define VALGRIND_DEBUG DEBUG
|
|
|
|
#else
|
2023-12-08 11:22:32 +01:00
|
|
|
#define VALGRIND_STACK_REGISTER(...) (0)
|
2013-09-30 14:07:10 +02:00
|
|
|
#define VALGRIND_DEBUG(...)
|
|
|
|
#endif
|
|
|
|
|
2013-12-02 11:39:11 +01:00
|
|
|
#include <stdlib.h>
|
|
|
|
|
2013-03-06 01:08:15 +01:00
|
|
|
#include "cpu.h"
|
2015-05-22 14:31:23 +02:00
|
|
|
#include "cpu_conf.h"
|
2022-11-21 12:24:58 +01:00
|
|
|
#include "irq.h"
|
|
|
|
#include "sched.h"
|
|
|
|
#include "test_utils/expect.h"
|
2015-08-06 12:12:47 +02:00
|
|
|
|
2017-02-15 13:07:34 +01:00
|
|
|
#ifdef MODULE_NETDEV_TAP
|
|
|
|
#include "netdev_tap.h"
|
|
|
|
extern netdev_tap_t netdev_tap;
|
2015-07-19 12:05:38 +02:00
|
|
|
#endif
|
2013-09-30 14:07:10 +02:00
|
|
|
|
2013-11-07 17:23:08 +01:00
|
|
|
#include "native_internal.h"
|
|
|
|
|
2020-10-22 11:34:00 +02:00
|
|
|
#define ENABLE_DEBUG 0
|
2013-03-06 01:08:15 +01:00
|
|
|
#include "debug.h"
|
|
|
|
|
2013-11-07 17:23:08 +01:00
|
|
|
/* Terminal context: a thread whose task function returns resumes here via
 * `uc_link` (see thread_stack_init()) and runs sched_task_exit()
 * (set up in native_cpu_init()). */
ucontext_t end_context;
|
2013-03-06 01:08:15 +01:00
|
|
|
|
2014-11-28 12:14:54 +01:00
|
|
|
/**
 * make the new context assign `_native_in_isr = 0` before resuming
 *
 * Works by hijacking the saved program counter in @p ctx: the original PC is
 * stashed in `_native_saved_eip` and replaced with the address of
 * `_native_sig_leave_handler`, so that when @p ctx is resumed the trampoline
 * runs first (clearing `_native_in_isr`) and then jumps to the saved PC.
 * The register that holds the PC is platform/ABI specific, hence the #ifdef
 * cascade below.
 *
 * @param ctx   context about to be resumed; modified in place
 */
static void _native_mod_ctx_leave_sigh(ucontext_t *ctx)
{
#if defined(__FreeBSD__)
    /* FreeBSD: the PC lives in sigcontext's sc_eip */
    _native_saved_eip = ((struct sigcontext *)ctx)->sc_eip;
    ((struct sigcontext *)ctx)->sc_eip = (unsigned int)&_native_sig_leave_handler;
#else /* Linux */
#if defined(__arm__)
    /* Linux/ARM: the PC lives in uc_mcontext.arm_pc */
    _native_saved_eip = ((ucontext_t *)ctx)->uc_mcontext.arm_pc;
    ((ucontext_t *)ctx)->uc_mcontext.arm_pc = (unsigned int)&_native_sig_leave_handler;
#else /* Linux/x86 */
#ifdef __x86_64__
    /* Linux/x86_64: the PC is general register REG_RIP */
    _native_saved_eip = ctx->uc_mcontext.gregs[REG_RIP];
    ctx->uc_mcontext.gregs[REG_RIP] = (unsigned long)&_native_sig_leave_handler;
#else
    /* Linux/x86 (32 bit): the PC is general register REG_EIP
     * (requires __USE_GNU/_GNU_SOURCE, defined at the top of this file) */
    _native_saved_eip = ctx->uc_mcontext.gregs[REG_EIP];
    ctx->uc_mcontext.gregs[REG_EIP] = (unsigned int)&_native_sig_leave_handler;
#endif
#endif
#endif
}
|
|
|
|
|
2013-03-06 01:08:15 +01:00
|
|
|
/**
 * Print the active thread's stack.
 *
 * Stub on the native CPU: only emits a debug trace.
 *
 * TODO: implement
 */
void thread_print_stack(void)
{
    DEBUG("thread_print_stack\n");
}
|
|
|
|
|
2016-06-01 20:35:00 +02:00
|
|
|
/**
 * Calculate how much of the ISR stack has been used.
 *
 * @return  -1, since measuring ISR stack usage is not implemented
 *          on the native CPU (TODO)
 */
int thread_isr_stack_usage(void)
{
    /* not implemented for native — report unavailability to the caller */
    return -1;
}
|
|
|
|
|
2023-03-09 13:51:03 +01:00
|
|
|
/**
 * Trigger a breakpoint by raising SIGTRAP, which an attached debugger
 * (e.g. gdb) will stop on.
 */
void native_breakpoint(void)
{
    raise(SIGTRAP);
}
|
|
|
|
|
2022-01-27 21:09:59 +01:00
|
|
|
/**
 * Align a stack region to the native word size.
 *
 * Rounds @p start up to the next multiple of `sizeof(uintptr_t)` and shrinks
 * @p stacksize accordingly, additionally truncating it to a whole number of
 * words.
 *
 * @param start      bottom address of the stack region
 * @param stacksize  in/out: available size in bytes; reduced by the bytes
 *                   skipped at the bottom and any partial word at the top
 *
 * @return the word-aligned bottom address
 */
static inline void *align_stack(uintptr_t start, int *stacksize)
{
    const uintptr_t mask = sizeof(uintptr_t) - 1;
    /* round the bottom address up to the next word boundary */
    const uintptr_t aligned = (start + mask) & ~mask;

    /* account for the bytes skipped at the bottom ... */
    *stacksize -= (int)(aligned - start);
    /* ... and drop any unusable partial word at the top */
    *stacksize &= ~(int)mask;

    return (void *)aligned;
}
|
|
|
|
|
2014-03-04 20:20:01 +01:00
|
|
|
/**
 * Set up a new thread's stack with an initial ucontext.
 *
 * The stack is word-aligned, registered with valgrind (when available), and
 * a ucontext_t is carved out of its top. The context is prepared so that it
 * runs @p task_func with @p arg and chains into `end_context` when the task
 * function returns.
 *
 * @param task_func    entry point for the new thread
 * @param arg          single argument passed to @p task_func
 * @param stack_start  bottom of the memory region to use as stack
 * @param stacksize    size of that region in bytes
 *
 * @return stack pointer for the new thread (address of the embedded context)
 */
char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_start, int stacksize)
{
    stack_start = align_stack((uintptr_t)stack_start, &stacksize);

    (void) VALGRIND_STACK_REGISTER(stack_start, (char *)stack_start + stacksize);
    VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n",
                   stack_start, (void*)((char *)stack_start + stacksize));

    DEBUG("thread_stack_init\n");

    /* Reserve room for the context at the top of the stack. Intermediate
     * cast to uintptr_t silences -Wcast-align; the stack was word-aligned
     * above. */
    stacksize -= sizeof(ucontext_t);
    ucontext_t *ctx = (ucontext_t *)(uintptr_t)((uint8_t *)stack_start + stacksize);

    if (getcontext(ctx) == -1) {
        err(EXIT_FAILURE, "thread_stack_init: getcontext");
    }

    ctx->uc_stack.ss_sp = stack_start;
    ctx->uc_stack.ss_size = stacksize;
    ctx->uc_stack.ss_flags = 0;
    /* when task_func returns, control transfers to end_context
     * (which runs sched_task_exit, see native_cpu_init) */
    ctx->uc_link = &end_context;

    /* start the thread with no blocked signals */
    if (sigemptyset(&(ctx->uc_sigmask)) == -1) {
        err(EXIT_FAILURE, "thread_stack_init: sigemptyset");
    }

    makecontext(ctx, (void (*)(void)) task_func, 1, arg);

    return (char *) ctx;
}
|
|
|
|
|
2013-11-08 13:12:15 +01:00
|
|
|
/**
 * Finish a context switch from ISR context: run the scheduler if a switch
 * was requested (or no thread is active), then resume the active thread's
 * saved context.
 *
 * Never returns on success — setcontext() transfers control to the thread.
 */
void isr_cpu_switch_context_exit(void)
{
    ucontext_t *ctx;

    DEBUG("isr_cpu_switch_context_exit\n");
    /* without the thread module there is nothing to schedule */
    if (((sched_context_switch_request == 1) || (thread_get_active() == NULL))
        && IS_USED(MODULE_CORE_THREAD)) {
        sched_run();
    }

    DEBUG("isr_cpu_switch_context_exit: calling setcontext(%" PRIkernel_pid ")\n\n", thread_getpid());
    /* Use intermediate cast to uintptr_t to silence -Wcast-align.
     * stacks are manually word aligned in thread_static_init() */
    ctx = (ucontext_t *)(uintptr_t)(thread_get_active()->sp);

    native_interrupts_enabled = 1;
    /* redirect the resumed context so it clears _native_in_isr first */
    _native_mod_ctx_leave_sigh(ctx);

    if (setcontext(ctx) == -1) {
        err(EXIT_FAILURE, "isr_cpu_switch_context_exit: setcontext");
    }
    /* setcontext does not return on success */
    errx(EXIT_FAILURE, "2 this should have never been reached!!");
}
|
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
/**
 * Switch to the next runnable thread without saving the current context
 * (the current thread is exiting).
 *
 * If called from thread context, hops onto the ISR stack first via
 * setcontext() and continues in isr_cpu_switch_context_exit(); if already
 * in ISR context, calls it directly. Never returns.
 */
void cpu_switch_context_exit(void)
{
#ifdef NATIVE_AUTO_EXIT
    /* terminate the native process when the last thread ends */
    if (sched_num_threads <= 1) {
        extern unsigned _native_retval;
        DEBUG("cpu_switch_context_exit: last task has ended. exiting.\n");
        real_exit(_native_retval);
    }
#endif

    if (_native_in_isr == 0) {
        irq_disable();
        _native_in_isr = 1;
        /* prepare a fresh context running on the dedicated ISR stack */
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = __isr_stack_size;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_cpu_switch_context_exit, 0);
        if (setcontext(&native_isr_context) == -1) {
            err(EXIT_FAILURE, "cpu_switch_context_exit: setcontext");
        }
        /* setcontext does not return on success */
        errx(EXIT_FAILURE, "1 this should have never been reached!!");
    }
    else {
        isr_cpu_switch_context_exit();
    }
    errx(EXIT_FAILURE, "3 this should have never been reached!!");
}
|
2013-03-06 01:08:15 +01:00
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
/**
 * Yield from ISR context: drain pending signals, run the scheduler, and
 * resume the (possibly different) active thread via setcontext().
 *
 * Does not return on success when the thread module is used.
 */
void isr_thread_yield(void)
{
    DEBUG("isr_thread_yield\n");

    /* deliver any signals that arrived while we were switching */
    if (_native_sigpend > 0) {
        DEBUG("isr_thread_yield(): handling signals\n\n");
        native_irq_handler();
    }

    /* without the thread module there is no thread context to resume */
    if (!IS_USED(MODULE_CORE_THREAD)) {
        return;
    }
    sched_run();

    /* Use intermediate cast to uintptr_t to silence -Wcast-align.
     * stacks are manually word aligned in thread_static_init() */
    ucontext_t *ctx = (ucontext_t *)(uintptr_t)(thread_get_active()->sp);
    DEBUG("isr_thread_yield: switching to(%" PRIkernel_pid ")\n\n",
          thread_getpid());

    native_interrupts_enabled = 1;
    /* redirect the resumed context so it clears _native_in_isr first */
    _native_mod_ctx_leave_sigh(ctx);

    if (setcontext(ctx) == -1) {
        err(EXIT_FAILURE, "isr_thread_yield: setcontext");
    }
}
|
2013-06-21 03:52:57 +02:00
|
|
|
|
2014-10-18 01:24:49 +02:00
|
|
|
/**
 * Request a context switch to a higher-priority thread.
 *
 * From thread context with interrupts enabled, swaps onto the ISR stack and
 * runs isr_thread_yield(), returning here when this thread is scheduled
 * again. From ISR context (or with interrupts disabled), only sets the
 * switch request flag — the switch happens when the ISR/irq state unwinds.
 */
void thread_yield_higher(void)
{
    sched_context_switch_request = 1;

    if (_native_in_isr == 0 && native_interrupts_enabled) {
        /* Use intermediate cast to uintptr_t to silence -Wcast-align.
         * stacks are manually word aligned in thread_static_init() */
        ucontext_t *ctx = (ucontext_t *)(uintptr_t)(thread_get_active()->sp);
        _native_in_isr = 1;
        irq_disable();
        /* run isr_thread_yield on the dedicated ISR stack; swapcontext
         * saves this thread's context into ctx so it can be resumed */
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = __isr_stack_size;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_thread_yield, 0);
        if (swapcontext(ctx, &native_isr_context) == -1) {
            err(EXIT_FAILURE, "thread_yield_higher: swapcontext");
        }
        /* execution resumes here once this thread is scheduled again */
        irq_enable();
    }
}
|
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
/**
 * Initialize the native CPU: set up `end_context`, the context every thread
 * falls into (via uc_link) when its task function returns, running
 * sched_task_exit() on its own heap-allocated stack.
 */
void native_cpu_init(void)
{
    if (getcontext(&end_context) == -1) {
        err(EXIT_FAILURE, "native_cpu_init: getcontext");
    }

    /* end_context needs its own stack; allocation failure is fatal.
     * NOTE(review): this stack is allocated once and intentionally never
     * freed — it must outlive every thread. */
    end_context.uc_stack.ss_sp = malloc(SIGSTKSZ);
    expect(end_context.uc_stack.ss_sp != NULL);
    end_context.uc_stack.ss_size = SIGSTKSZ;
    end_context.uc_stack.ss_flags = 0;
    makecontext(&end_context, sched_task_exit, 0);
    (void)VALGRIND_STACK_REGISTER(end_context.uc_stack.ss_sp,
        (char *)end_context.uc_stack.ss_sp + end_context.uc_stack.ss_size);
    VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n",
        (void*)end_context.uc_stack.ss_sp,
        (void*)((char *)end_context.uc_stack.ss_sp + end_context.uc_stack.ss_size));

    DEBUG("RIOT native cpu initialized.\n");
}
|
2013-03-13 21:56:56 +01:00
|
|
|
/** @} */
|