/**
 * Native CPU irq.h implementation
 *
 * Copyright (C) 2013 Ludwig Knüpfer <ludwig.knuepfer@fu-berlin.de>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 *
 * @ingroup cpu_native
 * @{
 * @file
 * @author Ludwig Knüpfer <ludwig.knuepfer@fu-berlin.de>
 */
/* __USE_GNU for gregs[REG_EIP] access under glibc
* _GNU_SOURCE for REG_EIP and strsignal() under musl */
#define __USE_GNU
#define _GNU_SOURCE
#include <err.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef HAVE_VALGRIND_H
#include <valgrind.h>
#define VALGRIND_DEBUG DEBUG
#elif defined(HAVE_VALGRIND_VALGRIND_H)
#include <valgrind/valgrind.h>
#define VALGRIND_DEBUG DEBUG
#else
#define VALGRIND_STACK_REGISTER(...) (0)
#define VALGRIND_DEBUG(...)
#endif
#include "irq.h"
#include "cpu.h"
#include "periph/pm.h"
#include "native_internal.h"
#include "test_utils/expect.h"
#define ENABLE_DEBUG 0
#include "debug.h"
volatile int native_interrupts_enabled = 0;
volatile int _native_in_isr;
volatile int _native_in_syscall;
static sigset_t _native_sig_set, _native_sig_set_dint;
char __isr_stack[THREAD_STACKSIZE_DEFAULT];
const size_t __isr_stack_size = sizeof(__isr_stack);
ucontext_t native_isr_context;
ucontext_t *_native_cur_ctx, *_native_isr_ctx;
volatile uintptr_t _native_saved_eip;
volatile int _native_sigpend;
int _sig_pipefd[2];
static _native_callback_t native_irq_handlers[255];
2013-04-15 20:08:46 +02:00
2017-10-20 17:26:10 +02:00
void *thread_isr_stack_pointer(void)
{
return native_isr_context.uc_stack.ss_sp;
}
2017-10-20 17:26:10 +02:00
void *thread_isr_stack_start(void)
{
return __isr_stack;
}
2013-04-15 20:08:46 +02:00
void print_thread_sigmask(ucontext_t *cp)
{
sigset_t *p = &cp->uc_sigmask;
if (sigemptyset(p) == -1) {
err(EXIT_FAILURE, "print_thread_sigmask: sigemptyset");
2013-04-15 20:08:46 +02:00
}
for (int i = 1; i < (NSIG); i++) {
2014-10-29 18:00:38 +01:00
if (native_irq_handlers[i] != NULL) {
2013-04-15 20:08:46 +02:00
printf("%s: %s\n",
strsignal(i),
(sigismember(&_native_sig_set, i) ? "blocked" : "unblocked")
2013-04-15 20:08:46 +02:00
);
}
if (sigismember(p, i)) {
2013-04-15 20:08:46 +02:00
printf("%s: pending\n", strsignal(i));
}
}
}
#ifdef DEVELHELP
2013-04-15 20:08:46 +02:00
void print_sigmasks(void)
{
for (int i = 0; i < MAXTHREADS; i++) {
if (sched_threads[i] != NULL) {
ucontext_t *p;
2013-04-15 20:08:46 +02:00
printf("%s:\n", sched_threads[i]->name);
//print_thread_sigmask(sched_threads[i]->sp);
/* Use intermediate cast to uintptr_t to silence -Wcast-align.
* stacks are manually word aligned in thread_static_init() */
p = (ucontext_t *)(uintptr_t)(sched_threads[i]->stack_start);
2013-04-15 20:08:46 +02:00
print_thread_sigmask(p);
puts("");
}
}
}
#endif
2013-04-15 20:08:46 +02:00
void native_print_signals(void)
2013-04-15 20:08:46 +02:00
{
sigset_t p, q;
puts("native signals:\n");
if (sigemptyset(&p) == -1) {
err(EXIT_FAILURE, "native_print_signals: sigemptyset");
2013-04-15 20:08:46 +02:00
}
if (sigpending(&p) == -1) {
err(EXIT_FAILURE, "native_print_signals: sigpending");
2013-04-15 20:08:46 +02:00
}
if (sigprocmask(SIG_SETMASK, NULL, &q) == -1) {
err(EXIT_FAILURE, "native_print_signals: sigprocmask");
2013-04-15 20:08:46 +02:00
}
for (int i = 1; i < (NSIG); i++) {
2014-10-29 18:00:38 +01:00
if (native_irq_handlers[i] != NULL || i == SIGUSR1) {
printf("%s: %s in active thread\n",
strsignal(i),
(sigismember(&_native_sig_set, i) ? "blocked" : "unblocked")
2013-04-15 20:08:46 +02:00
);
}
if (sigismember(&p, i)) {
2013-04-15 20:08:46 +02:00
printf("%s: pending\n", strsignal(i));
}
if (sigismember(&q, i)) {
printf("%s: blocked in this context\n", strsignal(i));
2013-04-15 20:08:46 +02:00
}
}
}
/**
* block signals
*/
unsigned irq_disable(void)
2013-03-06 01:08:15 +01:00
{
unsigned int prev_state;
_native_syscall_enter();
DEBUG("irq_disable()\n");
2013-03-06 01:08:15 +01:00
if (_native_in_isr == 1) {
DEBUG("irq_disable + _native_in_isr\n");
}
if (sigprocmask(SIG_SETMASK, &_native_sig_set_dint, NULL) == -1) {
err(EXIT_FAILURE, "irq_disable: sigprocmask");
2013-03-06 01:08:15 +01:00
}
2013-03-06 01:08:15 +01:00
prev_state = native_interrupts_enabled;
native_interrupts_enabled = 0;
DEBUG("irq_disable(): return\n");
_native_syscall_leave();
2013-03-06 01:08:15 +01:00
return prev_state;
}
/**
* unblock signals
*/
unsigned irq_enable(void)
2013-03-06 01:08:15 +01:00
{
unsigned int prev_state;
if (_native_in_isr == 1) {
2015-09-12 12:43:15 +02:00
#ifdef DEVELHELP
real_write(STDERR_FILENO, "irq_enable + _native_in_isr\n", 27);
#else
DEBUG("irq_enable + _native_in_isr\n");
#endif
}
_native_syscall_enter();
DEBUG("irq_enable()\n");
/* Mark the IRQ as enabled first since sigprocmask could call the handler
* before returning to userspace.
*/
prev_state = native_interrupts_enabled;
native_interrupts_enabled = 1;
if (sigprocmask(SIG_SETMASK, &_native_sig_set, NULL) == -1) {
err(EXIT_FAILURE, "irq_enable: sigprocmask");
2013-03-06 01:08:15 +01:00
}
_native_syscall_leave();
if (_native_in_isr == 0 && sched_context_switch_request) {
DEBUG("irq_enable() deferred thread_yield_higher()\n");
thread_yield_higher();
}
DEBUG("irq_enable(): return\n");
2013-04-15 20:08:46 +02:00
2013-03-06 01:08:15 +01:00
return prev_state;
}
void irq_restore(unsigned state)
2013-03-06 01:08:15 +01:00
{
DEBUG("irq_restore()\n");
2013-03-06 01:08:15 +01:00
if (state == 1) {
irq_enable();
2013-03-06 01:08:15 +01:00
}
else {
irq_disable();
2013-03-06 01:08:15 +01:00
}
2013-03-06 01:08:15 +01:00
return;
}
bool irq_is_enabled(void)
2019-03-06 18:02:54 +01:00
{
return native_interrupts_enabled;
}
bool irq_is_in(void)
2013-03-06 01:08:15 +01:00
{
DEBUG("irq_is_in: %i\n", _native_in_isr);
return _native_in_isr;
2013-03-06 01:08:15 +01:00
}
int _native_popsig(void)
{
int nread, nleft, i;
int sig = 0;
nleft = sizeof(int);
i = 0;
while ((nleft > 0) && ((nread = real_read(_sig_pipefd[0], ((uint8_t*)&sig) + i, nleft)) != -1)) {
i += nread;
nleft -= nread;
}
if (nread == -1) {
err(EXIT_FAILURE, "_native_popsig: real_read");
}
return sig;
}
2013-03-06 01:08:15 +01:00
/**
* call signal handlers,
2013-03-06 01:08:15 +01:00
* restore user context
*/
void native_irq_handler(void)
2013-03-06 01:08:15 +01:00
{
DEBUG("\n\n\t\tnative_irq_handler\n\n");
while (_native_sigpend > 0) {
2014-07-25 08:17:06 +02:00
int sig = _native_popsig();
_native_sigpend--;
2014-10-29 18:00:38 +01:00
if (native_irq_handlers[sig] != NULL) {
DEBUG("native_irq_handler: calling interrupt handler for %i\n", sig);
2014-10-29 18:00:38 +01:00
native_irq_handlers[sig]();
}
else if (sig == SIGUSR1) {
warnx("native_irq_handler: ignoring SIGUSR1");
}
else {
errx(EXIT_FAILURE, "XXX: no handler for signal %i\nXXX: this should not have happened!\n", sig);
}
2013-03-06 01:08:15 +01:00
}
DEBUG("native_irq_handler: return\n");
2013-03-06 01:08:15 +01:00
cpu_switch_context_exit();
}
void isr_set_sigmask(ucontext_t *ctx)
{
ctx->uc_sigmask = _native_sig_set_dint;
native_interrupts_enabled = 0;
}
2013-03-06 01:08:15 +01:00
/**
* save signal, return to _native_sig_leave_tramp if possible
2013-03-06 01:08:15 +01:00
*/
void native_isr_entry(int sig, siginfo_t *info, void *context)
{
(void) info; /* unused at the moment */
//printf("\n\033[33m\n\t\tnative_isr_entry(%i)\n\n\033[0m", sig);
2013-03-06 01:08:15 +01:00
/* save the signal */
if (real_write(_sig_pipefd[1], &sig, sizeof(int)) == -1) {
err(EXIT_FAILURE, "native_isr_entry: real_write()");
}
_native_sigpend++;
//real_write(STDOUT_FILENO, "sigpend\n", 8);
if (context == NULL) {
errx(EXIT_FAILURE, "native_isr_entry: context is null - unhandled");
}
if (thread_get_active() == NULL) {
_native_in_isr++;
warnx("native_isr_entry: thread_get_active() is null - unhandled");
_native_in_isr--;
return;
}
2013-03-06 01:08:15 +01:00
/* XXX: Workaround safety check - whenever this happens it really
* indicates a bug in irq_disable */
if (native_interrupts_enabled == 0) {
//printf("interrupts are off, but I caught a signal.\n");
return;
}
2013-11-20 19:50:07 +01:00
if (_native_in_isr != 0) {
//real_write(STDOUT_FILENO, "interrupts in ISR!!\n", 20);
return;
}
if (_native_in_syscall != 0) {
DEBUG("\n\n\t\tnative_isr_entry: return to syscall\n\n");
return;
}
native_isr_context.uc_stack.ss_sp = __isr_stack;
2014-11-28 11:47:42 +01:00
native_isr_context.uc_stack.ss_size = sizeof(__isr_stack);
native_isr_context.uc_stack.ss_flags = 0;
makecontext(&native_isr_context, native_irq_handler, 0);
/* Use intermediate cast to uintptr_t to silence -Wcast-align.
* stacks are manually word aligned in thread_stack_init() */
_native_cur_ctx = (ucontext_t *)(uintptr_t)thread_get_active()->sp;
DEBUG("\n\n\t\tnative_isr_entry: return to _native_sig_leave_tramp\n\n");
/* disable interrupts in context */
isr_set_sigmask((ucontext_t *)context);
_native_in_isr = 1;
#if defined(__FreeBSD__)
_native_saved_eip = ((struct sigcontext *)context)->sc_eip;
((struct sigcontext *)context)->sc_eip = (unsigned int)&_native_sig_leave_tramp;
2014-11-28 11:47:42 +01:00
#else /* Linux */
#if defined(__arm__)
2014-08-29 18:30:37 +02:00
_native_saved_eip = ((ucontext_t *)context)->uc_mcontext.arm_pc;
((ucontext_t *)context)->uc_mcontext.arm_pc = (unsigned int)&_native_sig_leave_tramp;
2014-11-28 11:47:42 +01:00
#else /* Linux/x86 */
#ifdef __x86_64__
_native_saved_eip = ((ucontext_t *)context)->uc_mcontext.gregs[REG_RIP];
((ucontext_t *)context)->uc_mcontext.gregs[REG_RIP] = (uintptr_t)&_native_sig_leave_tramp;
#else
//printf("\n\033[31mEIP:\t%p\ngo switching\n\n\033[0m", (void*)((ucontext_t *)context)->uc_mcontext.gregs[REG_EIP]);
_native_saved_eip = ((ucontext_t *)context)->uc_mcontext.gregs[REG_EIP];
((ucontext_t *)context)->uc_mcontext.gregs[REG_EIP] = (unsigned int)&_native_sig_leave_tramp;
#endif
2013-05-15 17:45:43 +02:00
#endif
2014-08-29 18:30:37 +02:00
#endif
2013-03-06 01:08:15 +01:00
}
/**
* Add or remove handler for signal
*
* To be called with interrupts disabled
*
*/
void set_signal_handler(int sig, bool add)
{
struct sigaction sa;
int ret;
/* update the signal mask so irq_enable()/irq_disable() will be aware */
if (add) {
_native_syscall_enter();
ret = sigdelset(&_native_sig_set, sig);
_native_syscall_leave();
} else {
_native_syscall_enter();
ret = sigaddset(&_native_sig_set, sig);
_native_syscall_leave();
}
if (ret == -1) {
err(EXIT_FAILURE, "set_signal_handler: sigdelset");
}
memset(&sa, 0, sizeof(sa));
/* Disable other signal during execution of the handler for this signal. */
memcpy(&sa.sa_mask, &_native_sig_set_dint, sizeof(sa.sa_mask));
/* restart interrupted systems call and custom signal stack */
sa.sa_flags = SA_RESTART | SA_ONSTACK;
if (add) {
sa.sa_flags |= SA_SIGINFO; /* sa.sa_sigaction is used */
sa.sa_sigaction = native_isr_entry;
} else
{
sa.sa_handler = SIG_IGN;
}
_native_syscall_enter();
if (sigaction(sig, &sa, NULL)) {
err(EXIT_FAILURE, "set_signal_handler: sigaction");
}
_native_syscall_leave();
}
2013-03-06 01:08:15 +01:00
/**
* register signal/interrupt handler for signal sig
*
* TODO: use appropriate data structure for signal
2013-03-06 01:08:15 +01:00
* handlers.
*/
2014-10-29 18:00:38 +01:00
int register_interrupt(int sig, _native_callback_t handler)
2013-03-06 01:08:15 +01:00
{
DEBUG("register_interrupt\n");
2013-03-06 01:08:15 +01:00
unsigned state = irq_disable();
2013-03-06 01:08:15 +01:00
2014-10-29 18:00:38 +01:00
native_irq_handlers[sig] = handler;
set_signal_handler(sig, true);
2013-03-06 01:08:15 +01:00
irq_restore(state);
2013-03-06 01:08:15 +01:00
return 0;
}
/**
* empty signal mask
2013-03-06 01:08:15 +01:00
*/
int unregister_interrupt(int sig)
{
DEBUG("unregister_interrupt\n");
2013-03-06 01:08:15 +01:00
unsigned state = irq_disable();
set_signal_handler(sig, false);
2014-10-29 18:00:38 +01:00
native_irq_handlers[sig] = NULL;
2013-03-06 01:08:15 +01:00
irq_restore(state);
2013-03-06 01:08:15 +01:00
return 0;
}
2014-11-14 20:50:39 +01:00
static void native_shutdown(int sig, siginfo_t *info, void *context)
2013-10-23 22:43:58 +02:00
{
(void)sig;
(void)info;
(void)context;
2017-01-09 17:41:58 +01:00
pm_off();
2013-10-23 22:43:58 +02:00
}
2013-03-06 01:08:15 +01:00
/**
* register internal signal handler,
2019-10-23 21:13:53 +02:00
* initialize local variables
*
2013-03-06 01:08:15 +01:00
* TODO: see register_interrupt
*/
void native_interrupt_init(void)
{
struct sigaction sa;
DEBUG("native_interrupt_init\n");
2013-03-06 01:08:15 +01:00
(void) VALGRIND_STACK_REGISTER(__isr_stack, __isr_stack + sizeof(__isr_stack));
VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n",
(void *)__isr_stack, (void*)(__isr_stack + sizeof(__isr_stack)));
2013-09-30 14:07:10 +02:00
_native_sigpend = 0;
2013-04-15 20:08:46 +02:00
for (int i = 0; i < 255; i++) {
2014-10-29 18:00:38 +01:00
native_irq_handlers[i] = NULL;
2013-03-06 01:08:15 +01:00
}
sa.sa_sigaction = native_isr_entry;
2013-11-20 21:47:59 +01:00
if (sigfillset(&sa.sa_mask) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigfillset");
2013-03-06 01:08:15 +01:00
}
sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
2013-03-06 01:08:15 +01:00
2015-07-08 02:07:49 +02:00
/* We want to white list authorized signals */
if (sigfillset(&_native_sig_set) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigprocmask");
2013-04-15 20:08:46 +02:00
}
2014-01-29 15:31:10 +01:00
/* we need to disable all signals during our signal handler as it
* can not cope with interrupted signals ... */
if (sigfillset(&_native_sig_set_dint) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigfillset");
}
2013-04-15 20:08:46 +02:00
/* SIGUSR1 is intended for debugging purposes and shall always be
* enabled */
if (sigdelset(&_native_sig_set, SIGUSR1) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigdelset");
}
if (sigdelset(&_native_sig_set_dint, SIGUSR1) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigdelset");
2013-03-06 01:08:15 +01:00
}
/* SIGUSR1 is handled like a regular interrupt */
if (sigaction(SIGUSR1, &sa, NULL)) {
err(EXIT_FAILURE, "native_interrupt_init: sigaction");
2013-03-06 01:08:15 +01:00
}
if (getcontext(&native_isr_context) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: getcontext");
2013-04-15 20:08:46 +02:00
}
native_isr_context.uc_stack.ss_sp = __isr_stack;
2014-11-28 11:47:42 +01:00
native_isr_context.uc_stack.ss_size = sizeof(__isr_stack);
native_isr_context.uc_stack.ss_flags = 0;
_native_isr_ctx = &native_isr_context;
static stack_t sigstk;
sigstk.ss_sp = malloc(SIGSTKSZ);
expect(sigstk.ss_sp != NULL);
sigstk.ss_size = SIGSTKSZ;
sigstk.ss_flags = 0;
if (sigaltstack(&sigstk, NULL) < 0) {
err(EXIT_FAILURE, "native_interrupt_init: sigaltstack");
2013-04-15 20:08:46 +02:00
}
makecontext(&native_isr_context, native_irq_handler, 0);
_native_in_syscall = 0;
2013-04-15 20:08:46 +02:00
if (real_pipe(_sig_pipefd) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: pipe");
2013-04-15 20:08:46 +02:00
}
/* allow for ctrl+c to shut down gracefully always */
2014-11-14 20:50:39 +01:00
//register_interrupt(SIGINT, native_shutdown);
sa.sa_sigaction = native_shutdown;
if (sigdelset(&_native_sig_set, SIGINT) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigdelset");
}
if (sigdelset(&_native_sig_set_dint, SIGINT) == -1) {
err(EXIT_FAILURE, "native_interrupt_init: sigdelset");
}
if (sigaction(SIGINT, &sa, NULL)) {
err(EXIT_FAILURE, "native_interrupt_init: sigaction");
}
2013-10-23 22:43:58 +02:00
2013-03-06 01:08:15 +01:00
puts("RIOT native interrupts/signals initialized.");
}
/** @} */