/**
 * Native CPU kernel_internal.h and sched.h implementation
 *
 * In-process preemptive context switching utilizes POSIX ucontext.
 * (ucontext provides for architecture independent stack handling.)
 *
 * Copyright (C) 2013 Ludwig Ortmann
 *
 * This file is subject to the terms and conditions of the GNU Lesser General
 * Public License. See the file LICENSE in the top level directory for more
 * details.
 *
 * @ingroup native_cpu
 * @{
 * @file
 * @author Ludwig Ortmann <ludwig.ortmann@fu-berlin.de>
 */
#include <stdio.h>
#include <stdlib.h>
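
/* Mac OS X only exposes the (deprecated) ucontext API when _XOPEN_SOURCE
 * is defined, so define it around the include */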
#ifdef __MACH__
#define _XOPEN_SOURCE
#endif
#include <ucontext.h>
#ifdef __MACH__
#undef _XOPEN_SOURCE
#endif
#include <err.h>
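
/* register thread stacks with Valgrind when its headers are available;
 * otherwise the macros compile away to no-ops */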
#ifdef HAVE_VALGRIND_H
#include <valgrind.h>
#define VALGRIND_DEBUG DEBUG
#elif defined(HAVE_VALGRIND_VALGRIND_H)
#include <valgrind/valgrind.h>
#define VALGRIND_DEBUG DEBUG
#else
#define VALGRIND_STACK_REGISTER(...)
#define VALGRIND_DEBUG(...)
#endif
#include "kernel_internal.h"
#include "sched.h"
#include "cpu.h"
#include "cpu-conf.h"
#include "native_internal.h"

#define ENABLE_DEBUG (0)
#include "debug.h"

extern volatile tcb_t *active_thread;
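
/* end_context is the thread exit trampoline: every thread's uc_link points
 * here, so when a task function returns, sched_task_exit() runs on
 * __end_stack (set up in native_cpu_init()) */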
ucontext_t end_context;
char __end_stack[SIGSTKSZ];
#ifdef MODULE_UART0
fd_set _native_rfds;
#endif
/**
 * TODO: implement
 */
void thread_print_stack(void)
{
    DEBUG("XXX: thread_print_stack()\n");
    return;
}

char *thread_stack_init(void (*task_func)(void), void *stack_start, int stacksize)
{
    unsigned int *stk;
    ucontext_t *p;

    VALGRIND_STACK_REGISTER(stack_start, stack_start + stacksize);
    VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n",
                   stack_start, (void *)((char *)stack_start + stacksize));

    DEBUG("thread_stack_init()\n");

    stk = stack_start;
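
    /* carve the thread's ucontext_t out of its own stack memory: at the low
     * end with NATIVESPONTOP, at the high end otherwise */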
#ifdef NATIVESPONTOP
    p = (ucontext_t *)stk;
    stk += sizeof(ucontext_t) / sizeof(void *);
    stacksize -= sizeof(ucontext_t);
#else
    p = (ucontext_t *)(stk + ((stacksize - sizeof(ucontext_t)) / sizeof(void *)));
    stacksize -= sizeof(ucontext_t);
#endif

    if (getcontext(p) == -1) {
        err(EXIT_FAILURE, "thread_stack_init(): getcontext()");
    }

    p->uc_stack.ss_sp = stk;
    p->uc_stack.ss_size = stacksize;
    p->uc_stack.ss_flags = 0;
    p->uc_link = &end_context;

    if (sigemptyset(&(p->uc_sigmask)) == -1) {
        err(EXIT_FAILURE, "thread_stack_init(): sigemptyset()");
    }

    makecontext(p, task_func, 0);

    return (char *) p;
}
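
/* runs on the ISR stack: if a context switch is pending or no thread is
 * active, lets the scheduler pick the next thread, then jumps into it via
 * setcontext(); does not return */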
void isr_cpu_switch_context_exit(void)
{
    ucontext_t *ctx;

    DEBUG("XXX: isr_cpu_switch_context_exit()\n");

    if ((sched_context_switch_request == 1) || (active_thread == NULL)) {
        sched_run();
    }

    DEBUG("XXX: isr_cpu_switch_context_exit(): calling setcontext(%s)\n\n", active_thread->name);
    ctx = (ucontext_t *)(active_thread->sp);

    /* the next context will have interrupts enabled due to ucontext */
    DEBUG("XXX: isr_cpu_switch_context_exit: native_interrupts_enabled = 1;\n");
    native_interrupts_enabled = 1;
    _native_in_isr = 0;

    if (setcontext(ctx) == -1) {
        err(EXIT_FAILURE, "isr_cpu_switch_context_exit(): setcontext()");
    }
}
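
/* discards the current context: switches to the dedicated ISR stack to run
 * isr_cpu_switch_context_exit(), which starts the next thread; never
 * returns to the caller */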
void cpu_switch_context_exit(void)
{
    if (_native_in_isr == 0) {
        dINT();
        _native_in_isr = 1;
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_cpu_switch_context_exit, 0);

        if (setcontext(&native_isr_context) == -1) {
            err(EXIT_FAILURE, "cpu_switch_context_exit(): setcontext()");
        }
    }
    else {
        isr_cpu_switch_context_exit();
    }

    errx(EXIT_FAILURE, "cpu_switch_context_exit(): this should never be reached");
}
void isr_thread_yield(void)
{
    DEBUG("isr_thread_yield()\n");

    sched_run();

    ucontext_t *ctx = (ucontext_t *)(active_thread->sp);
    DEBUG("isr_thread_yield(): switching to (%s)\n\n", active_thread->name);

    native_interrupts_enabled = 1;
    _native_in_isr = 0;

    if (setcontext(ctx) == -1) {
        err(EXIT_FAILURE, "isr_thread_yield(): setcontext()");
    }
}
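
/* saves the current thread's context, then runs isr_thread_yield() on the
 * dedicated ISR stack; when this thread is scheduled again, swapcontext()
 * returns and interrupts are re-enabled via eINT() */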
void thread_yield(void)
{
    ucontext_t *ctx = (ucontext_t *)(active_thread->sp);

    if (_native_in_isr == 0) {
        _native_in_isr = 1;
        dINT();
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_thread_yield, 0);

        if (swapcontext(ctx, &native_isr_context) == -1) {
            err(EXIT_FAILURE, "thread_yield(): swapcontext()");
        }

        eINT();
    }
    else {
        isr_thread_yield();
    }
}
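
/* called once during startup, before any thread runs, to prepare the
 * exit trampoline context */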
void native_cpu_init(void)
{
    if (getcontext(&end_context) == -1) {
        err(EXIT_FAILURE, "native_cpu_init(): getcontext()");
    }

    end_context.uc_stack.ss_sp = __end_stack;
    end_context.uc_stack.ss_size = SIGSTKSZ;
    end_context.uc_stack.ss_flags = 0;
    makecontext(&end_context, sched_task_exit, 0);

    VALGRIND_STACK_REGISTER(__end_stack, __end_stack + sizeof(__end_stack));
    VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n",
                   __end_stack, (void *)(__end_stack + sizeof(__end_stack)));

    DEBUG("RIOT native cpu initialized.\n");
}

/** @} */