2013-03-06 10:29:49 +01:00
|
|
|
/**
|
|
|
|
* Native CPU kernel_intern.h and sched.h implementation
|
|
|
|
*
|
2013-03-13 21:56:56 +01:00
|
|
|
* in-process preemptive context switching utilizes POSIX ucontexts.
|
|
|
|
* (ucontext provides for architecture independent stack handling)
|
|
|
|
*
|
2014-05-15 18:07:02 +02:00
|
|
|
* Copyright (C) 2013 Ludwig Ortmann <ludwig.ortmann@fu-berlin.de>
|
2013-03-06 10:29:49 +01:00
|
|
|
*
|
2013-11-22 20:47:05 +01:00
|
|
|
* This file is subject to the terms and conditions of the GNU Lesser General
|
2013-06-18 17:21:38 +02:00
|
|
|
* Public License. See the file LICENSE in the top level directory for more
|
|
|
|
* details.
|
2013-03-06 10:29:49 +01:00
|
|
|
*
|
2013-03-13 21:56:56 +01:00
|
|
|
* @ingroup native_cpu
|
2013-03-06 10:29:49 +01:00
|
|
|
* @{
|
|
|
|
* @file
|
|
|
|
* @author Ludwig Ortmann <ludwig.ortmann@fu-berlin.de>
|
|
|
|
*/
|
2014-02-14 16:18:40 +01:00
|
|
|
|
2013-03-06 01:08:15 +01:00
|
|
|
#include <stdio.h>
|
2014-02-14 16:18:40 +01:00
|
|
|
#include <unistd.h>
|
2013-12-02 11:39:11 +01:00
|
|
|
|
2013-05-15 17:45:43 +02:00
|
|
|
#ifdef __MACH__
|
|
|
|
#define _XOPEN_SOURCE
|
|
|
|
#endif
|
2013-03-06 01:08:15 +01:00
|
|
|
#include <ucontext.h>
|
2013-05-15 17:45:43 +02:00
|
|
|
#ifdef __MACH__
|
|
|
|
#undef _XOPEN_SOURCE
|
|
|
|
#endif
|
2013-03-06 01:08:15 +01:00
|
|
|
#include <err.h>
|
|
|
|
|
2013-09-30 14:07:10 +02:00
|
|
|
#ifdef HAVE_VALGRIND_H
|
|
|
|
#include <valgrind.h>
|
|
|
|
#define VALGRIND_DEBUG DEBUG
|
|
|
|
#elif defined(HAVE_VALGRIND_VALGRIND_H)
|
|
|
|
#include <valgrind/valgrind.h>
|
|
|
|
#define VALGRIND_DEBUG DEBUG
|
|
|
|
#else
|
|
|
|
#define VALGRIND_STACK_REGISTER(...)
|
|
|
|
#define VALGRIND_DEBUG(...)
|
|
|
|
#endif
|
|
|
|
|
2013-12-02 11:39:11 +01:00
|
|
|
#include <stdlib.h>
|
|
|
|
|
2013-07-16 16:36:37 +02:00
|
|
|
#include "kernel_internal.h"
|
2014-02-14 16:18:40 +01:00
|
|
|
#include "kernel.h"
|
|
|
|
#include "irq.h"
|
2013-03-06 01:08:15 +01:00
|
|
|
#include "sched.h"
|
|
|
|
|
|
|
|
#include "cpu.h"
|
|
|
|
#include "cpu-conf.h"
|
2014-07-13 16:23:27 +02:00
|
|
|
#ifdef MODULE_NATIVENET
|
|
|
|
#include "tap.h"
|
|
|
|
#endif
|
2013-09-30 14:07:10 +02:00
|
|
|
|
2013-11-07 17:23:08 +01:00
|
|
|
#include "native_internal.h"
|
|
|
|
|
2013-09-30 14:07:10 +02:00
|
|
|
#define ENABLE_DEBUG (0)
|
2013-03-06 01:08:15 +01:00
|
|
|
#include "debug.h"
|
|
|
|
|
2014-04-10 22:28:35 +02:00
|
|
|
/* currently running thread, maintained by the scheduler (sched.c) */
extern volatile tcb_t *sched_active_thread;

/* context that threads return into when their task function exits;
 * set up in native_cpu_init() to run sched_task_exit() */
ucontext_t end_context;
/* dedicated stack for end_context */
char __end_stack[SIGSTKSZ];

#ifdef MODULE_UART0
/* fd set used by the UART0 module for select()-based I/O */
fd_set _native_rfds;
#endif
|
|
|
|
|
2014-03-01 09:36:17 +01:00
|
|
|
/**
 * Reboot the native instance by re-executing the original binary.
 *
 * Closes the tap device (if networking is compiled in) and then replaces
 * the current process image via execve() with the original argv.
 *
 * @param mode  ignored on native
 * @return      never returns on success; process image is replaced
 */
int reboot_arch(int mode)
{
    (void) mode;

    printf("\n\n\t\t!! REBOOT !!\n\n");

#ifdef MODULE_UART0
    /* TODO: close stdio fds */
#endif

#ifdef MODULE_NATIVENET
    /* release the tap device so the re-executed process can reopen it */
    if (_native_tap_fd != -1) {
        real_close(_native_tap_fd);
    }
#endif

    /* re-execute ourselves with the original command line */
    if (real_execve(_native_argv[0], _native_argv, NULL) == -1) {
        err(EXIT_FAILURE, "reboot: execve");
    }

    /* execve() only returns on failure, which is handled above */
    errx(EXIT_FAILURE, "reboot: this should not have been reached");
}
|
|
|
|
|
2013-03-06 01:08:15 +01:00
|
|
|
/**
 * Print the current thread's stack (TODO: not implemented on native).
 */
void thread_print_stack(void)
{
    DEBUG("XXX: thread_print_stack()\n");
}
|
|
|
|
|
2014-03-04 20:20:01 +01:00
|
|
|
/**
 * Initialize a thread's stack with a ucontext.
 *
 * A ucontext_t is carved out of the thread's stack region (at its top by
 * default, at its bottom with NATIVESPONTOP) and set up so that the thread
 * starts in task_func(arg) and falls through into end_context when the
 * task function returns.
 *
 * @param task_func    thread entry point
 * @param arg          argument passed to task_func
 * @param stack_start  lowest address of the thread's stack region
 * @param stacksize    size of the stack region in bytes
 * @return             the thread's initial stack pointer (points at the
 *                     prepared ucontext_t)
 */
char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_start, int stacksize)
{
    unsigned int *stk;
    ucontext_t *p;

    VALGRIND_STACK_REGISTER(stack_start, (char *) stack_start + stacksize);
    /* fixed: use char* arithmetic instead of casting the pointer to int,
     * which truncates addresses on LP64 hosts */
    VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n",
                   stack_start, (void *)((char *) stack_start + stacksize));

    DEBUG("thread_stack_init()\n");

    stk = (unsigned int *)stack_start;

#ifdef NATIVESPONTOP
    /* place the context at the bottom of the stack region */
    p = (ucontext_t *)stk;
    stk += sizeof(ucontext_t) / sizeof(void *);
    stacksize -= sizeof(ucontext_t);
#else
    /* place the context at the top of the stack region */
    p = (ucontext_t *)(stk + ((stacksize - sizeof(ucontext_t)) / sizeof(void *)));
    stacksize -= sizeof(ucontext_t);
#endif

    if (getcontext(p) == -1) {
        err(EXIT_FAILURE, "thread_stack_init(): getcontext()");
    }

    /* remaining stack space is the thread's working stack */
    p->uc_stack.ss_sp = stk;
    p->uc_stack.ss_size = stacksize;
    p->uc_stack.ss_flags = 0;
    /* returning from task_func continues in end_context (sched_task_exit) */
    p->uc_link = &end_context;

    /* start the thread with no signals blocked */
    if (sigemptyset(&(p->uc_sigmask)) == -1) {
        err(EXIT_FAILURE, "thread_stack_init(): sigemptyset()");
    }

    makecontext(p, (void (*)(void)) task_func, 1, arg);

    return (char *) p;
}
|
|
|
|
|
2013-11-08 13:12:15 +01:00
|
|
|
/**
 * ISR-side half of cpu_switch_context_exit().
 *
 * Runs on the ISR stack: picks the next runnable thread (if a switch was
 * requested or no thread is active), re-enables interrupts, leaves ISR
 * state, and jumps into the selected thread's saved ucontext. Never
 * returns.
 */
void isr_cpu_switch_context_exit(void)
{
    ucontext_t *ctx;

    DEBUG("XXX: cpu_switch_context_exit()\n");
    /* run the scheduler if a switch is pending or nothing is active yet */
    if ((sched_context_switch_request == 1) || (sched_active_thread == NULL)) {
        sched_run();
    }

    DEBUG("XXX: cpu_switch_context_exit(): calling setcontext(%s)\n\n", sched_active_thread->name);
    ctx = (ucontext_t *)(sched_active_thread->sp);

    /* the next context will have interrupts enabled due to ucontext */
    DEBUG("XXX: cpu_switch_context_exit: native_interrupts_enabled = 1;\n");
    native_interrupts_enabled = 1;
    _native_in_isr = 0;

    /* jump into the next thread; does not return on success */
    if (setcontext(ctx) == -1) {
        err(EXIT_FAILURE, "cpu_switch_context_exit(): setcontext():");
    }
    errx(EXIT_FAILURE, "2 this should have never been reached!!");
}
|
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
void cpu_switch_context_exit(void)
|
2013-03-06 01:08:15 +01:00
|
|
|
{
|
2014-04-04 08:21:23 +02:00
|
|
|
#ifdef NATIVE_AUTO_EXIT
|
2014-04-10 22:28:35 +02:00
|
|
|
if (sched_num_threads <= 1) {
|
2014-04-04 08:21:23 +02:00
|
|
|
DEBUG("cpu_switch_context_exit(): last task has ended. exiting.\n");
|
|
|
|
exit(EXIT_SUCCESS);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-11-08 13:12:15 +01:00
|
|
|
if (_native_in_isr == 0) {
|
|
|
|
dINT();
|
|
|
|
_native_in_isr = 1;
|
|
|
|
native_isr_context.uc_stack.ss_sp = __isr_stack;
|
|
|
|
native_isr_context.uc_stack.ss_size = SIGSTKSZ;
|
|
|
|
native_isr_context.uc_stack.ss_flags = 0;
|
|
|
|
makecontext(&native_isr_context, isr_cpu_switch_context_exit, 0);
|
|
|
|
if (setcontext(&native_isr_context) == -1) {
|
2013-11-20 15:25:11 +01:00
|
|
|
err(EXIT_FAILURE, "cpu_switch_context_exit: swapcontext");
|
2013-11-08 13:12:15 +01:00
|
|
|
}
|
2014-01-29 10:54:52 +01:00
|
|
|
errx(EXIT_FAILURE, "1 this should have never been reached!!");
|
2013-11-08 13:12:15 +01:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
isr_cpu_switch_context_exit();
|
|
|
|
}
|
2014-01-29 10:54:52 +01:00
|
|
|
errx(EXIT_FAILURE, "3 this should have never been reached!!");
|
2013-11-08 13:12:15 +01:00
|
|
|
}
|
2013-03-06 01:08:15 +01:00
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
/**
 * ISR-side half of thread_yield().
 *
 * Runs on the ISR stack: invokes the scheduler, re-enables interrupts,
 * leaves ISR state, and jumps into the selected thread's saved ucontext.
 * Returns to the yielding thread only via that thread's saved context,
 * not by a normal function return.
 */
void isr_thread_yield(void)
{
    DEBUG("isr_thread_yield()\n");

    /* let the scheduler pick the next thread to run */
    sched_run();
    ucontext_t *ctx = (ucontext_t *)(sched_active_thread->sp);
    DEBUG("isr_thread_yield(): switching to(%s)\n\n", sched_active_thread->name);

    /* the resumed context runs with interrupts enabled (ucontext) */
    native_interrupts_enabled = 1;
    _native_in_isr = 0;
    if (setcontext(ctx) == -1) {
        err(EXIT_FAILURE, "isr_thread_yield(): setcontext()");
    }
}
|
2013-06-21 03:52:57 +02:00
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
/**
 * Yield the CPU to the next runnable thread.
 *
 * When called from thread context, saves the current thread's context and
 * swaps onto the ISR stack to run isr_thread_yield(); execution resumes
 * here (after swapcontext) when this thread is scheduled again, at which
 * point interrupts are re-enabled. When already in ISR context, runs the
 * ISR-side half directly.
 */
void thread_yield(void)
{
    /* current thread's saved context; swapcontext stores into it so that
     * the thread can be resumed later */
    ucontext_t *ctx = (ucontext_t *)(sched_active_thread->sp);
    if (_native_in_isr == 0) {
        _native_in_isr = 1;
        dINT();
        /* run the scheduler on the dedicated ISR stack */
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_thread_yield, 0);
        if (swapcontext(ctx, &native_isr_context) == -1) {
            err(EXIT_FAILURE, "thread_yield: swapcontext");
        }
        /* resumed here when this thread is scheduled again */
        eINT();
    }
    else {
        isr_thread_yield();
    }
}
|
|
|
|
|
2014-05-07 12:36:32 +02:00
|
|
|
void native_cpu_init(void)
|
2013-03-06 01:08:15 +01:00
|
|
|
{
|
2013-06-24 22:37:35 +02:00
|
|
|
if (getcontext(&end_context) == -1) {
|
2013-11-20 15:25:11 +01:00
|
|
|
err(EXIT_FAILURE, "end_context(): getcontext()");
|
2013-03-06 01:08:15 +01:00
|
|
|
}
|
2013-06-21 03:52:57 +02:00
|
|
|
|
2013-09-30 15:31:40 +02:00
|
|
|
end_context.uc_stack.ss_sp = __end_stack;
|
2013-05-14 18:31:47 +02:00
|
|
|
end_context.uc_stack.ss_size = SIGSTKSZ;
|
|
|
|
end_context.uc_stack.ss_flags = 0;
|
|
|
|
makecontext(&end_context, sched_task_exit, 0);
|
2013-09-30 14:07:10 +02:00
|
|
|
VALGRIND_STACK_REGISTER(__end_stack, __end_stack + sizeof(__end_stack));
|
|
|
|
VALGRIND_DEBUG("VALGRIND_STACK_REGISTER(%p, %p)\n", __end_stack, (void*)((int)__end_stack + sizeof(__end_stack)));
|
2013-05-14 18:31:47 +02:00
|
|
|
|
2014-01-22 18:23:10 +01:00
|
|
|
DEBUG("RIOT native cpu initialized.\n");
|
2013-03-06 01:08:15 +01:00
|
|
|
}
|
2013-03-13 21:56:56 +01:00
|
|
|
/** @} */
|