
Merge pull request #1357 from haukepetersen/fix_cortexm3_threading

cpu: cortex-m3_common: cleanup thread_arch.c
Thomas Eichinger 2014-07-02 16:54:33 +02:00
commit d56e90a3f0

@@ -20,14 +20,15 @@
*/
#include <stdint.h>
#include <stdio.h>
#include "arch/thread_arch.h"
#include "thread.h"
#include "sched.h"
#include "irq.h"
#include "cpu.h"
#include "kernel_internal.h"
/**
* @name Marker denoting the beginning of a stack segment
*
@@ -41,10 +42,8 @@
*/
#define EXCEPT_RET_TASK_MODE (0xfffffffd)
static void context_save(void);
static void context_restore(void) NORETURN;
static void enter_thread_mode(void) NORETURN;
/**
* The Cortex-M3 stacks part of the register set in hardware on exception entry, so a different stack frame layout is used
@@ -92,103 +91,89 @@ char *thread_arch_stack_init(void (*task_func)(void), void *stack_start, int st
*stk = i;
}
/* lr means exception return code */
/* put LR to trigger return to thread stack pointer */
stk--;
*stk = (uint32_t)EXCEPT_RET_TASK_MODE; /* return to task-mode main stack pointer */
*stk = (uint32_t)EXCEPT_RET_TASK_MODE;
return (char*) stk;
}
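For orientation, here is a minimal sketch of the initial frame that thread_arch_stack_init() builds (the hunk above omits the top of the function), written to match the pops in context_restore() further down. The xPSR value and the sched_task_exit() exit hook are assumptions, not taken from this diff.

/* Sketch only, not the code from this commit: build an initial Cortex-M3
 * thread frame that context_restore() and the hardware exception return can
 * unstack. STACK_MARKER and EXCEPT_RET_TASK_MODE come from this file;
 * sched_task_exit() (run if the thread function returns) and the xPSR value
 * are assumptions. */
static char *stack_init_sketch(void (*task_func)(void), void *stack_start, int stack_size)
{
    uint32_t *stk = (uint32_t *)((uint8_t *)stack_start + stack_size);

    *(--stk) = STACK_MARKER;              /* top-of-stack marker                      */
    *(--stk) = 0x01000000;                /* xPSR: Thumb state bit set (assumed)      */
    *(--stk) = (uint32_t)task_func;       /* PC: thread entry point                   */
    *(--stk) = (uint32_t)sched_task_exit; /* LR: runs if the thread function returns  */
    *(--stk) = 12;                        /* R12 (dummy value, handy when debugging)  */
    for (int i = 3; i >= 0; i--) {        /* R3..R0, popped by hardware on exception  */
        *(--stk) = (uint32_t)i;           /* return                                   */
    }
    for (int i = 11; i >= 4; i--) {       /* R11..R4, popped by context_restore()     */
        *(--stk) = (uint32_t)i;
    }
    *(--stk) = EXCEPT_RET_TASK_MODE;      /* exception return: thread mode, use PSP   */

    return (char *)stk;
}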
void thread_arch_stack_print(void)
{
/* TODO */
int count = 0;
uint32_t *sp = (uint32_t *)sched_active_thread->sp;
printf("printing the current stack of thread %u\n", thread_getpid());
printf(" address: data:\n");
do {
printf(" 0x%08x: 0x%08x\n", (unsigned int)sp, (unsigned int)*sp);
sp++;
count++;
} while (*sp != STACK_MARKER);
printf("current stack size: %u byte\n", count);
}
void thread_arch_start_threading(void)
__attribute__((naked)) void thread_arch_start_threading(void)
{
sched_run();
/* enable IRQs to make sure the SVC interrupt is reachable */
enableIRQ();
enter_thread_mode();
}
/**
* @brief Set the MCU into Thread-Mode and load the initial task from the stack and run it
*/
void NORETURN enter_thread_mode(void)
{
/* switch to user mode use PSP instead of MSP in ISR Mode*/
CONTROL_Type mode;
mode.w = __get_CONTROL();
mode.b.SPSEL = 1; /* select PSP */
mode.b.nPRIV = 0; /* privilege */
__set_CONTROL(mode.w);
/* load pdc->stackpointer in r0 */
asm("ldr r0, =sched_active_thread" ); /* r0 = &sched_active_thread */
asm("ldr r0, [r0]" ); /* r0 = *r0 = sched_active_thread */
asm("ldr sp, [r0]" ); /* sp = r0 restore stack pointer*/
asm("pop {r4}" ); /* skip exception return */
asm("pop {r4-r11}" );
asm("pop {r0-r3,r12,lr}" ); /* get registers from stack */
asm("pop {r4}" ); /* get PC */
asm("pop {r5}" ); /* discard the xPSR entry */
asm("mov pc, r4" ); /* load PC */
UNREACHABLE();
/* trigger the SVC interrupt which will get and execute the next thread */
asm("svc 0x01");
}
void thread_arch_yield(void)
{
if (irq_arch_in()) {
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; /* set PendSV Bit */
}
else {
asm("svc 0x01\n"); /* trigger SVC interrupt, which will trigger the pendSV (needed?) */
}
}
/**
* @brief SVC interrupt handler (to be discussed if this is really needed)
*/
__attribute__((naked)) void isr_svc(void)
{
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; /* trigger the pendsv interrupt */
asm("bx LR" ); /* load exception return value to PC causes end of exception*/
}
__attribute__((naked)) void isr_pendsv(void)
{
context_save();
sched_run();
context_restore();
/* trigger the PENDSV interrupt to run scheduler and schedule new thread if applicable */
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
}
__attribute__((always_inline)) static __INLINE void context_save(void)
{
/* {r0-r3,r12,LR,PC,xPSR} are saved automatically on exception entry */
/* save unsaved registers */
asm("mrs r0, psp" ); /* get stack pointer from user mode */
asm("stmdb r0!,{r4-r11}" ); /* save regs */
asm("stmdb r0!,{lr}" ); /* exception return value */
/* asm("vstmdb sp!, {s16-s31}" ); */ /* TODO save FPU registers if needed */
asm("ldr r1, =sched_active_thread" ); /* load address of current tcb */
asm("ldr r1, [r1]" ); /* dereference pdc */
asm("str r0, [r1]" ); /* write r0 to pdc->sp means current threads stack pointer */
/* save unsaved registers onto the stack */
asm("mrs r0, psp" ); /* get stack pointer from user mode */
asm("stmdb r0!,{r4-r11}" ); /* save regs */
asm("stmdb r0!,{lr}" ); /* exception return value */
asm("ldr r1, =sched_active_thread" ); /* load address of current TCB */
asm("ldr r1, [r1]" ); /* dereference TCB */
asm("str r0, [r1]" ); /* write r0 to tcb->sp */
}
__attribute__((always_inline)) static __INLINE void context_restore(void)
{
asm("ldr r0, =sched_active_thread" ); /* load address of current TCB */
asm("ldr r0, [r0]" ); /* dereference TCB */
asm("ldr r1, [r0]" ); /* load tcb->sp to register 1 */
asm("ldmia r1!, {r0}" ); /* restore exception return value from stack */
/* asm("pop {s16-s31}" ); */ /* TODO load FPU register if needed depends on r0 exret */
asm("ldmia r1!, {r4-r11}" ); /* restore other registers */
asm("msr psp, r1" ); /* restore PSP register (user mode SP)*/
asm("bx r0" ); /* load exception return value to PC causes end of exception*/
/* restore registers from stack */
asm("ldr r0, =sched_active_thread" ); /* load address of current TCB */
asm("ldr r0, [r0]" ); /* dereference TCB */
asm("ldr r1, [r0]" ); /* load tcb->sp to register 1 */
asm("ldmia r1!, {r0}" ); /* restore exception return value from stack */
asm("ldmia r1!, {r4-r11}" ); /* restore other registers */
asm("msr psp, r1" ); /* restore PSP register (user mode SP) */
asm("bx r0" ); /* load exception return value to PC */
/* {r0-r3,r12,LR,PC,xPSR} are restored automatically on exception return */
UNREACHABLE();
}
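For readers not fluent in MRS/MSR: the PSP handling in context_save() and context_restore() corresponds to the CMSIS intrinsics below. This is an illustration only, the real switch has to stay in naked assembly so the compiler cannot clobber R4-R11 in between, and it assumes sched_active_thread->sp is the char * member used elsewhere in this file.

/* Illustration only: what the "mrs r0, psp" / "msr psp, r1" pair expresses,
 * using the CMSIS intrinsics __get_PSP()/__set_PSP(). The real code also
 * pushes R4-R11 and the exception return value below the PSP before storing
 * it; that part cannot be written portably in C. */
static inline void save_psp_sketch(void)
{
    sched_active_thread->sp = (char *)__get_PSP();    /* remember the thread's stack pointer */
}

static inline void restore_psp_sketch(void)
{
    __set_PSP((uint32_t)sched_active_thread->sp);     /* reload PSP for the next thread      */
}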
/**
* @brief The SVC interrupt is used to run the scheduler and schedule a new thread during start-up
* or after a thread has exited
*/
__attribute__((naked)) void isr_svc(void)
{
sched_run();
context_restore();
}
/**
* @brief All task switching activity is carried out in the PendSV interrupt
*/
__attribute__((naked)) void isr_pendsv(void)
{
context_save();
sched_run();
context_restore();
}
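As a usage note, an interrupt handler that wakes a higher-priority thread can request a switch through thread_arch_yield(); the switch itself then happens in isr_pendsv() after the handler returns. The device handler name and the sched_context_switch_request check below are illustrative assumptions, not part of this commit.

/* Hypothetical device ISR, for illustration only: request a context switch
 * from interrupt context. thread_arch_yield() pends PendSV, and isr_pendsv()
 * performs the actual switch once this handler has returned. */
void isr_some_device(void)
{
    /* ... acknowledge the device, wake up a waiting thread ... */

    if (sched_context_switch_request) {   /* set when a higher-priority thread became ready */
        thread_arch_yield();
    }
}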