mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00

added NORETURNs to functions that should not return

and added `UNREACHABLE();` to hint to the compiler which lines are unreachable

added the right signature for the first parameter of `thread_stack_init()`
added `UNREACHABLE();` macro to `cpu/lpc1768/atom.c` and `cpu/msp430-common/cpu.c`
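
For reference, a minimal sketch of what these macros typically expand to with GCC/Clang; the authoritative definitions live in RIOT's `attributes.h` (which the touched headers now include) and may differ in detail:

```c
/* Illustrative approximation only -- see RIOT's attributes.h for the real macros. */
#ifdef __GNUC__
#define NORETURN        __attribute__((noreturn))   /* promise: the function never returns */
#define UNREACHABLE()   __builtin_unreachable()     /* promise: this point is never executed */
#else
#define NORETURN
#define UNREACHABLE()   do { } while (1)            /* conservative fallback: spin */
#endif
```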
Martin authored on 2014-04-30 09:41:37 +02:00, committed by Martin
parent f1ce664dcf
commit 4e73169b38
5 changed files with 16 additions and 9 deletions

View File

@@ -20,6 +20,8 @@
#ifndef KERNEL_INTERNAL_H_
#define KERNEL_INTERNAL_H_
#include "attributes.h"
/**
* @brief Initializes scheduler and creates main and idle task
*/
@@ -44,7 +46,7 @@ char *thread_stack_init(void (*task_func)(void), void *stack_start, int stack_s
/**
* @brief Removes thread from scheduler and set status to #STATUS_STOPPED
*/
void sched_task_exit(void);
NORETURN void sched_task_exit(void);
/**
* @brief Prints human readable, ps-like thread information for debugging purposes

View File

@@ -25,6 +25,7 @@
#include <stddef.h>
#include "bitarithm.h"
#include "tcb.h"
#include "attributes.h"
#define MAXTHREADS 32 /**< the maximum number of threads to be scheduled */
@@ -65,7 +66,7 @@ void sched_switch(uint16_t current_prio, uint16_t other_prio);
/**
* @brief Call context switching at thread exit
*/
void cpu_switch_context_exit(void);
NORETURN void cpu_switch_context_exit(void);
/**
* Flag indicating whether a context switch is necessary after handling an

View File

@@ -187,7 +187,7 @@ void sched_switch(uint16_t current_prio, uint16_t other_prio)
}
}
void sched_task_exit(void)
NORETURN void sched_task_exit(void)
{
DEBUG("sched_task_exit(): ending task %s...\n", active_thread->name);

View File

@@ -18,9 +18,9 @@
#include "sched.h"
#include "cpu.h"
#include "irq.h"
#include "kernel_internal.h"
extern void sched_task_exit(void);
void sched_task_return(void);
NORETURN void sched_task_return(void);
unsigned int atomic_set_return(unsigned int* p, unsigned int uiVal) {
//unsigned int cspr = disableIRQ(); //crashes
@@ -32,7 +32,7 @@ unsigned int atomic_set_return(unsigned int* p, unsigned int uiVal) {
return uiOldVal;
}
void cpu_switch_context_exit(void){
NORETURN void cpu_switch_context_exit(void){
sched_run();
sched_task_return();
}
@@ -78,7 +78,7 @@ void ctx_switch(void)
sched_task_return();
}
/* call scheduler so active_thread points to the next task */
void sched_task_return(void)
NORETURN void sched_task_return(void)
{
/* load pdc->stackpointer in r0 */
asm("ldr r0, =active_thread"); /* r0 = &active_thread */
@@ -89,6 +89,8 @@ void sched_task_return(void)
asm(" pop {r0-r3,r12,lr}"); /* simulate register restor from stack */
// asm("pop {r4}"); /*foo*/
asm("pop {pc}");
UNREACHABLE();
}
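
The `UNREACHABLE();` added above matters because control leaves `sched_task_return()` through inline assembly (`pop {pc}`), which the compiler cannot analyze; without the hint, a function marked NORETURN would draw a "'noreturn' function does return" warning. A stand-alone, hedged sketch of the pattern (illustrative names, GCC-style attributes assumed):

```c
#define NORETURN        __attribute__((noreturn))
#define UNREACHABLE()   __builtin_unreachable()

/* Control leaves via inline assembly, so the compiler needs the hint to
 * accept that execution never reaches the end of the function. */
NORETURN static void jump_to_saved_context(void)
{
    __asm__ volatile ("pop {pc}");  /* ARM: load the saved return address into pc */
    UNREACHABLE();                  /* suppresses the 'noreturn function does return' warning */
}
```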
/*
* cortex m4 knows stacks and handles register backups
@@ -109,7 +111,7 @@ void sched_task_return(void)
*
*
*/
char * thread_stack_init(void * task_func, void * stack_start, int stack_size ) {
char * thread_stack_init(void (*task_func)(void), void * stack_start, int stack_size ) {
unsigned int * stk;
stk = (unsigned int *) (stack_start + stack_size);
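
With the first parameter now a real function pointer instead of `void *`, thread entry points are type-checked at the call site. A minimal, hedged usage sketch (the entry function and stack below are hypothetical, not RIOT code):

```c
/* Hypothetical thread entry point -- matches the void (*)(void) signature. */
static void blink_task(void)
{
    for (;;) {
        /* toggle an LED, wait, ... */
    }
}

static char blink_stack[256];

static void setup_blink_thread(void)
{
    /* A mismatched entry-point type is now rejected at compile time. */
    char *sp = thread_stack_init(blink_task, blink_stack, sizeof(blink_stack));
    (void)sp;  /* the prepared stack pointer would be handed to the scheduler */
}
```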

View File

@@ -32,12 +32,14 @@ void thread_yield()
__restore_context();
}
void cpu_switch_context_exit(void)
NORETURN void cpu_switch_context_exit(void)
{
active_thread = sched_threads[0];
sched_run();
__restore_context();
UNREACHABLE();
}
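
Marking the definition NORETURN (matching the declaration in `sched.h`) also helps callers: the compiler treats code after a call to `cpu_switch_context_exit()` as dead and drops missing-return warnings on that path. A small hedged sketch with a hypothetical caller:

```c
#define NORETURN __attribute__((noreturn))   /* GCC-style attribute, assumed */

NORETURN void cpu_switch_context_exit(void);

/* Hypothetical boot helper -- not part of RIOT. */
int kernel_boot_step(void)
{
    cpu_switch_context_exit();
    /* No return statement needed: the compiler knows this point is unreachable. */
}
```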
/**