Mirror of https://github.com/RIOT-OS/RIOT.git, synced 2024-12-29 04:50:03 +01:00

Commit ffeb6f8523 (parent 6ca6ae9811): fixed coding conventions (correctly this time)
@@ -31,8 +31,10 @@ number_of_highest_bit(unsigned v)
r |= (v >> 1);
#else
r = 0;
while (v >>= 1) // unroll for more speed...
while(v >>= 1) { // unroll for more speed...
r++;
}

#endif

return r;
@@ -43,7 +45,7 @@ number_of_lowest_bit(register unsigned v)
{
register unsigned r = 0;

while( (v & 0x01) == 0 ) {
while((v & 0x01) == 0) {
v >>= 1;
r++;
};
@@ -55,8 +57,9 @@ unsigned
number_of_bits_set(unsigned v)
{
unsigned c; // c accumulates the total bits set in v
for (c = 0; v; c++) {
v &= v - 1; // clear the least significant bit set

for(c = 0; v; c++) {
v &= v - 1; // clear the least significant bit set
}

return c;
core/cib.c (30 lines changed)
@@ -1,30 +1,34 @@
#include <cib.h>

void cib_init(cib_t *cib, unsigned int size) {
void cib_init(cib_t *cib, unsigned int size)
{
cib->read_count = 0;
cib->write_count = 0;
cib->complement = 0-size;
cib->complement = 0 - size;
}

int cib_avail (cib_t *cib) {
return (int) (cib->write_count - cib->read_count);
int cib_avail(cib_t *cib)
{
return (int)(cib->write_count - cib->read_count);
}

int cib_get(cib_t *cib) {
int avail = cib_avail (cib);
int cib_get(cib_t *cib)
{
int avail = cib_avail(cib);

if (avail > 0) {
return (int) (cib->read_count++ & ~cib->complement);
}
if(avail > 0) {
return (int)(cib->read_count++ & ~cib->complement);
}

return -1;
}

int cib_put(cib_t *cib) {
int avail = cib_avail (cib);
int cib_put(cib_t *cib)
{
int avail = cib_avail(cib);

if ((int)(avail + cib->complement) < 0 ) {
return (int) (cib->write_count++ & ~(cib->complement));
if((int)(avail + cib->complement) < 0) {
return (int)(cib->write_count++ & ~(cib->complement));
}

return -1;
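A minimal usage sketch for the circular index buffer reformatted above (not part of this commit). It assumes the backing array size is a power of two, which the complement/mask arithmetic in cib_get() and cib_put() relies on; BUF_SIZE and buffer are illustrative names.

    /* hedged sketch: pair a cib_t with a user-provided backing array */
    #include <cib.h>

    #define BUF_SIZE 4                 /* assumed power of two */
    static char buffer[BUF_SIZE];
    static cib_t cib;

    void cib_example(void)
    {
        cib_init(&cib, BUF_SIZE);

        int w = cib_put(&cib);         /* reserve the next write slot, -1 if full */
        if (w >= 0) {
            buffer[w] = 'x';
        }

        int r = cib_get(&cib);         /* index of the oldest element, -1 if empty */
        if (r >= 0) {
            /* buffer[r] holds the oldest element */
        }
    }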
core/clist.c (38 lines changed)
@@ -19,16 +19,19 @@
#include <stdio.h>

/* inserts new_node after node */
void clist_add(clist_node_t** node, clist_node_t* new_node) {
if (*node != NULL) {
void clist_add(clist_node_t **node, clist_node_t *new_node)
{
if(*node != NULL) {
new_node->next = (*node);
new_node->prev = (*node)->prev;
(*node)->prev->next = new_node;
(*node)->prev = new_node;
if ((*node)->prev == *node) {

if((*node)->prev == *node) {
(*node)->prev = new_node;
}
} else {
}
else {
*node = new_node;
new_node->next = new_node;
new_node->prev = new_node;
@@ -36,23 +39,32 @@ void clist_add(clist_node_t** node, clist_node_t* new_node) {
}

/* removes node. */
void clist_remove(clist_node_t** list, clist_node_t *node) {
if (node->next != node) {
void clist_remove(clist_node_t **list, clist_node_t *node)
{
if(node->next != node) {
node->prev->next = node->next;
node->next->prev = node->prev;
if (node == *list) *list = node->next;
} else {

if(node == *list) {
*list = node->next;
}
}
else {
*list = NULL;
}
}

void clist_print(clist_node_t* clist) {
void clist_print(clist_node_t *clist)
{
clist_node_t *start = clist;

while (clist != NULL ) {
while(clist != NULL) {
printf("list entry: %u prev=%u next=%u\n", clist->data, clist->prev->data, clist->next->data);
clist = clist->next;
if (clist == start) break;

if(clist == start) {
break;
}
}
}

@@ -89,12 +101,12 @@ int main (int argc, char* argv[]) {

clist_print(clist);
printf("\n");

printf("removing a...\n");

clist_remove(&clist, &a);
clist_print(clist);

printf("removing c...\n");

clist_remove(&clist, &c);
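A short sketch of the circular list API cleaned up above (not from the commit); the node names and data values are illustrative, and the include path is assumed to match the clist.h header shown later in this diff.

    #include <clist.h>

    static clist_node_t a, b;

    void clist_example(void)
    {
        clist_node_t *list = NULL;

        a.data = 1;
        b.data = 2;

        clist_add(&list, &a);      /* first node forms a one-element circle */
        clist_add(&list, &b);      /* b is linked into the same circle */

        clist_advance(&list);      /* head moves to the next node */
        clist_remove(&list, &b);   /* drop b again; a remains */
    }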
@@ -29,23 +29,24 @@

typedef struct hwtimer_t {
void (*callback)(void*);
void* data;
void *data;
} hwtimer_t;

static hwtimer_t timer[ARCH_MAXTIMERS];
static int lifo[ARCH_MAXTIMERS+1];
static int lifo[ARCH_MAXTIMERS + 1];

/*---------------------------------------------------------------------------*/

static void multiplexer(int source) {
// printf("\nhwt: trigger %i.\n", source);
static void multiplexer(int source)
{
lifo_insert(lifo, source);
lpm_prevent_sleep--;

timer[source].callback(timer[source].data);
}

static void hwtimer_wakeup(void* ptr) {
static void hwtimer_wakeup(void *ptr)
{
int pid = (int)ptr;
thread_wakeup(pid);
}
@@ -53,30 +54,36 @@ static void hwtimer_wakeup(void* ptr) {
void hwtimer_spin(unsigned long ticks)
{
unsigned long co = hwtimer_arch_now() + ticks;
while (hwtimer_arch_now() > co);
while (hwtimer_arch_now() < co);

while(hwtimer_arch_now() > co);

while(hwtimer_arch_now() < co);
}

/*---------------------------------------------------------------------------*/

void hwtimer_init(void) {
void hwtimer_init(void)
{
hwtimer_init_comp(F_CPU);
}

/*---------------------------------------------------------------------------*/

void hwtimer_init_comp(uint32_t fcpu) {
void hwtimer_init_comp(uint32_t fcpu)
{
hwtimer_arch_init(multiplexer, fcpu);

lifo_init(lifo, ARCH_MAXTIMERS);
for (int i = 0; i < ARCH_MAXTIMERS; i++) {

for(int i = 0; i < ARCH_MAXTIMERS; i++) {
lifo_insert(lifo, i);
}
}

/*---------------------------------------------------------------------------*/

int hwtimer_active(void) {
int hwtimer_active(void)
{
return (! lifo_empty(lifo));
}

@@ -91,14 +98,15 @@ unsigned long hwtimer_now(void)

void hwtimer_wait(unsigned long ticks)
{
if (ticks <= 6 || inISR()) {
if(ticks <= 6 || inISR()) {
hwtimer_spin(ticks);
return;
}

/* -2 is to adjust the real value */
int res = hwtimer_set(ticks-2, hwtimer_wakeup, (void*) (unsigned int)(active_thread->pid));
if (res == -1) {
int res = hwtimer_set(ticks - 2, hwtimer_wakeup, (void*)(unsigned int)(active_thread->pid));

if(res == -1) {
hwtimer_spin(ticks);
return;
}
@@ -111,44 +119,47 @@ void hwtimer_wait(unsigned long ticks)

static int _hwtimer_set(unsigned long offset, void (*callback)(void*), void *ptr, bool absolute)
{
if (!inISR()) {
if(!inISR()) {
dINT();
}

int n = lifo_get(lifo);
if (n == -1) {
if (! inISR()) {

if(n == -1) {
if(! inISR()) {
eINT();
}

puts("No hwtimer left.");
return -1;
}

timer[n].callback = callback;
timer[n].data = ptr;

if (absolute) {
// printf("hwt: setting %i to %u\n", n, offset);
if(absolute) {
hwtimer_arch_set_absolute(offset, n);
}
else {
// printf("hwt: setting %i to offset %u\n", n, offset);
hwtimer_arch_set(offset, n);
}

lpm_prevent_sleep++;

if (!inISR()) {
if(!inISR()) {
eINT();
}

return n;
}

int hwtimer_set(unsigned long offset, void (*callback)(void*), void *ptr) {
int hwtimer_set(unsigned long offset, void (*callback)(void*), void *ptr)
{
return _hwtimer_set(offset, callback, ptr, false);
}

int hwtimer_set_absolute(unsigned long offset, void (*callback)(void*), void *ptr) {
int hwtimer_set_absolute(unsigned long offset, void (*callback)(void*), void *ptr)
{
return _hwtimer_set(offset, callback, ptr, true);
}

@@ -157,7 +168,6 @@ int hwtimer_set_absolute(unsigned long offset, void (*callback)(void*), void *pt

int hwtimer_remove(int n)
{
// printf("hwt: remove %i.\n", n);
hwtimer_arch_disable_interrupt();
hwtimer_arch_unset(n);

@@ -165,7 +175,7 @@ int hwtimer_remove(int n)
timer[n].callback = NULL;

lpm_prevent_sleep--;

hwtimer_arch_enable_interrupt();
return 1;
}
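A hedged sketch of arming and cancelling a one-shot timer with the hwtimer functions reformatted above (not part of the commit); the tick count and the callback are illustrative.

    #include "hwtimer.h"

    static void my_callback(void *arg)
    {
        (void) arg;                /* called from the timer multiplexer when the timer fires */
    }

    void hwtimer_example(void)
    {
        /* returns the timer slot taken from the internal lifo, or -1 if none is left */
        int t = hwtimer_set(1000, my_callback, NULL);

        if (t >= 0) {
            hwtimer_remove(t);     /* cancel the timer and give the slot back */
        }
    }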
@@ -21,7 +21,7 @@
* @brief sets "val" to "set", returns old "val", atomically
*/

extern unsigned int atomic_set_return(unsigned int* val, unsigned int set);
extern unsigned int atomic_set_return(unsigned int *val, unsigned int set);

/**
* @}
@@ -1,5 +1,5 @@
#ifndef __CIB_H
#define __CIB_H
#define __CIB_H

typedef struct cib_t {
unsigned int read_count;
@@ -26,20 +26,21 @@ typedef struct clist_node_t {
} clist_node_t;

/* inserts new_node after node */
void clist_add(clist_node_t** node, clist_node_t* new_node);
void clist_add(clist_node_t **node, clist_node_t *new_node);

/* removes node. */
void clist_remove(clist_node_t** list, clist_node_t *node);
void clist_remove(clist_node_t **list, clist_node_t *node);

/* advances the circle list. second list entry will be first, first is last. */
/*void clist_advance(clist_node_t** list);*/

static inline void clist_advance(clist_node_t** list) {
static inline void clist_advance(clist_node_t **list)
{
*list = (*list)->next;
}

void clist_print(clist_node_t* clist);
void clist_print(clist_node_t *clist);

/**
* @}
@@ -25,11 +25,11 @@ extern config_t sysconfig;
/**
* @brief: Write configuration back to flashrom
*
* @return 1 on success, 0 otherwise
* @return 1 on success, 0 otherwise
*/
uint8_t config_save(void);

/**
/**
* @brief: Read configuration from flashrom and stores it to sysconfig
*
* @note: If no configuration is present within flashrom a new configuration will be created
@@ -1,5 +1,5 @@
/**
* Debug-Header.
* Debug-Header.
*
* #define ENABLE_DEBUG, include this and then use DEBUG as printf you can toggle.
*
@@ -10,7 +10,7 @@
#ifndef IO_H
#define IO_H

int fw_puts(char* data, int count);
int fw_puts(char *data, int count);

/** @} */
#endif /* IO_H */
@@ -27,7 +27,7 @@ void board_init(void);
* @param[in] stack_start Start address of the stack
* @param[in] stack_size Stack size
*
* @return stack pointer
* @return stack pointer
*/
char *thread_stack_init(void *task_func, void *stack_start, int stack_size);

@@ -39,7 +39,7 @@ void sched_task_exit(void);
/**
* @brief Prints human readable, ps-like thread information for debugging purposes
*/
void thread_print_stack (void);
void thread_print_stack(void);

/**
* @brief Calculates stack usage if thread was created using CREATE_STACKTEST
@@ -48,7 +48,7 @@ void thread_print_stack (void);
*
* @return The current usage (overwritten addresses) of the thread's stack
*/
int thread_measure_stack_usage(char* stack);
int thread_measure_stack_usage(char *stack);

/** @} */
#endif /* KERNEL_INTERN_H_ */
@@ -1,5 +1,5 @@
#ifndef __LIFO_H
#define __LIFO_H
#define __LIFO_H

int lifo_empty(int *array);
void lifo_init(int *array, int n);
@@ -50,7 +50,7 @@ typedef struct msg {
* @brief Send a message.
*
* This function sends a message to another thread.
* The msg structure has to be allocated (e.g. on the stack)
* The msg structure has to be allocated (e.g. on the stack)
* before calling the function and can be freed afterwards.
* If called from an interrupt, this function will never block.
*
@@ -63,7 +63,7 @@ typedef struct msg {
* @return 0 if receiver is not waiting or has a full message queue and block == false
* @return -1 on error (invalid PID)
*/
int msg_send(msg_t* m, unsigned int target_pid, bool block);
int msg_send(msg_t *m, unsigned int target_pid, bool block);

/**
@@ -77,7 +77,7 @@ int msg_send(msg_t* m, unsigned int target_pid, bool block);
* @return 1 if sending was successfull
* @return 0 if receiver is not waiting and block == false
*/
int msg_send_int(msg_t* m, unsigned int target_pid);
int msg_send_int(msg_t *m, unsigned int target_pid);

/**
@@ -88,7 +88,7 @@ int msg_send_int(msg_t* m, unsigned int target_pid);
*
* @return 1 Function always succeeds or blocks forever.
*/
int msg_receive(msg_t* m);
int msg_receive(msg_t *m);

/**
* @brief Send a message, block until reply received.
@@ -121,7 +121,7 @@ int msg_reply(msg_t *m, msg_t *reply);
* @param array Pointer to preallocated array of msg objects
* @param num Number of msg objects in array. MUST BE POWER OF TWO!
*/
int msg_init_queue(msg_t* array, int num);
int msg_init_queue(msg_t *array, int num);

/** @} */
#endif /* __MSG_H */
@@ -30,7 +30,7 @@ typedef struct mutex_t {
* @param mutex pre-allocated mutex structure.
* @return Always returns 1, always succeeds.
*/
int mutex_init(struct mutex_t* mutex);
int mutex_init(struct mutex_t *mutex);

/**
@@ -41,7 +41,7 @@ int mutex_init(struct mutex_t* mutex);
* @return 1 if mutex was unlocked, now it is locked.
* @return 0 if the mutex was locked.
*/
int mutex_trylock(struct mutex_t* mutex);
int mutex_trylock(struct mutex_t *mutex);

/**
* @brief Tries to get a mutex, blocking.
@@ -51,7 +51,7 @@ int mutex_trylock(struct mutex_t* mutex);
* @return 1 getting the mutex was successful
* @return <1 there was an error.
*/
int mutex_lock(struct mutex_t* mutex);
int mutex_lock(struct mutex_t *mutex);

/**
* @brief Unlocks the mutex.
@@ -60,7 +60,7 @@ int mutex_lock(struct mutex_t* mutex);
*
* @param yield If yield==MUTEX_YIELD, force context-switch after waking up other waiter.
*/
void mutex_unlock(struct mutex_t* mutex, int yield);
void mutex_unlock(struct mutex_t *mutex, int yield);

#define MUTEX_YIELD 1
#define MUTEX_INISR 2
@@ -11,9 +11,9 @@

#include <stddef.h>

void* _malloc(size_t size);
void* _realloc(void *ptr, size_t size);
void _free (void* ptr);
void *_malloc(size_t size);
void *_realloc(void *ptr, size_t size);
void _free(void *ptr);

/** @} */
#endif /* _MALLOC_H */
@@ -15,13 +15,13 @@ typedef struct queue_node_t {
unsigned int priority;
} queue_node_t;

queue_node_t* queue_remove_head(queue_node_t* root);
void queue_add_tail(queue_node_t* root, queue_node_t* new_obj);
void queue_add_head(queue_node_t* root, queue_node_t* new_obj);
queue_node_t *queue_remove_head(queue_node_t* root);
void queue_priority_add(queue_node_t* root, queue_node_t* new_obj);
void queue_priority_add_generic(queue_node_t* root, queue_node_t* new_obj, int(*cmp)(queue_node_t*,queue_node_t*)) ;
void queue_remove(queue_node_t* root, queue_node_t *node);
queue_node_t *queue_remove_head(queue_node_t *root);
void queue_add_tail(queue_node_t *root, queue_node_t *new_obj);
void queue_add_head(queue_node_t *root, queue_node_t *new_obj);
queue_node_t *queue_remove_head(queue_node_t *root);
void queue_priority_add(queue_node_t *root, queue_node_t *new_obj);
void queue_priority_add_generic(queue_node_t *root, queue_node_t *new_obj, int(*cmp)(queue_node_t *, queue_node_t *)) ;
void queue_remove(queue_node_t *root, queue_node_t *node);

/** @} */
#endif // __QUEUE_H
@@ -39,13 +39,13 @@ extern volatile int thread_pid;
//#define SCHEDSTATISTICS
#if SCHEDSTATISTICS

typedef struct schedstat {
unsigned int laststart;
unsigned int schedules;
unsigned int runtime;
}schedstat;
typedef struct {
unsigned int laststart;
unsigned int schedules;
unsigned int runtime;
} schedstat;

extern schedstat pidlist[MAXTHREADS];
extern schedstat pidlist[MAXTHREADS];
#endif

/** @} */
@@ -36,7 +36,7 @@
#define STATUS_TIMER_WAITING (0x0200)

typedef struct tcb_t {
char* sp;
char *sp;
uint16_t status;

uint16_t pid;
@@ -44,14 +44,14 @@ typedef struct tcb_t {

clist_node_t rq_entry;

void* wait_data;
void *wait_data;
queue_node_t msg_waiters;

cib_t msg_queue;
msg_t* msg_array;
msg_t *msg_array;

const char* name;
char* stack_start;
const char *name;
char *stack_start;
int stack_size;
} tcb_t;
@@ -23,10 +23,10 @@

/**
* @brief Creates a new thread.
*
*
* @param stack Lowest address of preallocated stack space
* @param stacksize
* @param flags Options:
* @param flags Options:
* YIELD: force context switch.
* CREATE_SLEEPING: set new thread to sleeping state, thread must be woken up manually.
* CREATE_STACKTEST: initialize stack with values needed for stack overflow testing.
@@ -37,7 +37,7 @@
*
* @return returns <0 on error, pid of newly created task else.
*/
int thread_create(char *stack, int stacksize, char priority, int flags, void (*function) (void), const char* name);
int thread_create(char *stack, int stacksize, char priority, int flags, void (*function) (void), const char *name);

/**
* @brief returns the status of a process.
@@ -76,7 +76,7 @@ int thread_getlastpid(void);
*
* @param stack The stack you want to measure. try active_thread->stack_start.
*/
int thread_measure_stack_usage(char* stack);
int thread_measure_stack_usage(char *stack);

/* @} */
#endif /* __THREAD_H */
@@ -40,15 +40,16 @@ volatile int lpm_prevent_sleep = 0;

extern int main(void);

static void idle_thread(void) {
static void idle_thread(void)
{
while(1) {
if (lpm_prevent_sleep) {
if(lpm_prevent_sleep) {
lpm_set(LPM_IDLE);
}
else {
lpm_set(LPM_IDLE);
// lpm_set(LPM_SLEEP);
// lpm_set(LPM_POWERDOWN);
/* lpm_set(LPM_SLEEP); */
/* lpm_set(LPM_POWERDOWN); */
}
}
}
@@ -69,19 +70,19 @@ void kernel_init(void)
{
dINT();
printf("kernel_init(): This is RIOT!\n");

sched_init();

if (thread_create(idle_stack, sizeof(idle_stack), PRIORITY_IDLE, CREATE_WOUT_YIELD | CREATE_STACKTEST, idle_thread, idle_name) < 0) {
if(thread_create(idle_stack, sizeof(idle_stack), PRIORITY_IDLE, CREATE_WOUT_YIELD | CREATE_STACKTEST, idle_thread, idle_name) < 0) {
printf("kernel_init(): error creating idle task.\n");
}

if (thread_create(main_stack, sizeof(main_stack), PRIORITY_MAIN, CREATE_WOUT_YIELD | CREATE_STACKTEST, MAIN_FUNC, main_name) < 0) {
if(thread_create(main_stack, sizeof(main_stack), PRIORITY_MAIN, CREATE_WOUT_YIELD | CREATE_STACKTEST, MAIN_FUNC, main_name) < 0) {
printf("kernel_init(): error creating main task.\n");
}

printf("kernel_init(): jumping into first task...\n");

cpu_switch_context_exit();
}
core/lifo.c (27 lines changed)
@@ -1,35 +1,42 @@
#include <lifo.h>

int lifo_empty(int *array) {
int lifo_empty(int *array)
{
return array[0] == -1;
}

void lifo_init(int *array, int n) {
for (int i = 0; i <= n; i++) {
void lifo_init(int *array, int n)
{
for(int i = 0; i <= n; i++) {
array[i] = -1;
}
}

void lifo_insert(int *array, int i) {
int index = i+1;
void lifo_insert(int *array, int i)
{
int index = i + 1;
array[index] = array[0];
array[0] = i;
}

int lifo_get(int *array) {
int lifo_get(int *array)
{
int head = array[0];
if (head != -1) {
array[0] = array[head+1];

if(head != -1) {
array[0] = array[head + 1];
}

return head;
}

#ifdef WITH_MAIN
#include <stdio.h>
int main() {
int main()
{
int array[5];

lifo_init(array, 4);

lifo_insert(array, 0);
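A small sketch of the index LIFO touched above (not from the commit). As lifo_init() shows, the array needs n + 1 entries: slot 0 holds the head, the remaining slots back the values 0..n-1.

    #include <lifo.h>

    void lifo_example(void)
    {
        int array[4 + 1];             /* head slot plus room for values 0..3 */

        lifo_init(array, 4);

        lifo_insert(array, 2);
        lifo_insert(array, 0);

        int last = lifo_get(array);   /* returns 0, the most recently inserted value; -1 when empty */
        (void) last;
    }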
core/msg.c (132 lines changed)
@@ -28,41 +28,45 @@
//#define ENABLE_DEBUG
#include "debug.h"

static int queue_msg(tcb_t *target, msg_t *m) {
int n = cib_put(&(target->msg_queue));
static int queue_msg(tcb_t *target, msg_t *m)
{
int n = cib_put(&(target->msg_queue));

if (n != -1) {
target->msg_array[n] = *m;
return 1;
}
if(n != -1) {
target->msg_array[n] = *m;
return 1;
}

return 0;
return 0;
}

int msg_send(msg_t* m, unsigned int target_pid, bool block) {
if (inISR()) {
int msg_send(msg_t *m, unsigned int target_pid, bool block)
{
if(inISR()) {
return msg_send_int(m, target_pid);
}

tcb_t *target = (tcb_t*)sched_threads[target_pid];
tcb_t *target = (tcb_t*) sched_threads[target_pid];

m->sender_pid = thread_pid;
if (m->sender_pid == target_pid) {

if(m->sender_pid == target_pid) {
return -1;
}

if (target == NULL) {
if(target == NULL) {
return -1;
}

dINT();
if (target->status != STATUS_RECEIVE_BLOCKED) {
if (target->msg_array && queue_msg(target, m)) {

if(target->status != STATUS_RECEIVE_BLOCKED) {
if(target->msg_array && queue_msg(target, m)) {
eINT();
return 1;
}

if (! block ) {
if(!block) {
DEBUG("%s: receiver not waiting. block=%u\n", active_thread->name, block);
eINT();
return 0;
@@ -79,19 +83,22 @@ int msg_send(msg_t* m, unsigned int target_pid, bool block) {
active_thread->wait_data = (void*) m;

int newstatus;
if (active_thread->status == STATUS_REPLY_BLOCKED) {

if(active_thread->status == STATUS_REPLY_BLOCKED) {
newstatus = STATUS_REPLY_BLOCKED;
} else {
}
else {
newstatus = STATUS_SEND_BLOCKED;
}

sched_set_status((tcb_t*)active_thread, newstatus);

sched_set_status((tcb_t*) active_thread, newstatus);

DEBUG("%s: back from send block.\n", active_thread->name);
} else {
}
else {
DEBUG("%s: direct msg copy.\n", active_thread->name);
/* copy msg to target */
msg_t* target_message = (msg_t*)target->wait_data;
msg_t *target_message = (msg_t*) target->wait_data;
*target_message = *m;
sched_set_status(target, STATUS_PENDING);
}
@@ -102,28 +109,31 @@ int msg_send(msg_t* m, unsigned int target_pid, bool block) {
return 1;
}

int msg_send_int(msg_t* m, unsigned int target_pid) {
tcb_t *target = (tcb_t*)sched_threads[target_pid];
int msg_send_int(msg_t *m, unsigned int target_pid)
{
tcb_t *target = (tcb_t *) sched_threads[target_pid];

if (target->status == STATUS_RECEIVE_BLOCKED) {
if(target->status == STATUS_RECEIVE_BLOCKED) {
DEBUG("msg_send_int: direct msg copy from %i to %i.\n", thread_getpid(), target_pid);

m->sender_pid = target_pid;

/* copy msg to target */
msg_t* target_message = (msg_t*)target->wait_data;
msg_t *target_message = (msg_t*) target->wait_data;
*target_message = *m;
sched_set_status(target, STATUS_PENDING);

sched_context_switch_request = 1;
return 1;
} else {
}
else {
DEBUG("msg_send_int: receiver not waiting.\n");
return (queue_msg(target, m));
}
}

int msg_send_receive(msg_t *m, msg_t *reply, unsigned int target_pid) {
int msg_send_receive(msg_t *m, msg_t *reply, unsigned int target_pid)
{
dINT();
tcb_t *me = (tcb_t*) sched_threads[thread_pid];
sched_set_status(me, STATUS_REPLY_BLOCKED);
@@ -131,23 +141,25 @@ int msg_send_receive(msg_t *m, msg_t *reply, unsigned int target_pid) {
msg_send(m, target_pid, true);

/* msg_send blocks until reply received */

return 1;
}

int msg_reply(msg_t *m, msg_t *reply) {
int msg_reply(msg_t *m, msg_t *reply)
{
int state = disableIRQ();

tcb_t *target = (tcb_t*)sched_threads[m->sender_pid];
if (target->status != STATUS_REPLY_BLOCKED) {
tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];

if(target->status != STATUS_REPLY_BLOCKED) {
DEBUG("%s: msg_reply(): target \"%s\" not waiting for reply.", active_thread->name, target->name);
restoreIRQ(state);
return -1;
}

DEBUG("%s: msg_reply(): direct msg copy.\n", active_thread->name);
/* copy msg to target */
msg_t* target_message = (msg_t*)target->wait_data;
msg_t *target_message = (msg_t*) target->wait_data;
*target_message = *reply;
sched_set_status(target, STATUS_PENDING);
restoreIRQ(state);
@@ -156,42 +168,49 @@ int msg_reply(msg_t *m, msg_t *reply) {
return 1;
}

int msg_reply_int(msg_t *m, msg_t *reply) {
tcb_t *target = (tcb_t*)sched_threads[m->sender_pid];
if (target->status != STATUS_REPLY_BLOCKED) {
int msg_reply_int(msg_t *m, msg_t *reply)
{
tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];

if(target->status != STATUS_REPLY_BLOCKED) {
DEBUG("%s: msg_reply_int(): target \"%s\" not waiting for reply.", active_thread->name, target->name);
return -1;
}
msg_t* target_message = (msg_t*)target->wait_data;

msg_t *target_message = (msg_t*) target->wait_data;
*target_message = *reply;
sched_set_status(target, STATUS_PENDING);
sched_context_switch_request = 1;
return 1;
}

int msg_receive(msg_t* m) {
int msg_receive(msg_t *m)
{
dINT();
DEBUG("%s: msg_receive.\n", active_thread->name);

tcb_t *me = (tcb_t*) sched_threads[thread_pid];

int n = -1;
if (me->msg_array) {

if(me->msg_array) {
n = cib_get(&(me->msg_queue));
}

if (n >= 0) {
if(n >= 0) {
DEBUG("%s: msg_receive(): We've got a queued message.\n", active_thread->name);
*m = me->msg_array[n];
} else {
me->wait_data = (void*) m;
}
else {
me->wait_data = (void *) m;
}

queue_node_t *node = queue_remove_head(&(me->msg_waiters));

if (node == NULL) {
if(node == NULL) {
DEBUG("%s: msg_receive(): No thread in waiting list.\n", active_thread->name);
if (n < 0) {

if(n < 0) {
DEBUG("%s: msg_receive(): No msg in queue. Going blocked.\n", active_thread->name);
sched_set_status(me, STATUS_RECEIVE_BLOCKED);

@@ -200,12 +219,14 @@ int msg_receive(msg_t* m) {

/* sender copied message */
}
return 1;
} else {
DEBUG("%s: msg_receive(): Wakeing up waiting thread.\n", active_thread->name);
tcb_t *sender = (tcb_t*)node->data;

if (n >= 0) {
return 1;
}
else {
DEBUG("%s: msg_receive(): Wakeing up waiting thread.\n", active_thread->name);
tcb_t *sender = (tcb_t*) node->data;

if(n >= 0) {
/* we've already got a messgage from the queue. as there is a
* waiter, take it's message into the just freed queue space.
*/
@@ -213,7 +234,7 @@ int msg_receive(msg_t* m) {
}

/* copy msg */
msg_t* sender_msg = (msg_t*)sender->wait_data;
msg_t *sender_msg = (msg_t*) sender->wait_data;
*m = *sender_msg;

/* remove sender from queue */
@@ -225,14 +246,15 @@ int msg_receive(msg_t* m) {
}
}

int msg_init_queue(msg_t* array, int num) {
int msg_init_queue(msg_t *array, int num)
{
/* make sure brainfuck condition is met */
if (num && (num & (num - 1)) == 0) {
tcb_t *me = (tcb_t*)active_thread;
if(num && (num & (num - 1)) == 0) {
tcb_t *me = (tcb_t*) active_thread;
me->msg_array = array;
cib_init(&(me->msg_queue), num);
return 0;
}

}

return -1;
}
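A hedged sketch of blocking IPC with the msg functions reformatted above (not part of the commit). The target pid is illustrative, and the message payload fields are left untouched here because this diff only shows sender_pid.

    #include <stdbool.h>
    #include <msg.h>

    void sender(unsigned int target_pid)
    {
        msg_t m;
        msg_send(&m, target_pid, true);    /* block until the receiver takes the message */
    }

    void receiver(void)
    {
        msg_t m;
        msg_receive(&m);                   /* block until a message arrives */
        /* m.sender_pid now identifies the sending thread */
    }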
core/mutex.c (43 lines changed)
@@ -26,7 +26,8 @@
//#define ENABLE_DEBUG
#include <debug.h>

int mutex_init(struct mutex_t* mutex) {
int mutex_init(struct mutex_t *mutex)
{
mutex->val = 0;

mutex->queue.priority = 0;
@@ -36,37 +37,43 @@ int mutex_init(struct mutex_t* mutex) {
return 1;
}

int mutex_trylock(struct mutex_t* mutex) {
int mutex_trylock(struct mutex_t *mutex)
{
DEBUG("%s: trylocking to get mutex. val: %u\n", active_thread->name, mutex->val);
return (atomic_set_return(&mutex->val, thread_pid ) == 0);
return (atomic_set_return(&mutex->val, thread_pid) == 0);
}

int prio(void) {
int prio(void)
{
return active_thread->priority;
}

int mutex_lock(struct mutex_t* mutex) {
int mutex_lock(struct mutex_t *mutex)
{
DEBUG("%s: trying to get mutex. val: %u\n", active_thread->name, mutex->val);

if (atomic_set_return(&mutex->val,1) != 0) {
// mutex was locked.
if(atomic_set_return(&mutex->val, 1) != 0) {
/* mutex was locked. */
mutex_wait(mutex);
}

return 1;
}

void mutex_wait(struct mutex_t *mutex) {
void mutex_wait(struct mutex_t *mutex)
{
int irqstate = disableIRQ();
DEBUG("%s: Mutex in use. %u\n", active_thread->name, mutex->val);
if (mutex->val == 0) {
// somebody released the mutex. return.

if(mutex->val == 0) {
/* somebody released the mutex. return. */
mutex->val = thread_pid;
DEBUG("%s: mutex_wait early out. %u\n", active_thread->name, mutex->val);
restoreIRQ(irqstate);
return;
}

sched_set_status((tcb_t*)active_thread, STATUS_MUTEX_BLOCKED);
sched_set_status((tcb_t*) active_thread, STATUS_MUTEX_BLOCKED);

queue_node_t n;
n.priority = (unsigned int) active_thread->priority;
@@ -84,19 +91,21 @@ void mutex_wait(struct mutex_t *mutex) {
/* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
}

void mutex_unlock(struct mutex_t* mutex, int yield) {
void mutex_unlock(struct mutex_t *mutex, int yield)
{
DEBUG("%s: unlocking mutex. val: %u pid: %u\n", active_thread->name, mutex->val, thread_pid);
int irqstate = disableIRQ();

if (mutex->val != 0) {
if (mutex->queue.next) {

if(mutex->val != 0) {
if(mutex->queue.next) {
queue_node_t *next = queue_remove_head(&(mutex->queue));
tcb_t* process = (tcb_t*)next->data;
tcb_t *process = (tcb_t*) next->data;
DEBUG("%s: waking up waiter %s.\n", process->name);
sched_set_status(process, STATUS_PENDING);

sched_switch(active_thread->priority, process->priority, inISR());
} else {
}
else {
mutex->val = 0;
}
}
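A minimal sketch of the mutex API cleaned up above (not from the commit), assuming the usual <mutex.h> header path; the critical section placeholder is illustrative.

    #include <mutex.h>

    static struct mutex_t lock;

    void mutex_example(void)
    {
        mutex_init(&lock);

        mutex_lock(&lock);                 /* blocks while another thread holds the lock */
        /* ... critical section ... */
        mutex_unlock(&lock, MUTEX_YIELD);  /* wake one waiter and switch to it if needed */
    }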
@@ -25,26 +25,30 @@

extern void *sbrk(int incr);

void *_malloc(size_t size) {
void* ptr = sbrk(size);

void *_malloc(size_t size)
{
void *ptr = sbrk(size);

DEBUG("_malloc(): allocating block of size %u at 0x%X.\n", (unsigned int) size, (unsigned int)ptr);

if (ptr != (void*)-1) {

if(ptr != (void*) - 1) {
return ptr;
} else {
}
else {
return NULL;
}
}

void *_realloc(void *ptr, size_t size) {
void* newptr = _malloc(size);
void *_realloc(void *ptr, size_t size)
{
void *newptr = _malloc(size);
memcpy(newptr, ptr, size);
free(ptr);
return newptr;
}

void _free(void* ptr) {
void _free(void *ptr)
{
DEBUG("_free(): block at 0x%X lost.\n", (unsigned int)ptr);
}
core/queue.c (55 lines changed)
@@ -21,47 +21,57 @@
//#define ENABLE_DEBUG 0
#include "debug.h"

void queue_remove(queue_node_t* root, queue_node_t *node) {
while (root->next != NULL) {
if (root->next == node) {
void queue_remove(queue_node_t *root, queue_node_t *node)
{
while(root->next != NULL) {
if(root->next == node) {
root->next = node->next;
node->next = NULL;
return;
}

root = root->next;
}
}

queue_node_t *queue_remove_head(queue_node_t* root) {
queue_node_t *queue_remove_head(queue_node_t *root)
{
queue_node_t *head = root->next;
if (head != NULL) {
root->next = head->next;

if(head != NULL) {
root->next = head->next;
}

return head;
}

void queue_add_tail(queue_node_t* node, queue_node_t* new_obj) {
while (node->next != NULL) {
void queue_add_tail(queue_node_t *node, queue_node_t *new_obj)
{
while(node->next != NULL) {
node = node->next;
}

node->next = new_obj;
new_obj->next = NULL;
}

void queue_add_head(queue_node_t* root, queue_node_t* new_obj) {
void queue_add_head(queue_node_t *root, queue_node_t *new_obj)
{
new_obj->next = root->next;
root->next = new_obj;
}

void queue_priority_add(queue_node_t* root, queue_node_t* new_obj) {
queue_node_t* node = root;
void queue_priority_add(queue_node_t *root, queue_node_t *new_obj)
{
queue_node_t *node = root;

while (node->next != NULL) {
if (node->next->priority > new_obj->priority) {
while(node->next != NULL) {
if(node->next->priority > new_obj->priority) {
new_obj->next = node->next;
node->next = new_obj;
return;
}

node = node->next;
}

@@ -69,15 +79,17 @@ void queue_priority_add(queue_node_t* root, queue_node_t* new_obj) {
new_obj->next = NULL;
}

void queue_priority_add_generic(queue_node_t* root, queue_node_t* new_obj, int (*cmp)(queue_node_t*,queue_node_t*)) {
queue_node_t* node = root;
void queue_priority_add_generic(queue_node_t *root, queue_node_t *new_obj, int (*cmp)(queue_node_t *, queue_node_t *))
{
queue_node_t *node = root;

while (node->next != NULL) {
if (cmp(node->next, new_obj) < 0) {
while(node->next != NULL) {
if(cmp(node->next, new_obj) < 0) {
new_obj->next = node->next;
node->next = new_obj;
return;
}

node = node->next;
}

@@ -86,15 +98,18 @@ void queue_priority_add_generic(queue_node_t* root, queue_node_t* new_obj, int (
}

void queue_print(queue_node_t* node) {
void queue_print(queue_node_t *node)
{
printf("queue:\n");
while (node->next != NULL) {

while(node->next != NULL) {
node = node->next;
printf("Data: %u Priority: %u\n", node->data, node->priority);
}
}

void queue_print_node(queue_node_t *node) {
void queue_print_node(queue_node_t *node)
{
printf("Data: %u Priority: %u Next: %u\n", (unsigned int)node->data, node->priority, (unsigned int)node->next);
}
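A short sketch of the priority queue helpers reformatted above (not part of the commit); root acts as a dummy head node, and a lower priority value sorts earlier, as queue_priority_add() shows.

    #include <queue.h>

    void queue_example(void)
    {
        queue_node_t root, a, b;

        root.next = NULL;
        a.priority = 2;
        b.priority = 1;

        queue_priority_add(&root, &a);
        queue_priority_add(&root, &b);                    /* sorted in before a */

        queue_node_t *first = queue_remove_head(&root);   /* yields b */
        (void) first;
    }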
core/sched.c (101 lines changed)
@@ -41,14 +41,16 @@ void sched_register_cb(void (*callback)(uint32_t, uint32_t));

#if SCHEDSTATISTICS
static void (*sched_cb)(uint32_t timestamp, uint32_t value) = NULL;
schedstat pidlist[MAXTHREADS];
static void (*sched_cb) (uint32_t timestamp, uint32_t value) = NULL;
schedstat pidlist[MAXTHREADS];
#endif

void sched_init() {
void sched_init()
{
printf("Scheduler...");
int i;
for (i=0; i<MAXTHREADS; i++) {

for(i = 0; i < MAXTHREADS; i++) {
sched_threads[i] = NULL;
#if SCHEDSTATISTICS
pidlist[i].laststart = 0;
@@ -59,27 +61,31 @@ void sched_init() {

active_thread = NULL;
thread_pid = -1;
for (i = 0; i < SCHED_PRIO_LEVELS; i++) {

for(i = 0; i < SCHED_PRIO_LEVELS; i++) {
runqueues[i] = NULL;
}

printf("[OK]\n");
}

void sched_run() {
void sched_run()
{
sched_context_switch_request = 0;

tcb_t *my_active_thread = (tcb_t*)active_thread;
tcb_t *my_active_thread = (tcb_t *)active_thread;

if (my_active_thread) {
if( my_active_thread->status == STATUS_RUNNING) {
my_active_thread->status = STATUS_PENDING;
if(my_active_thread) {
if(my_active_thread->status == STATUS_RUNNING) {
my_active_thread->status = STATUS_PENDING;
}

#ifdef SCHED_TEST_STACK
if (*((unsigned int*)my_active_thread->stack_start) != (unsigned int) my_active_thread->stack_start) {

if(*((unsigned int *)my_active_thread->stack_start) != (unsigned int) my_active_thread->stack_start) {
printf("scheduler(): stack overflow detected, task=%s pid=%u\n", my_active_thread->name, my_active_thread->pid);
}

#endif

}
@@ -88,101 +94,118 @@ void sched_run() {
/* TODO: setup dependency from SCHEDSTATISTICS to MODULE_HWTIMER */
extern unsigned long hwtimer_now(void);
unsigned int time = hwtimer_now();
if (my_active_thread && (pidlist[my_active_thread->pid].laststart)) {

if(my_active_thread && (pidlist[my_active_thread->pid].laststart)) {
pidlist[my_active_thread->pid].runtime += time - pidlist[my_active_thread->pid].laststart;
}

#endif

DEBUG("\nscheduler: previous task: %s\n", ( my_active_thread == NULL) ? "none" : my_active_thread->name );
DEBUG("\nscheduler: previous task: %s\n", (my_active_thread == NULL) ? "none" : my_active_thread->name);

if (num_tasks == 0) {
if(num_tasks == 0) {
DEBUG("scheduler: no tasks left.\n");

while(! num_tasks) {
/* loop until a new task arrives */
;
/* loop until a new task arrives */
;
}

DEBUG("scheduler: new task created.\n");
}

my_active_thread = NULL;

while(! my_active_thread) {
int nextrq = number_of_lowest_bit(runqueue_bitcache);
clist_node_t next = *(runqueues[nextrq]);
DEBUG("scheduler: first in queue: %s\n", ((tcb_t*)next.data)->name);
DEBUG("scheduler: first in queue: %s\n", ((tcb_t *)next.data)->name);
clist_advance(&(runqueues[nextrq]));
my_active_thread = (tcb_t*)next.data;
my_active_thread = (tcb_t *)next.data;
thread_pid = (volatile int) my_active_thread->pid;
#if SCHEDSTATISTICS
pidlist[my_active_thread->pid].laststart = time;
pidlist[my_active_thread->pid].schedules ++;
#endif
#ifdef MODULE_NSS
if (active_thread && active_thread->pid != last_pid) {

if(active_thread && active_thread->pid != last_pid) {
last_pid = active_thread->pid;
}

#endif
}

DEBUG("scheduler: next task: %s\n", my_active_thread->name);

if (my_active_thread != active_thread) {
if (active_thread != NULL) { //TODO: necessary?
if (active_thread->status == STATUS_RUNNING) {
if(my_active_thread != active_thread) {
if(active_thread != NULL) { /* TODO: necessary? */
if(active_thread->status == STATUS_RUNNING) {
active_thread->status = STATUS_PENDING ;
}
}
sched_set_status((tcb_t*)my_active_thread, STATUS_RUNNING);

sched_set_status((tcb_t *)my_active_thread, STATUS_RUNNING);
}

active_thread = (volatile tcb_t*) my_active_thread;
active_thread = (volatile tcb_t *) my_active_thread;

DEBUG("scheduler: done.\n");
}

#if SCHEDSTATISTICS
void sched_register_cb(void (*callback)(uint32_t, uint32_t)) {
sched_cb = callback;
void sched_register_cb(void (*callback)(uint32_t, uint32_t))
{
sched_cb = callback;
}
#endif

void sched_set_status(tcb_t *process, unsigned int status) {
if (status & STATUS_ON_RUNQUEUE) {
if (! (process->status & STATUS_ON_RUNQUEUE)) {
void sched_set_status(tcb_t *process, unsigned int status)
{
if(status & STATUS_ON_RUNQUEUE) {
if(!(process->status & STATUS_ON_RUNQUEUE)) {
DEBUG("adding process %s to runqueue %u.\n", process->name, process->priority);
clist_add(&runqueues[process->priority], &(process->rq_entry));
runqueue_bitcache |= 1 << process->priority;
}
} else {
if (process->status & STATUS_ON_RUNQUEUE) {
}
else {
if(process->status & STATUS_ON_RUNQUEUE) {
DEBUG("removing process %s from runqueue %u.\n", process->name, process->priority);
clist_remove(&runqueues[process->priority], &(process->rq_entry));
if (! runqueues[process->priority] ) {

if(! runqueues[process->priority]) {
runqueue_bitcache &= ~(1 << process->priority);
}
}
}

process->status = status;
}

void sched_switch(uint16_t current_prio, uint16_t other_prio, int in_isr) {
void sched_switch(uint16_t current_prio, uint16_t other_prio, int in_isr)
{
DEBUG("%s: %i %i %i\n", active_thread->name, (int)current_prio, (int)other_prio, in_isr);
if (current_prio <= other_prio) {
if (in_isr) {

if(current_prio <= other_prio) {
if(in_isr) {
sched_context_switch_request = 1;
} else {
}
else {
thread_yield();
}
}
}

void sched_task_exit(void) {
void sched_task_exit(void)
{
DEBUG("sched_task_exit(): ending task %s...\n", active_thread->name);

dINT();
sched_threads[active_thread->pid] = NULL;
num_tasks--;

sched_set_status((tcb_t*)active_thread, STATUS_STOPPED);

sched_set_status((tcb_t *)active_thread, STATUS_STOPPED);

active_thread = NULL;
cpu_switch_context_exit();
core/thread.c (120 lines changed)
@@ -27,66 +27,89 @@
#include "hwtimer.h"
#include "sched.h"

inline int thread_getpid() {
inline int thread_getpid()
{
return active_thread->pid;
}

int thread_getlastpid() {
int thread_getlastpid()
{
extern int last_pid;
return last_pid;
}

unsigned int thread_getstatus(int pid) {
if (sched_threads[pid]==NULL)
unsigned int thread_getstatus(int pid)
{
if(sched_threads[pid] == NULL) {
return STATUS_NOT_FOUND;
}

return sched_threads[pid]->status;
}

void thread_sleep() {
if ( inISR()) return;
void thread_sleep()
{
if(inISR()) {
return;
}

dINT();
sched_set_status((tcb_t*)active_thread, STATUS_SLEEPING);
sched_set_status((tcb_t *)active_thread, STATUS_SLEEPING);
eINT();
thread_yield();
}

int thread_wakeup(int pid) {
int thread_wakeup(int pid)
{
DEBUG("thread_wakeup: Trying to wakeup PID %i...\n", pid);
int isr = inISR();
if (! isr) {

if(! isr) {
DEBUG("thread_wakeup: Not in interrupt.\n");
dINT();
}

int result = sched_threads[pid]->status;
if (result == STATUS_SLEEPING) {

if(result == STATUS_SLEEPING) {
DEBUG("thread_wakeup: Thread is sleeping.\n");
sched_set_status((tcb_t*)sched_threads[pid], STATUS_RUNNING);
if (!isr) {
sched_set_status((tcb_t *)sched_threads[pid], STATUS_RUNNING);

if(!isr) {
eINT();
thread_yield();
} else {
}
else {
sched_context_switch_request = 1;
}

return 1;
} else {
}
else {
DEBUG("thread_wakeup: Thread is not sleeping!\n");
if (!isr) eINT();

if(!isr) {
eINT();
}

return STATUS_NOT_FOUND;
}
}

int thread_measure_stack_usage(char* stack) {
unsigned int* stackp = (unsigned int*)stack;
int thread_measure_stack_usage(char *stack)
{
unsigned int *stackp = (unsigned int *)stack;

/* assumption that the comparison fails before or after end of stack */
while( *stackp == (unsigned int)stackp )
while(*stackp == (unsigned int)stackp) {
stackp++;
}

int space = (unsigned int)stackp - (unsigned int)stack;
return space;
}

int thread_create(char *stack, int stacksize, char priority, int flags, void (*function) (void), const char* name)
int thread_create(char *stack, int stacksize, char priority, int flags, void (*function)(void), const char *name)
{
/* allocate our thread control block at the top of our stackspace */
int total_stacksize = stacksize;
@@ -94,57 +117,65 @@ int thread_create(char *stack, int stacksize, char priority, int flags, void (*f

/* align tcb address on 32bit boundary */
unsigned int tcb_address = (unsigned int) stack + stacksize;
if ( tcb_address & 1 ) {

if(tcb_address & 1) {
tcb_address--;
stacksize--;
}
if ( tcb_address & 2 ) {
tcb_address-=2;
stacksize-=2;
}
tcb_t *cb = (tcb_t*) tcb_address;

if (priority >= SCHED_PRIO_LEVELS) {
if(tcb_address & 2) {
tcb_address -= 2;
stacksize -= 2;
}

tcb_t *cb = (tcb_t *) tcb_address;

if(priority >= SCHED_PRIO_LEVELS) {
return -EINVAL;
}

if (flags & CREATE_STACKTEST) {
if(flags & CREATE_STACKTEST) {
/* assign each int of the stack the value of it's address */
unsigned int *stackmax = (unsigned int*) ((char*)stack + stacksize);
unsigned int* stackp = (unsigned int*)stack;
unsigned int *stackmax = (unsigned int *)((char *)stack + stacksize);
unsigned int *stackp = (unsigned int *)stack;

while(stackp < stackmax) {
*stackp = (unsigned int)stackp;
stackp++;
}
} else {
}
else {
/* create stack guard */
*stack = (unsigned int)stack;
}

if (! inISR()) {
if(! inISR()) {
dINT();
}

int pid = 0;
while (pid < MAXTHREADS) {
if (sched_threads[pid] == NULL) {

while(pid < MAXTHREADS) {
if(sched_threads[pid] == NULL) {
sched_threads[pid] = cb;
cb->pid = pid;
break;
}

pid++;
}

if (pid == MAXTHREADS) {
if(pid == MAXTHREADS) {
DEBUG("thread_create(): too many threads!\n");

if (! inISR()) {
if(! inISR()) {
eINT();
}

return -EOVERFLOW;
}

cb->sp = thread_stack_init(function,stack,stacksize);
cb->sp = thread_stack_init(function, stack, stacksize);
cb->stack_start = stack;
cb->stack_size = total_stacksize;

@@ -163,28 +194,31 @@ int thread_create(char *stack, int stacksize, char priority, int flags, void (*f
cb->msg_waiters.priority = 0;
cb->msg_waiters.next = NULL;

cib_init(&(cb->msg_queue),0);
cib_init(&(cb->msg_queue), 0);
cb->msg_array = NULL;

num_tasks++;

DEBUG("Created thread %s. PID: %u. Priority: %u.\n", name, cb->pid, priority);

if (flags & CREATE_SLEEPING) {
if(flags & CREATE_SLEEPING) {
sched_set_status(cb, STATUS_SLEEPING);
} else {
}
else {
sched_set_status(cb, STATUS_PENDING);
if (!(flags & CREATE_WOUT_YIELD)) {
if (! inISR()) {

if(!(flags & CREATE_WOUT_YIELD)) {
if(! inISR()) {
eINT();
thread_yield();
} else {
}
else {
sched_context_switch_request = 1;
}
}
}

if (!inISR() && active_thread!=NULL) {
if(!inISR() && active_thread != NULL) {
eINT();
}
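A hedged sketch of creating a thread with the signature shown above (not from the commit). The stack size is illustrative, and PRIORITY_MAIN is assumed to be visible via the kernel headers, as in kernel_init() earlier in this diff.

    #include <thread.h>

    static char my_stack[512];

    static void my_thread(void)
    {
        /* thread body */
    }

    void thread_example(void)
    {
        int pid = thread_create(my_stack, sizeof(my_stack), PRIORITY_MAIN,
                                CREATE_STACKTEST, my_thread, "example");
        (void) pid;    /* negative on error, otherwise the new thread's pid */
    }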