core: uncrustify
This commit is contained in: parent f99bc894de, commit 8efe5960aa
@@ -303,7 +303,8 @@ void __atomic_store_c(size_t size, void *dest, const void *src, int memorder)
 * @param[in]  ret       put the old value from @p ptr in @p ret
 * @param[in]  memorder  memory ordering, ignored in this implementation
 */
void __atomic_exchange_c(size_t size, void *ptr, void *val, void *ret, int memorder)
void __atomic_exchange_c(size_t size, void *ptr, void *val, void *ret,
                         int memorder)
{
    (void)memorder;
    unsigned int mask = irq_disable();
@@ -345,7 +346,8 @@ void __atomic_exchange_c(size_t size, void *ptr, void *val, void *ret, int memor
 * @return false otherwise
 */
bool __atomic_compare_exchange_c(size_t len, void *ptr, void *expected,
                                 void *desired, bool weak, int success_memorder, int failure_memorder)
                                 void *desired, bool weak, int success_memorder,
                                 int failure_memorder)
{
    (void)weak;
    (void)success_memorder;
@@ -366,7 +368,8 @@ bool __atomic_compare_exchange_c(size_t len, void *ptr, void *expected,
#if !defined(__llvm__) && !defined(__clang__)
/* Memory barrier helper function, for platforms without barrier instructions */
void __sync_synchronize(void) __attribute__((__weak__));
void __sync_synchronize(void) {
void __sync_synchronize(void)
{
    /* ARMv4, ARMv5 do not have any hardware support for memory barriers,
     * This is a software only barrier and a no-op, and will likely break on SMP
     * systems, but we don't support any multi-CPU ARMv5 or ARMv4 boards in RIOT
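These fallbacks emulate C11 atomics on single-core MCUs by turning each operation into a short interrupt-disabled critical section. A minimal sketch of that pattern, assuming RIOT's irq.h (the real functions are compiler-invoked builtins; this helper name is illustrative only):

#include <string.h>
#include "irq.h"

/* Sketch only: exchange *ptr with *val of arbitrary size, returning the old
 * value in *ret, by masking interrupts around plain memcpy() calls. */
static void atomic_exchange_sketch(size_t size, void *ptr, void *val, void *ret)
{
    unsigned int mask = irq_disable();   /* enter critical section */
    memcpy(ret, ptr, size);              /* hand back the old value */
    memcpy(ptr, val, size);              /* store the new value */
    irq_restore(mask);                   /* leave critical section */
}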
@@ -63,6 +63,7 @@ unsigned bitarithm_bits_set(unsigned v)
uint8_t bitarithm_bits_set_u32(uint32_t v)
{
    uint8_t c;

    for (c = 0; v; c++) {
        v &= v - 1; /* clear the least significant bit set */
    }
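The loop is Kernighan's population count: v &= v - 1 clears exactly one set bit per iteration, so the loop runs once per set bit rather than once per bit position. A small usage sketch, assuming RIOT's bitarithm.h header:

#include <stdint.h>
#include <stdio.h>
#include "bitarithm.h"

int main(void)
{
    /* 0xF0F0F0F0 has 16 set bits, so the loop above iterates 16 times */
    printf("%u\n", (unsigned)bitarithm_bits_set_u32(0xF0F0F0F0UL));
    return 0;
}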
@@ -58,7 +58,8 @@ static void _cond_signal(cond_t *cond, bool broadcast)
    uint16_t min_prio = THREAD_PRIORITY_MIN + 1;

    while ((next = list_remove_head(&cond->queue)) != NULL) {
        thread_t *process = container_of((clist_node_t *)next, thread_t, rq_entry);
        thread_t *process = container_of((clist_node_t *)next, thread_t,
                                         rq_entry);
        sched_set_status(process, STATUS_PENDING);
        uint16_t process_priority = process->priority;
        if (process_priority < min_prio) {
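container_of() here recovers the enclosing thread_t from the rq_entry list node embedded in it. A self-contained sketch of that pattern with made-up types (my_container_of, struct item, struct node are illustrative, not RIOT APIs):

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };
struct item { int value; struct node link; };

/* Same idea as container_of(): step back from a member to its parent struct */
#define my_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct item it = { .value = 42, .link = { NULL } };
    struct node *n = &it.link;                  /* only the embedded node is known */
    struct item *parent = my_container_of(n, struct item, link);
    printf("%d\n", parent->value);              /* prints 42 */
    return 0;
}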
@@ -101,9 +101,11 @@ NORETURN void _assert_failure(const char *file, unsigned line);
 *
 * @see http://pubs.opengroup.org/onlinepubs/9699919799/functions/assert.html
 */
#define assert(cond) ((cond) ? (void)0 : _assert_failure(RIOT_FILE_RELATIVE, __LINE__))
#define assert(cond) ((cond) ? (void)0 : _assert_failure(RIOT_FILE_RELATIVE, \
                                                         __LINE__))
#else
#define assert(cond) ((cond) ? (void)0 : core_panic(PANIC_ASSERT_FAIL, assert_crash_message))
#define assert(cond) ((cond) ? (void)0 : core_panic(PANIC_ASSERT_FAIL, \
                                                    assert_crash_message))
#endif

#if !defined __cplusplus
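Either variant keeps assert() a single expression, so it can sit anywhere an expression is allowed. A minimal usage sketch, assuming RIOT's assert.h (the helper below is illustrative):

#include "assert.h"

static unsigned average(const unsigned *values, unsigned count)
{
    assert(count != 0);   /* fails via _assert_failure() or core_panic() above */

    unsigned sum = 0;
    for (unsigned i = 0; i < count; i++) {
        sum += values[i];
    }
    return sum / count;
}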
@@ -147,7 +147,8 @@ static inline unsigned bitarithm_lsb(unsigned v)
{
    /* Source: http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup */
    extern const uint8_t MultiplyDeBruijnBitPosition[32];
    return MultiplyDeBruijnBitPosition[((uint32_t)((v & -v) * 0x077CB531U)) >> 27];
    return MultiplyDeBruijnBitPosition[((uint32_t)((v & -v) * 0x077CB531U)) >>
                                       27];
}
#else
{
@@ -157,7 +158,7 @@ static inline unsigned bitarithm_lsb(unsigned v)
    while ((v & 0x01) == 0) {
        v >>= 1;
        r++;
    };
    }

    return r;
}
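Both variants return the index of the least-significant set bit: v & -v isolates that bit, the De Bruijn multiply/shift maps it to a table index, and the fallback simply counts right shifts. A small usage sketch, assuming RIOT's bitarithm.h:

#include <stdio.h>
#include "bitarithm.h"

int main(void)
{
    /* 0x50 is 0101 0000 in binary; its lowest set bit is bit 4 */
    printf("%u\n", bitarithm_lsb(0x50));   /* prints 4 */
    return 0;
}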
@@ -333,36 +333,42 @@ static inline uint64_t byteorder_swapll(uint64_t v)
static inline be_uint16_t byteorder_ltobs(le_uint16_t v)
{
    be_uint16_t result = { byteorder_swaps(v.u16) };

    return result;
}

static inline be_uint32_t byteorder_ltobl(le_uint32_t v)
{
    be_uint32_t result = { byteorder_swapl(v.u32) };

    return result;
}

static inline be_uint64_t byteorder_ltobll(le_uint64_t v)
{
    be_uint64_t result = { byteorder_swapll(v.u64) };

    return result;
}

static inline le_uint16_t byteorder_btols(be_uint16_t v)
{
    le_uint16_t result = { byteorder_swaps(v.u16) };

    return result;
}

static inline le_uint32_t byteorder_btoll(be_uint32_t v)
{
    le_uint32_t result = { byteorder_swapl(v.u32) };

    return result;
}

static inline le_uint64_t byteorder_btolll(be_uint64_t v)
{
    le_uint64_t result = { byteorder_swapll(v.u64) };

    return result;
}

@@ -380,18 +386,21 @@ static inline le_uint64_t byteorder_btolll(be_uint64_t v)
static inline network_uint16_t byteorder_htons(uint16_t v)
{
    network_uint16_t result = { _byteorder_swap(v, s) };

    return result;
}

static inline network_uint32_t byteorder_htonl(uint32_t v)
{
    network_uint32_t result = { _byteorder_swap(v, l) };

    return result;
}

static inline network_uint64_t byteorder_htonll(uint64_t v)
{
    network_uint64_t result = { _byteorder_swap(v, ll) };

    return result;
}

@@ -428,18 +437,21 @@ static inline uint64_t htonll(uint64_t v)
static inline uint16_t ntohs(uint16_t v)
{
    network_uint16_t input = { v };

    return byteorder_ntohs(input);
}

static inline uint32_t ntohl(uint32_t v)
{
    network_uint32_t input = { v };

    return byteorder_ntohl(input);
}

static inline uint64_t ntohll(uint64_t v)
{
    network_uint64_t input = { v };

    return byteorder_ntohll(input);
}
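The typed wrappers (be_*, le_*, network_*) let the compiler catch mixed-endianness mistakes that the classic integer ntohs()/htons() interface cannot. A minimal round-trip sketch, assuming RIOT's byteorder.h:

#include <stdio.h>
#include "byteorder.h"

int main(void)
{
    network_uint16_t port = byteorder_htons(8080);  /* host -> network order */
    uint16_t back = byteorder_ntohs(port);          /* network -> host order */
    printf("%u\n", (unsigned)back);                 /* prints 8080 again */
    return 0;
}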
@@ -252,9 +252,11 @@ static inline clist_node_t *clist_rpop(clist_node_t *list)
 * @returns     predecessor of node if found
 * @returns     NULL if node is not a list member
 */
static inline clist_node_t *clist_find_before(const clist_node_t *list, const clist_node_t *node)
static inline clist_node_t *clist_find_before(const clist_node_t *list,
                                              const clist_node_t *node)
{
    clist_node_t *pos = list->next;

    if (!pos) {
        return NULL;
    }
@@ -280,9 +282,11 @@ static inline clist_node_t *clist_find_before(const clist_node_t *list, const cl
 * @returns     node if found
 * @returns     NULL if node is not a list member
 */
static inline clist_node_t *clist_find(const clist_node_t *list, const clist_node_t *node)
static inline clist_node_t *clist_find(const clist_node_t *list,
                                       const clist_node_t *node)
{
    clist_node_t *tmp = clist_find_before(list, node);

    if (tmp) {
        return tmp->next;
    }
@@ -339,9 +343,12 @@ static inline clist_node_t *clist_remove(clist_node_t *list, clist_node_t *node)
 * @returns     NULL on empty list or full traversal
 * @returns     node that caused @p func(node, arg) to exit non-zero
 */
static inline clist_node_t *clist_foreach(clist_node_t *list, int(*func)(clist_node_t *, void *), void *arg)
static inline clist_node_t *clist_foreach(clist_node_t *list, int (*func)(
                                              clist_node_t *,
                                              void *), void *arg)
{
    clist_node_t *node = list->next;

    if (node) {
        do {
            node = node->next;
@@ -432,6 +439,7 @@ static inline size_t clist_count(clist_node_t *list)
{
    clist_node_t *node = list->next;
    size_t cnt = 0;

    if (node) {
        do {
            node = node->next;
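clist_foreach() walks the circular list and stops as soon as the callback returns non-zero, handing back the node that stopped it (or NULL after a full traversal). A minimal callback sketch, assuming RIOT's clist.h (callback and helper names are illustrative):

#include <stdio.h>
#include "clist.h"

/* Stop the traversal after visiting three nodes; arg carries the counter */
static int visit_three(clist_node_t *node, void *arg)
{
    (void)node;
    int *count = arg;
    return (++(*count) == 3);   /* non-zero ends clist_foreach() early */
}

static void walk(clist_node_t *list)
{
    int count = 0;
    clist_node_t *stopped_at = clist_foreach(list, visit_three, &count);
    printf("visited %d node(s), stopped at %p\n", count, (void *)stopped_at);
}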
@@ -47,7 +47,8 @@ extern "C" {
#define DEBUG_PRINT(...) \
    do { \
        if ((sched_active_thread == NULL) || \
            (sched_active_thread->stack_size >= THREAD_EXTRA_STACKSIZE_PRINTF)) { \
            (sched_active_thread->stack_size >= \
             THREAD_EXTRA_STACKSIZE_PRINTF)) { \
            printf(__VA_ARGS__); \
        } \
        else { \
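DEBUG_PRINT() only calls printf() directly when the running thread's stack is at least THREAD_EXTRA_STACKSIZE_PRINTF bytes; otherwise it falls through to the else branch truncated in this hunk. Typical per-file usage, following the ENABLE_DEBUG pattern that also appears later in this commit (illustrative):

#define ENABLE_DEBUG (1)   /* set to 0 and the DEBUG() calls compile away */
#include "debug.h"

static void do_work(void)
{
    /* With ENABLE_DEBUG set, this goes through DEBUG_PRINT() shown above */
    DEBUG("do_work: starting\n");
}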
@@ -59,9 +59,11 @@ enum {
 * @param[in] queue       array of msg_t used as queue
 * @param[in] queue_size  number of msg_t objects in queue
 */
static inline void mbox_init(mbox_t *mbox, msg_t *queue, unsigned int queue_size)
static inline void mbox_init(mbox_t *mbox, msg_t *queue,
                             unsigned int queue_size)
{
    mbox_t m = MBOX_INIT(queue, queue_size);

    *mbox = m;
}

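A minimal setup sketch, assuming RIOT's mbox.h; mbox_put()/mbox_get() are not shown in this diff (only their internal _mbox_put()/_mbox_get() helpers appear below), and the power-of-two queue size is an assumption inherited from the cib backing the mailbox:

#include "mbox.h"

#define MBOX_QUEUE_SIZE 8                  /* assumed: power of two for the cib */

static msg_t mbox_queue[MBOX_QUEUE_SIZE];
static mbox_t mbox;

static void mbox_setup(void)
{
    /* Wraps MBOX_INIT(), exactly as the inline function above does */
    mbox_init(&mbox, mbox_queue, MBOX_QUEUE_SIZE);
}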
@@ -102,6 +102,7 @@ int _mutex_lock(mutex_t *mutex, volatile uint8_t *blocking);
static inline int mutex_trylock(mutex_t *mutex)
{
    volatile uint8_t blocking = 0;

    return _mutex_lock(mutex, &blocking);
}

@@ -113,6 +114,7 @@ static inline int mutex_trylock(mutex_t *mutex)
static inline void mutex_lock(mutex_t *mutex)
{
    volatile uint8_t blocking = 1;

    _mutex_lock(mutex, &blocking);
}

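Both wrappers funnel into _mutex_lock(); the blocking flag decides whether the caller is queued or bounced immediately. A minimal usage sketch, assuming MUTEX_INIT and mutex_unlock() from RIOT's mutex.h (neither is shown in these hunks):

#include "mutex.h"

static mutex_t lock = MUTEX_INIT;   /* assumed initializer */
static unsigned counter;

static void increment(void)
{
    mutex_lock(&lock);              /* blocking variant: blocking = 1 */
    counter++;
    mutex_unlock(&lock);            /* assumed counterpart */
}

static int try_increment(void)
{
    if (mutex_trylock(&lock)) {     /* non-blocking variant: blocking = 0 */
        counter++;
        mutex_unlock(&lock);
        return 1;
    }
    return 0;                       /* lock was already held */
}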
@@ -59,6 +59,7 @@ static inline void priority_queue_node_init(
    priority_queue_node_t *priority_queue_node)
{
    priority_queue_node_t qn = PRIORITY_QUEUE_NODE_INIT;

    *priority_queue_node = qn;
}

@@ -78,6 +79,7 @@ static inline void priority_queue_node_init(
static inline void priority_queue_init(priority_queue_t *priority_queue)
{
    priority_queue_t q = PRIORITY_QUEUE_INIT;

    *priority_queue = q;
}

@@ -55,7 +55,8 @@ typedef struct {
 * @param[in] buffer    Buffer to use by rb.
 * @param[in] bufsize   `sizeof (buffer)`
 */
static inline void ringbuffer_init(ringbuffer_t *__restrict rb, char *buffer, unsigned bufsize)
static inline void ringbuffer_init(ringbuffer_t *__restrict rb, char *buffer,
                                   unsigned bufsize)
{
    rb->buf = buffer;
    rb->size = bufsize;
@@ -84,7 +85,8 @@ int ringbuffer_add_one(ringbuffer_t *__restrict rb, char c);
 * @param[in] n         Maximum number of elements to add.
 * @returns   Number of elements actually added. 0 if rb is full.
 */
unsigned ringbuffer_add(ringbuffer_t *__restrict rb, const char *buf, unsigned n);
unsigned ringbuffer_add(ringbuffer_t *__restrict rb, const char *buf,
                        unsigned n);

/**
 * @brief   Peek and remove oldest element from the ringbuffer.
@@ -135,7 +137,8 @@ static inline int ringbuffer_full(const ringbuffer_t *__restrict rb)
 * @param[in,out] rb    Ringbuffer to query.
 * @returns   number of available bytes
 */
static inline unsigned int ringbuffer_get_free(const ringbuffer_t *__restrict rb)
static inline unsigned int ringbuffer_get_free(
    const ringbuffer_t *__restrict rb)
{
    return rb->size - rb->avail;
}
@@ -154,7 +157,8 @@ int ringbuffer_peek_one(const ringbuffer_t *__restrict rb);
 * @param[in] n     Read at most n elements.
 * @returns   Same as ringbuffer_get()
 */
unsigned ringbuffer_peek(const ringbuffer_t *__restrict rb, char *buf, unsigned n);
unsigned ringbuffer_peek(const ringbuffer_t *__restrict rb, char *buf,
                         unsigned n);

#ifdef __cplusplus
}
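A minimal usage sketch, assuming RIOT's ringbuffer.h; ringbuffer_get() is not shown here and is assumed to be the destructive counterpart that ringbuffer_peek()'s docs refer to:

#include <stdio.h>
#include "ringbuffer.h"

static char storage[16];
static ringbuffer_t rb;

static void ringbuffer_demo(void)
{
    ringbuffer_init(&rb, storage, sizeof(storage));

    unsigned added = ringbuffer_add(&rb, "hello", 5);         /* returns 5 */
    char out[8];
    unsigned peeked = ringbuffer_peek(&rb, out, sizeof(out)); /* non-destructive */

    printf("added=%u peeked=%u free=%u\n",
           added, peeked, ringbuffer_get_free(&rb));
}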
@@ -80,6 +80,7 @@ typedef struct rmutex_t {
static inline void rmutex_init(rmutex_t *rmutex)
{
    rmutex_t empty_rmutex = RMUTEX_INIT;

    *rmutex = empty_rmutex;
}

@@ -228,7 +228,8 @@ struct _thread {
 * @brief Size of the main task's stack in bytes
 */
#ifndef THREAD_STACKSIZE_MAIN
#define THREAD_STACKSIZE_MAIN      (THREAD_STACKSIZE_DEFAULT + THREAD_EXTRA_STACKSIZE_PRINTF)
#define THREAD_STACKSIZE_MAIN      (THREAD_STACKSIZE_DEFAULT + \
                                    THREAD_EXTRA_STACKSIZE_PRINTF)
#endif

/**
@@ -283,7 +284,8 @@ struct _thread {
 * @brief Priority of the main thread
 */
#ifndef THREAD_PRIORITY_MAIN
#define THREAD_PRIORITY_MAIN       (THREAD_PRIORITY_MIN - (SCHED_PRIO_LEVELS/2))
#define THREAD_PRIORITY_MAIN       (THREAD_PRIORITY_MIN - \
                                    (SCHED_PRIO_LEVELS / 2))
#endif

/**
@@ -437,6 +439,7 @@ int thread_wakeup(kernel_pid_t pid);
static inline kernel_pid_t thread_getpid(void)
{
    extern volatile kernel_pid_t sched_active_pid;

    return sched_active_pid;
}

@@ -450,7 +453,8 @@ static inline kernel_pid_t thread_getpid(void)
 *
 * @return  stack pointer
 */
char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_start, int stack_size);
char *thread_stack_init(thread_task_func_t task_func, void *arg,
                        void *stack_start, int stack_size);

/**
 * @brief Add thread to list, sorted by priority (internal)
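A minimal sketch of putting these macros to use, assuming RIOT's thread.h; the thread_create() prototype and THREAD_CREATE_SLEEPING flag are taken from the thread.c hunks later in this commit, and thread_wakeup() from the hunk header above:

#include "thread.h"

static char worker_stack[THREAD_STACKSIZE_MAIN];

static void *worker(void *arg)
{
    (void)arg;
    /* ... thread body ... */
    return NULL;
}

static void start_worker(void)
{
    kernel_pid_t pid = thread_create(worker_stack, sizeof(worker_stack),
                                     THREAD_PRIORITY_MAIN - 1,
                                     THREAD_CREATE_SLEEPING,   /* start asleep */
                                     worker, NULL, "worker");
    thread_wakeup(pid);   /* wake it once everything is set up */
}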
@@ -43,7 +43,9 @@ void lifo_insert(int *array, int i)

#ifdef DEVELHELP
    if ((array[index] != -1) && (array[0] != -1)) {
        LOG_WARNING("lifo_insert: overwriting array[%i] == %i with %i\n\n\n\t\tThe lifo is broken now.\n\n\n", index, array[index], array[0]);
        LOG_WARNING(
            "lifo_insert: overwriting array[%i] == %i with %i\n\n\n\t\tThe lifo is broken now.\n\n\n", index,
            array[index], array[0]);
    }
#endif

@@ -60,10 +60,12 @@ int _mbox_put(mbox_t *mbox, msg_t *msg, int blocking)
    unsigned irqstate = irq_disable();

    list_node_t *next = list_remove_head(&mbox->readers);

    if (next) {
        DEBUG("mbox: Thread %" PRIkernel_pid " mbox 0x%08x: _tryput(): "
              "there's a waiter.\n", sched_active_pid, (unsigned)mbox);
        thread_t *thread = container_of((clist_node_t*)next, thread_t, rq_entry);
        thread_t *thread =
            container_of((clist_node_t *)next, thread_t, rq_entry);
        *(msg_t *)thread->wait_data = *msg;
        _wake_waiter(thread, irqstate);
        return 1;
@@ -101,7 +103,8 @@ int _mbox_get(mbox_t *mbox, msg_t *msg, int blocking)
        *msg = mbox->msg_array[cib_get_unsafe(&mbox->cib)];
        list_node_t *next = list_remove_head(&mbox->writers);
        if (next) {
            thread_t *thread = container_of((clist_node_t*)next, thread_t, rq_entry);
            thread_t *thread = container_of((clist_node_t *)next, thread_t,
                                            rq_entry);
            _wake_waiter(thread, irqstate);
        }
        else {
core/msg.c
@@ -37,11 +37,13 @@
#include "debug.h"

static int _msg_receive(msg_t *m, int block);
static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block, unsigned state);
static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
                     unsigned state);

static int queue_msg(thread_t *target, const msg_t *m)
{
    int n = cib_put(&(target->msg_queue));

    if (n < 0) {
        DEBUG("queue_msg(): message queue is full (or there is none)\n");
        return 0;
@@ -79,7 +81,8 @@ int msg_try_send(msg_t *m, kernel_pid_t target_pid)
    return _msg_send(m, target_pid, false, irq_disable());
}

static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block, unsigned state)
static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
                     unsigned state)
{
#ifdef DEVELHELP
    if (!pid_is_valid(target_pid)) {
@@ -105,7 +108,8 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block, unsigned sta
          block, me->status, target->status);

    if (target->status != STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send() %s:%i: Target %" PRIkernel_pid " is not RECEIVE_BLOCKED.\n",
        DEBUG(
            "msg_send() %s:%i: Target %" PRIkernel_pid " is not RECEIVE_BLOCKED.\n",
            RIOT_FILE_RELATIVE, __LINE__, target_pid);

        if (queue_msg(target, m)) {
@@ -120,7 +124,8 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block, unsigned sta
        }

        if (!block) {
            DEBUG("msg_send: %" PRIkernel_pid ": Receiver not waiting, block=%u\n",
            DEBUG(
                "msg_send: %" PRIkernel_pid ": Receiver not waiting, block=%u\n",
                me->pid, block);
            irq_restore(state);
            return 0;
@@ -237,11 +242,13 @@ int msg_reply(msg_t *m, msg_t *reply)
    unsigned state = irq_disable();

    thread_t *target = (thread_t *)sched_threads[m->sender_pid];

    assert(target != NULL);

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid
              "\" not waiting for reply.", sched_active_thread->pid, target->pid);
              "\" not waiting for reply.", sched_active_thread->pid,
              target->pid);
        irq_restore(state);
        return -1;
    }
@@ -265,7 +272,8 @@ int msg_reply_int(msg_t *m, msg_t *reply)

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply_int(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid
              "\" not waiting for reply.", sched_active_thread->pid, target->pid);
              "\" not waiting for reply.", sched_active_thread->pid,
              target->pid);
        return -1;
    }

@@ -289,6 +297,7 @@ int msg_receive(msg_t *m)
static int _msg_receive(msg_t *m, int block)
{
    unsigned state = irq_disable();

    DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive.\n",
          sched_active_thread->pid);

@@ -307,7 +316,8 @@ static int _msg_receive(msg_t *m, int block)
    }

    if (queue_index >= 0) {
        DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): We've got a queued message.\n",
        DEBUG(
            "_msg_receive: %" PRIkernel_pid ": _msg_receive(): We've got a queued message.\n",
            sched_active_thread->pid);
        *m = me->msg_array[queue_index];
    }
@@ -318,11 +328,13 @@ static int _msg_receive(msg_t *m, int block)
        list_node_t *next = list_remove_head(&me->msg_waiters);

        if (next == NULL) {
            DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): No thread in waiting list.\n",
            DEBUG(
                "_msg_receive: %" PRIkernel_pid ": _msg_receive(): No thread in waiting list.\n",
                sched_active_thread->pid);

            if (queue_index < 0) {
                DEBUG("_msg_receive(): %" PRIkernel_pid ": No msg in queue. Going blocked.\n",
                DEBUG(
                    "_msg_receive(): %" PRIkernel_pid ": No msg in queue. Going blocked.\n",
                    sched_active_thread->pid);
                sched_set_status(me, STATUS_RECEIVE_BLOCKED);

@@ -339,10 +351,12 @@ static int _msg_receive(msg_t *m, int block)
        return 1;
    }
    else {
        DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): Waking up waiting thread.\n",
        DEBUG(
            "_msg_receive: %" PRIkernel_pid ": _msg_receive(): Waking up waiting thread.\n",
            sched_active_thread->pid);

        thread_t *sender = container_of((clist_node_t*)next, thread_t, rq_entry);
        thread_t *sender =
            container_of((clist_node_t *)next, thread_t, rq_entry);

        if (queue_index >= 0) {
            /* We've already got a message from the queue. As there is a
@@ -392,6 +406,7 @@ int msg_avail(void)
void msg_init_queue(msg_t *array, int num)
{
    thread_t *me = (thread_t *)sched_active_thread;

    me->msg_array = array;
    cib_init(&(me->msg_queue), num);
}
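A minimal sketch of the receive side these internals serve, assuming RIOT's msg.h; msg_send_receive() on the sender side and the power-of-two queue size for cib_init() are assumptions about the wider msg API, not shown in this diff:

#include "msg.h"

#define MSG_QUEUE_SIZE 8                    /* assumed: power of two for cib_init() */
static msg_t msg_queue[MSG_QUEUE_SIZE];

static void receiver_loop(void)
{
    /* Without a queue, senders only succeed while we are RECEIVE_BLOCKED */
    msg_init_queue(msg_queue, MSG_QUEUE_SIZE);

    while (1) {
        msg_t m;
        msg_receive(&m);                    /* blocks until a message arrives */

        msg_t reply = m;
        msg_reply(&m, &reply);              /* only succeeds if the sender is
                                               REPLY_BLOCKED, i.e. used
                                               msg_send_receive() (assumed) */
    }
}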
@@ -44,6 +44,7 @@ void priority_queue_remove(priority_queue_t *root_, priority_queue_node_t *node)
priority_queue_node_t *priority_queue_remove_head(priority_queue_t *root)
{
    priority_queue_node_t *head = root->first;

    if (head) {
        root->first = head->next;
    }
@@ -77,12 +78,14 @@ void priority_queue_print(priority_queue_t *root)
    printf("queue:\n");

    for (priority_queue_node_t *node = root->first; node; node = node->next) {
        printf("Data: %u Priority: %lu\n", node->data, (unsigned long) node->priority);
        printf("Data: %u Priority: %lu\n", node->data,
               (unsigned long)node->priority);
    }
}

void priority_queue_print_node(priority_queue_node_t *node)
{
    printf("Data: %u Priority: %lu Next: %u\n", (unsigned int) node->data, (unsigned long) node->priority, (unsigned int)node->next);
    printf("Data: %u Priority: %lu Next: %u\n", (unsigned int)node->data,
           (unsigned long)node->priority, (unsigned int)node->next);
}
#endif
@@ -30,6 +30,7 @@
static void add_tail(ringbuffer_t *restrict rb, char c)
{
    unsigned pos = rb->start + rb->avail++;

    if (pos >= rb->size) {
        pos -= rb->size;
    }
@@ -46,6 +47,7 @@ static void add_tail(ringbuffer_t *restrict rb, char c)
static char get_head(ringbuffer_t *restrict rb)
{
    char result = rb->buf[rb->start];

    if ((--rb->avail == 0) || (++rb->start == rb->size)) {
        rb->start = 0;
    }
@@ -55,6 +57,7 @@ static char get_head(ringbuffer_t *restrict rb)
unsigned ringbuffer_add(ringbuffer_t *restrict rb, const char *buf, unsigned n)
{
    unsigned i;

    for (i = 0; i < n; i++) {
        if (ringbuffer_full(rb)) {
            break;
@@ -67,6 +70,7 @@ unsigned ringbuffer_add(ringbuffer_t *restrict rb, const char *buf, unsigned n)
int ringbuffer_add_one(ringbuffer_t *restrict rb, char c)
{
    int result = -1;

    if (ringbuffer_full(rb)) {
        result = (unsigned char)get_head(rb);
    }
@@ -132,11 +136,14 @@ unsigned ringbuffer_remove(ringbuffer_t *restrict rb, unsigned n)
int ringbuffer_peek_one(const ringbuffer_t *restrict rb_)
{
    ringbuffer_t rb = *rb_;

    return ringbuffer_get_one(&rb);
}

unsigned ringbuffer_peek(const ringbuffer_t *restrict rb_, char *buf, unsigned n)
unsigned ringbuffer_peek(const ringbuffer_t *restrict rb_, char *buf,
                         unsigned n)
{
    ringbuffer_t rb = *rb_;

    return ringbuffer_get(&rb, buf, n);
}
@@ -79,7 +79,8 @@ static int _lock(rmutex_t *rmutex, int trylock)

    /* ensure that owner is read atomically, since I need a consistent value */
    owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
    DEBUG("rmutex %" PRIi16" : mutex held by %" PRIi16" \n", thread_getpid(), owner);
    DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n",
          thread_getpid(), owner);

    /* Case 1: Mutex is not held by me */
    if (owner != thread_getpid()) {
@@ -103,7 +104,8 @@ static int _lock(rmutex_t *rmutex, int trylock)
    DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());

    /* ensure that owner is written atomically, since others need a consistent value */
    atomic_store_explicit(&rmutex->owner, thread_getpid(), memory_order_relaxed);
    atomic_store_explicit(&rmutex->owner, thread_getpid(),
                          memory_order_relaxed);

    DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());

@@ -125,7 +127,8 @@ int rmutex_trylock(rmutex_t *rmutex)

void rmutex_unlock(rmutex_t *rmutex)
{
    assert(atomic_load_explicit(&rmutex->owner,memory_order_relaxed) == thread_getpid());
    assert(atomic_load_explicit(&rmutex->owner,
                                memory_order_relaxed) == thread_getpid());
    assert(rmutex->refcount > 0);

    DEBUG("rmutex %" PRIi16 " : decrementing refs refs\n", thread_getpid());
@@ -140,7 +143,8 @@ void rmutex_unlock(rmutex_t *rmutex)
        DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());

        /* ensure that owner is written only once */
        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF, memory_order_relaxed);
        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF,
                              memory_order_relaxed);

        DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());

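The owner/refcount pair is what makes the mutex recursive: re-locking by the current owner only bumps the refcount, and only the final unlock resets the owner and releases the underlying mutex. A minimal sketch, assuming rmutex_lock() exists alongside the rmutex_trylock()/rmutex_unlock() shown here (RMUTEX_INIT appears in the rmutex.h hunk above):

#include "rmutex.h"

static rmutex_t rlock = RMUTEX_INIT;

static void inner(void)
{
    rmutex_lock(&rlock);      /* same owner: only the refcount increases */
    /* ... */
    rmutex_unlock(&rlock);    /* refcount drops back, mutex stays held */
}

static void outer(void)
{
    rmutex_lock(&rlock);      /* acquires the mutex, records the owner */
    inner();                  /* re-entry does not deadlock */
    rmutex_unlock(&rlock);    /* last unlock resets owner, releases mutex */
}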
core/sched.c
@@ -54,9 +54,11 @@ static uint32_t runqueue_bitcache = 0;

/* Needed by OpenOCD to read sched_threads */
#if defined(__APPLE__) && defined(__MACH__)
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section ("__OPENOCD,__openocd")))
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section( \
                                                                    "__OPENOCD,__openocd")))
#else
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section (".openocd")))
#define FORCE_USED_SECTION __attribute__((used)) __attribute__((section( \
                                                                    ".openocd")))
#endif

FORCE_USED_SECTION
@@ -70,7 +72,8 @@ const uint8_t _tcb_name_offset = offsetof(thread_t, name);
#endif

#ifdef MODULE_SCHED_CB
static void (*sched_cb) (kernel_pid_t active_thread, kernel_pid_t next_thread) = NULL;
static void (*sched_cb) (kernel_pid_t active_thread,
                         kernel_pid_t next_thread) = NULL;
#endif

int __attribute__((used)) sched_run(void)
@@ -83,7 +86,8 @@ int __attribute__((used)) sched_run(void)
     * since the threading should not be started before at least the idle thread was started.
     */
    int nextrq = bitarithm_lsb(runqueue_bitcache);
    thread_t *next_thread = container_of(sched_runqueues[nextrq].next->next, thread_t, rq_entry);
    thread_t *next_thread = container_of(sched_runqueues[nextrq].next->next,
                                         thread_t, rq_entry);

    DEBUG(
        "sched_run: active thread: %" PRIkernel_pid ", next thread: %" PRIkernel_pid "\n",
@@ -103,8 +107,11 @@ int __attribute__((used)) sched_run(void)
        }

#ifdef SCHED_TEST_STACK
        if (*((uintptr_t *) active_thread->stack_start) != (uintptr_t) active_thread->stack_start) {
            LOG_WARNING("scheduler(): stack overflow detected, pid=%" PRIkernel_pid "\n", active_thread->pid);
        if (*((uintptr_t *)active_thread->stack_start) !=
            (uintptr_t)active_thread->stack_start) {
            LOG_WARNING(
                "scheduler(): stack overflow detected, pid=%" PRIkernel_pid "\n",
                active_thread->pid);
        }
#endif
    }
@@ -141,15 +148,18 @@ void sched_set_status(thread_t *process, thread_status_t status)
{
    if (status >= STATUS_ON_RUNQUEUE) {
        if (!(process->status >= STATUS_ON_RUNQUEUE)) {
            DEBUG("sched_set_status: adding thread %" PRIkernel_pid " to runqueue %" PRIu8 ".\n",
            DEBUG(
                "sched_set_status: adding thread %" PRIkernel_pid " to runqueue %" PRIu8 ".\n",
                process->pid, process->priority);
            clist_rpush(&sched_runqueues[process->priority], &(process->rq_entry));
            clist_rpush(&sched_runqueues[process->priority],
                        &(process->rq_entry));
            runqueue_bitcache |= 1 << process->priority;
        }
    }
    else {
        if (process->status >= STATUS_ON_RUNQUEUE) {
            DEBUG("sched_set_status: removing thread %" PRIkernel_pid " from runqueue %" PRIu8 ".\n",
            DEBUG(
                "sched_set_status: removing thread %" PRIkernel_pid " from runqueue %" PRIu8 ".\n",
                process->pid, process->priority);
            clist_lpop(&sched_runqueues[process->priority]);

@@ -170,7 +180,8 @@ void sched_switch(uint16_t other_prio)

    DEBUG("sched_switch: active pid=%" PRIkernel_pid " prio=%" PRIu16 " on_runqueue=%i "
          ", other_prio=%" PRIu16 "\n",
          active_thread->pid, current_prio, on_runqueue, other_prio);
          active_thread->pid, current_prio, on_runqueue,
          other_prio);

    if (!on_runqueue || (current_prio > other_prio)) {
        if (irq_is_in()) {
@@ -189,7 +200,8 @@ void sched_switch(uint16_t other_prio)

NORETURN void sched_task_exit(void)
{
    DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n", sched_active_thread->pid);
    DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
          sched_active_thread->pid);

    (void)irq_disable();
    sched_threads[sched_active_pid] = NULL;
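sched_run() picks the highest-priority non-empty runqueue with bitarithm_lsb(runqueue_bitcache): each priority level owns one bit, lower priority numbers are more urgent, so the lowest set bit names the queue to serve. An illustrative stand-alone sketch of that bookkeeping (the names below are placeholders, not the scheduler's own):

#include <stdint.h>
#include "bitarithm.h"

static uint32_t bitcache;   /* stand-in for runqueue_bitcache */

static void on_queue_nonempty(unsigned prio) { bitcache |= 1UL << prio; }
static void on_queue_empty(unsigned prio)    { bitcache &= ~(1UL << prio); }

static int pick_next_runqueue(void)
{
    /* lowest set bit == highest-priority queue with runnable threads */
    return bitcache ? (int)bitarithm_lsb(bitcache) : -1;
}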
@@ -41,6 +41,7 @@ volatile thread_t *thread_get(kernel_pid_t pid)
thread_status_t thread_getstatus(kernel_pid_t pid)
{
    volatile thread_t *thread = thread_get(pid);

    return thread ? thread->status : STATUS_NOT_FOUND;
}

@@ -143,6 +144,7 @@ void thread_yield(void)
{
    unsigned old_state = irq_disable();
    thread_t *me = (thread_t *)sched_active_thread;

    if (me->status >= STATUS_ON_RUNQUEUE) {
        clist_lpoprpush(&sched_runqueues[me->priority]);
    }
@@ -159,7 +161,8 @@ void thread_add_to_list(list_node_t *list, thread_t *thread)
    list_node_t *new_node = (list_node_t *)&thread->rq_entry;

    while (list->next) {
        thread_t *list_entry = container_of((clist_node_t*)list->next, thread_t, rq_entry);
        thread_t *list_entry = container_of((clist_node_t *)list->next,
                                            thread_t, rq_entry);
        if (list_entry->priority > my_prio) {
            break;
        }
@@ -186,7 +189,9 @@ uintptr_t thread_measure_stack_free(char *stack)
}
#endif

kernel_pid_t thread_create(char *stack, int stacksize, uint8_t priority, int flags, thread_task_func_t function, void *arg, const char *name)
kernel_pid_t thread_create(char *stack, int stacksize, uint8_t priority,
                           int flags, thread_task_func_t function, void *arg,
                           const char *name)
{
    if (priority >= SCHED_PRIO_LEVELS) {
        return -EINVAL;
@@ -257,7 +262,8 @@ kernel_pid_t thread_create(char *stack, int stacksize, uint8_t priority, int fla
    thread->pid = pid;
    thread->sp = thread_stack_init(function, arg, stack, stacksize);

#if defined(DEVELHELP) || defined(SCHED_TEST_STACK) || defined(MODULE_MPU_STACK_GUARD)
#if defined(DEVELHELP) || defined(SCHED_TEST_STACK) || \
    defined(MODULE_MPU_STACK_GUARD)
    thread->stack_start = stack;
#endif

@@ -280,7 +286,8 @@ kernel_pid_t thread_create(char *stack, int stacksize, uint8_t priority, int fla

    sched_num_threads++;

    DEBUG("Created thread %s. PID: %" PRIkernel_pid ". Priority: %u.\n", name, thread->pid, priority);
    DEBUG("Created thread %s. PID: %" PRIkernel_pid ". Priority: %u.\n", name,
          thread->pid, priority);

    if (flags & THREAD_CREATE_SLEEPING) {
        sched_set_status(thread, STATUS_SLEEPING);
@@ -26,18 +26,22 @@
#define ENABLE_DEBUG (0)
#include "debug.h"

static thread_flags_t _thread_flags_clear_atomic(thread_t *thread, thread_flags_t mask)
static thread_flags_t _thread_flags_clear_atomic(thread_t *thread,
                                                 thread_flags_t mask)
{
    unsigned state = irq_disable();

    mask &= thread->flags;
    thread->flags &= ~mask;
    irq_restore(state);
    return mask;
}

static void _thread_flags_wait(thread_flags_t mask, thread_t *thread, unsigned threadstate, unsigned irqstate)
static void _thread_flags_wait(thread_flags_t mask, thread_t *thread,
                               unsigned threadstate, unsigned irqstate)
{
    DEBUG("_thread_flags_wait: me->flags=0x%08x me->mask=0x%08x. going blocked.\n",
    DEBUG(
        "_thread_flags_wait: me->flags=0x%08x me->mask=0x%08x. going blocked.\n",
        (unsigned)thread->flags, (unsigned)mask);

    thread->wait_data = (void *)(unsigned)mask;
@@ -49,8 +53,10 @@ static void _thread_flags_wait(thread_flags_t mask, thread_t *thread, unsigned t
thread_flags_t thread_flags_clear(thread_flags_t mask)
{
    thread_t *me = (thread_t *)sched_active_thread;

    mask = _thread_flags_clear_atomic(me, mask);
    DEBUG("thread_flags_clear(): pid %"PRIkernel_pid" clearing 0x%08x\n", thread_getpid(), mask);
    DEBUG("thread_flags_clear(): pid %" PRIkernel_pid " clearing 0x%08x\n",
          thread_getpid(), mask);
    return mask;
}

@@ -58,6 +64,7 @@ static void _thread_flags_wait_any(thread_flags_t mask)
{
    thread_t *me = (thread_t *)sched_active_thread;
    unsigned state = irq_disable();

    if (!(me->flags & mask)) {
        _thread_flags_wait(mask, me, STATUS_FLAG_BLOCKED_ANY, state);
    }
@@ -69,6 +76,7 @@ static void _thread_flags_wait_any(thread_flags_t mask)
thread_flags_t thread_flags_wait_any(thread_flags_t mask)
{
    thread_t *me = (thread_t *)sched_active_thread;

    _thread_flags_wait_any(mask);
    return _thread_flags_clear_atomic(me, mask);
}
@@ -87,8 +95,11 @@ thread_flags_t thread_flags_wait_all(thread_flags_t mask)
{
    unsigned state = irq_disable();
    thread_t *me = (thread_t *)sched_active_thread;

    if (!((me->flags & mask) == mask)) {
        DEBUG("thread_flags_wait_all(): pid %"PRIkernel_pid" waiting for %08x\n", thread_getpid(), (unsigned)mask);
        DEBUG(
            "thread_flags_wait_all(): pid %" PRIkernel_pid " waiting for %08x\n",
            thread_getpid(), (unsigned)mask);
        _thread_flags_wait(mask, me, STATUS_FLAG_BLOCKED_ALL, state);
    }
    else {
@@ -102,6 +113,7 @@ inline int __attribute__((always_inline)) thread_flags_wake(thread_t *thread)
{
    unsigned wakeup;
    thread_flags_t mask = (uint16_t)(unsigned)thread->wait_data;

    switch (thread->status) {
        case STATUS_FLAG_BLOCKED_ANY:
            wakeup = (thread->flags & mask);
@@ -115,7 +127,8 @@ inline int __attribute__((always_inline)) thread_flags_wake(thread_t *thread)
    }

    if (wakeup) {
        DEBUG("_thread_flags_wake(): waking up pid %"PRIkernel_pid"\n", thread->pid);
        DEBUG("_thread_flags_wake(): waking up pid %" PRIkernel_pid "\n",
              thread->pid);
        sched_set_status(thread, STATUS_PENDING);
        sched_context_switch_request = 1;
    }
@@ -125,7 +138,8 @@ inline int __attribute__((always_inline)) thread_flags_wake(thread_t *thread)

void thread_flags_set(thread_t *thread, thread_flags_t mask)
{
    DEBUG("thread_flags_set(): setting 0x%08x for pid %"PRIkernel_pid"\n", mask, thread->pid);
    DEBUG("thread_flags_set(): setting 0x%08x for pid %" PRIkernel_pid "\n",
          mask, thread->pid);
    unsigned state = irq_disable();
    thread->flags |= mask;
    if (thread_flags_wake(thread)) {
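A minimal signalling sketch built on the functions above, assuming RIOT's thread_flags.h header and a known thread_t pointer for the waiting thread; the flag value is arbitrary:

#include "thread.h"
#include "thread_flags.h"

#define FLAG_DATA_READY (0x0001)   /* illustrative flag bit */

static void waiter(void)
{
    /* Blocks via _thread_flags_wait_any(), then clears the flag atomically */
    thread_flags_t got = thread_flags_wait_any(FLAG_DATA_READY);
    (void)got;
}

static void notifier(thread_t *waiting_thread)
{
    /* Sets the bit and may wake the waiter via thread_flags_wake() */
    thread_flags_set(waiting_thread, FLAG_DATA_READY);
}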