Mirror of https://github.com/RIOT-OS/RIOT.git
core: Access internal vars via helper funcs
Replace accesses to `sched_active_thread`, `sched_active_pid`, and `sched_threads` with `thread_get_active()`, `thread_get_active_pid()`, and `thread_get_unchecked()` where sensible.
commit 57264c5059 (parent 4a31578982)
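Example (not part of the patch): a minimal sketch of how code is expected to use the helper functions instead of reading the scheduler's internal variables directly. show_where_we_run() is a hypothetical function used only for illustration.

#include "thread.h"

/* show_where_we_run() is a hypothetical helper, not part of the patch. */
static void show_where_we_run(void)
{
    thread_t *me = thread_get_active();    /* was: (thread_t *)sched_active_thread */
    kernel_pid_t pid = thread_getpid();    /* was: sched_active_pid */

    /* thread_get() validates a PID and may return NULL;
     * thread_get_unchecked() skips the check (was: sched_threads[pid]) */
    thread_t *same = thread_get(pid);

    (void)me;
    (void)same;
}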
@@ -35,7 +35,7 @@ void cond_init(cond_t *cond)
 void cond_wait(cond_t *cond, mutex_t *mutex)
 {
     unsigned irqstate = irq_disable();
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     mutex_unlock(mutex);
     sched_set_status(me, STATUS_COND_BLOCKED);
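For context, a minimal usage sketch of cond_wait() from a consumer's point of view (assumed application code, not part of the patch; wait_until_ready(), lock, cond and ready are hypothetical):

#include <stdbool.h>

#include "mutex.h"
#include "cond.h"

static mutex_t lock = MUTEX_INIT;
static cond_t cond;            /* initialized with cond_init(&cond) at startup */
static bool ready;

static void wait_until_ready(void)
{
    mutex_lock(&lock);
    while (!ready) {
        /* releases `lock`, blocks the calling thread (the thread returned by
         * thread_get_active() above) and re-locks `lock` before returning */
        cond_wait(&cond, &lock);
    }
    mutex_unlock(&lock);
}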
@@ -46,8 +46,8 @@ extern "C" {
 #include "cpu_conf.h"
 #define DEBUG_PRINT(...) \
     do { \
-        if ((sched_active_thread == NULL) || \
-            (sched_active_thread->stack_size >= \
+        if ((thread_get_active() == NULL) || \
+            (thread_get_active()->stack_size >= \
              THREAD_EXTRA_STACKSIZE_PRINTF)) { \
             printf(__VA_ARGS__); \
         } \
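As a reminder of how this header is consumed (a sketch under the usual RIOT conventions, not part of the patch; my_module and my_func() are hypothetical):

/* hypothetical my_module.c */
#define ENABLE_DEBUG 0          /* set to 1 to enable the output */
#include "debug.h"
#include "thread.h"

static void my_func(void)
{
    /* When enabled, DEBUG() expands to DEBUG_PRINT(), which only calls
     * printf() if no thread is active yet or the active thread's stack
     * is at least THREAD_EXTRA_STACKSIZE_PRINTF. */
    DEBUG("my_module: running as PID %" PRIkernel_pid "\n", thread_getpid());
}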
@@ -132,11 +132,11 @@ thread_flags_t thread_flags_clear(thread_flags_t mask);
  * immediately, otherwise, it will suspend the thread (as
  * THREAD_STATUS_WAIT_ANY) until any of the flags in mask get set.
  *
- * Both ways, it will clear and return (sched_active_thread-flags & mask).
+ * Both ways, it will clear and return (`thread_get_active()->flags & mask`).
  *
  * @param[in] mask mask of flags to wait for
  *
- * @returns flags that caused return/wakeup ((sched_active_thread-flags & mask).
+ * @returns flags that caused return/wakeup (`thread_get_active()->flags & mask`).
  */
 thread_flags_t thread_flags_wait_any(thread_flags_t mask);
 
@@ -147,7 +147,7 @@ thread_flags_t thread_flags_wait_any(thread_flags_t mask);
  * immediately, otherwise, it will suspend the thread (as
  * THREAD_STATUS_WAIT_ALL) until all of the flags in mask have been set.
  *
- * Both ways, it will clear and return (sched_active_thread-flags & mask).
+ * Both ways, it will clear and return (`thread_get_active()->flags & mask`).
  *
  * @param[in] mask mask of flags to wait for
 *
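A short usage sketch of the API documented above (assumed application code; FLAG_DATA_READY, wait_for_data() and notify() are hypothetical):

#include "thread.h"
#include "thread_flags.h"

#define FLAG_DATA_READY (1u << 0)   /* hypothetical flag bit */

/* in the waiting thread */
static void wait_for_data(void)
{
    /* blocks until any bit in the mask is set, then clears and returns
     * thread_get_active()->flags & mask, as described above */
    thread_flags_t flags = thread_flags_wait_any(FLAG_DATA_READY);
    (void)flags;
}

/* in the producing thread, given the waiter's thread_t pointer */
static void notify(thread_t *waiter)
{
    thread_flags_set(waiter, FLAG_DATA_READY);
}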
core/mbox.c (18 lines changed)

@@ -33,7 +33,7 @@ static void _wake_waiter(thread_t *thread, unsigned irqstate)
     sched_set_status(thread, STATUS_PENDING);
 
     DEBUG("mbox: Thread %" PRIkernel_pid ": _wake_waiter(): waking up "
-          "%" PRIkernel_pid ".\n", sched_active_pid, thread->pid);
+          "%" PRIkernel_pid ".\n", thread_getpid(), thread->pid);
 
     uint16_t process_priority = thread->priority;
     irq_restore(irqstate);
@@ -43,16 +43,16 @@ static void _wake_waiter(thread_t *thread, unsigned irqstate)
 static void _wait(list_node_t *wait_list, unsigned irqstate)
 {
     DEBUG("mbox: Thread %" PRIkernel_pid " _wait(): going blocked.\n",
-          sched_active_pid);
+          thread_getpid());
 
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
     sched_set_status(me, STATUS_MBOX_BLOCKED);
     thread_add_to_list(wait_list, me);
     irq_restore(irqstate);
     thread_yield();
 
     DEBUG("mbox: Thread %" PRIkernel_pid " _wait(): woke up.\n",
-          sched_active_pid);
+          thread_getpid());
 }
 
 int _mbox_put(mbox_t *mbox, msg_t *msg, int blocking)
@@ -63,7 +63,7 @@ int _mbox_put(mbox_t *mbox, msg_t *msg, int blocking)
 
     if (next) {
         DEBUG("mbox: Thread %" PRIkernel_pid " mbox 0x%08x: _tryput(): "
-              "there's a waiter.\n", sched_active_pid, (unsigned)mbox);
+              "there's a waiter.\n", thread_getpid(), (unsigned)mbox);
         thread_t *thread =
             container_of((clist_node_t *)next, thread_t, rq_entry);
         *(msg_t *)thread->wait_data = *msg;
@@ -83,8 +83,8 @@ int _mbox_put(mbox_t *mbox, msg_t *msg, int blocking)
     }
 
     DEBUG("mbox: Thread %" PRIkernel_pid " mbox 0x%08x: _tryput(): "
-          "queued message.\n", sched_active_pid, (unsigned)mbox);
-    msg->sender_pid = sched_active_pid;
+          "queued message.\n", thread_getpid(), (unsigned)mbox);
+    msg->sender_pid = thread_getpid();
     /* copy msg into queue */
     mbox->msg_array[cib_put_unsafe(&mbox->cib)] = *msg;
     irq_restore(irqstate);
@@ -98,7 +98,7 @@ int _mbox_get(mbox_t *mbox, msg_t *msg, int blocking)
 
     if (cib_avail(&mbox->cib)) {
         DEBUG("mbox: Thread %" PRIkernel_pid " mbox 0x%08x: _tryget(): "
-              "got queued message.\n", sched_active_pid, (unsigned)mbox);
+              "got queued message.\n", thread_getpid(), (unsigned)mbox);
         /* copy msg from queue */
         *msg = mbox->msg_array[cib_get_unsafe(&mbox->cib)];
         list_node_t *next = list_remove_head(&mbox->writers);
@@ -113,7 +113,7 @@ int _mbox_get(mbox_t *mbox, msg_t *msg, int blocking)
         return 1;
     }
     else if (blocking) {
-        sched_active_thread->wait_data = (void *)msg;
+        thread_get_active()->wait_data = msg;
         _wait(&mbox->readers, irqstate);
         /* sender has copied message */
        return 1;
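For context, a brief sketch of the mbox API whose internals are touched above (assumed application code; setup(), producer() and consumer() are hypothetical):

#include "mbox.h"

#define QUEUE_SIZE 8            /* queue size should be a power of two */

static msg_t queue[QUEUE_SIZE];
static mbox_t mbox;

static void setup(void)
{
    mbox_init(&mbox, queue, QUEUE_SIZE);
}

static void producer(void)
{
    msg_t m = { .type = 0x1234 };
    /* blocking put: waits if the queue is full and no reader is waiting */
    mbox_put(&mbox, &m);
}

static void consumer(void)
{
    msg_t m;
    /* blocking get: blocks via _wait() until a message arrives */
    mbox_get(&mbox, &m);
    (void)m;
}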
core/msg.c (81 lines changed)

@@ -65,7 +65,7 @@ int msg_send(msg_t *m, kernel_pid_t target_pid)
     if (irq_is_in()) {
         return msg_send_int(m, target_pid);
     }
-    if (sched_active_pid == target_pid) {
+    if (thread_getpid() == target_pid) {
         return msg_send_to_self(m);
     }
     return _msg_send(m, target_pid, true, irq_disable());
@@ -76,7 +76,7 @@ int msg_try_send(msg_t *m, kernel_pid_t target_pid)
     if (irq_is_in()) {
         return msg_send_int(m, target_pid);
     }
-    if (sched_active_pid == target_pid) {
+    if (thread_getpid() == target_pid) {
         return msg_send_to_self(m);
     }
     return _msg_send(m, target_pid, false, irq_disable());
@@ -91,9 +91,9 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
     }
 #endif /* DEVELHELP */
 
-    thread_t *target = (thread_t *)sched_threads[target_pid];
+    thread_t *target = thread_get_unchecked(target_pid);
 
-    m->sender_pid = sched_active_pid;
+    m->sender_pid = thread_getpid();
 
     if (target == NULL) {
         DEBUG("msg_send(): target thread %d does not exist\n", target_pid);
@@ -101,11 +101,11 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
         return -1;
     }
 
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     DEBUG("msg_send() %s:%i: Sending from %" PRIkernel_pid " to %" PRIkernel_pid
           ". block=%i src->state=%i target->state=%i\n", RIOT_FILE_RELATIVE,
-          __LINE__, sched_active_pid, target_pid,
+          __LINE__, thread_getpid(), target_pid,
           block, me->status, target->status);
 
     if (target->status != STATUS_RECEIVE_BLOCKED) {
@@ -125,9 +125,8 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
         }
 
         if (!block) {
-            DEBUG(
-                "msg_send: %" PRIkernel_pid ": Receiver not waiting, block=%u\n",
-                me->pid, block);
+            DEBUG("msg_send: %" PRIkernel_pid ": Receiver not waiting, "
+                  "block=%u\n", me->pid, block);
             irq_restore(state);
             return 0;
         }
@@ -135,7 +134,7 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
         DEBUG("msg_send: %" PRIkernel_pid ": going send blocked.\n",
               me->pid);
 
-        me->wait_data = (void *)m;
+        me->wait_data = m;
 
         int newstatus;
 
@@ -146,7 +145,7 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
             newstatus = STATUS_SEND_BLOCKED;
         }
 
-        sched_set_status((thread_t *)me, newstatus);
+        sched_set_status(me, newstatus);
 
         thread_add_to_list(&(target->msg_waiters), me);
 
@@ -166,7 +165,7 @@ static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block,
               PRIkernel_pid " to %" PRIkernel_pid ".\n",
               me->pid, thread_getpid(), target_pid);
         /* copy msg to target */
-        msg_t *target_message = (msg_t *)target->wait_data;
+        msg_t *target_message = target->wait_data;
         *target_message = *m;
         sched_set_status(target, STATUS_PENDING);
 
@@ -181,8 +180,8 @@ int msg_send_to_self(msg_t *m)
 {
     unsigned state = irq_disable();
 
-    m->sender_pid = sched_active_pid;
-    int res = queue_msg((thread_t *)sched_active_thread, m);
+    m->sender_pid = thread_getpid();
+    int res = queue_msg(thread_get_active(), m);
 
     irq_restore(state);
     return res;
@@ -196,7 +195,7 @@ static int _msg_send_oneway(msg_t *m, kernel_pid_t target_pid)
     }
 #endif /* DEVELHELP */
 
-    thread_t *target = (thread_t *)sched_threads[target_pid];
+    thread_t *target = thread_get_unchecked(target_pid);
 
     if (target == NULL) {
         DEBUG("%s: target thread %d does not exist\n", __func__, target_pid);
@@ -242,7 +241,7 @@ int msg_send_bus(msg_t *m, msg_bus_t *bus)
     const uint32_t event_mask = (1UL << (m->type & 0x1F));
     int count = 0;
 
-    m->sender_pid = in_irq ? KERNEL_PID_ISR : sched_active_pid;
+    m->sender_pid = in_irq ? KERNEL_PID_ISR : thread_getpid();
 
     unsigned state = irq_disable();
 
@@ -269,11 +268,11 @@ int msg_send_bus(msg_t *m, msg_bus_t *bus)
 
 int msg_send_receive(msg_t *m, msg_t *reply, kernel_pid_t target_pid)
 {
-    assert(sched_active_pid != target_pid);
+    assert(thread_getpid() != target_pid);
     unsigned state = irq_disable();
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
     sched_set_status(me, STATUS_REPLY_BLOCKED);
-    me->wait_data = (void *)reply;
+    me->wait_data = reply;
 
     /* we re-use (abuse) reply for sending, because wait_data might be
      * overwritten if the target is not in RECEIVE_BLOCKED */
@@ -286,20 +285,20 @@ int msg_reply(msg_t *m, msg_t *reply)
 {
     unsigned state = irq_disable();
 
-    thread_t *target = (thread_t *)sched_threads[m->sender_pid];
+    thread_t *target = thread_get_unchecked(m->sender_pid);
 
     assert(target != NULL);
 
     if (target->status != STATUS_REPLY_BLOCKED) {
         DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid
-              "\" not waiting for reply.", sched_active_thread->pid,
+              "\" not waiting for reply.", thread_getpid(),
               target->pid);
         irq_restore(state);
         return -1;
     }
 
     DEBUG("msg_reply(): %" PRIkernel_pid ": Direct msg copy.\n",
-          sched_active_thread->pid);
+          thread_getpid());
     /* copy msg to target */
     msg_t *target_message = (msg_t *)target->wait_data;
     *target_message = *reply;
@@ -313,11 +312,11 @@ int msg_reply(msg_t *m, msg_t *reply)
 
 int msg_reply_int(msg_t *m, msg_t *reply)
 {
-    thread_t *target = (thread_t *)sched_threads[m->sender_pid];
+    thread_t *target = thread_get_unchecked(m->sender_pid);
 
     if (target->status != STATUS_REPLY_BLOCKED) {
         DEBUG("msg_reply_int(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid
-              "\" not waiting for reply.", sched_active_thread->pid,
+              "\" not waiting for reply.", thread_getpid(),
               target->pid);
         return -1;
     }
@@ -344,9 +343,9 @@ static int _msg_receive(msg_t *m, int block)
     unsigned state = irq_disable();
 
     DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive.\n",
-          sched_active_thread->pid);
+          thread_getpid());
 
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     int queue_index = -1;
 
@@ -361,9 +360,8 @@ static int _msg_receive(msg_t *m, int block)
     }
 
     if (queue_index >= 0) {
-        DEBUG(
-            "_msg_receive: %" PRIkernel_pid ": _msg_receive(): We've got a queued message.\n",
-            sched_active_thread->pid);
+        DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): We've got a "
+              "queued message.\n", thread_getpid());
         *m = me->msg_array[queue_index];
     }
     else {
@@ -373,21 +371,19 @@ static int _msg_receive(msg_t *m, int block)
         list_node_t *next = list_remove_head(&me->msg_waiters);
 
         if (next == NULL) {
-            DEBUG(
-                "_msg_receive: %" PRIkernel_pid ": _msg_receive(): No thread in waiting list.\n",
-                sched_active_thread->pid);
+            DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): No thread in "
+                  "waiting list.\n", thread_getpid());
 
             if (queue_index < 0) {
-                DEBUG(
-                    "_msg_receive(): %" PRIkernel_pid ": No msg in queue. Going blocked.\n",
-                    sched_active_thread->pid);
+                DEBUG("_msg_receive(): %" PRIkernel_pid ": No msg in queue. Going "
+                      "blocked.\n", thread_getpid());
                 sched_set_status(me, STATUS_RECEIVE_BLOCKED);
 
                 irq_restore(state);
                 thread_yield_higher();
 
                 /* sender copied message */
-                assert(sched_active_thread->status != STATUS_RECEIVE_BLOCKED);
+                assert(thread_get_active()->status != STATUS_RECEIVE_BLOCKED);
             }
             else {
                 irq_restore(state);
@@ -396,9 +392,8 @@ static int _msg_receive(msg_t *m, int block)
             return 1;
         }
         else {
-            DEBUG(
-                "_msg_receive: %" PRIkernel_pid ": _msg_receive(): Waking up waiting thread.\n",
-                sched_active_thread->pid);
+            DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): Waking up "
+                  "waiting thread.\n", thread_getpid());
 
             thread_t *sender =
                 container_of((clist_node_t *)next, thread_t, rq_entry);
@@ -435,9 +430,9 @@ static int _msg_receive(msg_t *m, int block)
 int msg_avail(void)
 {
     DEBUG("msg_available: %" PRIkernel_pid ": msg_available.\n",
-          sched_active_thread->pid);
+          thread_getpid());
 
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     int queue_index = -1;
 
@@ -450,7 +445,7 @@ int msg_avail(void)
 
 void msg_init_queue(msg_t *array, int num)
 {
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     me->msg_array = array;
     cib_init(&(me->msg_queue), num);
@@ -460,7 +455,7 @@ void msg_queue_print(void)
 {
     unsigned state = irq_disable();
 
-    thread_t *thread = (thread_t *)sched_active_thread;
+    thread_t *thread = thread_get_active();
     cib_t *msg_queue = &thread->msg_queue;
     msg_t *msg_array = thread->msg_array;
     unsigned int i = msg_queue->read_count & msg_queue->mask;
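A short sketch of the messaging API exercised above, showing where the sender PID filled in via thread_getpid() ends up (assumed application code; receiver() and ping() are hypothetical):

#include "thread.h"
#include "msg.h"

#define MSG_QUEUE_SIZE 8                 /* power of two */

static msg_t msg_queue[MSG_QUEUE_SIZE];

/* hypothetical receiver thread function */
static void *receiver(void *arg)
{
    (void)arg;
    msg_init_queue(msg_queue, MSG_QUEUE_SIZE);   /* queue for the active thread */

    while (1) {
        msg_t m;
        msg_receive(&m);                 /* blocks as STATUS_RECEIVE_BLOCKED */
        /* m.sender_pid was set from thread_getpid() in the sender's context */
        msg_t reply = { .type = m.type };
        msg_reply(&m, &reply);
    }
    return NULL;
}

/* sender side, given the receiver's PID */
static void ping(kernel_pid_t receiver_pid)
{
    msg_t m = { .type = 0x0001 }, reply;
    msg_send_receive(&m, &reply, receiver_pid);  /* asserts we don't send to ourselves */
}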
@@ -37,7 +37,7 @@ void msg_bus_attach(msg_bus_t *bus, msg_bus_entry_t *entry)
 
     entry->next.next = NULL;
     entry->event_mask = 0;
-    entry->pid = sched_active_pid;
+    entry->pid = thread_getpid();
 
     state = irq_disable();
     list_add(&bus->subs, &entry->next);
@@ -62,7 +62,7 @@ msg_bus_entry_t *msg_bus_get_entry(msg_bus_t *bus)
 
         msg_bus_entry_t *subscriber = container_of(e, msg_bus_entry_t, next);
 
-        if (subscriber->pid == sched_active_pid) {
+        if (subscriber->pid == thread_getpid()) {
             s = subscriber;
             break;
         }
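A rough sketch of how a message bus is used (assumed application code; it assumes the msg_bus_init()/msg_bus_subscribe() helpers from msg_bus.h, and EVT_TEMPERATURE, subscriber_setup() and publisher() are hypothetical):

#include "msg_bus.h"

static msg_bus_t bus;                    /* event bus shared between threads */
static msg_bus_entry_t sub;              /* per-subscriber state */

#define EVT_TEMPERATURE 3                /* hypothetical event type (0..31) */

static void subscriber_setup(void)
{
    msg_bus_init(&bus);                  /* once, e.g. at startup */
    msg_bus_attach(&bus, &sub);          /* entry->pid = thread_getpid(), as above */
    msg_bus_subscribe(&sub, EVT_TEMPERATURE);
}

static void publisher(void)
{
    msg_t m = { .type = EVT_TEMPERATURE };
    /* delivers to all attached entries whose event_mask matches; sender_pid
     * becomes thread_getpid(), or KERNEL_PID_ISR when called from an ISR */
    msg_send_bus(&m, &bus);
}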
core/mutex.c (16 lines changed)

@@ -36,20 +36,20 @@ int _mutex_lock(mutex_t *mutex, volatile uint8_t *blocking)
 {
     unsigned irqstate = irq_disable();
 
-    DEBUG("PID[%" PRIkernel_pid "]: Mutex in use.\n", sched_active_pid);
+    DEBUG("PID[%" PRIkernel_pid "]: Mutex in use.\n", thread_getpid());
 
     if (mutex->queue.next == NULL) {
         /* mutex is unlocked. */
         mutex->queue.next = MUTEX_LOCKED;
         DEBUG("PID[%" PRIkernel_pid "]: mutex_wait early out.\n",
-              sched_active_pid);
+              thread_getpid());
         irq_restore(irqstate);
         return 1;
     }
     else if (*blocking) {
-        thread_t *me = (thread_t *)sched_active_thread;
+        thread_t *me = thread_get_active();
         DEBUG("PID[%" PRIkernel_pid "]: Adding node to mutex queue: prio: %"
-              PRIu32 "\n", sched_active_pid, (uint32_t)me->priority);
+              PRIu32 "\n", thread_getpid(), (uint32_t)me->priority);
         sched_set_status(me, STATUS_MUTEX_BLOCKED);
         if (mutex->queue.next == MUTEX_LOCKED) {
             mutex->queue.next = (list_node_t *)&me->rq_entry;
@@ -75,7 +75,7 @@ void mutex_unlock(mutex_t *mutex)
     unsigned irqstate = irq_disable();
 
     DEBUG("mutex_unlock(): queue.next: %p pid: %" PRIkernel_pid "\n",
-          (void *)mutex->queue.next, sched_active_pid);
+          (void *)mutex->queue.next, thread_getpid());
 
     if (mutex->queue.next == NULL) {
         /* the mutex was not locked */
@@ -110,7 +110,7 @@ void mutex_unlock(mutex_t *mutex)
 void mutex_unlock_and_sleep(mutex_t *mutex)
 {
     DEBUG("PID[%" PRIkernel_pid "]: unlocking mutex. queue.next: %p, and "
-          "taking a nap\n", sched_active_pid, (void *)mutex->queue.next);
+          "taking a nap\n", thread_getpid(), (void *)mutex->queue.next);
     unsigned irqstate = irq_disable();
 
     if (mutex->queue.next) {
@@ -129,8 +129,8 @@ void mutex_unlock_and_sleep(mutex_t *mutex)
         }
     }
 
-    DEBUG("PID[%" PRIkernel_pid "]: going to sleep.\n", sched_active_pid);
-    sched_set_status((thread_t *)sched_active_thread, STATUS_SLEEPING);
+    DEBUG("PID[%" PRIkernel_pid "]: going to sleep.\n", thread_getpid());
+    sched_set_status(thread_get_active(), STATUS_SLEEPING);
     irq_restore(irqstate);
     thread_yield_higher();
 }
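For context, a minimal sketch of the locking API behind _mutex_lock()/mutex_unlock() (assumed application code; increment() and counter are hypothetical):

#include "mutex.h"

static mutex_t lock = MUTEX_INIT;
static unsigned counter;

static void increment(void)
{
    /* blocks the calling thread (STATUS_MUTEX_BLOCKED) if the mutex is
     * already owned; waiters are enqueued by priority as shown above */
    mutex_lock(&lock);
    counter++;
    mutex_unlock(&lock);
}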
core/sched.c (12 lines changed)

@@ -102,7 +102,7 @@ static void _unschedule(thread_t *active_thread)
 
 int __attribute__((used)) sched_run(void)
 {
-    thread_t *active_thread = (thread_t *)sched_active_thread;
+    thread_t *active_thread = thread_get_active();
 
     if (!IS_USED(MODULE_CORE_IDLE_THREAD)) {
         if (!runqueue_bitcache) {
@@ -147,7 +147,7 @@ int __attribute__((used)) sched_run(void)
 
     next_thread->status = STATUS_RUNNING;
     sched_active_pid = next_thread->pid;
-    sched_active_thread = (volatile thread_t *)next_thread;
+    sched_active_thread = next_thread;
 
 #ifdef MODULE_MPU_STACK_GUARD
     mpu_configure(
@@ -194,7 +194,7 @@ void sched_set_status(thread_t *process, thread_status_t status)
 
 void sched_switch(uint16_t other_prio)
 {
-    thread_t *active_thread = (thread_t *)sched_active_thread;
+    thread_t *active_thread = thread_get_active();
     uint16_t current_prio = active_thread->priority;
     int on_runqueue = (active_thread->status >= STATUS_ON_RUNQUEUE);
 
@@ -221,13 +221,13 @@ void sched_switch(uint16_t other_prio)
 NORETURN void sched_task_exit(void)
 {
     DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
-          sched_active_thread->pid);
+          thread_getpid());
 
     (void)irq_disable();
-    sched_threads[sched_active_pid] = NULL;
+    sched_threads[thread_getpid()] = NULL;
     sched_num_threads--;
 
-    sched_set_status((thread_t *)sched_active_thread, STATUS_STOPPED);
+    sched_set_status(thread_get_active(), STATUS_STOPPED);
 
     sched_active_thread = NULL;
     cpu_switch_context_exit();
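The helper used in msg.c above, thread_get_unchecked(), skips the PID validation that thread_get() performs. A sketch of the distinction (priority_of() is a hypothetical caller, not part of the patch):

#include "thread.h"

static int priority_of(kernel_pid_t pid)
{
    /* thread_get() validates the PID and returns NULL for unused or
     * out-of-range PIDs, so it is safe for externally supplied values */
    thread_t *t = thread_get(pid);

    if (t == NULL) {
        return -1;
    }

    /* thread_get_unchecked() skips that check; only use it when the PID
     * is already known to be valid, as msg.c does after its DEVELHELP check */
    return thread_get_unchecked(pid)->priority;
}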
@@ -32,7 +32,7 @@
 
 thread_status_t thread_getstatus(kernel_pid_t pid)
 {
-    volatile thread_t *thread = thread_get(pid);
+    thread_t *thread = thread_get(pid);
 
     return thread ? thread->status : STATUS_NOT_FOUND;
 }
@@ -40,7 +40,7 @@ thread_status_t thread_getstatus(kernel_pid_t pid)
 const char *thread_getname(kernel_pid_t pid)
 {
 #ifdef DEVELHELP
-    volatile thread_t *thread = thread_get(pid);
+    thread_t *thread = thread_get(pid);
     return thread ? thread->name : NULL;
 #else
     (void)pid;
@@ -55,7 +55,7 @@ void thread_zombify(void)
     }
 
     irq_disable();
-    sched_set_status((thread_t *)sched_active_thread, STATUS_ZOMBIE);
+    sched_set_status(thread_get_active(), STATUS_ZOMBIE);
     irq_enable();
     thread_yield_higher();
 
@@ -70,7 +70,7 @@ int thread_kill_zombie(kernel_pid_t pid)
 
     int result = (int)STATUS_NOT_FOUND;
 
-    thread_t *thread = (thread_t *)thread_get(pid);
+    thread_t *thread = thread_get(pid);
 
     if (!thread) {
         DEBUG("thread_kill: Thread does not exist!\n");
@@ -98,7 +98,7 @@ void thread_sleep(void)
     }
 
     unsigned state = irq_disable();
-    sched_set_status((thread_t *)sched_active_thread, STATUS_SLEEPING);
+    sched_set_status(thread_get_active(), STATUS_SLEEPING);
     irq_restore(state);
     thread_yield_higher();
 }
@@ -109,7 +109,7 @@ int thread_wakeup(kernel_pid_t pid)
 
     unsigned old_state = irq_disable();
 
-    thread_t *thread = (thread_t *)thread_get(pid);
+    thread_t *thread = thread_get(pid);
 
     if (!thread) {
         DEBUG("thread_wakeup: Thread does not exist!\n");
@@ -135,7 +135,7 @@ int thread_wakeup(kernel_pid_t pid)
 void thread_yield(void)
 {
     unsigned old_state = irq_disable();
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     if (me->status >= STATUS_ON_RUNQUEUE) {
         clist_lpoprpush(&sched_runqueues[me->priority]);
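A short usage sketch for the sleep/wakeup pair touched above (assumed application code; worker_pid, wait_for_work() and kick_worker() are hypothetical):

#include "thread.h"

static kernel_pid_t worker_pid;          /* set when the worker thread starts */

/* inside the worker thread */
static void wait_for_work(void)
{
    /* sets the active thread to STATUS_SLEEPING and yields */
    thread_sleep();
}

/* from another thread */
static void kick_worker(void)
{
    /* wakes the thread up again if it is currently sleeping */
    thread_wakeup(worker_pid);
}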
@@ -52,7 +52,7 @@ static void _thread_flags_wait(thread_flags_t mask, thread_t *thread,
 
 thread_flags_t thread_flags_clear(thread_flags_t mask)
 {
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     mask = _thread_flags_clear_atomic(me, mask);
     DEBUG("thread_flags_clear(): pid %" PRIkernel_pid " clearing 0x%08x\n",
@@ -62,7 +62,7 @@ thread_flags_t thread_flags_clear(thread_flags_t mask)
 
 static void _thread_flags_wait_any(thread_flags_t mask)
 {
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
     unsigned state = irq_disable();
 
     if (!(me->flags & mask)) {
@@ -75,7 +75,7 @@ static void _thread_flags_wait_any(thread_flags_t mask)
 
 thread_flags_t thread_flags_wait_any(thread_flags_t mask)
 {
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     _thread_flags_wait_any(mask);
     return _thread_flags_clear_atomic(me, mask);
@@ -84,7 +84,7 @@ thread_flags_t thread_flags_wait_any(thread_flags_t mask)
 thread_flags_t thread_flags_wait_one(thread_flags_t mask)
 {
     _thread_flags_wait_any(mask);
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
     thread_flags_t tmp = me->flags & mask;
     /* clear all but least significant bit */
     tmp &= (~tmp + 1);
@@ -94,7 +94,7 @@ thread_flags_t thread_flags_wait_one(thread_flags_t mask)
 thread_flags_t thread_flags_wait_all(thread_flags_t mask)
 {
     unsigned state = irq_disable();
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
 
     if (!((me->flags & mask) == mask)) {
         DEBUG(
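To round this off, a sketch for the wait_all/wait_one variants modified above (assumed application code; the flag constants and both functions are hypothetical):

#include "thread_flags.h"

#define FLAG_RX_DONE (1u << 0)           /* hypothetical flag bits */
#define FLAG_TX_DONE (1u << 1)

static void wait_for_both(void)
{
    /* returns only after both bits have been set on the active thread */
    thread_flags_wait_all(FLAG_RX_DONE | FLAG_TX_DONE);
}

static thread_flags_t wait_for_either(void)
{
    /* clears and returns exactly one bit, the least significant one set */
    return thread_flags_wait_one(FLAG_RX_DONE | FLAG_TX_DONE);
}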