mirror of https://github.com/RIOT-OS/RIOT.git, synced 2024-12-29 04:50:03 +01:00
sys: Cleanup access to internal variables
Replace direct accesses to sched_active_thread and sched_active_pid with the helper functions thread_getpid() and thread_get_active(). This serves two purposes:
1. It makes accidental writes to those variables from outside of core less likely.
2. Casting off the volatile qualifier is now well contained to those two functions.
This commit is contained in:
parent 659c351c02
commit 3b6fa61829
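For reference, a minimal sketch of the accessors this commit migrates to, paraphrased from RIOT's core thread API (the real definitions live in core/include/thread.h and may differ between releases; the helper names come from this commit, everything else here is an approximation). The point of the cleanup is visible in the sketch: the cast that strips the volatile qualifier from the scheduler's internals is confined to one accessor instead of being repeated at every call site.

/* Paraphrased sketch, not the verbatim RIOT sources. */
static inline thread_t *thread_get_active(void)
{
    /* the only place the volatile qualifier is cast away */
    return (thread_t *)sched_active_thread;
}

static inline kernel_pid_t thread_getpid(void)
{
    return sched_active_pid;
}

static inline thread_t *thread_get(kernel_pid_t pid)
{
    /* checked lookup in the scheduler's thread table */
    assert(pid_is_valid(pid));
    return (thread_t *)sched_threads[pid];
}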
@@ -670,7 +670,7 @@ static void *_isotp_thread(void *args)
     /* setup the device layers message queue */
     msg_init_queue(msg_queue, CAN_ISOTP_MSG_QUEUE_SIZE);

-    isotp_pid = sched_active_pid;
+    isotp_pid = thread_getpid();

     while (1) {
         msg_receive(&msg);
@@ -79,8 +79,8 @@ void condition_variable::notify_all() noexcept {

 void condition_variable::wait(unique_lock<mutex>& lock) noexcept {
     priority_queue_node_t n;
-    n.priority = sched_active_thread->priority;
-    n.data = sched_active_pid;
+    n.priority = thread_get_active()->priority;
+    n.data = thread_getpid();
     n.next = NULL;
     // the signaling thread may not hold the mutex, the queue is not thread safe
     unsigned old_state = irq_disable();
@@ -104,7 +104,7 @@ cv_status condition_variable::wait_until(unique_lock<mutex>& lock,
     timex_t before;
     xtimer_now_timex(&before);
     auto diff = timex_sub(timeout_time.native_handle(), before);
-    xtimer_set_wakeup(&timer, timex_uint64(diff), sched_active_pid);
+    xtimer_set_wakeup(&timer, timex_uint64(diff), thread_getpid());
     wait(lock);
     timex_t after;
     xtimer_now_timex(&after);
@@ -178,7 +178,7 @@ cv_status condition_variable::wait_for(unique_lock<mutex>& lock,
         = (duration_cast<microseconds>(timeout_duration - s)).count();
     xtimer_now_timex(&before);
     xtimer_t timer;
-    xtimer_set_wakeup(&timer, timex_uint64(timeout), sched_active_pid);
+    xtimer_set_wakeup(&timer, timex_uint64(timeout), thread_getpid());
     wait(lock);
     xtimer_now_timex(&after);
     xtimer_remove(&timer);
@@ -43,7 +43,7 @@ void thread::join() {
     if (joinable()) {
         auto status = thread_getstatus(m_handle);
         if (status != STATUS_NOT_FOUND && status != STATUS_STOPPED) {
-            m_data->joining_thread = sched_active_pid;
+            m_data->joining_thread = thread_getpid();
             thread_sleep();
         }
         m_handle = thread_uninitialized;
@@ -102,6 +102,7 @@
 #include "assert.h"
 #include "clist.h"
 #include "irq.h"
+#include "thread.h"
 #include "thread_flags.h"

 #ifdef __cplusplus
@@ -118,7 +119,7 @@ extern "C" {
 /**
  * @brief   event_queue_t static initializer
  */
-#define EVENT_QUEUE_INIT { .waiter = (thread_t *)sched_active_thread }
+#define EVENT_QUEUE_INIT { .waiter = thread_get_active() }

 /**
  * @brief   static initializer for detached event queues
@@ -164,7 +165,7 @@ static inline void event_queues_init(event_queue_t *queues,
                                      size_t n_queues)
 {
     assert(queues && n_queues);
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
     for (size_t i = 0; i < n_queues; i++) {
         memset(&queues[i], '\0', sizeof(queues[0]));
         queues[i].waiter = me;
@@ -222,7 +223,7 @@ static inline void event_queue_init_detached(event_queue_t *queue)
 static inline void event_queues_claim(event_queue_t *queues, size_t n_queues)
 {
     assert(queues);
-    thread_t *me = (thread_t *)sched_active_thread;
+    thread_t *me = thread_get_active();
     for (size_t i = 0; i < n_queues; i++) {
         assert(queues[i].waiter == NULL);
         queues[i].waiter = me;
@@ -69,7 +69,7 @@
  * msg_init_queue(_msg_q, Q_SZ);
  * gnrc_netreg_entry me_reg = GNRC_NETREG_ENTRY_INIT_PID(
  *                                GNRC_NETREG_DEMUX_CTX_ALL,
- *                                sched_active_pid);
+ *                                thread_getpid());
  * gnrc_netreg_register(GNRC_NETTYPE_IPV6, &me_reg);
  * while (1) {
  *     msg_receive(&msg);
@@ -78,7 +78,7 @@ static inline int gnrc_neterr_reg(gnrc_pktsnip_t *pkt)
     if (pkt->err_sub != KERNEL_PID_UNDEF) {
         return EALREADY;
     }
-    pkt->err_sub = sched_active_pid;
+    pkt->err_sub = thread_getpid();
     return 0;
 }
 #else
@@ -53,14 +53,14 @@ static char _rd_regif[CONFIG_NANOCOAP_URI_MAX];
 static sock_udp_ep_t _rd_remote;

 static mutex_t _mutex = MUTEX_INIT;
-static volatile thread_t *_waiter;
+static thread_t *_waiter;

 static uint8_t buf[BUFSIZE];

 static void _lock(void)
 {
     mutex_lock(&_mutex);
-    _waiter = sched_active_thread;
+    _waiter = thread_get_active();
 }

 static int _sync(void)
@@ -103,7 +103,7 @@ static void _on_register(const gcoap_request_memo_t *memo, coap_pkt_t* pdu,
         flag = FLAG_TIMEOUT;
     }

-    thread_flags_set((thread_t *)_waiter, flag);
+    thread_flags_set(_waiter, flag);
 }

 static void _on_update_remove(unsigned req_state, coap_pkt_t *pdu, uint8_t code)
@@ -117,7 +117,7 @@ static void _on_update_remove(unsigned req_state, coap_pkt_t *pdu, uint8_t code)
         flag = FLAG_TIMEOUT;
     }

-    thread_flags_set((thread_t *)_waiter, flag);
+    thread_flags_set(_waiter, flag);
 }

 static void _on_update(const gcoap_request_memo_t *memo, coap_pkt_t *pdu,
@@ -202,7 +202,7 @@ static void _on_discover(const gcoap_request_memo_t *memo, coap_pkt_t *pdu,
     }

 end:
-    thread_flags_set((thread_t *)_waiter, flag);
+    thread_flags_set(_waiter, flag);
 }

 static int _discover_internal(const sock_udp_ep_t *remote,
@@ -77,12 +77,12 @@ static size_t _result_buf_len;
 static uint8_t reqbuf[CONFIG_GCOAP_PDU_BUF_SIZE] = {0};

 static mutex_t _mutex = MUTEX_INIT;
-static volatile thread_t *_waiter;
+static thread_t *_waiter;

 static void _lock(void)
 {
     mutex_lock(&_mutex);
-    _waiter = sched_active_thread;
+    _waiter = thread_get_active();
 }

 static int _sync(void)
@@ -113,15 +113,15 @@ static void _on_lookup(const gcoap_request_memo_t *memo, coap_pkt_t *pdu,
         unsigned ct = coap_get_content_type(pdu);
         if (ct != COAP_FORMAT_LINK) {
             DEBUG("cord_lc: unsupported content format: %u\n", ct);
-            thread_flags_set((thread_t *)_waiter, flag);
+            thread_flags_set(_waiter, flag);
         }
         if (pdu->payload_len == 0) {
             flag = FLAG_NORSC;
-            thread_flags_set((thread_t *)_waiter, flag);
+            thread_flags_set(_waiter, flag);
         }
         if (pdu->payload_len >= _result_buf_len) {
             flag = FLAG_OVERFLOW;
-            thread_flags_set((thread_t *)_waiter, flag);
+            thread_flags_set(_waiter, flag);
         }
         memcpy(_result_buf, pdu->payload, pdu->payload_len);
         memset(_result_buf + pdu->payload_len, 0,
@@ -132,7 +132,7 @@ static void _on_lookup(const gcoap_request_memo_t *memo, coap_pkt_t *pdu,
         flag = FLAG_TIMEOUT;
     }

-    thread_flags_set((thread_t *)_waiter, flag);
+    thread_flags_set(_waiter, flag);
 }

 static ssize_t _add_filters_to_lookup(coap_pkt_t *pkt, cord_lc_filter_t *filters)
@@ -228,7 +228,7 @@ end:
         _result_buf = NULL;
         _result_buf_len = 0;
     }
-    thread_flags_set((thread_t *)_waiter, flag);
+    thread_flags_set(_waiter, flag);
 }

 static int _send_rd_init_req(coap_pkt_t *pkt, const sock_udp_ep_t *remote,
@@ -90,14 +90,14 @@ static size_t get_len(uint8_t *buf, uint16_t *len)

 static void time_evt(void *arg)
 {
-    thread_flags_set((thread_t *)arg, TFLAGS_TIMEOUT);
+    thread_flags_set(arg, TFLAGS_TIMEOUT);
 }

 static int syncsend(uint8_t resp, size_t len, bool unlock)
 {
     int res = EMCUTE_TIMEOUT;
     waiton = resp;
-    timer.arg = (void *)sched_active_thread;
+    timer.arg = thread_get_active();
     /* clear flags, in case the timer was triggered last time right before the
      * remove was called */
     thread_flags_clear(TFLAGS_ANY);
@@ -129,7 +129,7 @@ static void on_disconnect(void)
     if (waiton == DISCONNECT) {
         gateway.port = 0;
         result = EMCUTE_OK;
-        thread_flags_set((thread_t *)timer.arg, TFLAGS_RESP);
+        thread_flags_set(timer.arg, TFLAGS_RESP);
     }
 }

@@ -146,7 +146,7 @@ static void on_ack(uint8_t type, int id_pos, int ret_pos, int res_pos)
         } else {
             result = EMCUTE_REJECT;
         }
-        thread_flags_set((thread_t *)timer.arg, TFLAGS_RESP);
+        thread_flags_set(timer.arg, TFLAGS_RESP);
     }
 }

@@ -1468,11 +1468,11 @@ static void *_gnrc_netif_thread(void *args)
     msg_t reply = { .type = GNRC_NETAPI_MSG_TYPE_ACK };
     msg_t msg_queue[GNRC_NETIF_MSG_QUEUE_SIZE];

-    DEBUG("gnrc_netif: starting thread %i\n", sched_active_pid);
+    DEBUG("gnrc_netif: starting thread %i\n", thread_getpid());
     netif = args;
     gnrc_netif_acquire(netif);
     dev = netif->dev;
-    netif->pid = sched_active_pid;
+    netif->pid = thread_getpid();

 #if IS_USED(MODULE_GNRC_NETIF_EVENTS)
     netif->event_isr.handler = _event_handler_isr,
@@ -42,9 +42,9 @@ int gnrc_netreg_register(gnrc_nettype_t type, gnrc_netreg_entry_t *entry)
 #if DEVELHELP
 # if defined(MODULE_GNRC_NETAPI_MBOX) || defined(MODULE_GNRC_NETAPI_CALLBACKS)
     bool has_msg_q = (entry->type != GNRC_NETREG_TYPE_DEFAULT) ||
-                     thread_has_msg_queue(sched_threads[entry->target.pid]);
+                     thread_has_msg_queue(thread_get(entry->target.pid));
 # else
-    bool has_msg_q = thread_has_msg_queue(sched_threads[entry->target.pid]);
+    bool has_msg_q = thread_has_msg_queue(thread_get(entry->target.pid));
 # endif

     /* only threads with a message queue are allowed to register at gnrc */
@@ -425,7 +425,7 @@ gnrc_pktsnip_t *gnrc_ipv6_ext_frag_reass(gnrc_pktsnip_t *pkt)
     }
     rbuf->arrival = xtimer_now_usec();
     xtimer_set_msg(&_gc_xtimer, CONFIG_GNRC_IPV6_EXT_FRAG_RBUF_TIMEOUT_US, &_gc_msg,
-                   sched_active_pid);
+                   thread_getpid());
     nh = fh->nh;
     offset = ipv6_ext_frag_get_offset(fh);
     switch (_overlaps(rbuf, offset, pkt->size)) {
@@ -175,7 +175,7 @@ static void *_event_loop(void *args)
 {
     msg_t msg, reply, msg_q[GNRC_IPV6_MSG_QUEUE_SIZE];
     gnrc_netreg_entry_t me_reg = GNRC_NETREG_ENTRY_INIT_PID(GNRC_NETREG_DEMUX_CTX_ALL,
-                                                            sched_active_pid);
+                                                            thread_getpid());

     (void)args;
     msg_init_queue(msg_q, GNRC_IPV6_MSG_QUEUE_SIZE);
@@ -408,7 +408,7 @@ void gnrc_sixlowpan_frag_rb_gc(void)
 static inline void _set_rbuf_timeout(void)
 {
     xtimer_set_msg(&_gc_timer, CONFIG_GNRC_SIXLOWPAN_FRAG_RBUF_TIMEOUT_US,
-                   &_gc_timer_msg, sched_active_pid);
+                   &_gc_timer_msg, thread_getpid());
 }

 static int _rbuf_get(const void *src, size_t src_len,
@@ -310,7 +310,7 @@ static void *_event_loop(void *args)
 {
     msg_t msg, reply, msg_q[GNRC_SIXLOWPAN_MSG_QUEUE_SIZE];
     gnrc_netreg_entry_t me_reg = GNRC_NETREG_ENTRY_INIT_PID(GNRC_NETREG_DEMUX_CTX_ALL,
-                                                            sched_active_pid);
+                                                            thread_getpid());

     (void)args;
     msg_init_queue(msg_q, GNRC_SIXLOWPAN_MSG_QUEUE_SIZE);
@@ -258,7 +258,7 @@ ssize_t gnrc_sock_send(gnrc_pktsnip_t *payload, sock_ip_ep_t *local,
         while (err_report.type != GNRC_NETERR_MSG_TYPE) {
             msg_try_receive(&err_report);
             if (err_report.type != GNRC_NETERR_MSG_TYPE) {
-                msg_try_send(&err_report, sched_active_pid);
+                msg_try_send(&err_report, thread_getpid());
             }
         }
         if (err_report.content.value != last_status) {
@@ -270,7 +270,7 @@ ssize_t gnrc_sock_send(gnrc_pktsnip_t *payload, sock_ip_ep_t *local,
         while (err_report.type != GNRC_NETERR_MSG_TYPE) {
             msg_try_receive(&err_report);
             if (err_report.type != GNRC_NETERR_MSG_TYPE) {
-                msg_try_send(&err_report, sched_active_pid);
+                msg_try_send(&err_report, thread_getpid());
             }
         }
     }
@@ -226,7 +226,7 @@ static void *_event_loop(void *arg)
     msg_t msg, reply;
     msg_t msg_queue[GNRC_UDP_MSG_QUEUE_SIZE];
     gnrc_netreg_entry_t netreg = GNRC_NETREG_ENTRY_INIT_PID(GNRC_NETREG_DEMUX_CTX_ALL,
-                                                            sched_active_pid);
+                                                            thread_getpid());
     /* preset reply message */
     reply.type = GNRC_NETAPI_MSG_TYPE_ACK;
     reply.content.value = (uint32_t)-ENOTSUP;
@@ -641,7 +641,7 @@ int fib_register_rp(fib_table_t *table, uint8_t *prefix, size_t prefix_addr_type
     }

     if (table->notify_rp_pos < FIB_MAX_REGISTERED_RP) {
-        table->notify_rp[table->notify_rp_pos] = sched_active_pid;
+        table->notify_rp[table->notify_rp_pos] = thread_getpid();
         universal_address_container_t *container = universal_address_add(prefix,
                                                        prefix_addr_type_size);
         table->prefix_rp[table->notify_rp_pos] = container;
@@ -243,7 +243,7 @@ __attribute__((weak)) void heap_stats(void)
  */
 pid_t _getpid(void)
 {
-    return sched_active_pid;
+    return thread_getpid();
 }

 /**
@@ -254,7 +254,7 @@ pid_t _getpid(void)
 pid_t _getpid_r(struct _reent *ptr)
 {
     (void) ptr;
-    return sched_active_pid;
+    return thread_getpid();
 }

 /**
@@ -69,9 +69,9 @@ static ssize_t pipe_rw(ringbuffer_t *rb,
         return 0;
     }
     else {
-        *this_op_blocked = (thread_t *) sched_active_thread;
+        *this_op_blocked = thread_get_active();

-        sched_set_status((thread_t *) sched_active_thread, STATUS_SLEEPING);
+        sched_set_status(thread_get_active(), STATUS_SLEEPING);
         irq_restore(old_state);
         thread_yield_higher();
     }
@@ -252,7 +252,7 @@ int pthread_join(pthread_t th, void **thread_return)

     switch (other->status) {
         case (PTS_RUNNING):
-            other->joining_thread = sched_active_pid;
+            other->joining_thread = thread_getpid();
             /* go blocked, I'm waking up if other thread exits */
             thread_sleep();
             /* falls through */
@@ -300,7 +300,7 @@ pthread_t pthread_self(void)
 {
     pthread_t result = 0;
     mutex_lock(&pthread_mutex);
-    kernel_pid_t pid = sched_active_pid; /* sched_active_pid is volatile */
+    kernel_pid_t pid = thread_getpid(); /* thread_getpid() is volatile */
     for (int i = 0; i < MAXTHREADS; i++) {
         if (pthread_sched_threads[i] && pthread_sched_threads[i]->thread_pid == pid) {
             result = i+1;
@@ -56,7 +56,7 @@ int pthread_barrier_wait(pthread_barrier_t *barrier)

     mutex_lock(&barrier->mutex);
     DEBUG("%s: hit a synchronization barrier. pid=%" PRIkernel_pid"\n",
-          thread_getname(sched_active_pid), sched_active_pid);
+          thread_getname(thread_getpid()), thread_getpid());

     int switch_prio = -1;

@@ -64,10 +64,10 @@ int pthread_barrier_wait(pthread_barrier_t *barrier)
         /* need to wait for further threads */

         DEBUG("%s: waiting for %u threads. pid=%" PRIkernel_pid "\n",
-              thread_getname(sched_active_pid), barrier->count, sched_active_pid);
+              thread_getname(thread_getpid()), barrier->count, thread_getpid());

         pthread_barrier_waiting_node_t node;
-        node.pid = sched_active_pid;
+        node.pid = thread_getpid();
         node.next = barrier->next;
         node.cont = 0;

@@ -90,7 +90,7 @@ int pthread_barrier_wait(pthread_barrier_t *barrier)
         /* all threads have arrived, wake everybody up */

         DEBUG("%s: waking every other thread up. pid=%" PRIkernel_pid "\n",
-              thread_getname(sched_active_pid), sched_active_pid);
+              thread_getname(thread_getpid()), thread_getpid());

         int count = 1; /* Count number of woken up threads.
                         * The first thread is the current thread. */
@@ -99,7 +99,7 @@ int pthread_barrier_wait(pthread_barrier_t *barrier)
             ++count;
             next->cont = 1;

-            thread_t *other = (thread_t *) sched_threads[next->pid];
+            thread_t *other = thread_get(next->pid);
             switch_prio = priority_min(switch_prio, other->priority);
             sched_set_status(other, STATUS_PENDING);
         }
@@ -95,8 +95,8 @@ int pthread_cond_destroy(pthread_cond_t *cond)

 void _init_cond_wait(pthread_cond_t *cond, priority_queue_node_t *n)
 {
-    n->priority = sched_active_thread->priority;
-    n->data = sched_active_pid;
+    n->priority = thread_get_active()->priority;
+    n->data = thread_getpid();
     n->next = NULL;

     /* the signaling thread may not hold the mutex, the queue is not thread safe */
@@ -136,7 +136,7 @@ int pthread_cond_timedwait(pthread_cond_t *cond, mutex_t *mutex, const struct ti
     priority_queue_node_t n;

     _init_cond_wait(cond, &n);
-    xtimer_set_wakeup64(&timer, (then - now), sched_active_pid);
+    xtimer_set_wakeup64(&timer, (then - now), thread_getpid());

     mutex_unlock_and_sleep(mutex);

@@ -165,7 +165,7 @@ int pthread_cond_signal(pthread_cond_t *cond)
     priority_queue_node_t *head = priority_queue_remove_head(&(cond->queue));
     int other_prio = -1;
     if (head != NULL) {
-        thread_t *other_thread = (thread_t *) sched_threads[head->data];
+        thread_t *other_thread = thread_get(head->data);
         if (other_thread) {
             other_prio = other_thread->priority;
             sched_set_status(other_thread, STATUS_PENDING);
@@ -199,7 +199,7 @@ int pthread_cond_broadcast(pthread_cond_t *cond)
             break;
         }

-        thread_t *other_thread = (thread_t *) sched_threads[head->data];
+        thread_t *other_thread = thread_get(head->data);
         if (other_thread) {
             other_prio = max_prio(other_prio, other_thread->priority);
             sched_set_status(other_thread, STATUS_PENDING);
@@ -83,7 +83,7 @@ bool __pthread_rwlock_blocked_readingly(const pthread_rwlock_t *rwlock)
     }

     priority_queue_node_t *qnode = rwlock->queue.first;
-    if (qnode->priority > sched_active_thread->priority) {
+    if (qnode->priority > thread_get_active()->priority) {
         /* the waiting thread has a lower priority */
         return false;
     }
@@ -124,11 +124,11 @@ static int pthread_rwlock_lock(pthread_rwlock_t *rwlock,
     /* queue for the lock */
     __pthread_rwlock_waiter_node_t waiting_node = {
         .is_writer = is_writer,
-        .thread = (thread_t *) sched_active_thread,
+        .thread = thread_get_active(),
         .qnode = {
             .next = NULL,
             .data = (uintptr_t) &waiting_node,
-            .priority = sched_active_thread->priority,
+            .priority = thread_get_active()->priority,
         },
         .continue_ = false,
     };
@@ -196,7 +196,7 @@ static int pthread_rwlock_timedlock(pthread_rwlock_t *rwlock,
     }
     else {
         xtimer_t timer;
-        xtimer_set_wakeup64(&timer, (then - now), sched_active_pid);
+        xtimer_set_wakeup64(&timer, (then - now), thread_getpid());
         int result = pthread_rwlock_lock(rwlock, is_blocked, is_writer, incr_when_held, true);
         if (result != ETIMEDOUT) {
             xtimer_remove(&timer);
@@ -1197,7 +1197,7 @@ int posix_socket_select(int fd)
             return res;
         }
     }
-    socket->selecting_thread = (thread_t *)sched_active_thread;
+    socket->selecting_thread = thread_get_active();
     return 0;
 }
 #else
@@ -115,7 +115,7 @@ void ps(void)
 #ifdef MODULE_SCHEDSTATISTICS
     uint64_t rt_sum = 0;
     for (kernel_pid_t i = KERNEL_PID_FIRST; i <= KERNEL_PID_LAST; i++) {
-        thread_t *p = (thread_t *)sched_threads[i];
+        thread_t *p = thread_get(i);
         if (p != NULL) {
             rt_sum += sched_pidlist[i].runtime_ticks;
         }
@@ -123,7 +123,7 @@ void ps(void)
 #endif /* MODULE_SCHEDSTATISTICS */

     for (kernel_pid_t i = KERNEL_PID_FIRST; i <= KERNEL_PID_LAST; i++) {
-        thread_t *p = (thread_t *)sched_threads[i];
+        thread_t *p = thread_get(i);

         if (p != NULL) {
             thread_status_t state = p->status; /* copy state */
@@ -22,8 +22,9 @@
 */

 #include "sched.h"
-#include "xtimer.h"
 #include "schedstatistics.h"
+#include "thread.h"
+#include "xtimer.h"

 schedstat_t sched_pidlist[KERNEL_PID_LAST + 1];

@@ -49,7 +50,7 @@ void init_schedstatistics(void)
 {
     /* Init laststart for the thread starting schedstatistics since the callback
        wasn't registered when it was first scheduled */
-    schedstat_t *active_stat = &sched_pidlist[sched_active_pid];
+    schedstat_t *active_stat = &sched_pidlist[thread_getpid()];
     active_stat->laststart = xtimer_now().ticks32;
     active_stat->schedules = 1;
     sched_register_cb(sched_statistics_cb);
@@ -88,7 +88,7 @@ int _gnrc_icmpv6_ping(int argc, char **argv)
 {
     _ping_data_t data = {
         .netreg = GNRC_NETREG_ENTRY_INIT_PID(ICMPV6_ECHO_REP,
-                                             sched_active_pid),
+                                             thread_getpid()),
         .count = DEFAULT_COUNT,
         .tmin = UINT_MAX,
         .datalen = DEFAULT_DATALEN,
@@ -121,7 +121,7 @@ int _gnrc_icmpv6_ping(int argc, char **argv)
                 goto finish;
             default:
                 /* requeue wrong packets */
-                msg_send(&msg, sched_active_pid);
+                msg_send(&msg, thread_getpid());
                 break;
         }
     } while (data.num_recv < data.count);
@@ -130,7 +130,7 @@ finish:
     res = _finish(&data);
     gnrc_netreg_unregister(GNRC_NETTYPE_ICMPV6, &data.netreg);
     for (unsigned i = 0;
-         i < cib_avail((cib_t *)&sched_active_thread->msg_queue);
+         i < cib_avail(&thread_get_active()->msg_queue);
         i++) {
        msg_t msg;

@@ -142,7 +142,7 @@ finish:
         }
         else {
             /* requeue other packets */
-            msg_send(&msg, sched_active_pid);
+            msg_send(&msg, thread_getpid());
         }
     }
     return res;
@@ -293,7 +293,7 @@ static void _pinger(_ping_data_t *data)
         }
     }
     xtimer_set_msg(&data->sched_timer, timer, &data->sched_msg,
-                   sched_active_pid);
+                   thread_getpid());
     bf_unset(data->cktab, (size_t)data->num_sent % CKTAB_SIZE);
     pkt = gnrc_icmpv6_echo_build(ICMPV6_ECHO_REQ, data->id,
                                  (uint16_t)data->num_sent++,
@@ -181,8 +181,7 @@ static void _set_ep_event(usbus_t *usbus, usbdev_ep_t *ep)
         irq_restore(state);
     }

-    thread_flags_set((thread_t *)thread_get(usbus->pid),
-                     USBUS_THREAD_FLAG_USBDEV_EP);
+    thread_flags_set(thread_get(usbus->pid), USBUS_THREAD_FLAG_USBDEV_EP);
 }

 static uint32_t _get_and_reset_ep_events(usbus_t *usbus)
@@ -226,11 +225,11 @@ static void *_usbus_thread(void *args)
     usbus_control_init(usbus, &ep0_handler);

     usbdev_t *dev = usbus->dev;
-    usbus->pid = sched_active_pid;
+    usbus->pid = thread_getpid();
     usbus->addr = 0;
     usbus->iface = NULL;
     usbus->str_idx = 1;
-    DEBUG("usbus: starting thread %i\n", sched_active_pid);
+    DEBUG("usbus: starting thread %i\n", thread_getpid());
     /* setup the link-layer's message queue */
     /* register the event callback with the device driver */
     dev->cb = _event_cb;
@@ -297,8 +296,7 @@ static void _event_cb(usbdev_t *usbdev, usbdev_event_t event)
     usbus_t *usbus = (usbus_t *)usbdev->context;

     if (event == USBDEV_EVENT_ESR) {
-        thread_flags_set((thread_t *)thread_get(usbus->pid),
-                         USBUS_THREAD_FLAG_USBDEV);
+        thread_flags_set(thread_get(usbus->pid), USBUS_THREAD_FLAG_USBDEV);
     }
     else {
         usbus_event_usb_t msg;
@@ -166,7 +166,7 @@ int _xtimer_msg_receive_timeout64(msg_t *m, uint64_t timeout_ticks) {
     msg_t tmsg;
     xtimer_t t;
     _setup_timer_msg(&tmsg, &t);
-    _xtimer_set_msg64(&t, timeout_ticks, &tmsg, sched_active_pid);
+    _xtimer_set_msg64(&t, timeout_ticks, &tmsg, thread_getpid());
     return _msg_wait(m, &tmsg, &t);
 }

@@ -175,7 +175,7 @@ int _xtimer_msg_receive_timeout(msg_t *msg, uint32_t timeout_ticks)
     msg_t tmsg;
     xtimer_t t;
     _setup_timer_msg(&tmsg, &t);
-    _xtimer_set_msg(&t, timeout_ticks, &tmsg, sched_active_pid);
+    _xtimer_set_msg(&t, timeout_ticks, &tmsg, thread_getpid());
     return _msg_wait(msg, &tmsg, &t);
 }
 #endif /* MODULE_CORE_MSG */
@@ -247,7 +247,7 @@ static void _mutex_timeout(void *arg)
      */
     unsigned int irqstate = irq_disable();

-    mutex_thread_t *mt = (mutex_thread_t *)arg;
+    mutex_thread_t *mt = arg;
     mt->blocking = 0;
     _mutex_remove_thread_from_waiting_queue(mt->mutex, mt->thread, &mt->dequeued);
     irq_restore(irqstate);
@@ -256,11 +256,13 @@ static void _mutex_timeout(void *arg)
 int xtimer_mutex_lock_timeout(mutex_t *mutex, uint64_t timeout)
 {
     xtimer_t t;
-    mutex_thread_t mt = { mutex, (thread_t *)sched_active_thread, .dequeued=0, .blocking=1 };
+    mutex_thread_t mt = {
+        mutex, thread_get_active(), .dequeued = 0, .blocking = 1
+    };

     if (timeout != 0) {
         t.callback = _mutex_timeout;
-        t.arg = (void *)((mutex_thread_t *)&mt);
+        t.arg = &mt;
         xtimer_set64(&t, timeout);
     }
     int ret = _mutex_lock(mutex, &mt.blocking);
@@ -294,7 +296,7 @@ static void _set_timeout_flag_callback(void* arg)
 static void _set_timeout_flag_prepare(xtimer_t *t)
 {
     t->callback = _set_timeout_flag_callback;
-    t->arg = (thread_t *)sched_active_thread;
+    t->arg = thread_get_active();
     thread_flags_clear(THREAD_FLAG_TIMEOUT);
 }

@@ -109,7 +109,7 @@ int ztimer_msg_receive_timeout(ztimer_clock_t *clock, msg_t *msg,
     ztimer_t t;
     msg_t m = { .type = MSG_ZTIMER, .content.ptr = &m };

-    ztimer_set_msg(clock, &t, timeout, &m, sched_active_pid);
+    ztimer_set_msg(clock, &t, timeout, &m, thread_getpid());

     msg_receive(msg);
     ztimer_remove(clock, &t);
@@ -134,7 +134,7 @@ void ztimer_set_timeout_flag(ztimer_clock_t *clock, ztimer_t *t,
                              uint32_t timeout)
 {
     t->callback = _set_timeout_flag_callback;
-    t->arg = (thread_t *)sched_active_thread;
+    t->arg = thread_get_active();
     thread_flags_clear(THREAD_FLAG_TIMEOUT);
     ztimer_set(clock, t, timeout);
 }