diff --git a/sys/ps/ps.c b/sys/ps/ps.c
index 6df9ce913b..73c0277e6d 100644
--- a/sys/ps/ps.c
+++ b/sys/ps/ps.c
@@ -74,6 +74,9 @@ void ps(void)
 #ifdef MODULE_SCHEDSTATISTICS
     uint64_t rt_sum = 0;
 
+    if (!IS_ACTIVE(MODULE_CORE_IDLE_THREAD)) {
+        rt_sum = sched_pidlist[KERNEL_PID_UNDEF].runtime_ticks;
+    }
     for (kernel_pid_t i = KERNEL_PID_FIRST; i <= KERNEL_PID_LAST; i++) {
         thread_t *p = thread_get(i);
         if (p != NULL) {
diff --git a/sys/schedstatistics/schedstatistics.c b/sys/schedstatistics/schedstatistics.c
index ec4cd48158..0d23881bf1 100644
--- a/sys/schedstatistics/schedstatistics.c
+++ b/sys/schedstatistics/schedstatistics.c
@@ -26,6 +26,10 @@
 #include "thread.h"
 #include "xtimer.h"
 
+/**
+ * When core_idle_thread is not active, the KERNEL_PID_UNDEF is used to track
+ * the idle time
+ */
 schedstat_t sched_pidlist[KERNEL_PID_LAST + 1];
 
 void sched_statistics_cb(kernel_pid_t active_thread, kernel_pid_t next_thread)
@@ -33,13 +37,13 @@ void sched_statistics_cb(kernel_pid_t active_thread, kernel_pid_t next_thread)
     uint32_t now = xtimer_now().ticks32;
 
     /* Update active thread stats */
-    if (active_thread != KERNEL_PID_UNDEF) {
+    if (!IS_USED(MODULE_CORE_IDLE_THREAD) || active_thread != KERNEL_PID_UNDEF) {
         schedstat_t *active_stat = &sched_pidlist[active_thread];
         active_stat->runtime_ticks += now - active_stat->laststart;
     }
 
     /* Update next_thread stats */
-    if (next_thread != KERNEL_PID_UNDEF) {
+    if (!IS_USED(MODULE_CORE_IDLE_THREAD) || next_thread != KERNEL_PID_UNDEF) {
         schedstat_t *next_stat = &sched_pidlist[next_thread];
         next_stat->laststart = now;
         next_stat->schedules++;
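
For context, here is a minimal sketch (not part of this patch) of how the new accounting could be consumed. It assumes `sched_pidlist` and `schedstat_t` are available via `schedstatistics.h`, and that idle time lands in the `KERNEL_PID_UNDEF` slot when `core_idle_thread` is not active, as introduced above. The helper name `idle_percent` is hypothetical.

```c
/* Hypothetical consumer of the new idle accounting: computes the idle share
 * in percent from the scheduler statistics. Only meaningful when
 * core_idle_thread is NOT active, i.e. when idle time is booked on the
 * KERNEL_PID_UNDEF slot as done by this patch. */
#include <stdint.h>

#include "kernel_defines.h"   /* IS_ACTIVE() */
#include "sched.h"            /* kernel_pid_t, KERNEL_PID_* */
#include "schedstatistics.h"  /* schedstat_t, sched_pidlist (assumed exported here) */
#include "thread.h"           /* thread_get() */

static unsigned idle_percent(void)
{
    uint64_t idle = 0;
    uint64_t total = 0;

    if (!IS_ACTIVE(MODULE_CORE_IDLE_THREAD)) {
        /* without an idle thread, idle time is tracked under KERNEL_PID_UNDEF */
        idle = sched_pidlist[KERNEL_PID_UNDEF].runtime_ticks;
        total = idle;
    }

    /* add the runtime of all real threads (KERNEL_PID_FIRST excludes UNDEF) */
    for (kernel_pid_t i = KERNEL_PID_FIRST; i <= KERNEL_PID_LAST; i++) {
        if (thread_get(i) != NULL) {
            total += sched_pidlist[i].runtime_ticks;
        }
    }

    return (total != 0) ? (unsigned)((idle * 100) / total) : 0;
}
```

When `core_idle_thread` is in use, idle time is accounted under the idle thread's own PID instead, which is why the `IS_USED(MODULE_CORE_IDLE_THREAD)` guards in `sched_statistics_cb()` only relax the `KERNEL_PID_UNDEF` check in the idle-thread-less case.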