/*
 * Copyright (C) 2019 Gunar Schorcht
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 *
 * FreeRTOS to RIOT-OS adaption module for source code compatibility
 */

#ifndef DOXYGEN

#define ENABLE_DEBUG 0
#include "debug.h"

#include <assert.h>
#include <string.h>

#include "esp_common.h"
#include "esp_attr.h"
#include "log.h"
#include "syscalls.h"
#include "thread.h"

#if defined(MODULE_ESP_WIFI_ANY) || defined(MODULE_ESP_ETH)
#include "ztimer.h"
#endif

#include "timex.h"

#ifdef MCU_ESP32
#include "soc/soc.h"
#endif

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

#define MHZ 1000000

#ifdef MCU_ESP8266
#include "rom/ets_sys.h"

#define PRO_CPU_NUM (0)
#endif

/**
 * @brief   Architecture specific data of thread control blocks
 */
typedef struct {
    uint32_t saved_int_state;
    uint32_t critical_nesting;
    uint32_t notification_value;
    bool notification_wait_for;
} thread_arch_ext_t;

volatile thread_arch_ext_t threads_arch_exts[KERNEL_PID_LAST + 1] = {};
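
/* There is one extension record per possible RIOT PID. The notification
 * state stored here is what vTaskNotifyGiveFromISR() and ulTaskNotifyTake()
 * below operate on, since RIOT threads have no native FreeRTOS task
 * notification value of their own. */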

BaseType_t xTaskCreatePinnedToCore(TaskFunction_t pvTaskCode,
                                   const char * const pcName,
                                   const uint32_t usStackDepth,
                                   void * const pvParameters,
                                   UBaseType_t uxPriority,
                                   TaskHandle_t * const pvCreatedTask,
                                   const BaseType_t xCoreID)
{
    assert(xCoreID == 0 || xCoreID == tskNO_AFFINITY);

    /* FreeRTOS priority values have to be inverted */
    uxPriority = SCHED_PRIO_LEVELS - uxPriority - 1;

    DEBUG("%s name=%s size=%d prio=%d pvCreatedTask=%p xCoreId=%d ",
          __func__, pcName, usStackDepth, uxPriority, pvCreatedTask, xCoreID);

    char *stack = malloc(usStackDepth + sizeof(thread_t));
    if (!stack) {
        LOG_TAG_ERROR("freertos", "not enough memory to create task %s with "
                      "stack size of %d bytes\n", pcName, usStackDepth);
        abort();
        return pdFALSE;
    }

    kernel_pid_t pid = thread_create(stack,
                                     usStackDepth + sizeof(thread_t),
                                     uxPriority,
                                     THREAD_CREATE_WOUT_YIELD |
                                     THREAD_CREATE_STACKTEST,
                                     (void *)pvTaskCode,
                                     pvParameters, pcName);
    DEBUG("pid=%d\n", pid);

    if (pvCreatedTask) {
        *pvCreatedTask = (TaskHandle_t)(0L + pid);
    }

    return (pid < 0) ? pdFALSE : pdTRUE;
}
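
/*
 * Usage sketch (illustrative only, not part of this module): `example_task`
 * and its handle are hypothetical names. Creating a task through this shim
 * yields a plain RIOT thread whose handle encodes the RIOT PID:
 *
 *     void example_task(void *arg)
 *     {
 *         for (;;) { vTaskDelay(100); }
 *     }
 *
 *     TaskHandle_t handle;
 *     xTaskCreatePinnedToCore(example_task, "example", 2048, NULL, 5,
 *                             &handle, PRO_CPU_NUM);
 *
 * Note the priority inversion above: assuming SCHED_PRIO_LEVELS == 16, the
 * FreeRTOS priority 5 maps to RIOT priority 16 - 5 - 1 = 10, since RIOT
 * treats lower numbers as higher priority while FreeRTOS does the opposite.
 */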

BaseType_t xTaskCreate(TaskFunction_t pvTaskCode,
                       const char * const pcName,
                       const uint32_t usStackDepth,
                       void * const pvParameters,
                       UBaseType_t uxPriority,
                       TaskHandle_t * const pvCreatedTask)
{
    return xTaskCreatePinnedToCore(pvTaskCode,
                                   pcName,
                                   usStackDepth,
                                   pvParameters,
                                   uxPriority,
                                   pvCreatedTask,
                                   PRO_CPU_NUM);
}

void vTaskDelete(TaskHandle_t xTaskToDelete)
{
    extern volatile thread_t *sched_active_thread;

    DEBUG("%s pid=%d task=%p\n", __func__, thread_getpid(), xTaskToDelete);

    assert(xTaskToDelete != NULL);

    uint32_t pid = (uint32_t)xTaskToDelete;

    /* remove old task from scheduling */
    thread_t *thread = (thread_t *)sched_threads[pid];
    sched_set_status(thread, STATUS_STOPPED);
    sched_threads[pid] = NULL;
    sched_num_threads--;
    sched_active_thread = NULL;

    /* determine the new running task */
    sched_run();
}

void vTaskSuspend(TaskHandle_t xTaskToSuspend)
{
    extern volatile thread_t *sched_active_thread;

    DEBUG("%s pid=%d task=%p\n", __func__, thread_getpid(), xTaskToSuspend);

    uint32_t pid = (xTaskToSuspend == NULL) ? (uint32_t)thread_getpid()
                                            : (uint32_t)xTaskToSuspend;
    assert(pid <= KERNEL_PID_LAST);

    /* set status to sleeping to suspend it */
    thread_t *thread = (thread_t *)sched_threads[pid];
    sched_set_status(thread, STATUS_SLEEPING);

    /* trigger rescheduling */
    sched_active_thread = NULL;

    /* determine the new running task */
    sched_run();
}

void vTaskResume(TaskHandle_t xTaskToResume)
{
    DEBUG("%s pid=%d task=%p\n", __func__, thread_getpid(), xTaskToResume);

    uint32_t pid = (uint32_t)xTaskToResume;
    assert(pid <= KERNEL_PID_LAST);
    thread_wakeup(pid);
}
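
/*
 * Note: vTaskSuspend() maps onto RIOT's STATUS_SLEEPING and vTaskResume()
 * onto thread_wakeup(). A task suspended with vTaskSuspend(NULL) therefore
 * sleeps until some other thread resumes it.
 */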

TaskHandle_t xTaskGetCurrentTaskHandle(void)
{
    DEBUG("%s pid=%d\n", __func__, thread_getpid());

    uint32_t pid = thread_getpid();
    return (TaskHandle_t)pid;
}
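
/*
 * Task handles in this adaption layer are not pointers to a TCB; they are
 * simply the RIOT PID stored in a pointer-sized value. Converting back is a
 * cast (illustrative sketch, `handle` being any handle obtained above):
 *
 *     kernel_pid_t pid = (kernel_pid_t)(uintptr_t)handle;
 */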

void vTaskDelay( const TickType_t xTicksToDelay )
{
    DEBUG("%s xTicksToDelay=%d\n", __func__, xTicksToDelay);

#if defined(MODULE_ESP_WIFI_ANY) || defined(MODULE_ESP_ETH)
    uint64_t ms = xTicksToDelay * MS_PER_SEC / xPortGetTickRateHz();
    ztimer_sleep(ZTIMER_MSEC, ms);
#endif
}

TickType_t xTaskGetTickCount(void)
{
    return system_get_time() / US_PER_MS / portTICK_PERIOD_MS;
}
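
/*
 * Worked example, assuming portTICK_PERIOD_MS == 10: a system time of
 * 1500000 us is 1500000 / US_PER_MS = 1500 ms, which yields
 * 1500 / 10 = 150 ticks.
 */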

void vTaskEnterCritical( portMUX_TYPE *mux )
{
#ifdef MCU_ESP8266
    /* we have to return on NMI */
    if (NMIIrqIsOn) {
        return;
    }
#endif /* MCU_ESP8266 */

    /* disable interrupts */
    uint32_t state = irq_disable();

    /* determine calling thread pid (can't fail) */
    kernel_pid_t my_pid = thread_getpid();

    DEBUG("%s pid=%d prio=%d mux=%p\n", __func__,
          my_pid, sched_threads[my_pid]->priority, mux);

    /* acquire the mutex with interrupts disabled */
    if (mux) {
        /* Locking the given mutex does not work here, as this function can
           also be called in the interrupt context. Therefore, the given
           mutex is not used. Instead, the basic default FreeRTOS mechanism
           for critical sections is used by simply disabling interrupts.
           Since context switches for the ESPs are also based on interrupts,
           there is no possibility that another thread will enter the
           critical section once the interrupts are disabled. */
        /* mutex_lock(mux); */ /* TODO should be only a spin lock */
    }

    /* increment nesting counter and save old interrupt level */
    threads_arch_exts[my_pid].critical_nesting++;
    if (threads_arch_exts[my_pid].critical_nesting == 1) {
        threads_arch_exts[my_pid].saved_int_state = state;
    }
}

void vTaskExitCritical( portMUX_TYPE *mux )
{
#ifdef MCU_ESP8266
    /* we have to return on NMI */
    if (NMIIrqIsOn) {
        return;
    }
#endif /* MCU_ESP8266 */

    /* determine calling thread pid (can't fail) */
    kernel_pid_t my_pid = thread_getpid();

    DEBUG("%s pid=%d prio=%d mux=%p\n", __func__,
          my_pid, sched_threads[my_pid]->priority, mux);

    /* release the mutex with interrupts disabled */
    if (mux) {
        /* mutex_unlock(mux); */ /* TODO should be only a spin lock */
    }

    /* decrement nesting counter and restore old interrupt level */
    if (threads_arch_exts[my_pid].critical_nesting) {
        threads_arch_exts[my_pid].critical_nesting--;
        if (threads_arch_exts[my_pid].critical_nesting == 0) {
            irq_restore(threads_arch_exts[my_pid].saved_int_state);
        }
    }
}
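
/*
 * Because the nesting counter is kept per thread, critical sections may
 * nest; interrupts are only restored when the outermost section is left.
 * Illustrative sketch:
 *
 *     vTaskEnterCritical(0);     // nesting 0 -> 1, saves IRQ state
 *     vTaskEnterCritical(0);     // nesting 1 -> 2
 *     vTaskExitCritical(0);      // nesting 2 -> 1, IRQs stay disabled
 *     vTaskExitCritical(0);      // nesting 1 -> 0, IRQ state restored
 */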

void vTaskStepTick(const TickType_t xTicksToJump)
{
    DEBUG("%s xTicksToJump=%d\n", __func__, xTicksToJump);

    /*
     * TODO:
     * At the moment, only the calling task is set to sleep state. Usually,
     * the complete system should sleep, not only the task.
     */
    vTaskDelay(xTicksToJump);
}

TickType_t prvGetExpectedIdleTime(void)
{
    DEBUG("%s\n", __func__);

    /*
     * TODO:
     * Since we are not able to estimate the time the system will be idle,
     * we simply return 0.
     */
    return 0;
}

BaseType_t xTaskNotifyGive(TaskHandle_t xTaskToNotify)
{
    DEBUG("%s pid=%d task=%p\n", __func__, thread_getpid(), xTaskToNotify);

    vTaskNotifyGiveFromISR(xTaskToNotify, NULL);
    return pdPASS;
}

void vTaskNotifyGiveFromISR(TaskHandle_t xTaskToNotify,
                            BaseType_t *pxHigherPriorityTaskWoken)
{
    DEBUG("%s pid=%d task=%p\n", __func__, thread_getpid(), xTaskToNotify);

    uint32_t pid = (uint32_t)xTaskToNotify;
    assert(pid <= KERNEL_PID_LAST);

    vTaskEnterCritical(0);

    threads_arch_exts[pid].notification_value++;

    if (threads_arch_exts[pid].notification_wait_for) {
        /* if the task is waiting for notification, set its status to
           pending */
        thread_t *thread = (thread_t *)sched_threads[pid];
        sched_set_status(thread, STATUS_PENDING);

        if (thread->priority < sched_threads[thread_getpid()]->priority) {
            /* a context switch is needed */
            if (pxHigherPriorityTaskWoken) {
                *pxHigherPriorityTaskWoken = pdTRUE;
            }
            vTaskExitCritical(0);
            /* sets only the sched_context_switch_request in ISRs */
            thread_yield_higher();
            return;
        }
    }

    vTaskExitCritical(0);
}

uint32_t ulTaskNotifyTake(BaseType_t xClearCountOnExit,
                          TickType_t xTicksToWait)
{
    DEBUG("%s pid=%d\n", __func__, thread_getpid());

    kernel_pid_t pid = thread_getpid();
    assert((pid >= 0) && (pid <= KERNEL_PID_LAST));

    vTaskEnterCritical(0);

    uint32_t prev_value = threads_arch_exts[pid].notification_value;
    if (prev_value) {
        /* notification was pending */
        threads_arch_exts[pid].notification_value--;
        vTaskExitCritical(0);
    }
    else if (xTicksToWait == 0 || irq_is_in()) {
        /* if delaying is not allowed */
        DEBUG("%s pid=%d delaying not allowed\n", __func__, thread_getpid());
        assert(0);
        /* not reached when asserts are enabled; restore the interrupt state
           otherwise, since the critical section was entered above */
        vTaskExitCritical(0);
    }
    else {
        /* suspend the calling thread to wait for notification */
        threads_arch_exts[pid].notification_wait_for = true;
        thread_t *me = thread_get_active();
        sched_set_status(me, STATUS_SLEEPING);
        DEBUG("%s pid=%d suspend calling thread\n", __func__, thread_getpid());
        vTaskExitCritical(0);
        thread_yield_higher();
        /* TODO timeout handling with xTicksToWait */
        DEBUG("%s pid=%d continue calling thread\n", __func__, thread_getpid());
    }

    threads_arch_exts[pid].notification_wait_for = false;

    if (xClearCountOnExit) {
        threads_arch_exts[pid].notification_value = 0;
    }

    return prev_value;
}
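
/*
 * Usage sketch (illustrative only): a typical pairing in which an ISR
 * notifies a worker thread. `uart_isr`, `worker` and `worker_handle` are
 * hypothetical names, not part of this module:
 *
 *     void uart_isr(void *arg)
 *     {
 *         BaseType_t hp_task_woken = pdFALSE;
 *         vTaskNotifyGiveFromISR(worker_handle, &hp_task_woken);
 *     }
 *
 *     void worker(void *arg)
 *     {
 *         for (;;) {
 *             ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
 *             // ... process data ...
 *         }
 *     }
 *
 * Note that xTicksToWait is not honored yet (see the TODO above); the take
 * blocks until a notification arrives.
 */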
#endif /* DOXYGEN */