Fork of Tangara with customizations
tangara-fw/src/tasks/tasks.cpp

/*
 * Copyright 2023 jacqueline <me@jacqueline.id.au>
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */
#include "tasks.hpp"
#include <functional>
#include "esp_heap_caps.h"
#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
#include "memory_resource.hpp"
namespace tasks {
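
// Short, human-readable name for each task type, used as the FreeRTOS task
// name when the task is created.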
template <Type t>
auto Name() -> std::pmr::string;

template <>
auto Name<Type::kUi>() -> std::pmr::string {
  return "ui";
}

template <>
auto Name<Type::kAudioDecoder>() -> std::pmr::string {
  return "audio_dec";
}

template <>
auto Name<Type::kAudioConverter>() -> std::pmr::string {
  return "audio_conv";
}

template <>
auto Name<Type::kDatabase>() -> std::pmr::string {
  return "db_fg";
}

template <>
auto Name<Type::kDatabaseBackground>() -> std::pmr::string {
  return "db_bg";
}
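
// Returns the stack to use for the given task type. The smaller stacks are
// reserved statically; the very large database stacks are heap-allocated from
// PSRAM instead.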
template <Type t>
auto AllocateStack() -> cpp::span<StackType_t>;

// Decoders often require a very large amount of stack space, since they aren't
// usually written with embedded use cases in mind.
template <>
auto AllocateStack<Type::kAudioDecoder>() -> cpp::span<StackType_t> {
  constexpr std::size_t size = 24 * 1024;
  static StackType_t sStack[size];
  return {sStack, size};
}

// LVGL requires only a relatively small stack. However, it can be allocated in
// PSRAM, so we give it a bit of headroom for safety.
template <>
auto AllocateStack<Type::kUi>() -> cpp::span<StackType_t> {
  constexpr std::size_t size = 16 * 1024;
  static StackType_t sStack[size];
  return {sStack, size};
}

// PCM conversion and resampling use a very small amount of stack. They work
// entirely with PSRAM-allocated buffers, so there's no real speed gain from
// allocating the stack internally.
template <>
auto AllocateStack<Type::kAudioConverter>() -> cpp::span<StackType_t> {
  constexpr std::size_t size = 4 * 1024;
  static StackType_t sStack[size];
  return {sStack, size};
}

// Leveldb is designed for non-embedded use cases, where stack space isn't so
// much of a concern. It therefore uses an eye-wateringly large amount of stack.
template <>
auto AllocateStack<Type::kDatabase>() -> cpp::span<StackType_t> {
  std::size_t size = 256 * 1024;
  return {static_cast<StackType_t*>(heap_caps_malloc(size, MALLOC_CAP_SPIRAM)),
          size};
}

template <>
auto AllocateStack<Type::kDatabaseBackground>() -> cpp::span<StackType_t> {
  std::size_t size = 256 * 1024;
  return {static_cast<StackType_t*>(heap_caps_malloc(size, MALLOC_CAP_SPIRAM)),
          size};
}
// 2 KiB in internal ram
// 612 KiB in external ram.
/*
 * Please keep the priorities below in descending order for better readability.
 */
template <Type t>
auto Priority() -> UBaseType_t;

// Realtime audio is the entire point of this device, so give these tasks the
// highest priority.
template <>
auto Priority<Type::kAudioDecoder>() -> UBaseType_t {
  return configMAX_PRIORITIES - 1;
}

template <>
auto Priority<Type::kAudioConverter>() -> UBaseType_t {
  return configMAX_PRIORITIES - 1;
}

// After audio issues, UI jank is the kind of scheduling-induced slowness that
// the user is most likely to notice or care about, so this task sits directly
// below the audio tasks in priority.
template <>
auto Priority<Type::kUi>() -> UBaseType_t {
  return 10;
}

// Database interactions are all inherently async already, due to their
// potential for disk access. The user likely won't notice or care about a
// couple of ms of extra delay due to scheduling, so give these tasks the
// lowest priorities.
template <>
auto Priority<Type::kDatabase>() -> UBaseType_t {
  return 2;
}

template <>
auto Priority<Type::kDatabaseBackground>() -> UBaseType_t {
  return 1;
}
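
// Depth of each Worker's queue of pending WorkItems. Dispatching blocks while
// the queue is full.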
template <Type t>
auto WorkerQueueSize() -> std::size_t;

template <>
auto WorkerQueueSize<Type::kDatabase>() -> std::size_t {
  return 8;
}

template <>
auto WorkerQueueSize<Type::kDatabaseBackground>() -> std::size_t {
  return 8;
}
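
// Entry point for persistent (run-forever) tasks. The void* argument is a
// std::function to invoke; if it ever returns, we trip the assert below (or,
// with asserts compiled out, quietly delete the task).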
auto PersistentMain(void* fn) -> void {
  auto* function = reinterpret_cast<std::function<void(void)>*>(fn);
  std::invoke(*function);
  assert("persistent task quit!" == 0);
  vTaskDelete(NULL);
}
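
// Main loop for Worker tasks: block on the queue, invoke and free each
// dispatched function, and exit once a quit item is received.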
auto Worker::Main(void* instance) {
  Worker* i = reinterpret_cast<Worker*>(instance);
  while (1) {
    WorkItem item;
    if (xQueueReceive(i->queue_, &item, portMAX_DELAY)) {
      if (item.quit) {
        break;
      } else if (item.fn != nullptr) {
        std::invoke(*item.fn);
        delete item.fn;
      }
    }
  }
  i->is_task_running_.store(false);
  i->is_task_running_.notify_all();
  // Wait for the instance's destructor to delete this task. We do this instead
  // of just deleting ourselves so that it's 100% certain that it's safe to
  // delete or reuse this task's stack.
  while (1) {
    vTaskDelay(portMAX_DELAY);
  }
}
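
// The task is created statically: its TCB is allocated here in internal RAM,
// while the stack buffer is supplied by the caller and may live in PSRAM.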
Worker::Worker(const std::pmr::string& name,
               cpp::span<StackType_t> stack,
               std::size_t queue_size,
               UBaseType_t priority)
    : stack_(stack.data()),
      queue_(xQueueCreate(queue_size, sizeof(WorkItem))),
      is_task_running_(true),
      task_buffer_(static_cast<StaticTask_t*>(
          heap_caps_malloc(sizeof(StaticTask_t),
                           MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT))),
      task_(xTaskCreateStatic(&Main,
                              name.c_str(),
                              stack.size(),
                              this,
                              priority,
                              stack_,
                              task_buffer_)) {}
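
// Shut the worker down cleanly: enqueue a quit item, wait for the main loop to
// finish, then delete the task and release its stack.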
Worker::~Worker() {
  WorkItem item{
      .fn = nullptr,
      .quit = true,
  };
  xQueueSend(queue_, &item, portMAX_DELAY);
  is_task_running_.wait(true);
  vTaskDelete(task_);
  free(stack_);
}
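
// Copies the given function onto the heap and queues it for the worker task.
// The returned future completes once the worker has executed the function.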
template <>
auto Worker::Dispatch(const std::function<void(void)> fn) -> std::future<void> {
  std::shared_ptr<std::promise<void>> promise =
      std::make_shared<std::promise<void>>();
  WorkItem item{
      .fn = new std::function<void(void)>([=]() {
        std::invoke(fn);
        promise->set_value();
      }),
      .quit = false,
  };
  xQueueSend(queue_, &item, portMAX_DELAY);
  return promise->get_future();
}
} // namespace tasks