Get basic audio playback going again

custom
jacqueline 2 years ago
parent ee5f662f9b
commit 8a2a2d2265
16 changed files:

  1. src/app_console/app_console.cpp (4)
  2. src/app_console/include/app_console.hpp (4)
  3. src/audio/audio_decoder.cpp (11)
  4. src/audio/audio_fsm.cpp (12)
  5. src/audio/audio_task.cpp (96)
  6. src/audio/fatfs_audio_input.cpp (4)
  7. src/audio/include/audio_decoder.hpp (3)
  8. src/audio/include/audio_element.hpp (3)
  9. src/audio/include/fatfs_audio_input.hpp (2)
  10. src/audio/pipeline.cpp (2)
  11. src/drivers/display.cpp (14)
  12. src/drivers/include/relative_wheel.hpp (4)
  13. src/drivers/relative_wheel.cpp (1)
  14. src/system_fsm/running.cpp (3)
  15. src/ui/ui_fsm.cpp (4)
  16. src/ui/wheel_encoder.cpp (10)

@@ -200,7 +200,7 @@ void RegisterDbDump() {
esp_console_cmd_register(&cmd);
}
AppConsole::AppConsole(std::weak_ptr<database::Database> database)
AppConsole::AppConsole(const std::weak_ptr<database::Database>& database)
: database_(database) {
sInstance = this;
}
@@ -210,8 +210,8 @@ AppConsole::~AppConsole() {
auto AppConsole::RegisterExtraComponents() -> void {
RegisterListDir();
/*
RegisterPlayFile();
/*
RegisterToggle();
RegisterVolume();
RegisterAudioStatus();

@@ -15,10 +15,10 @@ namespace console {
class AppConsole : public Console {
public:
explicit AppConsole(std::weak_ptr<database::Database> database);
explicit AppConsole(const std::weak_ptr<database::Database>& database);
virtual ~AppConsole();
std::weak_ptr<database::Database> database_;
const std::weak_ptr<database::Database>& database_;
protected:
virtual auto RegisterExtraComponents() -> void;

@@ -14,7 +14,6 @@
#include <memory>
#include <variant>
#include "cbor/tinycbor/src/cborinternal_p.h"
#include "freertos/FreeRTOS.h"
#include "esp_heap_caps.h"
@@ -36,7 +35,8 @@ AudioDecoder::AudioDecoder()
current_codec_(),
current_input_format_(),
current_output_format_(),
has_samples_to_send_(false) {}
has_samples_to_send_(false),
has_input_remaining_(false) {}
AudioDecoder::~AudioDecoder() {}
@@ -70,6 +70,10 @@ auto AudioDecoder::ProcessStreamInfo(const StreamInfo& info) -> bool {
return true;
}
auto AudioDecoder::NeedsToProcess() const -> bool {
return has_samples_to_send_ || has_input_remaining_;
}
auto AudioDecoder::Process(const std::vector<InputStream>& inputs,
OutputStream* output) -> void {
auto input = inputs.begin();
@@ -124,7 +128,8 @@ auto AudioDecoder::Process(const std::vector<InputStream>& inputs,
return;
}
if (res.value()) {
has_input_remaining_ = !res.value();
if (!has_input_remaining_) {
// We're out of useable data in this buffer. Finish immediately; there's
// nothing to send.
input->mark_incomplete();

@@ -29,12 +29,7 @@ auto AudioState::Init(drivers::GpioExpander* gpio_expander,
sGpioExpander = gpio_expander;
sDac = dac;
sDatabase = database;
}
namespace states {
void Uninitialised::react(const system_fsm::BootComplete&) {
transit<Standby>([&]() {
sFileSource.reset(new FatfsAudioInput());
sI2SOutput.reset(new I2SAudioOutput(sGpioExpander, sDac));
@@ -48,7 +43,12 @@ void Uninitialised::react(const system_fsm::BootComplete&) {
pipeline->AddInput(sFileSource.get());
task::StartPipeline(pipeline, sI2SOutput.get());
});
}
namespace states {
void Uninitialised::react(const system_fsm::BootComplete&) {
transit<Standby>();
}
void Standby::react(const PlayFile& ev) {

@@ -43,20 +43,64 @@ namespace task {
static const char* kTag = "task";
// The default amount of time to wait between pipeline iterations for a single
// song.
static constexpr uint_fast16_t kDefaultDelayTicks = pdMS_TO_TICKS(5);
static constexpr uint_fast16_t kMaxDelayTicks = pdMS_TO_TICKS(10);
static constexpr uint_fast16_t kMinDelayTicks = pdMS_TO_TICKS(1);
void AudioTaskMain(std::unique_ptr<Pipeline> pipeline, IAudioSink* sink) {
// The stream format for bytes currently in the sink buffer.
std::optional<StreamInfo::Format> output_format;
uint_fast16_t delay_ticks = pdMS_TO_TICKS(5);
std::vector<Pipeline*> elements = pipeline->GetIterationOrder();
// How long to wait between pipeline iterations. This is reset for each song,
// and readjusted on the fly to maintain a reasonable amount of playback buffer.
// Buffering too much will mean we process samples inefficiently, wasting CPU
// time, whilst buffering too little will affect the quality of the output.
uint_fast16_t delay_ticks = kDefaultDelayTicks;
std::vector<Pipeline*> all_elements = pipeline->GetIterationOrder();
events::EventQueue& event_queue = events::EventQueue::GetInstance();
while (1) {
event_queue.ServiceAudio(delay_ticks);
// First, see if we actually have any pipeline work to do in this iteration.
bool has_work = false;
// We always have work to do if there's still bytes to be sunk.
has_work = all_elements.back()->OutStream().info->bytes_in_stream > 0;
if (!has_work) {
for (Pipeline* p : all_elements) {
has_work = p->OutputElement()->NeedsToProcess();
if (has_work) {
break;
}
}
}
for (int i = 0; i < elements.size(); i++) {
// See if there's any new events.
event_queue.ServiceAudio(has_work ? delay_ticks : portMAX_DELAY);
if (!has_work) {
// See if we've been given work by this event.
for (Pipeline* p : all_elements) {
has_work = p->OutputElement()->NeedsToProcess();
if (has_work) {
delay_ticks = kDefaultDelayTicks;
break;
}
}
if (!has_work) {
continue;
}
}
// We have work to do! Allow each element in the pipeline to process one
// chunk. We iterate from input nodes first, so this should result in
// samples in the output buffer.
for (int i = 0; i < all_elements.size(); i++) {
std::vector<RawStream> raw_in_streams;
elements.at(i)->InStreams(&raw_in_streams);
RawStream raw_out_stream = elements.at(i)->OutStream();
all_elements.at(i)->InStreams(&raw_in_streams);
RawStream raw_out_stream = all_elements.at(i)->OutStream();
// Crop the input and output streams to the ranges that are safe to
// touch. For the input streams, this is the region that contains
@@ -67,14 +111,14 @@ void AudioTaskMain(std::unique_ptr<Pipeline> pipeline, IAudioSink* sink) {
[&](RawStream& s) { in_streams.emplace_back(&s); });
OutputStream out_stream(&raw_out_stream);
elements.at(i)->OutputElement()->Process(in_streams, &out_stream);
all_elements.at(i)->OutputElement()->Process(in_streams, &out_stream);
}
RawStream raw_sink_stream = elements.front()->OutStream();
RawStream raw_sink_stream = all_elements.back()->OutStream();
InputStream sink_stream(&raw_sink_stream);
if (sink_stream.info().bytes_in_stream == 0) {
vTaskDelay(pdMS_TO_TICKS(100));
// No new bytes to sink, so skip sinking completely.
continue;
}
@@ -86,24 +130,36 @@ void AudioTaskMain(std::unique_ptr<Pipeline> pipeline, IAudioSink* sink) {
ESP_LOGI(kTag, "reconfiguring dac");
output_format = sink_stream.info().format;
sink->Configure(*output_format);
} else {
continue;
}
}
// We've reconfigured the sink, or it was already configured correctly.
// Send through some data.
if (output_format == sink_stream.info().format &&
!std::holds_alternative<std::monostate>(*output_format)) {
std::size_t sent =
std::size_t bytes_sunk =
xStreamBufferSend(sink->buffer(), sink_stream.data().data(),
sink_stream.data().size_bytes(), 0);
if (sent > 0) {
ESP_LOGI(
kTag, "sunk %u bytes out of %u (%d %%)", sent,
sink_stream.info().bytes_in_stream,
(int)(((float)sent / (float)sink_stream.info().bytes_in_stream) *
100));
}
sink_stream.consume(sent);
// Adjust how long we wait for the next iteration if we're getting too far
// ahead or behind.
float sunk_percent = static_cast<float>(bytes_sunk) /
static_cast<float>(sink_stream.info().bytes_in_stream);
if (sunk_percent > 0.66f) {
// We're sinking a lot of the output buffer per iteration, so we need to
// be running faster.
delay_ticks--;
} else if (sunk_percent < 0.33f) {
// We're not sinking much of the output buffer per iteration, so we can
// slow down to save some cycles.
delay_ticks++;
}
delay_ticks = std::clamp(delay_ticks, kMinDelayTicks, kMaxDelayTicks);
// Finally, actually mark the bytes we sunk as consumed.
if (bytes_sunk > 0) {
sink_stream.consume(bytes_sunk);
}
}
}

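Note on the pacing change above: the reworked AudioTaskMain loop measures what fraction of the buffered output the sink actually drained each iteration, then nudges the inter-iteration delay up or down within fixed bounds. A minimal standalone sketch of that feedback idea follows; the function name and tick values are illustrative stand-ins, not part of this tree (the real bounds come from the pdMS_TO_TICKS() constants in the hunk).

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Illustrative stand-ins for kMinDelayTicks / kDefaultDelayTicks / kMaxDelayTicks.
static constexpr uint_fast16_t kMinTicks = 1;
static constexpr uint_fast16_t kDefaultTicks = 5;
static constexpr uint_fast16_t kMaxTicks = 10;

// Nudge the per-iteration delay based on how much of the pending output the
// sink consumed this time around.
uint_fast16_t AdjustDelay(uint_fast16_t current,
                          std::size_t bytes_sunk,
                          std::size_t bytes_pending) {
  if (bytes_pending == 0) {
    return current;
  }
  float sunk_percent =
      static_cast<float>(bytes_sunk) / static_cast<float>(bytes_pending);
  if (sunk_percent > 0.66f && current > kMinTicks) {
    current--;  // The sink is draining most of what we buffer; run sooner.
  } else if (sunk_percent < 0.33f && current < kMaxTicks) {
    current++;  // Plenty of data is still buffered; back off to save cycles.
  }
  return std::clamp(current, kMinTicks, kMaxTicks);
}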
@@ -51,6 +51,10 @@ auto FatfsAudioInput::OpenFile(const std::string& path) -> bool {
return true;
}
auto FatfsAudioInput::NeedsToProcess() const -> bool {
return is_file_open_;
}
auto FatfsAudioInput::Process(const std::vector<InputStream>& inputs,
OutputStream* output) -> void {
if (!is_file_open_) {

@@ -30,6 +30,8 @@ class AudioDecoder : public IAudioElement {
AudioDecoder();
~AudioDecoder();
auto NeedsToProcess() const -> bool override;
auto Process(const std::vector<InputStream>& inputs, OutputStream* output)
-> void override;
@@ -41,6 +43,7 @@ class AudioDecoder : public IAudioElement {
std::optional<StreamInfo::Format> current_input_format_;
std::optional<StreamInfo::Format> current_output_format_;
bool has_samples_to_send_;
bool has_input_remaining_;
auto ProcessStreamInfo(const StreamInfo& info) -> bool;
};

@@ -10,6 +10,7 @@
#include <cstdint>
#include <deque>
#include <memory>
#include <vector>
#include "freertos/FreeRTOS.h"
@@ -46,6 +47,8 @@ class IAudioElement {
IAudioElement() {}
virtual ~IAudioElement() {}
virtual auto NeedsToProcess() const -> bool = 0;
virtual auto Process(const std::vector<InputStream>& inputs,
OutputStream* output) -> void = 0;
};

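The NeedsToProcess() pure virtual added here is the hook the reworked task loop uses to decide whether to run the pipeline at all, or block on its event queue (portMAX_DELAY) until new work arrives. As a rough illustration of the contract only, a hypothetical element (not part of this change, and assuming the project's audio_element.hpp types) might implement it like this:

#include <vector>

#include "audio_element.hpp"

// Hypothetical element, for illustration only: it reports pending work via
// NeedsToProcess(), and clears that state once Process() has run.
class OneShotTone : public IAudioElement {
 public:
  auto NeedsToProcess() const -> bool override { return pending_; }

  auto Process(const std::vector<InputStream>& inputs,
               OutputStream* output) -> void override {
    // Write samples into *output here, then mark the work as done so the
    // task can go back to sleeping on its event queue.
    pending_ = false;
  }

 private:
  bool pending_ = true;
};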
@@ -33,6 +33,8 @@ class FatfsAudioInput : public IAudioElement {
auto OpenFile(const std::string& path) -> bool;
auto NeedsToProcess() const -> bool override;
auto Process(const std::vector<InputStream>& inputs, OutputStream* output)
-> void override;

@@ -5,6 +5,7 @@
*/
#include "pipeline.hpp"
#include <algorithm>
#include <memory>
#include "stream_info.hpp"
@@ -53,6 +54,7 @@ auto Pipeline::GetIterationOrder() -> std::vector<Pipeline*> {
}
}
std::reverse(found.begin(), found.end());
return found;
}

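The std::reverse added to GetIterationOrder() is what the reordered loop in audio_task.cpp relies on: the traversal gathers pipelines starting from the root (output) element, and reversing the result puts input elements first and leaves the root at .back(), where the task now reads the sink stream. A simplified sketch of that shape, using an illustrative Node type in place of Pipeline (the real traversal details are not shown in this hunk):

#include <algorithm>
#include <cstddef>
#include <vector>

// Simplified stand-in for Pipeline: each node knows only its input nodes.
struct Node {
  std::vector<Node*> inputs;
};

// Walk the graph starting from the root (the output/sink side), collecting
// every node, then reverse so inputs come first and the root ends up last.
std::vector<Node*> IterationOrder(Node* root) {
  std::vector<Node*> found;
  found.push_back(root);
  for (std::size_t i = 0; i < found.size(); i++) {
    for (Node* input : found[i]->inputs) {
      found.push_back(input);
    }
  }
  std::reverse(found.begin(), found.end());
  return found;
}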
@@ -106,14 +106,12 @@ auto Display::Create(GpioExpander* expander,
};
ledc_timer_config(&led_config);
ledc_channel_config_t led_channel {
.gpio_num = kDisplayLedEn,
ledc_channel_config_t led_channel{.gpio_num = kDisplayLedEn,
.speed_mode = LEDC_LOW_SPEED_MODE,
.channel = LEDC_CHANNEL_0,
.timer_sel = LEDC_TIMER_0,
.duty = 4095,
.hpoint = 0
};
.hpoint = 0};
ledc_channel_config(&led_channel);
// Next, init the SPI device
@@ -270,13 +268,13 @@ void Display::OnLvglFlush(lv_disp_drv_t* disp_drv,
data[0] = SPI_SWAP_DATA_TX(area_copy.x1, 16);
data[1] = SPI_SWAP_DATA_TX(area_copy.x2, 16);
SendCommandWithData(displays::ST77XX_CASET,
reinterpret_cast<uint8_t*>(data), 4);
SendCommandWithData(displays::ST77XX_CASET, reinterpret_cast<uint8_t*>(data),
4);
data[0] = SPI_SWAP_DATA_TX(area_copy.y1, 16);
data[1] = SPI_SWAP_DATA_TX(area_copy.y2, 16);
SendCommandWithData(displays::ST77XX_RASET,
reinterpret_cast<uint8_t*>(data), 4);
SendCommandWithData(displays::ST77XX_RASET, reinterpret_cast<uint8_t*>(data),
4);
// Now send the pixels for this region.
uint32_t size = lv_area_get_width(area) * lv_area_get_height(area);

@@ -20,7 +20,9 @@ namespace drivers {
class RelativeWheel {
public:
static auto Create(TouchWheel *touch) -> RelativeWheel* { return new RelativeWheel(touch); }
static auto Create(TouchWheel* touch) -> RelativeWheel* {
return new RelativeWheel(touch);
}
explicit RelativeWheel(TouchWheel* touch);

@@ -74,5 +74,4 @@ auto RelativeWheel::ticks() -> std::int_fast16_t {
return t;
}
} // namespace drivers

@@ -4,6 +4,7 @@
* SPDX-License-Identifier: GPL-3.0-only
*/
#include "freertos/projdefs.h"
#include "result.hpp"
#include "audio_fsm.hpp"
@@ -24,6 +25,7 @@ static const char kTag[] = "RUN";
*/
void Running::entry() {
ESP_LOGI(kTag, "mounting sd card");
vTaskDelay(pdMS_TO_TICKS(250));
auto storage_res = drivers::SdStorage::Create(sGpioExpander.get());
if (storage_res.has_error()) {
events::Dispatch<StorageError, SystemState, audio::AudioState, ui::UiState>(
@@ -31,6 +33,7 @@ void Running::entry() {
return;
}
sStorage.reset(storage_res.value());
vTaskDelay(pdMS_TO_TICKS(250));
ESP_LOGI(kTag, "opening database");
auto database_res = database::Database::Open();

@@ -28,12 +28,14 @@ auto UiState::Init(drivers::GpioExpander* gpio_expander,
sGpioExpander = gpio_expander;
sTouchWheel = touchwheel;
sDisplay = display;
StartLvgl(sTouchWheel, sDisplay);
}
namespace states {
void PreBoot::react(const system_fsm::DisplayReady& ev) {
transit<Splash>([&]() { StartLvgl(sTouchWheel, sDisplay); });
transit<Splash>();
}
void Splash::entry() {

@@ -10,11 +10,14 @@
namespace ui {
void encoder_read(lv_indev_drv_t* drv, lv_indev_data_t* data) {
TouchWheelEncoder *instance = reinterpret_cast<TouchWheelEncoder*>(drv->user_data);
TouchWheelEncoder* instance =
reinterpret_cast<TouchWheelEncoder*>(drv->user_data);
instance->Read(data);
}
TouchWheelEncoder::TouchWheelEncoder(std::weak_ptr<drivers::RelativeWheel> wheel) : wheel_(wheel) {
TouchWheelEncoder::TouchWheelEncoder(
std::weak_ptr<drivers::RelativeWheel> wheel)
: wheel_(wheel) {
lv_indev_drv_init(&driver_);
driver_.type = LV_INDEV_TYPE_ENCODER;
driver_.read_cb = encoder_read;
@@ -32,7 +35,8 @@ auto TouchWheelEncoder::Read(lv_indev_data_t *data) -> void {
}
lock->Update();
data->state = lock->is_pressed() ? LV_INDEV_STATE_PRESSED : LV_INDEV_STATE_RELEASED;
data->state =
lock->is_pressed() ? LV_INDEV_STATE_PRESSED : LV_INDEV_STATE_RELEASED;
data->enc_diff = lock->ticks();
}
