Count samples going in and out of the drain buffer

This is a more accurate way of knowing which track is playing when, and
also simplifies a lot of fragile logic in audio_fsm
custom
jacqueline 12 months ago
parent b242ba9986
commit 265049c519
  1. src/drivers/bluetooth.cpp (16 lines changed)
  2. src/drivers/i2s_dac.cpp (22 lines changed)
  3. src/drivers/include/drivers/bluetooth.hpp (3 lines changed)
  4. src/drivers/include/drivers/i2s_dac.hpp (4 lines changed)
  5. src/tangara/audio/audio_decoder.cpp (12 lines changed)
  6. src/tangara/audio/audio_decoder.hpp (2 lines changed)
  7. src/tangara/audio/audio_events.hpp (21 lines changed)
  8. src/tangara/audio/audio_fsm.cpp (259 lines changed)
  9. src/tangara/audio/audio_fsm.hpp (22 lines changed)
  10. src/tangara/audio/audio_sink.hpp (1 line changed)
  11. src/tangara/audio/bt_audio_output.cpp (4 lines changed)
  12. src/tangara/audio/bt_audio_output.hpp (2 lines changed)
  13. src/tangara/audio/i2s_audio_output.cpp (4 lines changed)
  14. src/tangara/audio/i2s_audio_output.hpp (2 lines changed)
  15. src/tangara/audio/processor.cpp (99 lines changed)
  16. src/tangara/audio/processor.hpp (13 lines changed)
  17. src/tangara/audio/stream_cues.cpp (57 lines changed)
  18. src/tangara/audio/stream_cues.hpp (46 lines changed)
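
Before the per-file hunks, a rough mental model of the change (a self-contained toy, not code from the tree): the sample processor notes the value of its "samples written" counter whenever a new track's audio starts entering the drain buffer, and the FSM later compares that note against the output's "samples consumed" counter to decide which track is actually coming out of the speaker.

#include <cstdint>
#include <cstdio>

int main() {
  // Producer side: samples pushed into the drain buffer so far.
  uint32_t samples_written = 0;
  // Track A contributes one second of 48 kHz stereo audio before track B begins.
  samples_written += 96000;
  uint32_t track_b_cue = samples_written;  // "cue_at_sample" in the diff below

  // Consumer side: samples the output (I2S DAC or Bluetooth sink) has drained.
  uint32_t samples_used = 48000;  // only half a second drained: A is still audible
  std::printf("audible: %c\n", samples_used >= track_b_cue ? 'B' : 'A');

  samples_used = 100000;  // past the cue point: B is what the listener now hears
  std::printf("audible: %c\n", samples_used >= track_b_cue ? 'B' : 'A');
}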

@ -38,6 +38,7 @@ namespace drivers {
DRAM_ATTR static StreamBufferHandle_t sStream = nullptr;
DRAM_ATTR static std::atomic<float> sVolumeFactor = 1.f;
DRAM_ATTR static std::atomic<uint32_t> sSamplesUsed = 0;
static tasks::WorkerPool* sBgWorker;
@ -47,8 +48,8 @@ auto gap_cb(esp_bt_gap_cb_event_t event, esp_bt_gap_cb_param_t* param) -> void {
bluetooth::events::internal::Gap{.type = event, .param = param});
}
auto avrcp_cb(esp_avrc_ct_cb_event_t event,
esp_avrc_ct_cb_param_t* param) -> void {
auto avrcp_cb(esp_avrc_ct_cb_event_t event, esp_avrc_ct_cb_param_t* param)
-> void {
esp_avrc_ct_cb_param_t copy = *param;
sBgWorker->Dispatch<void>([=]() {
auto lock = bluetooth::BluetoothState::lock();
@ -73,6 +74,13 @@ IRAM_ATTR auto a2dp_data_cb(uint8_t* buf, int32_t buf_size) -> int32_t {
}
size_t bytes_received = xStreamBufferReceive(stream, buf, buf_size, 0);
size_t samples_received = bytes_received / 2;
if (UINT32_MAX - sSamplesUsed < samples_received) {
sSamplesUsed = samples_received - (UINT32_MAX - sSamplesUsed);
} else {
sSamplesUsed += samples_received;
}
// Apply software volume scaling.
int16_t* samples = reinterpret_cast<int16_t*>(buf);
float factor = sVolumeFactor.load();
@ -165,6 +173,10 @@ auto Bluetooth::SetVolumeFactor(float f) -> void {
sVolumeFactor = f;
}
auto Bluetooth::SamplesUsed() -> uint32_t {
return sSamplesUsed;
}
auto Bluetooth::SetEventHandler(std::function<void(bluetooth::Event)> cb)
-> void {
auto lock = bluetooth::BluetoothState::lock();
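
Both output drivers accumulate their counters with the wrap-around guard seen in a2dp_data_cb above and again in the I2S ISR below. A standalone sketch of that pattern (an illustration; no such shared helper exists in this commit):

#include <cstdint>

// Mirrors the guard used in a2dp_data_cb and the I2S callback: when adding `n`
// would exceed UINT32_MAX, restart the counter from the overflowed remainder
// rather than relying on implicit unsigned wrap-around.
static uint32_t addSamplesWithWrap(uint32_t counter, uint32_t n) {
  if (UINT32_MAX - counter < n) {
    return n - (UINT32_MAX - counter);
  }
  return counter + n;
}

SampleProcessor::sendToSink applies the same guard on the producer side, and StreamCues::update compensates for the wrap when comparing the two counters.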

@ -5,6 +5,7 @@
*/
#include "drivers/i2s_dac.hpp"
#include <stdint.h>
#include <cmath>
#include <cstdint>
@ -140,11 +141,10 @@ auto I2SDac::SetPaused(bool paused) -> void {
}
}
static volatile bool sSwapWords = false;
DRAM_ATTR static volatile bool sSwapWords = false;
auto I2SDac::Reconfigure(Channels ch,
BitsPerSample bps,
SampleRate rate) -> void {
auto I2SDac::Reconfigure(Channels ch, BitsPerSample bps, SampleRate rate)
-> void {
std::lock_guard<std::mutex> lock(configure_mutex_);
if (i2s_active_) {
@ -217,6 +217,8 @@ auto I2SDac::WriteData(const std::span<const std::byte>& data) -> void {
}
}
DRAM_ATTR static volatile uint32_t sSamplesRead = 0;
extern "C" IRAM_ATTR auto callback(i2s_chan_handle_t handle,
i2s_event_data_t* event,
void* user_ctx) -> bool {
@ -235,6 +237,14 @@ extern "C" IRAM_ATTR auto callback(i2s_chan_handle_t handle,
size_t bytes_written =
xStreamBufferReceiveFromISR(src, buf, event->size, &ret);
// Assume 16 bit samples.
size_t samples = bytes_written / 2;
if (UINT32_MAX - sSamplesRead < samples) {
sSamplesRead = samples - (UINT32_MAX - sSamplesRead);
} else {
sSamplesRead = sSamplesRead + samples;
}
// The ESP32's I2S peripheral has a different endianness to its processors.
// ESP-IDF handles this difference for stereo channels, but not for mono
// channels. We therefore sometimes need to swap each pair of words as they're
@ -276,6 +286,10 @@ auto I2SDac::SetSource(StreamBufferHandle_t buffer) -> void {
}
}
auto I2SDac::SamplesUsed() -> uint32_t {
return sSamplesRead;
}
auto I2SDac::set_channel(bool enabled) -> void {
if (i2s_active_ == enabled) {
return;
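
The endianness comment above is cut short by the hunk boundary; the swap it describes is gated on sSwapWords and is not shown in this diff. A hedged sketch of what exchanging "each pair of words" plausibly looks like (an assumption about the surrounding code, not an excerpt):

#include <cstddef>
#include <cstdint>
#include <utility>

// Assumption: for mono output, each pair of adjacent 16-bit words in the DMA
// buffer is exchanged so the I2S peripheral receives them in the order it expects.
static void swapWordPairs(uint16_t* words, size_t count) {
  for (size_t i = 0; i + 1 < count; i += 2) {
    std::swap(words[i], words[i + 1]);
  }
}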

@ -13,10 +13,10 @@
#include <freertos/stream_buffer.h>
#include <stdint.h>
#include "drivers/bluetooth_types.hpp"
#include "drivers/nvs.hpp"
#include "esp_a2dp_api.h"
#include "esp_avrc_api.h"
#include "esp_gap_bt_api.h"
#include "drivers/nvs.hpp"
#include "tasks.hpp"
#include "tinyfsm.hpp"
#include "tinyfsm/include/tinyfsm.hpp"
@ -44,6 +44,7 @@ class Bluetooth {
auto SetSource(StreamBufferHandle_t) -> void;
auto SetVolumeFactor(float) -> void;
auto SamplesUsed() -> uint32_t;
auto SetEventHandler(std::function<void(bluetooth::Event)> cb) -> void;
};

@ -11,8 +11,8 @@
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <span>
#include <utility>
#include "driver/i2s_std.h"
#include "driver/i2s_types.h"
@ -71,6 +71,8 @@ class I2SDac {
auto WriteData(const std::span<const std::byte>& data) -> void;
auto SetSource(StreamBufferHandle_t buffer) -> void;
auto SamplesUsed() -> uint32_t;
// Not copyable or movable.
I2SDac(const I2SDac&) = delete;
I2SDac& operator=(const I2SDac&) = delete;

@ -92,7 +92,7 @@ void Decoder::Main() {
// If we were already decoding, then make sure we finish up the current
// file gracefully.
if (stream_) {
finishDecode();
finishDecode(true);
}
// Ensure there's actually stream data; we might have been given nullptr
@ -106,7 +106,7 @@ void Decoder::Main() {
}
if (!continueDecode()) {
finishDecode();
finishDecode(false);
}
}
}
@ -179,12 +179,16 @@ auto Decoder::continueDecode() -> bool {
return !res->is_stream_finished;
}
auto Decoder::finishDecode() -> void {
auto Decoder::finishDecode(bool cancel) -> void {
assert(track_);
// Tell everyone we're finished.
if (cancel) {
events::Audio().Dispatch(internal::DecodingCancelled{.track = track_});
} else {
events::Audio().Dispatch(internal::DecodingFinished{.track = track_});
processor_->endStream();
}
processor_->endStream(cancel);
// Clean up after ourselves.
stream_.reset();
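
The finishDecode(bool cancel) split above gives the pipeline two distinct endings. A condensed, illustrative model of the two paths (the printouts summarise behaviour from other hunks in this commit; this is not code from the tree):

#include <cstdio>

// cancel == true:  a new stream pre-empted this one (Decoder::Main re-opening).
// cancel == false: continueDecode() reported that the stream ran to completion.
static void finishDecode(bool cancel) {
  if (cancel) {
    // DecodingCancelled is dispatched; endStream(true) tells the processor to
    // clear its buffers instead of flushing the resampler.
    std::printf("cancelled: drop buffered samples\n");
  } else {
    // DecodingFinished is dispatched; audio_fsm may advance the queue so the
    // next track can begin decoding gaplessly.
    std::printf("finished: flush and cue up the next track\n");
  }
}

int main() {
  finishDecode(true);
  finishDecode(false);
}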

@ -40,7 +40,7 @@ class Decoder {
auto prepareDecode(std::shared_ptr<TaggedStream>) -> void;
auto continueDecode() -> bool;
auto finishDecode() -> void;
auto finishDecode(bool cancel) -> void;
std::shared_ptr<SampleProcessor> processor_;

@ -84,13 +84,6 @@ struct PlaybackUpdate : tinyfsm::Event {
struct SetTrack : tinyfsm::Event {
std::variant<std::string, database::TrackId, std::monostate> new_track;
std::optional<uint32_t> seek_to_second;
enum Transition {
kHardCut,
kGapless,
// TODO: kCrossFade
};
Transition transition;
};
struct TogglePlayPause : tinyfsm::Event {
@ -146,21 +139,25 @@ struct DecodingFailedToStart : tinyfsm::Event {
std::shared_ptr<TrackInfo> track;
};
struct DecodingCancelled : tinyfsm::Event {
std::shared_ptr<TrackInfo> track;
};
struct DecodingFinished : tinyfsm::Event {
std::shared_ptr<TrackInfo> track;
};
struct StreamStarted : tinyfsm::Event {
std::shared_ptr<TrackInfo> track;
IAudioOutput::Format src_format;
IAudioOutput::Format dst_format;
IAudioOutput::Format sink_format;
uint32_t cue_at_sample;
};
struct StreamUpdate : tinyfsm::Event {
uint32_t samples_sunk;
struct StreamEnded : tinyfsm::Event {
uint32_t cue_at_sample;
};
struct StreamEnded : tinyfsm::Event {};
struct StreamHeartbeat : tinyfsm::Event {};
} // namespace internal
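
Taken together, the reworked events form a small protocol between the decoder, the sample processor, and the FSM. A summary of who dispatches what, condensed from hunks elsewhere in this commit:

// DecodingCancelled / DecodingFinished: dispatched by Decoder::finishDecode();
//   only DecodingFinished prompts audio_fsm to consider advancing the queue.
// StreamStarted{track, sink_format, cue_at_sample}: dispatched by
//   SampleProcessor::handleBeginStream(); audio_fsm records a cue at the sample
//   count where this track's audio starts entering the drain buffer.
// StreamEnded{cue_at_sample}: dispatched by SampleProcessor::handleEndStream();
//   audio_fsm records an empty cue marking where audible audio runs out.
// StreamHeartbeat: dispatched every 250 ms by a FreeRTOS timer while in
//   states::Playback; the handler feeds sOutput->samplesUsed() into StreamCues.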

@ -5,8 +5,8 @@
*/
#include "audio/audio_fsm.hpp"
#include <stdint.h>
#include <cstdint>
#include <future>
#include <memory>
#include <variant>
@ -18,6 +18,7 @@
#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
#include "freertos/projdefs.h"
#include "tinyfsm.hpp"
#include "audio/audio_decoder.hpp"
#include "audio/audio_events.hpp"
@ -25,6 +26,7 @@
#include "audio/bt_audio_output.hpp"
#include "audio/fatfs_stream_factory.hpp"
#include "audio/i2s_audio_output.hpp"
#include "audio/stream_cues.hpp"
#include "audio/track_queue.hpp"
#include "database/future_fetcher.hpp"
#include "database/track.hpp"
@ -38,7 +40,6 @@
#include "sample.hpp"
#include "system_fsm/service_locator.hpp"
#include "system_fsm/system_events.hpp"
#include "tinyfsm.hpp"
namespace audio {
@ -47,11 +48,13 @@ namespace audio {
std::shared_ptr<system_fsm::ServiceLocator> AudioState::sServices;
std::shared_ptr<FatfsStreamFactory> AudioState::sStreamFactory;
std::unique_ptr<Decoder> AudioState::sDecoder;
std::shared_ptr<SampleProcessor> AudioState::sSampleProcessor;
std::shared_ptr<IAudioOutput> AudioState::sOutput;
std::shared_ptr<I2SAudioOutput> AudioState::sI2SOutput;
std::shared_ptr<BluetoothAudioOutput> AudioState::sBtOutput;
std::shared_ptr<IAudioOutput> AudioState::sOutput;
// Two seconds of samples for two channels, at a representative sample rate.
constexpr size_t kDrainLatencySamples = 48000 * 2 * 2;
@ -61,30 +64,33 @@ constexpr size_t kDrainBufferSize =
StreamBufferHandle_t AudioState::sDrainBuffer;
std::optional<IAudioOutput::Format> AudioState::sDrainFormat;
std::shared_ptr<TrackInfo> AudioState::sCurrentTrack;
uint64_t AudioState::sCurrentSamples;
bool AudioState::sCurrentTrackIsFromQueue;
StreamCues AudioState::sStreamCues;
std::shared_ptr<TrackInfo> AudioState::sNextTrack;
uint64_t AudioState::sNextTrackCueSamples;
bool AudioState::sNextTrackIsFromQueue;
bool AudioState::sIsResampling;
bool AudioState::sIsPaused = true;
auto AudioState::currentPositionSeconds() -> std::optional<uint32_t> {
if (!sCurrentTrack || !sDrainFormat) {
return {};
auto AudioState::emitPlaybackUpdate(bool paused) -> void {
std::optional<uint32_t> position;
auto current = sStreamCues.current();
if (current.first && sDrainFormat) {
position = (current.second /
(sDrainFormat->num_channels * sDrainFormat->sample_rate)) +
current.first->start_offset.value_or(0);
}
return sCurrentSamples /
(sDrainFormat->num_channels * sDrainFormat->sample_rate);
PlaybackUpdate event{
.current_track = current.first,
.track_position = position,
.paused = paused,
};
events::System().Dispatch(event);
events::Ui().Dispatch(event);
}
void AudioState::react(const QueueUpdate& ev) {
SetTrack cmd{
.new_track = std::monostate{},
.seek_to_second = {},
.transition = SetTrack::Transition::kHardCut,
};
auto current = sServices->track_queue().current();
@ -97,20 +103,13 @@ void AudioState::react(const QueueUpdate& ev) {
if (!ev.current_changed) {
return;
}
sNextTrackIsFromQueue = true;
cmd.transition = SetTrack::Transition::kHardCut;
break;
case QueueUpdate::kRepeatingLastTrack:
sNextTrackIsFromQueue = true;
cmd.transition = SetTrack::Transition::kGapless;
break;
case QueueUpdate::kTrackFinished:
if (!ev.current_changed) {
cmd.new_track = std::monostate{};
} else {
sNextTrackIsFromQueue = true;
}
cmd.transition = SetTrack::Transition::kGapless;
break;
case QueueUpdate::kDeserialised:
default:
@ -123,32 +122,9 @@ void AudioState::react(const QueueUpdate& ev) {
}
void AudioState::react(const SetTrack& ev) {
// Remember the current track if there is one, since we need to preserve some
// of the state if it turns out this SetTrack event corresponds to seeking
// within the current track.
std::string prev_uri;
bool prev_from_queue = false;
if (sCurrentTrack) {
prev_uri = sCurrentTrack->uri;
prev_from_queue = sCurrentTrackIsFromQueue;
}
if (ev.transition == SetTrack::Transition::kHardCut) {
sCurrentTrack.reset();
sCurrentSamples = 0;
sCurrentTrackIsFromQueue = false;
clearDrainBuffer();
}
if (std::holds_alternative<std::monostate>(ev.new_track)) {
ESP_LOGI(kTag, "playback finished, awaiting drain");
sDecoder->open({});
awaitEmptyDrainBuffer();
sCurrentTrack.reset();
sDrainFormat.reset();
sCurrentSamples = 0;
sCurrentTrackIsFromQueue = false;
transit<states::Standby>();
return;
}
@ -166,81 +142,62 @@ void AudioState::react(const SetTrack& ev) {
sStreamFactory->create(std::get<std::string>(new_track), seek_to);
}
// This was a seek or replay within the same track; don't forget where
// the track originally came from.
// FIXME:
// sNextTrackIsFromQueue = prev_from_queue;
sDecoder->open(stream);
});
}
void AudioState::react(const TogglePlayPause& ev) {
sIsPaused = !ev.set_to.value_or(sIsPaused);
if (!sIsPaused && is_in_state<states::Standby>() && sCurrentTrack) {
if (!sIsPaused && is_in_state<states::Standby>() &&
sStreamCues.current().first) {
transit<states::Playback>();
} else if (sIsPaused && is_in_state<states::Playback>()) {
transit<states::Standby>();
}
}
void AudioState::react(const internal::StreamStarted& ev) {
sDrainFormat = ev.dst_format;
sIsResampling = ev.src_format != ev.dst_format;
sNextTrack = ev.track;
sNextTrackCueSamples = sCurrentSamples + (kDrainLatencySamples / 2);
ESP_LOGI(kTag, "new stream %s %u ch @ %lu hz (resample=%i)",
ev.track->uri.c_str(), sDrainFormat->num_channels,
sDrainFormat->sample_rate, sIsResampling);
}
void AudioState::react(const internal::StreamEnded&) {
ESP_LOGI(kTag, "stream ended");
if (sCurrentTrackIsFromQueue) {
sServices->track_queue().finish();
} else {
tinyfsm::FsmList<AudioState>::dispatch(SetTrack{
.new_track = std::monostate{},
.seek_to_second = {},
.transition = SetTrack::Transition::kGapless,
});
void AudioState::react(const internal::DecodingFinished& ev) {
// If we just finished playing whatever's at the front of the queue, then we
// need to advance and start playing the next one ASAP in order to continue
// gaplessly.
sServices->bg_worker().Dispatch<void>([=]() {
auto& queue = sServices->track_queue();
auto current = queue.current();
if (!current) {
return;
}
auto db = sServices->database().lock();
if (!db) {
return;
}
auto path = db->getTrackPath(*current);
if (!path) {
return;
}
if (*path == ev.track->uri) {
queue.finish();
}
});
}
void AudioState::react(const internal::StreamUpdate& ev) {
sCurrentSamples += ev.samples_sunk;
if (sNextTrack && sCurrentSamples >= sNextTrackCueSamples) {
ESP_LOGI(kTag, "next track is now sinking");
sCurrentTrack = sNextTrack;
sCurrentSamples -= sNextTrackCueSamples;
sCurrentSamples += sNextTrack->start_offset.value_or(0) *
(sDrainFormat->num_channels * sDrainFormat->sample_rate);
sCurrentTrackIsFromQueue = sNextTrackIsFromQueue;
sNextTrack.reset();
sNextTrackCueSamples = 0;
sNextTrackIsFromQueue = false;
void AudioState::react(const internal::StreamStarted& ev) {
if (sDrainFormat != ev.sink_format) {
sDrainFormat = ev.sink_format;
ESP_LOGI(kTag, "sink_format=%u ch @ %lu hz", sDrainFormat->num_channels,
sDrainFormat->sample_rate);
}
if (sCurrentTrack) {
PlaybackUpdate event{
.current_track = sCurrentTrack,
.track_position = currentPositionSeconds(),
.paused = !is_in_state<states::Playback>(),
};
events::System().Dispatch(event);
events::Ui().Dispatch(event);
}
sStreamCues.addCue(ev.track, ev.cue_at_sample);
if (sCurrentTrack && !sIsPaused && !is_in_state<states::Playback>()) {
ESP_LOGI(kTag, "ready to play!");
if (!sIsPaused && !is_in_state<states::Playback>()) {
transit<states::Playback>();
}
}
void AudioState::react(const internal::StreamEnded& ev) {
sStreamCues.addCue({}, ev.cue_at_sample);
}
void AudioState::react(const system_fsm::BluetoothEvent& ev) {
if (ev.event != drivers::bluetooth::Event::kConnectionStateChanged) {
return;
@ -276,14 +233,6 @@ void AudioState::react(const StepDownVolume& ev) {
}
}
void AudioState::react(const system_fsm::HasPhonesChanged& ev) {
if (ev.has_headphones) {
ESP_LOGI(kTag, "headphones in!");
} else {
ESP_LOGI(kTag, "headphones out!");
}
}
void AudioState::react(const SetVolume& ev) {
if (ev.db.has_value()) {
if (sOutput->SetVolumeDb(ev.db.value())) {
@ -354,43 +303,6 @@ void AudioState::react(const OutputModeChanged& ev) {
}
}
auto AudioState::clearDrainBuffer() -> void {
// Tell the decoder to stop adding new samples. This might not take effect
// immediately, since the decoder might currently be stuck waiting for space
// to become available in the drain buffer.
sDecoder->open({});
auto mode = sOutput->mode();
if (mode == IAudioOutput::Modes::kOnPlaying) {
// If we're currently playing, then the drain buffer will be actively
// draining on its own. Just keep trying to reset until it works.
while (xStreamBufferReset(sDrainBuffer) != pdPASS) {
}
} else {
// If we're not currently playing, then we need to actively pull samples
// out of the drain buffer to unblock the decoder.
while (!xStreamBufferIsEmpty(sDrainBuffer)) {
// Read a little to unblock the decoder.
uint8_t drain[2048];
xStreamBufferReceive(sDrainBuffer, drain, sizeof(drain), 0);
// Try to quickly discard the rest.
xStreamBufferReset(sDrainBuffer);
}
}
}
auto AudioState::awaitEmptyDrainBuffer() -> void {
if (is_in_state<states::Playback>()) {
for (int i = 0; i < 10 && !xStreamBufferIsEmpty(sDrainBuffer); i++) {
vTaskDelay(pdMS_TO_TICKS(250));
}
}
if (!xStreamBufferIsEmpty(sDrainBuffer)) {
clearDrainBuffer();
}
}
auto AudioState::commitVolume() -> void {
auto mode = sServices->nvs().OutputMode();
auto vol = sOutput->GetVolume();
@ -455,7 +367,7 @@ void Uninitialised::react(const system_fsm::BootComplete& ev) {
.left_bias = nvs.AmpLeftBias(),
});
sSampleProcessor.reset(new SampleProcessor());
sSampleProcessor.reset(new SampleProcessor(sDrainBuffer));
sSampleProcessor->SetOutput(sOutput);
sDecoder.reset(Decoder::Start(sSampleProcessor));
@ -470,7 +382,8 @@ void Standby::react(const system_fsm::KeyLockChanged& ev) {
if (!ev.locking) {
return;
}
sServices->bg_worker().Dispatch<void>([this]() {
auto current = sStreamCues.current();
sServices->bg_worker().Dispatch<void>([=]() {
auto db = sServices->database().lock();
if (!db) {
return;
@ -483,10 +396,13 @@ void Standby::react(const system_fsm::KeyLockChanged& ev) {
}
db->put(kQueueKey, queue.serialise());
if (sCurrentTrack) {
if (current.first && sDrainFormat) {
uint32_t seconds = (current.second / (sDrainFormat->num_channels *
sDrainFormat->sample_rate)) +
current.first->start_offset.value_or(0);
cppbor::Array current_track{
cppbor::Tstr{sCurrentTrack->uri},
cppbor::Uint{currentPositionSeconds().value_or(0)},
cppbor::Tstr{current.first->uri},
cppbor::Uint{seconds},
};
db->put(kCurrentFileKey, current_track.toString());
}
@ -521,7 +437,6 @@ void Standby::react(const system_fsm::SdStateChanged& ev) {
events::Audio().Dispatch(SetTrack{
.new_track = filename,
.seek_to_second = pos,
.transition = SetTrack::Transition::kHardCut,
});
}
}
@ -537,32 +452,29 @@ void Standby::react(const system_fsm::SdStateChanged& ev) {
});
}
static TimerHandle_t sHeartbeatTimer;
static void heartbeat(TimerHandle_t) {
events::Audio().Dispatch(internal::StreamHeartbeat{});
}
void Playback::entry() {
ESP_LOGI(kTag, "audio output resumed");
sOutput->mode(IAudioOutput::Modes::kOnPlaying);
emitPlaybackUpdate(false);
PlaybackUpdate event{
.current_track = sCurrentTrack,
.track_position = currentPositionSeconds(),
.paused = false,
};
events::System().Dispatch(event);
events::Ui().Dispatch(event);
if (!sHeartbeatTimer) {
sHeartbeatTimer =
xTimerCreate("stream", pdMS_TO_TICKS(250), true, NULL, heartbeat);
}
xTimerStart(sHeartbeatTimer, portMAX_DELAY);
}
void Playback::exit() {
ESP_LOGI(kTag, "audio output paused");
xTimerStop(sHeartbeatTimer, portMAX_DELAY);
sOutput->mode(IAudioOutput::Modes::kOnPaused);
PlaybackUpdate event{
.current_track = sCurrentTrack,
.track_position = currentPositionSeconds(),
.paused = true,
};
events::System().Dispatch(event);
events::Ui().Dispatch(event);
emitPlaybackUpdate(true);
}
void Playback::react(const system_fsm::SdStateChanged& ev) {
@ -571,6 +483,17 @@ void Playback::react(const system_fsm::SdStateChanged& ev) {
}
}
void Playback::react(const internal::StreamHeartbeat& ev) {
sStreamCues.update(sOutput->samplesUsed());
auto current = sStreamCues.current();
if (!current.first) {
transit<Standby>();
} else {
emitPlaybackUpdate(false);
}
}
} // namespace states
} // namespace audio
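
emitPlaybackUpdate and the Standby serialisation path both convert a sample count into seconds the same way. A worked example with illustrative numbers:

#include <cstdint>
#include <cstdio>

int main() {
  // Position maths from emitPlaybackUpdate, with made-up values.
  uint32_t num_channels = 2;          // sDrainFormat->num_channels
  uint32_t sample_rate = 48000;       // sDrainFormat->sample_rate
  uint32_t samples_played = 4800000;  // StreamCues::current().second
  uint32_t start_offset = 30;         // current track's start_offset, if any

  uint32_t position =
      samples_played / (num_channels * sample_rate) + start_offset;
  std::printf("track position: %lu s\n", (unsigned long)position);  // 50 + 30 = 80
}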

@ -11,6 +11,7 @@
#include <memory>
#include <vector>
#include "audio/stream_cues.hpp"
#include "tinyfsm.hpp"
#include "audio/audio_decoder.hpp"
@ -46,13 +47,13 @@ class AudioState : public tinyfsm::Fsm<AudioState> {
void react(const SetTrack&);
void react(const TogglePlayPause&);
void react(const internal::DecodingFinished&);
void react(const internal::StreamStarted&);
void react(const internal::StreamUpdate&);
void react(const internal::StreamEnded&);
virtual void react(const internal::StreamHeartbeat&) {}
void react(const StepUpVolume&);
void react(const StepDownVolume&);
virtual void react(const system_fsm::HasPhonesChanged&);
void react(const SetVolume&);
void react(const SetVolumeLimit&);
@ -66,10 +67,7 @@ class AudioState : public tinyfsm::Fsm<AudioState> {
virtual void react(const system_fsm::BluetoothEvent&);
protected:
auto clearDrainBuffer() -> void;
auto awaitEmptyDrainBuffer() -> void;
auto playTrack(database::TrackId id) -> void;
auto emitPlaybackUpdate(bool paused) -> void;
auto commitVolume() -> void;
static std::shared_ptr<system_fsm::ServiceLocator> sServices;
@ -83,19 +81,10 @@ class AudioState : public tinyfsm::Fsm<AudioState> {
static StreamBufferHandle_t sDrainBuffer;
static std::shared_ptr<TrackInfo> sCurrentTrack;
static uint64_t sCurrentSamples;
static StreamCues sStreamCues;
static std::optional<IAudioOutput::Format> sDrainFormat;
static bool sCurrentTrackIsFromQueue;
static std::shared_ptr<TrackInfo> sNextTrack;
static uint64_t sNextTrackCueSamples;
static bool sNextTrackIsFromQueue;
static bool sIsResampling;
static bool sIsPaused;
auto currentPositionSeconds() -> std::optional<uint32_t>;
};
namespace states {
@ -122,6 +111,7 @@ class Playback : public AudioState {
void exit() override;
void react(const system_fsm::SdStateChanged&) override;
void react(const internal::StreamHeartbeat&) override;
using AudioState::react;
};

@ -75,6 +75,7 @@ class IAudioOutput {
virtual auto PrepareFormat(const Format&) -> Format = 0;
virtual auto Configure(const Format& format) -> void = 0;
virtual auto samplesUsed() -> uint32_t = 0;
auto stream() -> StreamBufferHandle_t { return stream_; }

@ -121,4 +121,8 @@ auto BluetoothAudioOutput::Configure(const Format& fmt) -> void {
// No configuration necessary; the output format is fixed.
}
auto BluetoothAudioOutput::samplesUsed() -> uint32_t {
return bluetooth_.SamplesUsed();
}
} // namespace audio

@ -45,6 +45,8 @@ class BluetoothAudioOutput : public IAudioOutput {
auto PrepareFormat(const Format&) -> Format override;
auto Configure(const Format& format) -> void override;
auto samplesUsed() -> uint32_t override;
BluetoothAudioOutput(const BluetoothAudioOutput&) = delete;
BluetoothAudioOutput& operator=(const BluetoothAudioOutput&) = delete;

@ -230,4 +230,8 @@ auto I2SAudioOutput::Configure(const Format& fmt) -> void {
current_config_ = fmt;
}
auto I2SAudioOutput::samplesUsed() -> uint32_t {
return dac_->SamplesUsed();
}
} // namespace audio

@ -43,6 +43,8 @@ class I2SAudioOutput : public IAudioOutput {
auto PrepareFormat(const Format&) -> Format override;
auto Configure(const Format& format) -> void override;
auto samplesUsed() -> uint32_t override;
I2SAudioOutput(const I2SAudioOutput&) = delete;
I2SAudioOutput& operator=(const I2SAudioOutput&) = delete;

@ -5,10 +5,12 @@
*/
#include "audio/processor.hpp"
#include <stdint.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include "audio/audio_events.hpp"
#include "audio/audio_sink.hpp"
@ -31,14 +33,15 @@ static constexpr std::size_t kSourceBufferLength = kSampleBufferLength * 2;
namespace audio {
SampleProcessor::SampleProcessor()
SampleProcessor::SampleProcessor(StreamBufferHandle_t sink)
: commands_(xQueueCreate(1, sizeof(Args))),
resampler_(nullptr),
source_(xStreamBufferCreateWithCaps(kSourceBufferLength,
sizeof(sample::Sample) * 2,
MALLOC_CAP_DMA)),
sink_(sink),
leftover_bytes_(0),
samples_sunk_(0) {
samples_written_(0) {
input_buffer_ = {
reinterpret_cast<sample::Sample*>(heap_caps_calloc(
kSampleBufferLength, sizeof(sample::Sample), MALLOC_CAP_DMA)),
@ -60,10 +63,12 @@ SampleProcessor::~SampleProcessor() {
}
auto SampleProcessor::SetOutput(std::shared_ptr<IAudioOutput> output) -> void {
// FIXME: We should add synchronisation here, but we should be careful about
// not impacting performance given that the output will change only very
// rarely (if ever).
sink_ = output;
assert(xStreamBufferIsEmpty(sink_));
// FIXME: We should add synchronisation here, but we should be careful
// about not impacting performance given that the output will change only
// very rarely (if ever).
output_ = output;
samples_written_ = output_->samplesUsed();
}
auto SampleProcessor::beginStream(std::shared_ptr<TrackInfo> track) -> void {
@ -71,6 +76,7 @@ auto SampleProcessor::beginStream(std::shared_ptr<TrackInfo> track) -> void {
.track = new std::shared_ptr<TrackInfo>(track),
.samples_available = 0,
.is_end_of_stream = false,
.clear_buffers = false,
};
xQueueSend(commands_, &args, portMAX_DELAY);
}
@ -80,16 +86,18 @@ auto SampleProcessor::continueStream(std::span<sample::Sample> input) -> void {
.track = nullptr,
.samples_available = input.size(),
.is_end_of_stream = false,
.clear_buffers = false,
};
xQueueSend(commands_, &args, portMAX_DELAY);
xStreamBufferSend(source_, input.data(), input.size_bytes(), portMAX_DELAY);
}
auto SampleProcessor::endStream() -> void {
auto SampleProcessor::endStream(bool cancelled) -> void {
Args args{
.track = nullptr,
.samples_available = 0,
.is_end_of_stream = true,
.clear_buffers = cancelled,
};
xQueueSend(commands_, &args, portMAX_DELAY);
}
@ -108,7 +116,7 @@ auto SampleProcessor::Main() -> void {
handleContinueStream(args.samples_available);
}
if (args.is_end_of_stream) {
handleEndStream();
handleEndStream(args.clear_buffers);
}
}
}
@ -116,31 +124,32 @@ auto SampleProcessor::Main() -> void {
auto SampleProcessor::handleBeginStream(std::shared_ptr<TrackInfo> track)
-> void {
if (track->format != source_format_) {
resampler_.reset();
source_format_ = track->format;
// The new stream has a different format to the previous stream (or there
// was no previous stream).
// First, clean up our filters.
resampler_.reset();
leftover_bytes_ = 0;
auto new_target = sink_->PrepareFormat(track->format);
if (new_target != target_format_) {
// The new format is different to the old one. Wait for the sink to
// drain before continuing.
while (!xStreamBufferIsEmpty(sink_->stream())) {
ESP_LOGI(kTag, "waiting for sink stream to drain...");
// TODO(jacqueline): Get the sink drain ISR to notify us of this
// via semaphore instead of busy-ish waiting.
vTaskDelay(pdMS_TO_TICKS(10));
// If the output is idle, then we can reconfigure it to the closest format
// to our new source.
// If the output *wasn't* idle, then we can't reconfigure without an
// audible gap in playback. So instead, we simply keep the same target
// format and begin resampling.
if (xStreamBufferIsEmpty(sink_)) {
target_format_ = output_->PrepareFormat(track->format);
output_->Configure(target_format_);
}
sink_->Configure(new_target);
}
target_format_ = new_target;
if (xStreamBufferIsEmpty(sink_)) {
samples_written_ = output_->samplesUsed();
}
samples_sunk_ = 0;
events::Audio().Dispatch(internal::StreamStarted{
.track = track,
.src_format = source_format_,
.dst_format = target_format_,
.sink_format = target_format_,
.cue_at_sample = samples_written_,
});
}
@ -222,8 +231,8 @@ auto SampleProcessor::handleSamples(std::span<sample::Sample> input) -> size_t {
return samples_used;
}
auto SampleProcessor::handleEndStream() -> void {
if (resampler_) {
auto SampleProcessor::handleEndStream(bool clear_bufs) -> void {
if (resampler_ && !clear_bufs) {
size_t read, written;
std::tie(read, written) = resampler_->Process({}, resampled_buffer_, true);
@ -232,33 +241,31 @@ auto SampleProcessor::handleEndStream() -> void {
}
}
// Send a final update to finish off this stream's samples.
if (samples_sunk_ > 0) {
events::Audio().Dispatch(internal::StreamUpdate{
.samples_sunk = samples_sunk_,
});
samples_sunk_ = 0;
if (clear_bufs) {
assert(xStreamBufferReset(sink_));
samples_written_ = output_->samplesUsed();
}
// FIXME: This discards any leftover samples, but there probably shouldn't be
// any leftover samples. Can this be an assert instead?
leftover_bytes_ = 0;
events::Audio().Dispatch(internal::StreamEnded{});
events::Audio().Dispatch(internal::StreamEnded{
.cue_at_sample = samples_written_,
});
}
auto SampleProcessor::sendToSink(std::span<sample::Sample> samples) -> void {
// Update the number of samples sunk so far *before* actually sinking them,
// since writing to the stream buffer will block when the buffer gets full.
samples_sunk_ += samples.size();
if (samples_sunk_ >=
target_format_.sample_rate * target_format_.num_channels) {
events::Audio().Dispatch(internal::StreamUpdate{
.samples_sunk = samples_sunk_,
});
samples_sunk_ = 0;
}
auto data = std::as_bytes(samples);
xStreamBufferSend(sink_, data.data(), data.size(), portMAX_DELAY);
xStreamBufferSend(sink_->stream(),
reinterpret_cast<std::byte*>(samples.data()),
samples.size_bytes(), portMAX_DELAY);
uint32_t samples_before_overflow =
std::numeric_limits<uint32_t>::max() - samples_written_;
if (samples_before_overflow < samples.size()) {
samples_written_ = samples.size() - samples_before_overflow;
} else {
samples_written_ += samples.size();
}
}
} // namespace audio
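
With the new clear_buffers flag threaded through, the decoder drives the processor as sketched below (condensed from the decoder and processor hunks above; not a literal excerpt):

//   processor_->beginStream(track);         // Args{.track = ..., ...}
//   while (still_decoding) {
//     processor_->continueStream(samples);  // Args + PCM pushed into source_
//   }
//   processor_->endStream(cancelled);       // Args{.is_end_of_stream = true,
//                                           //      .clear_buffers = cancelled}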

@ -27,21 +27,21 @@ namespace audio {
*/
class SampleProcessor {
public:
SampleProcessor();
SampleProcessor(StreamBufferHandle_t sink);
~SampleProcessor();
auto SetOutput(std::shared_ptr<IAudioOutput>) -> void;
auto beginStream(std::shared_ptr<TrackInfo>) -> void;
auto continueStream(std::span<sample::Sample>) -> void;
auto endStream() -> void;
auto endStream(bool cancelled) -> void;
private:
auto Main() -> void;
auto handleBeginStream(std::shared_ptr<TrackInfo>) -> void;
auto handleContinueStream(size_t samples_available) -> void;
auto handleEndStream() -> void;
auto handleEndStream(bool cancel) -> void;
auto handleSamples(std::span<sample::Sample>) -> size_t;
@ -51,23 +51,26 @@ class SampleProcessor {
std::shared_ptr<TrackInfo>* track;
size_t samples_available;
bool is_end_of_stream;
bool clear_buffers;
};
QueueHandle_t commands_;
std::unique_ptr<Resampler> resampler_;
StreamBufferHandle_t source_;
StreamBufferHandle_t sink_;
std::span<sample::Sample> input_buffer_;
std::span<std::byte> input_buffer_as_bytes_;
std::span<sample::Sample> resampled_buffer_;
std::shared_ptr<IAudioOutput> sink_;
std::shared_ptr<IAudioOutput> output_;
IAudioOutput::Format source_format_;
IAudioOutput::Format target_format_;
size_t leftover_bytes_;
uint32_t samples_sunk_;
uint32_t samples_written_;
};
} // namespace audio

@ -0,0 +1,57 @@
/*
* Copyright 2024 jacqueline <me@jacqueline.id.au>
*
* SPDX-License-Identifier: GPL-3.0-only
*/
#include "audio/stream_cues.hpp"
#include <cstdint>
#include <memory>
namespace audio {
StreamCues::StreamCues() : now_(0) {}
auto StreamCues::update(uint32_t sample) -> void {
if (sample < now_) {
// The current time must have overflowed. Deal with any cues between now_
// and UINT32_MAX, then proceed as normal.
while (!upcoming_.empty() && upcoming_.front().start_at > now_) {
current_ = upcoming_.front();
upcoming_.pop_front();
}
}
now_ = sample;
while (!upcoming_.empty() && upcoming_.front().start_at <= now_) {
current_ = upcoming_.front();
upcoming_.pop_front();
}
}
auto StreamCues::addCue(std::shared_ptr<TrackInfo> track, uint32_t sample)
-> void {
upcoming_.push_back(Cue{
.track = track,
.start_at = sample,
});
}
auto StreamCues::current() -> std::pair<std::shared_ptr<TrackInfo>, uint32_t> {
if (!current_) {
return {};
}
uint32_t duration;
if (now_ < current_->start_at) {
// now_ overflowed since this track started.
duration = now_ + (UINT32_MAX - current_->start_at);
} else {
duration = now_ - current_->start_at;
}
return {current_->track, duration};
}
} // namespace audio
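
A hypothetical usage walkthrough of the class above (trackA and trackB stand in for std::shared_ptr<TrackInfo> values; the numbers assume 48 kHz stereo):

//   StreamCues cues;
//   cues.addCue(trackA, 0);        // trackA's audio starts at sample 0
//   cues.addCue(trackB, 960000);   // trackB's first sample enters the buffer later
//
//   cues.update(480000);           // output has drained 480000 samples so far
//   cues.current();                // -> {trackA, 480000}: 5 seconds into trackA
//
//   cues.update(1000000);          // drained past trackB's cue point
//   cues.current();                // -> {trackB, 40000}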

@ -0,0 +1,46 @@
/*
* Copyright 2024 jacqueline <me@jacqueline.id.au>
*
* SPDX-License-Identifier: GPL-3.0-only
*/
#pragma once
#include <stdint.h>
#include <cstdint>
#include <deque>
#include <memory>
#include "audio/audio_events.hpp"
namespace audio {
/*
* Utility for tracking which track is currently being played (and how long it
* has been playing for) based on counting samples that are put into and taken
* out of the audio processor's output buffer.
*/
class StreamCues {
public:
StreamCues();
/* Updates the current track given the new most recently played sample. */
auto update(uint32_t sample) -> void;
/* Returns the current track, and how long it has been playing for. */
auto current() -> std::pair<std::shared_ptr<TrackInfo>, uint32_t>;
auto addCue(std::shared_ptr<TrackInfo>, uint32_t start_at) -> void;
private:
uint32_t now_;
struct Cue {
std::shared_ptr<TrackInfo> track;
uint32_t start_at;
};
std::optional<Cue> current_;
std::deque<Cue> upcoming_;
};
} // namespace audio
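
For the overflow path in StreamCues::current(), a worked example of the duration it reports when the consumer counter has wrapped past UINT32_MAX since the cue was recorded:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t start_at = 4294967000u;  // cue recorded near the top of the range
  uint32_t now = 200u;              // counter value after wrapping
  uint32_t duration = now + (UINT32_MAX - start_at);  // 200 + 295 = 495
  std::printf("current track has played %lu samples\n", (unsigned long)duration);
}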