|
|
@ -16,6 +16,7 @@ |
|
|
|
#include <memory> |
|
|
|
#include <memory> |
|
|
|
#include <optional> |
|
|
|
#include <optional> |
|
|
|
#include <sstream> |
|
|
|
#include <sstream> |
|
|
|
|
|
|
|
#include <variant> |
|
|
|
|
|
|
|
|
|
|
|
#include "collation.hpp" |
|
|
|
#include "collation.hpp" |
|
|
|
#include "cppbor.h" |
|
|
|
#include "cppbor.h" |
|
|
@ -200,11 +201,11 @@ Database::~Database() { |
|
|
|
sIsDbOpen.store(false); |
|
|
|
sIsDbOpen.store(false); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Database::Put(const std::string& key, const std::string& val) -> void { |
|
|
|
auto Database::put(const std::string& key, const std::string& val) -> void { |
|
|
|
db_->Put(leveldb::WriteOptions{}, kKeyCustom + key, val); |
|
|
|
db_->Put(leveldb::WriteOptions{}, kKeyCustom + key, val); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Database::Get(const std::string& key) -> std::optional<std::string> { |
|
|
|
auto Database::get(const std::string& key) -> std::optional<std::string> { |
|
|
|
std::string val; |
|
|
|
std::string val; |
|
|
|
auto res = db_->Get(leveldb::ReadOptions{}, kKeyCustom + key, &val); |
|
|
|
auto res = db_->Get(leveldb::ReadOptions{}, kKeyCustom + key, &val); |
|
|
|
if (!res.ok()) { |
|
|
|
if (!res.ok()) { |
|
|
@ -213,9 +214,40 @@ auto Database::Get(const std::string& key) -> std::optional<std::string> { |
|
|
|
return val; |
|
|
|
return val; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Database::Update() -> std::future<void> { |
|
|
|
auto Database::getTrackPath(TrackId id) -> std::optional<std::string> { |
|
|
|
|
|
|
|
auto track_data = dbGetTrackData(id); |
|
|
|
|
|
|
|
if (!track_data) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return std::string{track_data->filepath.data(), track_data->filepath.size()}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::getTrack(TrackId id) -> std::shared_ptr<Track> { |
|
|
|
|
|
|
|
std::shared_ptr<TrackData> data = dbGetTrackData(id); |
|
|
|
|
|
|
|
if (!data || data->is_tombstoned) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::shared_ptr<TrackTags> tags = tag_parser_.ReadAndParseTags( |
|
|
|
|
|
|
|
{data->filepath.data(), data->filepath.size()}); |
|
|
|
|
|
|
|
if (!tags) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return std::make_shared<Track>(data, tags); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::getIndexes() -> std::vector<IndexInfo> { |
|
|
|
|
|
|
|
// TODO(jacqueline): This probably needs to be async? When we have runtime
|
|
|
|
|
|
|
|
// configurable indexes, they will need to come from somewhere.
|
|
|
|
|
|
|
|
return { |
|
|
|
|
|
|
|
kAllTracks, |
|
|
|
|
|
|
|
kAllAlbums, |
|
|
|
|
|
|
|
kAlbumsByArtist, |
|
|
|
|
|
|
|
kTracksByGenre, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::updateIndexes() -> void { |
|
|
|
events::Ui().Dispatch(event::UpdateStarted{}); |
|
|
|
events::Ui().Dispatch(event::UpdateStarted{}); |
|
|
|
return worker_task_->Dispatch<void>([&]() -> void { |
|
|
|
|
|
|
|
leveldb::ReadOptions read_options; |
|
|
|
leveldb::ReadOptions read_options; |
|
|
|
read_options.fill_cache = false; |
|
|
|
read_options.fill_cache = false; |
|
|
|
|
|
|
|
|
|
|
@ -266,8 +298,8 @@ auto Database::Update() -> std::future<void> { |
|
|
|
track->modified_at = modified_at; |
|
|
|
track->modified_at = modified_at; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
std::shared_ptr<TrackTags> tags = |
|
|
|
std::shared_ptr<TrackTags> tags = tag_parser_.ReadAndParseTags( |
|
|
|
tag_parser_.ReadAndParseTags(track->filepath); |
|
|
|
{track->filepath.data(), track->filepath.size()}); |
|
|
|
if (!tags || tags->encoding() == Container::kUnsupported) { |
|
|
|
if (!tags || tags->encoding() == Container::kUnsupported) { |
|
|
|
// We couldn't read the tags for this track. Either they were
|
|
|
|
// We couldn't read the tags for this track. Either they were
|
|
|
|
// malformed, or perhaps the file is missing. Either way, tombstone
|
|
|
|
// malformed, or perhaps the file is missing. Either way, tombstone
|
|
|
@ -305,7 +337,7 @@ auto Database::Update() -> std::future<void> { |
|
|
|
// Stage 2: search for newly added files.
|
|
|
|
// Stage 2: search for newly added files.
|
|
|
|
ESP_LOGI(kTag, "scanning for new tracks"); |
|
|
|
ESP_LOGI(kTag, "scanning for new tracks"); |
|
|
|
uint64_t num_processed = 0; |
|
|
|
uint64_t num_processed = 0; |
|
|
|
file_gatherer_.FindFiles("", [&](const std::pmr::string& path, |
|
|
|
file_gatherer_.FindFiles("", [&](const std::string& path, |
|
|
|
const FILINFO& info) { |
|
|
|
const FILINFO& info) { |
|
|
|
num_processed++; |
|
|
|
num_processed++; |
|
|
|
events::Ui().Dispatch(event::UpdateProgress{ |
|
|
|
events::Ui().Dispatch(event::UpdateProgress{ |
|
|
@ -375,7 +407,8 @@ auto Database::Update() -> std::future<void> { |
|
|
|
dbPutTrackData(*existing_data); |
|
|
|
dbPutTrackData(*existing_data); |
|
|
|
auto t = std::make_shared<Track>(existing_data, tags); |
|
|
|
auto t = std::make_shared<Track>(existing_data, tags); |
|
|
|
dbCreateIndexesForTrack(*t); |
|
|
|
dbCreateIndexesForTrack(*t); |
|
|
|
} else if (existing_data->filepath != path) { |
|
|
|
} else if (existing_data->filepath != |
|
|
|
|
|
|
|
std::pmr::string{path.data(), path.size()}) { |
|
|
|
ESP_LOGW(kTag, "tag hash collision for %s and %s", |
|
|
|
ESP_LOGW(kTag, "tag hash collision for %s and %s", |
|
|
|
existing_data->filepath.c_str(), path.c_str()); |
|
|
|
existing_data->filepath.c_str(), path.c_str()); |
|
|
|
ESP_LOGI(kTag, "hash components: %s, %s, %s", |
|
|
|
ESP_LOGI(kTag, "hash components: %s, %s, %s", |
|
|
@ -385,148 +418,8 @@ auto Database::Update() -> std::future<void> { |
|
|
|
} |
|
|
|
} |
|
|
|
}); |
|
|
|
}); |
|
|
|
events::Ui().Dispatch(event::UpdateFinished{}); |
|
|
|
events::Ui().Dispatch(event::UpdateFinished{}); |
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetTrackPath(TrackId id) |
|
|
|
|
|
|
|
-> std::future<std::optional<std::pmr::string>> { |
|
|
|
|
|
|
|
return worker_task_->Dispatch<std::optional<std::pmr::string>>( |
|
|
|
|
|
|
|
[=, this]() -> std::optional<std::pmr::string> { |
|
|
|
|
|
|
|
auto track_data = dbGetTrackData(id); |
|
|
|
|
|
|
|
if (track_data) { |
|
|
|
|
|
|
|
return track_data->filepath; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetTrack(TrackId id) -> std::future<std::shared_ptr<Track>> { |
|
|
|
|
|
|
|
return worker_task_->Dispatch<std::shared_ptr<Track>>( |
|
|
|
|
|
|
|
[=, this]() -> std::shared_ptr<Track> { |
|
|
|
|
|
|
|
std::shared_ptr<TrackData> data = dbGetTrackData(id); |
|
|
|
|
|
|
|
if (!data || data->is_tombstoned) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::shared_ptr<TrackTags> tags = |
|
|
|
|
|
|
|
tag_parser_.ReadAndParseTags(data->filepath); |
|
|
|
|
|
|
|
if (!tags) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return std::make_shared<Track>(data, tags); |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetBulkTracks(std::vector<TrackId> ids) |
|
|
|
|
|
|
|
-> std::future<std::vector<std::shared_ptr<Track>>> { |
|
|
|
|
|
|
|
return worker_task_->Dispatch<std::vector<std::shared_ptr<Track>>>( |
|
|
|
|
|
|
|
[=, this]() -> std::vector<std::shared_ptr<Track>> { |
|
|
|
|
|
|
|
std::map<TrackId, std::shared_ptr<Track>> id_to_track{}; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Sort the list of ids so that we can retrieve them all in a single
|
|
|
|
|
|
|
|
// iteration through the database, without re-seeking.
|
|
|
|
|
|
|
|
std::vector<TrackId> sorted_ids = ids; |
|
|
|
|
|
|
|
std::sort(sorted_ids.begin(), sorted_ids.end()); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
std::unique_ptr<leveldb::Iterator> it{ |
|
|
|
|
|
|
|
db_->NewIterator(leveldb::ReadOptions{})}; |
|
|
|
|
|
|
|
for (const TrackId& id : sorted_ids) { |
|
|
|
|
|
|
|
std::string key = EncodeDataKey(id); |
|
|
|
|
|
|
|
it->Seek(key); |
|
|
|
|
|
|
|
if (!it->Valid() || it->key() != key) { |
|
|
|
|
|
|
|
// This id wasn't found at all. Skip it.
|
|
|
|
|
|
|
|
continue; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::shared_ptr<Track> track = |
|
|
|
|
|
|
|
ParseRecord<Track>(it->key(), it->value()); |
|
|
|
|
|
|
|
if (track) { |
|
|
|
|
|
|
|
id_to_track.insert({id, track}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// We've fetched all of the ids in the request, so now just put them
|
|
|
|
|
|
|
|
// back into the order they were asked for in.
|
|
|
|
|
|
|
|
std::vector<std::shared_ptr<Track>> results; |
|
|
|
|
|
|
|
for (const TrackId& id : ids) { |
|
|
|
|
|
|
|
if (id_to_track.contains(id)) { |
|
|
|
|
|
|
|
results.push_back(id_to_track.at(id)); |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
// This lookup failed.
|
|
|
|
|
|
|
|
results.push_back({}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return results; |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetIndexes() -> std::vector<IndexInfo> { |
|
|
|
|
|
|
|
// TODO(jacqueline): This probably needs to be async? When we have runtime
|
|
|
|
|
|
|
|
// configurable indexes, they will need to come from somewhere.
|
|
|
|
|
|
|
|
return { |
|
|
|
|
|
|
|
kAllTracks, |
|
|
|
|
|
|
|
kAllAlbums, |
|
|
|
|
|
|
|
kAlbumsByArtist, |
|
|
|
|
|
|
|
kTracksByGenre, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetTracksByIndex(IndexId index, std::size_t page_size) |
|
|
|
|
|
|
|
-> std::future<Result<IndexRecord>*> { |
|
|
|
|
|
|
|
return worker_task_->Dispatch<Result<IndexRecord>*>( |
|
|
|
|
|
|
|
[=, this]() -> Result<IndexRecord>* { |
|
|
|
|
|
|
|
IndexKey::Header header{ |
|
|
|
|
|
|
|
.id = index, |
|
|
|
|
|
|
|
.depth = 0, |
|
|
|
|
|
|
|
.components_hash = 0, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
std::string prefix = EncodeIndexPrefix(header); |
|
|
|
|
|
|
|
Continuation c{.prefix = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.start_key = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = page_size}; |
|
|
|
|
|
|
|
return dbGetPage<IndexRecord>(c); |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetTracks(std::size_t page_size) -> std::future<Result<Track>*> { |
|
|
|
|
|
|
|
return worker_task_->Dispatch<Result<Track>*>([=, this]() -> Result<Track>* { |
|
|
|
|
|
|
|
std::string prefix = EncodeDataPrefix(); |
|
|
|
|
|
|
|
Continuation c{.prefix = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.start_key = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = page_size}; |
|
|
|
|
|
|
|
return dbGetPage<Track>(c); |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::GetDump(std::size_t page_size) |
|
|
|
|
|
|
|
-> std::future<Result<std::pmr::string>*> { |
|
|
|
|
|
|
|
return worker_task_->Dispatch<Result<std::pmr::string>*>( |
|
|
|
|
|
|
|
[=, this]() -> Result<std::pmr::string>* { |
|
|
|
|
|
|
|
Continuation c{.prefix = "", |
|
|
|
|
|
|
|
.start_key = "", |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = page_size}; |
|
|
|
|
|
|
|
return dbGetPage<std::pmr::string>(c); |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
template <typename T> |
|
|
|
|
|
|
|
auto Database::GetPage(Continuation* c) -> std::future<Result<T>*> { |
|
|
|
|
|
|
|
Continuation copy = *c; |
|
|
|
|
|
|
|
return worker_task_->Dispatch<Result<T>*>( |
|
|
|
|
|
|
|
[=, this]() -> Result<T>* { return dbGetPage<T>(copy); }); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template auto Database::GetPage<Track>(Continuation* c) |
|
|
|
|
|
|
|
-> std::future<Result<Track>*>; |
|
|
|
|
|
|
|
template auto Database::GetPage<IndexRecord>(Continuation* c) |
|
|
|
|
|
|
|
-> std::future<Result<IndexRecord>*>; |
|
|
|
|
|
|
|
template auto Database::GetPage<std::pmr::string>(Continuation* c) |
|
|
|
|
|
|
|
-> std::future<Result<std::pmr::string>*>; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Database::dbMintNewTrackId() -> TrackId { |
|
|
|
auto Database::dbMintNewTrackId() -> TrackId { |
|
|
|
TrackId next_id = 1; |
|
|
|
TrackId next_id = 1; |
|
|
|
std::string val; |
|
|
|
std::string val; |
|
|
@ -592,7 +485,7 @@ auto Database::dbGetHash(const uint64_t& hash) -> std::optional<TrackId> { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Database::dbCreateIndexesForTrack(const Track& track) -> void { |
|
|
|
auto Database::dbCreateIndexesForTrack(const Track& track) -> void { |
|
|
|
for (const IndexInfo& index : GetIndexes()) { |
|
|
|
for (const IndexInfo& index : getIndexes()) { |
|
|
|
leveldb::WriteBatch writes; |
|
|
|
leveldb::WriteBatch writes; |
|
|
|
auto entries = Index(collator_, index, track); |
|
|
|
auto entries = Index(collator_, index, track); |
|
|
|
for (const auto& it : entries) { |
|
|
|
for (const auto& it : entries) { |
|
|
@ -609,7 +502,7 @@ auto Database::dbRemoveIndexes(std::shared_ptr<TrackData> data) -> void { |
|
|
|
return; |
|
|
|
return; |
|
|
|
} |
|
|
|
} |
|
|
|
Track track{data, tags}; |
|
|
|
Track track{data, tags}; |
|
|
|
for (const IndexInfo& index : GetIndexes()) { |
|
|
|
for (const IndexInfo& index : getIndexes()) { |
|
|
|
auto entries = Index(collator_, index, track); |
|
|
|
auto entries = Index(collator_, index, track); |
|
|
|
for (auto it = entries.rbegin(); it != entries.rend(); it++) { |
|
|
|
for (auto it = entries.rbegin(); it != entries.rend(); it++) { |
|
|
|
auto key = EncodeIndexKey(it->first); |
|
|
|
auto key = EncodeIndexKey(it->first); |
|
|
@ -666,512 +559,209 @@ auto Database::dbRecoverTagsFromHashes( |
|
|
|
return out; |
|
|
|
return out; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
template <typename T> |
|
|
|
auto seekToOffset(leveldb::Iterator* it, int offset) { |
|
|
|
auto Database::dbGetPage(const Continuation& c) -> Result<T>* { |
|
|
|
while (it->Valid() && offset != 0) { |
|
|
|
// Work out our starting point. Sometimes this will already done.
|
|
|
|
if (offset < 0) { |
|
|
|
std::unique_ptr<leveldb::Iterator> it{ |
|
|
|
|
|
|
|
db_->NewIterator(leveldb::ReadOptions{})}; |
|
|
|
|
|
|
|
it->Seek({c.start_key.data(), c.start_key.size()}); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Fix off-by-one if we just changed direction.
|
|
|
|
|
|
|
|
if (c.forward != c.was_prev_forward) { |
|
|
|
|
|
|
|
if (c.forward) { |
|
|
|
|
|
|
|
it->Next(); |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
it->Prev(); |
|
|
|
it->Prev(); |
|
|
|
} |
|
|
|
offset++; |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Grab results.
|
|
|
|
|
|
|
|
std::optional<std::pmr::string> first_key; |
|
|
|
|
|
|
|
std::vector<std::shared_ptr<T>> records; |
|
|
|
|
|
|
|
while (records.size() < c.page_size && it->Valid()) { |
|
|
|
|
|
|
|
if (!it->key().starts_with({c.prefix.data(), c.prefix.size()})) { |
|
|
|
|
|
|
|
break; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
if (!first_key) { |
|
|
|
|
|
|
|
first_key = it->key().ToString(); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::shared_ptr<T> parsed = ParseRecord<T>(it->key(), it->value()); |
|
|
|
|
|
|
|
if (parsed) { |
|
|
|
|
|
|
|
records.push_back(parsed); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
if (c.forward) { |
|
|
|
|
|
|
|
it->Next(); |
|
|
|
|
|
|
|
} else { |
|
|
|
} else { |
|
|
|
it->Prev(); |
|
|
|
it->Next(); |
|
|
|
|
|
|
|
offset--; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
if (!it->Valid() || |
|
|
|
|
|
|
|
!it->key().starts_with({c.prefix.data(), c.prefix.size()})) { |
|
|
|
|
|
|
|
it.reset(); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// Put results into canonical order if we were iterating backwards.
|
|
|
|
auto Database::getRecord(const SearchKey& c) |
|
|
|
if (!c.forward) { |
|
|
|
-> std::optional<std::pair<std::pmr::string, Record>> { |
|
|
|
std::reverse(records.begin(), records.end()); |
|
|
|
std::unique_ptr<leveldb::Iterator> it{ |
|
|
|
} |
|
|
|
db_->NewIterator(leveldb::ReadOptions{})}; |
|
|
|
|
|
|
|
|
|
|
|
// Work out the new continuations.
|
|
|
|
it->Seek(c.startKey()); |
|
|
|
std::optional<Continuation> next_page; |
|
|
|
seekToOffset(it.get(), c.offset); |
|
|
|
if (c.forward) { |
|
|
|
if (!it->Valid() || !it->key().starts_with(std::string_view{c.prefix})) { |
|
|
|
if (it != nullptr) { |
|
|
|
return {}; |
|
|
|
// We were going forward, and now we want the next page.
|
|
|
|
|
|
|
|
std::pmr::string key{it->key().data(), it->key().size(), |
|
|
|
|
|
|
|
&memory::kSpiRamResource}; |
|
|
|
|
|
|
|
next_page = Continuation{ |
|
|
|
|
|
|
|
.prefix = c.prefix, |
|
|
|
|
|
|
|
.start_key = key, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = c.page_size, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
// No iterator means we ran out of results in this direction.
|
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
// We were going backwards, and now we want the next page. This is a
|
|
|
|
|
|
|
|
// reversal, to set the start key to the first record we saw and mark that
|
|
|
|
|
|
|
|
// it's off by one.
|
|
|
|
|
|
|
|
next_page = Continuation{ |
|
|
|
|
|
|
|
.prefix = c.prefix, |
|
|
|
|
|
|
|
.start_key = *first_key, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = false, |
|
|
|
|
|
|
|
.page_size = c.page_size, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
std::optional<Continuation> prev_page; |
|
|
|
std::optional<IndexKey> key = ParseIndexKey(it->key()); |
|
|
|
if (c.forward) { |
|
|
|
if (!key) { |
|
|
|
// We were going forwards, and now we want the previous page. Set the
|
|
|
|
ESP_LOGW(kTag, "parsing index key failed"); |
|
|
|
// search key to the first result we saw, and mark that it's off by one.
|
|
|
|
return {}; |
|
|
|
prev_page = Continuation{ |
|
|
|
|
|
|
|
.prefix = c.prefix, |
|
|
|
|
|
|
|
.start_key = *first_key, |
|
|
|
|
|
|
|
.forward = false, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = c.page_size, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
if (it != nullptr) { |
|
|
|
|
|
|
|
// We were going backwards, and we still want to go backwards.
|
|
|
|
|
|
|
|
std::pmr::string key{it->key().data(), it->key().size(), |
|
|
|
|
|
|
|
&memory::kSpiRamResource}; |
|
|
|
|
|
|
|
prev_page = Continuation{ |
|
|
|
|
|
|
|
.prefix = c.prefix, |
|
|
|
|
|
|
|
.start_key = key, |
|
|
|
|
|
|
|
.forward = false, |
|
|
|
|
|
|
|
.was_prev_forward = false, |
|
|
|
|
|
|
|
.page_size = c.page_size, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
// No iterator means we ran out of results in this direction.
|
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
return new Result<T>(std::move(records), next_page, prev_page); |
|
|
|
return std::make_pair(std::pmr::string{it->key().data(), it->key().size(), |
|
|
|
|
|
|
|
&memory::kSpiRamResource}, |
|
|
|
|
|
|
|
Record{*key, it->value()}); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Database::dbCount(const Continuation& c) -> size_t { |
|
|
|
auto Database::countRecords(const SearchKey& c) -> size_t { |
|
|
|
std::unique_ptr<leveldb::Iterator> it{ |
|
|
|
std::unique_ptr<leveldb::Iterator> it{ |
|
|
|
db_->NewIterator(leveldb::ReadOptions{})}; |
|
|
|
db_->NewIterator(leveldb::ReadOptions{})}; |
|
|
|
size_t count = 0; |
|
|
|
|
|
|
|
for (it->Seek({c.start_key.data(), c.start_key.size()}); |
|
|
|
|
|
|
|
it->Valid() && it->key().starts_with({c.prefix.data(), c.prefix.size()}); |
|
|
|
|
|
|
|
it->Next()) { |
|
|
|
|
|
|
|
count++; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return count; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template auto Database::dbGetPage<Track>(const Continuation& c) |
|
|
|
|
|
|
|
-> Result<Track>*; |
|
|
|
|
|
|
|
template auto Database::dbGetPage<std::pmr::string>(const Continuation& c) |
|
|
|
|
|
|
|
-> Result<std::pmr::string>*; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template <> |
|
|
|
it->Seek(c.startKey()); |
|
|
|
auto Database::ParseRecord<IndexRecord>(const leveldb::Slice& key, |
|
|
|
seekToOffset(it.get(), c.offset); |
|
|
|
const leveldb::Slice& val) |
|
|
|
if (!it->Valid() || !it->key().starts_with(std::string_view{c.prefix})) { |
|
|
|
-> std::shared_ptr<IndexRecord> { |
|
|
|
|
|
|
|
std::optional<IndexKey> data = ParseIndexKey(key); |
|
|
|
|
|
|
|
if (!data) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
return {}; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
std::optional<std::pmr::string> title; |
|
|
|
size_t count = 0; |
|
|
|
if (!val.empty()) { |
|
|
|
while (it->Valid() && it->key().starts_with(std::string_view{c.prefix})) { |
|
|
|
title = val.ToString(); |
|
|
|
it->Next(); |
|
|
|
|
|
|
|
count++; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
return std::make_shared<IndexRecord>(*data, title, data->track); |
|
|
|
return count; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
template <> |
|
|
|
auto SearchKey::startKey() const -> std::string_view { |
|
|
|
auto Database::ParseRecord<Track>(const leveldb::Slice& key, |
|
|
|
if (key) { |
|
|
|
const leveldb::Slice& val) |
|
|
|
return *key; |
|
|
|
-> std::shared_ptr<Track> { |
|
|
|
|
|
|
|
std::shared_ptr<TrackData> data = ParseDataValue(val); |
|
|
|
|
|
|
|
if (!data || data->is_tombstoned) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
std::shared_ptr<TrackTags> tags = |
|
|
|
return prefix; |
|
|
|
tag_parser_.ReadAndParseTags(data->filepath); |
|
|
|
|
|
|
|
if (!tags) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return std::make_shared<Track>(data, tags); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
template <> |
|
|
|
Record::Record(const IndexKey& key, const leveldb::Slice& t) |
|
|
|
auto Database::ParseRecord<std::pmr::string>(const leveldb::Slice& key, |
|
|
|
: text_(t.data(), t.size(), &memory::kSpiRamResource) { |
|
|
|
const leveldb::Slice& val) |
|
|
|
if (key.track) { |
|
|
|
-> std::shared_ptr<std::pmr::string> { |
|
|
|
contents_ = *key.track; |
|
|
|
std::ostringstream stream; |
|
|
|
|
|
|
|
stream << "key: "; |
|
|
|
|
|
|
|
if (key.size() < 3 || key.data()[1] != '\0') { |
|
|
|
|
|
|
|
stream << key.ToString().c_str(); |
|
|
|
|
|
|
|
} else { |
|
|
|
} else { |
|
|
|
for (size_t i = 0; i < key.size(); i++) { |
|
|
|
contents_ = ExpandHeader(key.header, key.item); |
|
|
|
if (i == 0) { |
|
|
|
|
|
|
|
stream << key.data()[i]; |
|
|
|
|
|
|
|
} else if (i == 1) { |
|
|
|
|
|
|
|
stream << " / 0x"; |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
stream << std::hex << std::setfill('0') << std::setw(2) |
|
|
|
|
|
|
|
<< static_cast<int>(key.data()[i]); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
if (!val.empty()) { |
|
|
|
|
|
|
|
stream << "\tval: 0x"; |
|
|
|
|
|
|
|
for (int i = 0; i < val.size(); i++) { |
|
|
|
|
|
|
|
stream << std::hex << std::setfill('0') << std::setw(2) |
|
|
|
|
|
|
|
<< static_cast<int>(val.data()[i]); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::pmr::string res{stream.str(), &memory::kSpiRamResource}; |
|
|
|
|
|
|
|
return std::make_shared<std::pmr::string>(res); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
IndexRecord::IndexRecord(const IndexKey& key, |
|
|
|
auto Record::text() const -> std::string_view { |
|
|
|
std::optional<std::pmr::string> title, |
|
|
|
return text_; |
|
|
|
std::optional<TrackId> track) |
|
|
|
|
|
|
|
: key_(key), override_text_(title), track_(track) {} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto IndexRecord::text() const -> std::optional<std::pmr::string> { |
|
|
|
|
|
|
|
if (override_text_) { |
|
|
|
|
|
|
|
return override_text_; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return key_.item; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto IndexRecord::track() const -> std::optional<TrackId> { |
|
|
|
auto Record::contents() const |
|
|
|
return track_; |
|
|
|
-> const std::variant<TrackId, IndexKey::Header>& { |
|
|
|
|
|
|
|
return contents_; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto IndexRecord::Expand(std::size_t page_size) const |
|
|
|
Iterator::Iterator(std::shared_ptr<Database> db, IndexId idx) |
|
|
|
-> std::optional<Continuation> { |
|
|
|
: Iterator(db, |
|
|
|
if (track_) { |
|
|
|
IndexKey::Header{ |
|
|
|
return {}; |
|
|
|
.id = idx, |
|
|
|
} |
|
|
|
.depth = 0, |
|
|
|
std::string new_prefix = EncodeIndexPrefix(ExpandHeader()); |
|
|
|
.components_hash = 0, |
|
|
|
return Continuation{ |
|
|
|
}) {} |
|
|
|
.prefix = {new_prefix.data(), new_prefix.size()}, |
|
|
|
|
|
|
|
.start_key = {new_prefix.data(), new_prefix.size()}, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = page_size, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto IndexRecord::ExpandHeader() const -> IndexKey::Header { |
|
|
|
|
|
|
|
return ::database::ExpandHeader(key_.header, key_.item); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Iterator::Iterator(std::weak_ptr<Database> db, const IndexInfo& idx) |
|
|
|
|
|
|
|
: db_(db), pos_mutex_(), current_pos_(), prev_pos_() { |
|
|
|
|
|
|
|
std::string prefix = EncodeIndexPrefix( |
|
|
|
|
|
|
|
IndexKey::Header{.id = idx.id, .depth = 0, .components_hash = 0}); |
|
|
|
|
|
|
|
current_pos_ = Continuation{.prefix = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.start_key = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = 1}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::Parse(std::weak_ptr<Database> db, const cppbor::Array& encoded) |
|
|
|
|
|
|
|
-> std::optional<Iterator> { |
|
|
|
|
|
|
|
// Ensure the input looks reasonable.
|
|
|
|
|
|
|
|
if (encoded.size() != 3) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if (encoded[0]->type() != cppbor::TSTR) { |
|
|
|
Iterator::Iterator(std::shared_ptr<Database> db, const IndexKey::Header& header) |
|
|
|
return {}; |
|
|
|
: db_(db), key_{}, current_() { |
|
|
|
} |
|
|
|
std::string prefix = EncodeIndexPrefix(header); |
|
|
|
const std::string& prefix = encoded[0]->asTstr()->value(); |
|
|
|
key_ = { |
|
|
|
|
|
|
|
.prefix = {prefix.data(), prefix.size(), &memory::kSpiRamResource}, |
|
|
|
std::optional<Continuation> current_pos{}; |
|
|
|
.key = {}, |
|
|
|
if (encoded[1]->type() == cppbor::TSTR) { |
|
|
|
.offset = 0, |
|
|
|
const std::string& key = encoded[1]->asTstr()->value(); |
|
|
|
|
|
|
|
current_pos = Continuation{ |
|
|
|
|
|
|
|
.prefix = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.start_key = {key.data(), key.size()}, |
|
|
|
|
|
|
|
.forward = true, |
|
|
|
|
|
|
|
.was_prev_forward = true, |
|
|
|
|
|
|
|
.page_size = 1, |
|
|
|
|
|
|
|
}; |
|
|
|
}; |
|
|
|
|
|
|
|
iterate(key_); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
std::optional<Continuation> prev_pos{}; |
|
|
|
auto Iterator::value() const -> const std::optional<Record>& { |
|
|
|
if (encoded[2]->type() == cppbor::TSTR) { |
|
|
|
return current_; |
|
|
|
const std::string& key = encoded[2]->asTstr()->value(); |
|
|
|
|
|
|
|
current_pos = Continuation{ |
|
|
|
|
|
|
|
.prefix = {prefix.data(), prefix.size()}, |
|
|
|
|
|
|
|
.start_key = {key.data(), key.size()}, |
|
|
|
|
|
|
|
.forward = false, |
|
|
|
|
|
|
|
.was_prev_forward = false, |
|
|
|
|
|
|
|
.page_size = 1, |
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
return Iterator{db, std::move(current_pos), std::move(prev_pos)}; |
|
|
|
auto Iterator::next() -> void { |
|
|
|
|
|
|
|
SearchKey new_key = key_; |
|
|
|
|
|
|
|
new_key.offset = 1; |
|
|
|
|
|
|
|
iterate(new_key); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
Iterator::Iterator(std::weak_ptr<Database> db, const Continuation& c) |
|
|
|
auto Iterator::prev() -> void { |
|
|
|
: db_(db), pos_mutex_(), current_pos_(c), prev_pos_() {} |
|
|
|
SearchKey new_key = key_; |
|
|
|
|
|
|
|
new_key.offset = -1; |
|
|
|
Iterator::Iterator(const Iterator& other) |
|
|
|
iterate(new_key); |
|
|
|
: db_(other.db_), |
|
|
|
|
|
|
|
pos_mutex_(), |
|
|
|
|
|
|
|
current_pos_(other.current_pos_), |
|
|
|
|
|
|
|
prev_pos_(other.prev_pos_) {} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Iterator::Iterator(std::weak_ptr<Database> db, |
|
|
|
|
|
|
|
std::optional<Continuation>&& cur, |
|
|
|
|
|
|
|
std::optional<Continuation>&& prev) |
|
|
|
|
|
|
|
: db_(db), current_pos_(cur), prev_pos_(prev) {} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Iterator& Iterator::operator=(const Iterator& other) { |
|
|
|
|
|
|
|
current_pos_ = other.current_pos_; |
|
|
|
|
|
|
|
prev_pos_ = other.prev_pos_; |
|
|
|
|
|
|
|
return *this; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::Next(Callback cb) -> void { |
|
|
|
auto Iterator::iterate(const SearchKey& key) -> void { |
|
|
|
auto db = db_.lock(); |
|
|
|
auto db = db_.lock(); |
|
|
|
if (!db) { |
|
|
|
if (!db) { |
|
|
|
InvokeNull(cb); |
|
|
|
ESP_LOGW(kTag, "iterate with dead db"); |
|
|
|
return; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
db->worker_task_->Dispatch<void>([=]() { |
|
|
|
|
|
|
|
std::lock_guard lock{pos_mutex_}; |
|
|
|
|
|
|
|
if (!current_pos_) { |
|
|
|
|
|
|
|
InvokeNull(cb); |
|
|
|
|
|
|
|
return; |
|
|
|
return; |
|
|
|
} |
|
|
|
} |
|
|
|
std::unique_ptr<Result<IndexRecord>> res{ |
|
|
|
auto res = db->getRecord(key); |
|
|
|
db->dbGetPage<IndexRecord>(*current_pos_)}; |
|
|
|
if (res) { |
|
|
|
prev_pos_ = current_pos_; |
|
|
|
key_ = { |
|
|
|
current_pos_ = res->next_page(); |
|
|
|
.prefix = key_.prefix, |
|
|
|
if (!res || res->values().empty() || !res->values()[0]) { |
|
|
|
.key = res->first, |
|
|
|
ESP_LOGI(kTag, "dropping empty result"); |
|
|
|
.offset = 0, |
|
|
|
InvokeNull(cb); |
|
|
|
}; |
|
|
|
return; |
|
|
|
current_ = res->second; |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
key_ = key; |
|
|
|
|
|
|
|
current_.reset(); |
|
|
|
} |
|
|
|
} |
|
|
|
std::invoke(cb, *res->values()[0]); |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::NextSync() -> std::optional<IndexRecord> { |
|
|
|
auto Iterator::count() const -> size_t { |
|
|
|
auto db = db_.lock(); |
|
|
|
auto db = db_.lock(); |
|
|
|
if (!db) { |
|
|
|
if (!db) { |
|
|
|
return {}; |
|
|
|
ESP_LOGW(kTag, "count with dead db"); |
|
|
|
} |
|
|
|
return 0; |
|
|
|
std::lock_guard lock{pos_mutex_}; |
|
|
|
|
|
|
|
if (!current_pos_) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::unique_ptr<Result<IndexRecord>> res{ |
|
|
|
|
|
|
|
db->dbGetPage<IndexRecord>(*current_pos_)}; |
|
|
|
|
|
|
|
prev_pos_ = current_pos_; |
|
|
|
|
|
|
|
current_pos_ = res->next_page(); |
|
|
|
|
|
|
|
if (!res || res->values().empty() || !res->values()[0]) { |
|
|
|
|
|
|
|
ESP_LOGI(kTag, "dropping empty result"); |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
return *res->values()[0]; |
|
|
|
return db->countRecords(key_); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::PeekSync() -> std::optional<IndexRecord> { |
|
|
|
TrackIterator::TrackIterator(const Iterator& it) : db_(it.db_), levels_() { |
|
|
|
auto db = db_.lock(); |
|
|
|
levels_.push_back(it); |
|
|
|
if (!db) { |
|
|
|
next(false); |
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
auto pos = current_pos_; |
|
|
|
|
|
|
|
if (!pos) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
std::unique_ptr<Result<IndexRecord>> res{db->dbGetPage<IndexRecord>(*pos)}; |
|
|
|
|
|
|
|
if (!res || res->values().empty() || !res->values()[0]) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return *res->values()[0]; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::Prev(Callback cb) -> void { |
|
|
|
auto TrackIterator::next() -> void { |
|
|
|
auto db = db_.lock(); |
|
|
|
next(true); |
|
|
|
if (!db) { |
|
|
|
|
|
|
|
InvokeNull(cb); |
|
|
|
|
|
|
|
return; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
db->worker_task_->Dispatch<void>([=]() { |
|
|
|
|
|
|
|
std::lock_guard lock{pos_mutex_}; |
|
|
|
auto TrackIterator::next(bool advance) -> void { |
|
|
|
if (!prev_pos_) { |
|
|
|
while (!levels_.empty()) { |
|
|
|
InvokeNull(cb); |
|
|
|
if (advance) { |
|
|
|
return; |
|
|
|
levels_.back().next(); |
|
|
|
} |
|
|
|
|
|
|
|
std::unique_ptr<Result<IndexRecord>> res{ |
|
|
|
|
|
|
|
db->dbGetPage<IndexRecord>(*current_pos_)}; |
|
|
|
|
|
|
|
current_pos_ = prev_pos_; |
|
|
|
|
|
|
|
prev_pos_ = res->prev_page(); |
|
|
|
|
|
|
|
std::invoke(cb, *res->values()[0]); |
|
|
|
|
|
|
|
}); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::Size() const -> size_t { |
|
|
|
auto& cur = levels_.back().value(); |
|
|
|
|
|
|
|
if (!cur) { |
|
|
|
|
|
|
|
// The current top iterator is out of tracks. Pop it, and move the parent
|
|
|
|
|
|
|
|
// to the next item.
|
|
|
|
|
|
|
|
levels_.pop_back(); |
|
|
|
|
|
|
|
advance = true; |
|
|
|
|
|
|
|
} else if (std::holds_alternative<IndexKey::Header>(cur->contents())) { |
|
|
|
|
|
|
|
// This record is a branch. Push a new iterator.
|
|
|
|
|
|
|
|
auto key = std::get<IndexKey::Header>(cur->contents()); |
|
|
|
auto db = db_.lock(); |
|
|
|
auto db = db_.lock(); |
|
|
|
if (!db) { |
|
|
|
if (!db) { |
|
|
|
return {}; |
|
|
|
return; |
|
|
|
} |
|
|
|
|
|
|
|
std::optional<Continuation> pos = current_pos_; |
|
|
|
|
|
|
|
if (!pos) { |
|
|
|
|
|
|
|
return 0; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
return db->dbCount(*pos); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Convenience helper: delivers an empty (end-of-iteration) record to `cb`.
// NOTE(review): `Callback` is a project-declared type; presumably any
// invocable taking std::optional<IndexRecord> — confirm against the header.
auto Iterator::InvokeNull(Callback cb) -> void {
  std::invoke(cb, std::optional<IndexRecord>{});
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto Iterator::cbor() const -> cppbor::Array&& { |
|
|
|
|
|
|
|
cppbor::Array res; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
std::pmr::string prefix; |
|
|
|
|
|
|
|
if (current_pos_) { |
|
|
|
|
|
|
|
prefix = current_pos_->prefix; |
|
|
|
|
|
|
|
} else if (prev_pos_) { |
|
|
|
|
|
|
|
prefix = prev_pos_->prefix; |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
ESP_LOGW(kTag, "iterator has no prefix"); |
|
|
|
|
|
|
|
return std::move(res); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
levels_.emplace_back(db, key); |
|
|
|
if (current_pos_) { |
|
|
|
// Don't skip the first value of the new level.
|
|
|
|
res.add(cppbor::Tstr(current_pos_->start_key)); |
|
|
|
advance = false; |
|
|
|
} else { |
|
|
|
} else if (std::holds_alternative<TrackId>(cur->contents())) { |
|
|
|
res.add(cppbor::Null()); |
|
|
|
// New record is a leaf.
|
|
|
|
|
|
|
|
break; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
if (prev_pos_) { |
|
|
|
|
|
|
|
res.add(cppbor::Tstr(prev_pos_->start_key)); |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
res.add(cppbor::Null()); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
return std::move(res); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto TrackIterator::Parse(std::weak_ptr<Database> db, |
|
|
|
auto TrackIterator::value() const -> std::optional<TrackId> { |
|
|
|
const cppbor::Array& encoded) |
|
|
|
if (levels_.empty()) { |
|
|
|
-> std::optional<TrackIterator> { |
|
|
|
|
|
|
|
TrackIterator ret{db}; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (const auto& item : encoded) { |
|
|
|
|
|
|
|
if (item->type() == cppbor::ARRAY) { |
|
|
|
|
|
|
|
auto it = Iterator::Parse(db, *item->asArray()); |
|
|
|
|
|
|
|
if (it) { |
|
|
|
|
|
|
|
ret.levels_.push_back(std::move(*it)); |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
return {}; |
|
|
|
return {}; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
auto cur = levels_.back().value(); |
|
|
|
|
|
|
|
if (!cur) { |
|
|
|
|
|
|
|
return {}; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
if (std::holds_alternative<TrackId>(cur->contents())) { |
|
|
|
|
|
|
|
return std::get<TrackId>(cur->contents()); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
return {}; |
|
|
|
return ret; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Builds a depth-first track iterator rooted at `it`. A source iterator with
// no current position contributes nothing, producing an empty TrackIterator.
TrackIterator::TrackIterator(const Iterator& it) : db_(it.db_), levels_() {
  bool has_records = static_cast<bool>(it.current_pos_);
  if (has_records) {
    levels_.push_back(it);
  }
  // Walk down until the top of the stack is positioned on a leaf record.
  NextLeaf();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Copy constructor. The handwritten version copied `db_` and `levels_`
// member-wise, which is exactly what the implicitly-defined copy constructor
// does — so default it and let the compiler keep it in sync with the members.
TrackIterator::TrackIterator(const TrackIterator& other) = default;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Constructs an empty iterator bound to `db`; yields no tracks until levels_
// is populated. The by-value weak_ptr parameter is moved into the member
// rather than copied, avoiding an unnecessary atomic refcount increment.
TrackIterator::TrackIterator(std::weak_ptr<Database> db)
    : db_(std::move(db)), levels_() {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Move assignment. The previous implementation moved `levels_` but left
// `db_` untouched, so assigning from an iterator bound to a different
// Database kept a stale database binding; move both members.
TrackIterator& TrackIterator::operator=(TrackIterator&& other) {
  db_ = std::move(other.db_);
  levels_ = std::move(other.levels_);
  return *this;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Advances to and returns the next concrete TrackId, or nullopt when the
// iterator stack has been fully exhausted.
auto TrackIterator::Next() -> std::optional<TrackId> {
  // Keep consuming records until one resolves to a track, or until there are
  // no levels left to consume from.
  while (!levels_.empty()) {
    auto record = levels_.back().NextSync();
    if (!record) {
      // The current level is exhausted; drop it and re-position the stack on
      // the next leaf record.
      levels_.pop_back();
      NextLeaf();
      continue;
    }
    auto id = record->track();
    if (id) {
      return id;
    }
    // The record existed but carried no track id; loop for the next one.
  }
  return {};
}
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
auto TrackIterator::Size() const -> size_t { |
|
|
|
auto TrackIterator::count() const -> size_t { |
|
|
|
size_t size = 0; |
|
|
|
size_t size = 0; |
|
|
|
TrackIterator copy{*this}; |
|
|
|
TrackIterator copy{*this}; |
|
|
|
while (!copy.levels_.empty()) { |
|
|
|
while (!copy.levels_.empty()) { |
|
|
|
size += copy.levels_.back().Size(); |
|
|
|
size += copy.levels_.back().count(); |
|
|
|
copy.levels_.pop_back(); |
|
|
|
copy.levels_.pop_back(); |
|
|
|
copy.NextLeaf(); |
|
|
|
copy.next(); |
|
|
|
} |
|
|
|
} |
|
|
|
return size; |
|
|
|
return size; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// Re-positions the level stack so that its top iterator points at a leaf
// record (one that resolves to a TrackId). Pops exhausted levels and pushes
// child iterators for branch records along the way; leaves `levels_` empty
// when no further leaves exist.
auto TrackIterator::NextLeaf() -> void {
  while (!levels_.empty()) {
    ESP_LOGI(kTag, "check next candidate");
    Iterator& candidate = levels_.back();
    // Peek (don't consume) so a leaf stays in place for the next Next() call.
    auto next = candidate.PeekSync();
    if (!next) {
      // This level has no more records; discard it and retry with its parent.
      ESP_LOGI(kTag, "candidate is empty");
      levels_.pop_back();
      continue;
    }
    if (!next->track()) {
      // Branch record: consume it, then descend one level into its children.
      // NOTE(review): assumes Expand(1) always yields a value for a branch
      // record — .value() would throw std::bad_optional_access otherwise;
      // confirm that invariant holds.
      ESP_LOGI(kTag, "candidate is a branch");
      candidate.NextSync();
      levels_.push_back(Iterator{db_, next->Expand(1).value()});
      continue;
    }
    // Leaf record: the stack is correctly positioned.
    ESP_LOGI(kTag, "candidate is a leaf");
    break;
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Serialises the iterator stack as a CBOR array (one element per level),
// suitable for persisting and later restoring via Parse.
// NOTE(review): the return type `cppbor::Array&&` is bound to the local
// `res`, which is destroyed when the function returns — callers receive a
// dangling reference. Returning `cppbor::Array` by value would be safe, but
// needs a matching header change; confirm how callers consume this.
auto TrackIterator::cbor() const -> cppbor::Array&& {
  cppbor::Array res;
  for (const auto& i : levels_) {
    res.add(i.cbor());
  }
  return std::move(res);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
} // namespace database
|
|
|
|
} // namespace database
|
|
|
|