updated build system
Checks: BlockStorage/repertory/pipeline/head reported errors (something is wrong with the build of this commit)

2024-10-19 11:10:36 -05:00
parent c72dec6369
commit 2fb53e34af
24 changed files with 1330 additions and 831 deletions

View File

@@ -57,6 +57,7 @@ private:
i_provider &provider_;
private:
utils::db::sqlite::db3_t db_;
std::atomic<std::uint64_t> next_handle_{0U};
mutable std::recursive_mutex open_file_mtx_;
std::unordered_map<std::string, std::shared_ptr<i_closeable_open_file>>
@@ -68,8 +69,6 @@ private:
std::unique_ptr<std::thread> upload_thread_;
private:
[[nodiscard]] auto create_db() const -> utils::db::sqlite::db3_t;
void close_timed_out_files();
auto get_open_file_by_handle(std::uint64_t handle) const
@@ -83,22 +82,12 @@ private:
std::shared_ptr<i_closeable_open_file> closeable_file) -> api_error;
void queue_upload(const std::string &api_path, const std::string &source_path,
bool no_lock, sqlite3 *db);
bool no_lock);
void remove_resume(const std::string &api_path,
const std::string &source_path, sqlite3 *db);
void remove_upload(const std::string &api_path, bool no_lock);
void remove_upload(const std::string &api_path, bool no_lock, sqlite3 *db);
[[nodiscard]] auto rename_directory(const std::string &from_api_path,
const std::string &to_api_path,
sqlite3 *db) -> api_error;
[[nodiscard]] auto rename_file(const std::string &from_api_path,
const std::string &to_api_path, bool overwrite,
sqlite3 *db) -> api_error;
void swap_renamed_items(std::string from_api_path, std::string to_api_path,
bool directory, sqlite3 *db);
bool directory);
void upload_completed(const file_upload_completed &evt);
@@ -108,8 +97,7 @@ public:
[[nodiscard]] auto get_next_handle() -> std::uint64_t;
auto handle_file_rename(const std::string &from_api_path,
const std::string &to_api_path, sqlite3 *db)
-> api_error;
const std::string &to_api_path) -> api_error;
void queue_upload(const i_open_file &file) override;
@@ -148,27 +136,27 @@ public:
[[nodiscard]] auto has_no_open_file_handles() const -> bool override;
[[nodiscard]] auto is_processing(const std::string &api_path) const
-> bool override;
[[nodiscard]] auto
is_processing(const std::string &api_path) const -> bool override;
#if defined(PROJECT_TESTING)
[[nodiscard]] auto open(std::shared_ptr<i_closeable_open_file> of,
const open_file_data &ofd, std::uint64_t &handle,
std::shared_ptr<i_open_file> &file) -> api_error;
#endif
#endif // defined(PROJECT_TESTING)
[[nodiscard]] auto open(const std::string &api_path, bool directory,
const open_file_data &ofd, std::uint64_t &handle,
std::shared_ptr<i_open_file> &file) -> api_error;
[[nodiscard]] auto remove_file(const std::string &api_path) -> api_error;
[[nodiscard]] auto rename_directory(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error;
[[nodiscard]] auto
rename_directory(const std::string &from_api_path,
const std::string &to_api_path) -> api_error;
[[nodiscard]] auto rename_file(const std::string &from_api_path,
const std::string &to_api_path, bool overwrite)
-> api_error;
const std::string &to_api_path,
bool overwrite) -> api_error;
void start();
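
Note on the header changes above: they all follow one pattern. The sqlite3 *db parameter is dropped from queue_upload, remove_resume, remove_upload, rename_directory, rename_file, swap_renamed_items and handle_file_rename, and a single utils::db::sqlite::db3_t db_ member (together with the removal of the per-class create_db() helper) takes its place. A minimal sketch of the resulting ownership shape follows, using only types and calls that appear in this diff; the class name example_manager and the database name example.db are illustrative, not from the repertory sources, and the sketch assumes the repertory headers that declare app_config and utils::db::sqlite.

// Sketch only: one RAII-owned SQLite handle for the object's lifetime,
// so private helpers no longer need a raw sqlite3 * argument.
class example_manager final {
public:
  explicit example_manager(app_config &config)
      : config_(config),
        db_(utils::db::sqlite::create_db(
            utils::path::combine(config_.get_data_directory(),
                                 {"example.db"}),
            sql_create_tables)) {}

  ~example_manager() { db_.reset(); } // release the handle explicitly on teardown

private:
  app_config &config_;
  utils::db::sqlite::db3_t db_; // helpers dereference *db_ directly

  // e.g. void remove_upload(const std::string &api_path, bool no_lock);
};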

View File

@@ -43,8 +43,8 @@
#include "utils/time.hpp"
namespace {
[[nodiscard]] auto create_resume_entry(const repertory::i_open_file &file)
-> json {
[[nodiscard]] auto
create_resume_entry(const repertory::i_open_file &file) -> json {
return {
{"chunk_size", file.get_chunk_size()},
{"path", file.get_api_path()},
@@ -106,6 +106,10 @@ const std::map<std::string, std::string> sql_create_tables{
namespace repertory {
file_manager::file_manager(app_config &config, i_provider &provider)
: config_(config), provider_(provider) {
db_ = utils::db::sqlite::create_db(
utils::path::combine(config_.get_data_directory(), {"file_manager.db"}),
sql_create_tables);
if (not provider_.is_direct_only()) {
E_SUBSCRIBE_EXACT(file_upload_completed,
[this](const file_upload_completed &completed) {
@@ -116,6 +120,7 @@ file_manager::file_manager(app_config &config, i_provider &provider)
file_manager::~file_manager() {
stop();
db_.reset();
E_CONSUMER_RELEASE();
}
@@ -170,38 +175,6 @@ void file_manager::close_timed_out_files() {
closeable_list.clear();
}
auto file_manager::create_db() const -> utils::db::sqlite::db3_t {
auto db_path =
utils::path::combine(config_.get_data_directory(), {"file_manager.db"});
sqlite3 *db3{nullptr};
auto db_res =
sqlite3_open_v2(db_path.c_str(), &db3,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, nullptr);
if (db_res != SQLITE_OK) {
throw std::runtime_error("failed to open db|" + db_path + '|' +
std::to_string(db_res) + '|' +
sqlite3_errstr(db_res));
}
auto db = utils::db::sqlite::db3_t{
db3,
utils::db::sqlite::sqlite3_deleter(),
};
for (auto &&create_item : sql_create_tables) {
std::string err;
if (not utils::db::sqlite::execute_sql(*db, create_item.second, err)) {
db.reset();
throw std::runtime_error(err);
}
}
utils::db::sqlite::set_journal_mode(*db);
return db;
}
auto file_manager::create(const std::string &api_path, api_meta_map &meta,
open_file_data ofd, std::uint64_t &handle,
std::shared_ptr<i_open_file> &file) -> api_error {
@@ -361,10 +334,8 @@ auto file_manager::get_stored_downloads() const -> std::vector<json> {
return {};
}
auto db = create_db();
std::vector<json> ret;
auto result = utils::db::sqlite::db_select{*db.get(), resume_table}.go();
auto result = utils::db::sqlite::db_select{*db_, resume_table}.go();
while (result.has_row()) {
try {
std::optional<utils::db::sqlite::db_select::row> row;
@@ -385,8 +356,8 @@ auto file_manager::get_stored_downloads() const -> std::vector<json> {
}
auto file_manager::handle_file_rename(const std::string &from_api_path,
const std::string &to_api_path,
sqlite3 *db) -> api_error {
const std::string &to_api_path)
-> api_error {
std::string source_path{};
auto file_iter = open_file_lookup_.find(from_api_path);
if (file_iter != open_file_lookup_.end()) {
@@ -399,7 +370,7 @@ auto file_manager::handle_file_rename(const std::string &from_api_path,
source_path = upload_lookup_.at(from_api_path)->get_source_path();
}
} else {
auto result = utils::db::sqlite::db_select{*db, upload_table}
auto result = utils::db::sqlite::db_select{*db_, upload_table}
.column("source_path")
.where("api_path")
.equals(from_api_path)
@@ -411,22 +382,22 @@ auto file_manager::handle_file_rename(const std::string &from_api_path,
}
}
remove_upload(from_api_path, true, db);
remove_upload(from_api_path, true);
auto ret = provider_.rename_file(from_api_path, to_api_path);
if (ret != api_error::success) {
queue_upload(from_api_path, source_path, false, db);
queue_upload(from_api_path, source_path, false);
return ret;
}
swap_renamed_items(from_api_path, to_api_path, false, db);
swap_renamed_items(from_api_path, to_api_path, false);
ret = source_path.empty()
? api_error::success
: provider_.set_item_meta(to_api_path, META_SOURCE, source_path);
if (should_upload) {
queue_upload(to_api_path, source_path, false, db);
queue_upload(to_api_path, source_path, false);
}
return ret;
@@ -447,9 +418,7 @@ auto file_manager::is_processing(const std::string &api_path) const -> bool {
}
upload_lock.unlock();
auto db = create_db();
utils::db::sqlite::db_select query{*db.get(), upload_table};
utils::db::sqlite::db_select query{*db_, upload_table};
if (query.where("api_path").equals(api_path).go().has_row()) {
return true;
};
@@ -469,11 +438,10 @@ auto file_manager::open(const std::string &api_path, bool directory,
return open(api_path, directory, ofd, handle, file, nullptr);
}
auto file_manager::open(const std::string &api_path, bool directory,
const open_file_data &ofd, std::uint64_t &handle,
std::shared_ptr<i_open_file> &file,
std::shared_ptr<i_closeable_open_file> closeable_file)
-> api_error {
auto file_manager::open(
const std::string &api_path, bool directory, const open_file_data &ofd,
std::uint64_t &handle, std::shared_ptr<i_open_file> &file,
std::shared_ptr<i_closeable_open_file> closeable_file) -> api_error {
const auto create_and_add_handle =
[&](std::shared_ptr<i_closeable_open_file> cur_file) {
handle = get_next_handle();
@@ -517,14 +485,11 @@ auto file_manager::open(const std::string &api_path, bool directory,
}
void file_manager::queue_upload(const i_open_file &file) {
auto db = create_db();
return queue_upload(file.get_api_path(), file.get_source_path(), false,
db.get());
return queue_upload(file.get_api_path(), file.get_source_path(), false);
}
void file_manager::queue_upload(const std::string &api_path,
const std::string &source_path, bool no_lock,
sqlite3 *db) {
const std::string &source_path, bool no_lock) {
if (provider_.is_direct_only()) {
return;
}
@@ -533,10 +498,10 @@ void file_manager::queue_upload(const std::string &api_path,
if (not no_lock) {
lock = std::make_unique<mutex_lock>(upload_mtx_);
}
remove_upload(api_path, true, db);
remove_upload(api_path, true);
auto result =
utils::db::sqlite::db_insert{*db, upload_table}
utils::db::sqlite::db_insert{*db_, upload_table}
.or_replace()
.column_value("api_path", api_path)
.column_value("date_time",
@@ -544,7 +509,7 @@ void file_manager::queue_upload(const std::string &api_path,
.column_value("source_path", source_path)
.go();
if (result.ok()) {
remove_resume(api_path, source_path, db);
remove_resume(api_path, source_path);
event_system::instance().raise<file_upload_queued>(api_path, source_path);
} else {
event_system::instance().raise<file_upload_failed>(
@@ -570,10 +535,9 @@ auto file_manager::remove_file(const std::string &api_path) -> api_error {
close_all(api_path);
auto db = create_db();
remove_upload(api_path, true, db.get());
remove_upload(api_path, true);
auto result = utils::db::sqlite::db_delete{*db.get(), resume_table}
auto result = utils::db::sqlite::db_delete{*db_, resume_table}
.where("api_path")
.equals(api_path)
.go();
@@ -599,13 +563,7 @@ auto file_manager::remove_file(const std::string &api_path) -> api_error {
void file_manager::remove_resume(const std::string &api_path,
const std::string &source_path) {
auto db = create_db();
return remove_resume(api_path, source_path, db.get());
}
void file_manager::remove_resume(const std::string &api_path,
const std::string &source_path, sqlite3 *db) {
auto result = utils::db::sqlite::db_delete{*db, resume_table}
auto result = utils::db::sqlite::db_delete{*db_, resume_table}
.where("api_path")
.equals(api_path)
.go();
@@ -616,12 +574,10 @@ void file_manager::remove_resume(const std::string &api_path,
}
void file_manager::remove_upload(const std::string &api_path) {
auto db = create_db();
remove_upload(api_path, false, db.get());
remove_upload(api_path, false);
}
void file_manager::remove_upload(const std::string &api_path, bool no_lock,
sqlite3 *db) {
void file_manager::remove_upload(const std::string &api_path, bool no_lock) {
REPERTORY_USES_FUNCTION_NAME();
if (provider_.is_direct_only()) {
@@ -633,7 +589,7 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock,
lock = std::make_unique<mutex_lock>(upload_mtx_);
}
auto result = utils::db::sqlite::db_delete{*db, upload_table}
auto result = utils::db::sqlite::db_delete{*db_, upload_table}
.where("api_path")
.equals(api_path)
.go();
@@ -643,7 +599,7 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock,
"failed to remove from upload table");
}
result = utils::db::sqlite::db_delete{*db, upload_active_table}
result = utils::db::sqlite::db_delete{*db_, upload_active_table}
.where("api_path")
.equals(api_path)
.go();
@@ -670,13 +626,6 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock,
auto file_manager::rename_directory(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error {
auto db = create_db();
return rename_directory(from_api_path, to_api_path, db.get());
}
auto file_manager::rename_directory(const std::string &from_api_path,
const std::string &to_api_path, sqlite3 *db)
-> api_error {
if (not provider_.is_rename_supported()) {
return api_error::not_implemented;
}
@@ -732,9 +681,8 @@ auto file_manager::rename_directory(const std::string &from_api_path,
auto old_api_path = api_path;
auto new_api_path = utils::path::create_api_path(utils::path::combine(
to_api_path, {old_api_path.substr(from_api_path.size())}));
res = list[i].directory
? rename_directory(old_api_path, new_api_path, db)
: rename_file(old_api_path, new_api_path, false, db);
res = list[i].directory ? rename_directory(old_api_path, new_api_path)
: rename_file(old_api_path, new_api_path, false);
}
}
@@ -747,20 +695,13 @@ auto file_manager::rename_directory(const std::string &from_api_path,
return res;
}
swap_renamed_items(from_api_path, to_api_path, true, db);
swap_renamed_items(from_api_path, to_api_path, true);
return api_error::success;
}
auto file_manager::rename_file(const std::string &from_api_path,
const std::string &to_api_path, bool overwrite)
-> api_error {
auto db = create_db();
return rename_file(from_api_path, to_api_path, overwrite, db.get());
}
auto file_manager::rename_file(const std::string &from_api_path,
const std::string &to_api_path, bool overwrite,
sqlite3 *db) -> api_error {
const std::string &to_api_path,
bool overwrite) -> api_error {
if (not provider_.is_rename_supported()) {
return api_error::not_implemented;
}
@@ -828,7 +769,7 @@ auto file_manager::rename_file(const std::string &from_api_path,
}
}
return handle_file_rename(from_api_path, to_api_path, db);
return handle_file_rename(from_api_path, to_api_path);
}
void file_manager::start() {
@@ -849,8 +790,6 @@ void file_manager::start() {
return;
}
auto db = create_db();
struct active_item final {
std::string api_path;
std::string source_path;
@@ -858,8 +797,7 @@ void file_manager::start() {
std::vector<active_item> active_items{};
auto result =
utils::db::sqlite::db_select{*db.get(), upload_active_table}.go();
auto result = utils::db::sqlite::db_select{*db_, upload_active_table}.go();
while (result.has_row()) {
try {
std::optional<utils::db::sqlite::db_select::row> row;
@@ -875,12 +813,11 @@ void file_manager::start() {
}
for (auto &&active_item : active_items) {
queue_upload(active_item.api_path, active_item.source_path, false,
db.get());
queue_upload(active_item.api_path, active_item.source_path, false);
}
active_items.clear();
result = utils::db::sqlite::db_select{*db.get(), resume_table}.go();
result = utils::db::sqlite::db_select{*db_, resume_table}.go();
if (not result.ok()) {
return;
}
@@ -999,8 +936,7 @@ void file_manager::store_resume(const i_open_file &file) {
return;
}
auto db = create_db();
auto result = utils::db::sqlite::db_insert{*db.get(), resume_table}
auto result = utils::db::sqlite::db_insert{*db_, resume_table}
.or_replace()
.column_value("api_path", file.get_api_path())
.column_value("data", create_resume_entry(file).dump())
@@ -1018,8 +954,7 @@ void file_manager::store_resume(const i_open_file &file) {
}
void file_manager::swap_renamed_items(std::string from_api_path,
std::string to_api_path, bool directory,
sqlite3 *db) {
std::string to_api_path, bool directory) {
REPERTORY_USES_FUNCTION_NAME();
auto file_iter = open_file_lookup_.find(from_api_path);
@@ -1034,7 +969,7 @@ void file_manager::swap_renamed_items(std::string from_api_path,
return;
}
auto result = utils::db::sqlite::db_update{*db, resume_table}
auto result = utils::db::sqlite::db_update{*db_, resume_table}
.column_value("api_path", to_api_path)
.where("api_path")
.equals(from_api_path)
@@ -1052,11 +987,9 @@ void file_manager::upload_completed(const file_upload_completed &evt) {
unique_mutex_lock upload_lock(upload_mtx_);
if (not utils::string::to_bool(evt.get_cancelled().get<std::string>())) {
auto db = create_db();
auto err = api_error_from_string(evt.get_result().get<std::string>());
if (err == api_error::success) {
auto result = utils::db::sqlite::db_delete{*db.get(), upload_active_table}
auto result = utils::db::sqlite::db_delete{*db_, upload_active_table}
.where("api_path")
.equals(evt.get_api_path().get<std::string>())
.go();
@@ -1075,12 +1008,12 @@ void file_manager::upload_completed(const file_upload_completed &evt) {
not utils::file::file(evt.get_source().get<std::string>()).exists()) {
event_system::instance().raise<file_upload_not_found>(
evt.get_api_path(), evt.get_source());
remove_upload(evt.get_api_path(), true, db.get());
remove_upload(evt.get_api_path(), true);
} else {
event_system::instance().raise<file_upload_retry>(
evt.get_api_path(), evt.get_source(), err);
queue_upload(evt.get_api_path(), evt.get_source(), true, db.get());
queue_upload(evt.get_api_path(), evt.get_source(), true);
upload_notify_.wait_for(upload_lock, 5s);
}
}
@@ -1092,8 +1025,6 @@ void file_manager::upload_completed(const file_upload_completed &evt) {
void file_manager::upload_handler() {
REPERTORY_USES_FUNCTION_NAME();
auto db = create_db();
while (not stop_requested_) {
auto should_wait{true};
unique_mutex_lock upload_lock(upload_mtx_);
@@ -1103,7 +1034,7 @@ void file_manager::upload_handler() {
}
if (upload_lookup_.size() < config_.get_max_upload_count()) {
auto result = utils::db::sqlite::db_select{*db.get(), upload_table}
auto result = utils::db::sqlite::db_select{*db_, upload_table}
.order_by("api_path", true)
.limit(1)
.go();
@@ -1121,7 +1052,7 @@ void file_manager::upload_handler() {
should_wait = false;
event_system::instance().raise<file_upload_not_found>(api_path,
source_path);
remove_upload(api_path, true, db.get());
remove_upload(api_path, true);
} break;
case api_error::success: {
@@ -1129,13 +1060,13 @@ void file_manager::upload_handler() {
upload_lookup_[fsi.api_path] =
std::make_unique<upload>(fsi, provider_);
auto del_res = utils::db::sqlite::db_delete{*db.get(), upload_table}
auto del_res = utils::db::sqlite::db_delete{*db_, upload_table}
.where("api_path")
.equals(api_path)
.go();
if (del_res.ok()) {
auto ins_res =
utils::db::sqlite::db_insert{*db.get(), upload_active_table}
utils::db::sqlite::db_insert{*db_, upload_active_table}
.column_value("api_path", api_path)
.column_value("source_path", source_path)
.go();
@@ -1150,7 +1081,7 @@ void file_manager::upload_handler() {
default: {
event_system::instance().raise<file_upload_retry>(api_path,
source_path, res);
queue_upload(api_path, source_path, true, db.get());
queue_upload(api_path, source_path, true);
} break;
}
}
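
Every create_db() call site in this file now goes through utils::db::sqlite::create_db(path, sql_create_tables) instead. The helper itself is not part of this diff; judging by the removed file_manager::create_db() body above (and the matching blocks removed from encrypt_provider and meta_db below), it presumably consolidates the open, table-creation and journal-mode steps along these lines. Treat this as a hedged reconstruction from the removed code, not the actual utils implementation.

namespace utils::db::sqlite {

// Hedged sketch reconstructed from the removed per-class create_db() bodies;
// the real helper may differ in details (error reporting, pragmas, etc.).
[[nodiscard]] inline auto
create_db(const std::string &db_path,
          const std::map<std::string, std::string> &sql_create_tables)
    -> db3_t {
  sqlite3 *db3{nullptr};
  auto res = sqlite3_open_v2(db_path.c_str(), &db3,
                             SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                             nullptr);
  if (res != SQLITE_OK) {
    throw std::runtime_error("failed to open db|" + db_path + '|' +
                             std::to_string(res) + '|' + sqlite3_errstr(res));
  }

  auto db = db3_t{db3, sqlite3_deleter()};

  // run each CREATE TABLE IF NOT EXISTS statement from the map
  for (auto &&create_item : sql_create_tables) {
    std::string err;
    if (not execute_sql(*db, create_item.second, err)) {
      db.reset();
      throw std::runtime_error(err);
    }
  }

  set_journal_mode(*db);
  return db;
}

} // namespace utils::db::sqlite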

View File

@@ -1025,35 +1025,9 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
return false;
}
auto db_path =
utils::path::combine(config_.get_data_directory(), {"meta.db"});
sqlite3 *db3{nullptr};
auto res =
sqlite3_open_v2(db_path.c_str(), &db3,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, nullptr);
if (res != SQLITE_OK) {
utils::error::raise_error(function_name, "failed to open db|" + db_path +
'|' + std::to_string(res) +
'|' + sqlite3_errstr(res));
return false;
}
db_ = utils::db::sqlite::db3_t{
db3,
utils::db::sqlite::sqlite3_deleter(),
};
for (auto &&create : sql_create_tables) {
std::string err;
if (not utils::db::sqlite::execute_sql(*db_, create.second, err)) {
utils::error::raise_error(function_name, "failed to create table|" +
create.first + '|' + err);
db_.reset();
return false;
}
}
utils::db::sqlite::set_journal_mode(*db_);
db_ = utils::db::sqlite::create_db(
utils::path::combine(config_.get_data_directory(), {"meta.db"}),
sql_create_tables);
const auto cfg = config_.get_encrypt_config();

View File

@@ -34,42 +34,24 @@ namespace repertory {
meta_db::meta_db(const app_config &cfg) {
REPERTORY_USES_FUNCTION_NAME();
auto db_path = utils::path::combine(cfg.get_data_directory(), {"meta.db"});
sqlite3 *db3{nullptr};
auto res =
sqlite3_open_v2(db_path.c_str(), &db3,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, nullptr);
if (res != SQLITE_OK) {
utils::error::raise_error(function_name, "failed to open db|" + db_path +
'|' + std::to_string(res) +
'|' + sqlite3_errstr(res));
return;
}
db_ = utils::db::sqlite::db3_t{
db3,
utils::db::sqlite::sqlite3_deleter(),
const std::map<std::string, std::string> sql_create_tables{
{
{"meta"},
{"CREATE TABLE IF NOT EXISTS "
"meta "
"("
"api_path TEXT PRIMARY KEY ASC, "
"data TEXT, "
"directory INTEGER, "
"pinned INTEGER, "
"source_path TEXT"
");"},
},
};
const auto *create = "CREATE TABLE IF NOT EXISTS "
"meta "
"("
"api_path TEXT PRIMARY KEY ASC, "
"data TEXT, "
"directory INTEGER, "
"pinned INTEGER, "
"source_path TEXT"
");";
std::string err;
if (not utils::db::sqlite::execute_sql(*db_, create, err)) {
utils::error::raise_error(function_name,
"failed to create db|" + db_path + '|' + err);
db_.reset();
return;
}
utils::db::sqlite::set_journal_mode(*db_);
db_ = utils::db::sqlite::create_db(
utils::path::combine(cfg.get_data_directory(), {"meta.db"}),
sql_create_tables);
}
meta_db::~meta_db() { db_.reset(); }
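
On the query side nothing about the builder API changes; the difference throughout this commit is simply that db_select, db_insert, db_delete and db_update now run against the shared *db_ handle instead of a freshly opened *db.get(). The fragment below regroups calls that appear in the hunks above into one illustrative usage sketch; upload_table and api_path are used as in the diff, and the snippet itself is not a function from the sources.

// Look up a queued upload through the member handle:
auto row_found = utils::db::sqlite::db_select{*db_, upload_table}
                     .column("source_path")
                     .where("api_path")
                     .equals(api_path)
                     .go()
                     .has_row();

// Insert or replace an entry, then check the result:
auto result = utils::db::sqlite::db_insert{*db_, upload_table}
                  .or_replace()
                  .column_value("api_path", api_path)
                  .column_value("source_path", source_path)
                  .go();
if (not result.ok()) {
  // the real code raises a file_upload_failed event here
}

// Remove the entry again once the upload completes:
auto del_res = utils::db::sqlite::db_delete{*db_, upload_table}
                   .where("api_path")
                   .equals(api_path)
                   .go();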