Compare commits

21 Commits
72fe3613d3 ... 65b7428bdf

SHA1:
65b7428bdf
f68917c8cc
80fd52625e
9961cb700e
bb983594d5
eeea09f65a
95b9464c9f
f1ab604fe9
d9740445dc
69d44c27cc
f8ded1ecd1
3cae30eedd
e34f0efc79
c9ac60a2fc
8c8d7b3bf9
7fa51e906c
a0d653fff7
2df67abffb
c944039759
73d1d993d7
efa5e07549
@@ -28,6 +28,64 @@

#include "utils/error_utils.hpp"

namespace repertory {
template <typename data_t> class atomic final {
public:
  atomic() : mtx_(std::make_shared<std::mutex>()) {}

  atomic(const atomic &) = default;

  atomic(data_t data)
      : data_(std::move(data)), mtx_(std::make_shared<std::mutex>()) {}

  atomic(data_t data, std::unique_ptr<std::mutex> mtx)
      : data_(std::move(data)), mtx_(std::move(mtx)) {}

  atomic(atomic &&) = default;

  ~atomic() = default;

private:
  data_t data_;
  std::shared_ptr<std::mutex> mtx_;

public:
  auto operator=(const atomic &) -> atomic & = default;

  auto operator=(atomic &&) -> atomic & = default;

  auto operator==(const atomic &at_data) const -> bool {
    mutex_lock lock(*mtx_);
    return static_cast<data_t>(at_data) == data_;
  }

  auto operator==(const data_t &data) const -> bool {
    mutex_lock lock(*mtx_);
    return data == data_;
  }

  auto operator!=(const atomic &at_data) const -> bool {
    mutex_lock lock(*mtx_);
    return static_cast<data_t>(at_data) != data_;
  }

  auto operator!=(const data_t &data) const -> bool {
    mutex_lock lock(*mtx_);
    return data != data_;
  }

  auto operator=(const data_t &data) -> atomic & {
    mutex_lock lock(*mtx_);
    if (&data != &data_) {
      data_ = data;
    }

    return *this;
  }

  operator data_t() const {
    mutex_lock lock(*mtx_);
    return data_;
  }
};

using atomic_string = atomic<std::string>;

class app_config final {
public:
  [[nodiscard]] static auto default_agent_name(const provider_type &prov)
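A minimal usage sketch of the `atomic<data_t>` wrapper introduced above (the call site is hypothetical; it only relies on the constructors, conversion operator, and comparison/assignment operators shown in the hunk):

```cpp
#include <string>

// Hypothetical call site for repertory::atomic / atomic_string as declared above.
void example() {
  repertory::atomic_string token{std::string{"abc"}};

  std::string copy = token;              // operator data_t(): copies the value under the lock
  token = std::string{"xyz"};            // operator=(const data_t &): assigns under the lock
  bool same = (token == std::string{"xyz"}); // operator==(const data_t &): compares under the lock
  (void)copy;
  (void)same;
}
```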
@@ -57,71 +115,71 @@ public:
  ~app_config() { save(); }

private:
  provider_type prov_;
  std::string api_auth_;
  std::uint16_t api_port_;
  std::string api_user_;
  bool config_changed_;
  std::string data_directory_;
  database_type db_type_{database_type::rocksdb};
  std::uint8_t download_timeout_secs_;
  bool enable_chunk_downloader_timeout_;
  bool enable_comm_duration_events_;
  bool enable_drive_events_;
  bool enable_max_cache_size_;
  std::atomic<provider_type> prov_;
  atomic_string api_auth_;
  std::atomic<std::uint16_t> api_port_;
  atomic_string api_user_;
  std::atomic<bool> config_changed_;
  std::atomic<database_type> db_type_{database_type::rocksdb};
  std::atomic<std::uint8_t> download_timeout_secs_;
  std::atomic<bool> enable_chunk_downloader_timeout_;
  std::atomic<bool> enable_comm_duration_events_;
  std::atomic<bool> enable_drive_events_;
  std::atomic<bool> enable_max_cache_size_;
#if defined(_WIN32)
  bool enable_mount_manager_;
  std::atomic<bool> enable_mount_manager_;
#endif // defined(_WIN32)
  bool enable_remote_mount_;
  encrypt_config encrypt_config_;
  event_level event_level_;
  std::uint32_t eviction_delay_mins_;
  bool eviction_uses_accessed_time_;
  std::uint16_t high_freq_interval_secs_;
  bool is_remote_mount_;
  std::uint16_t low_freq_interval_secs_;
  std::uint64_t max_cache_size_bytes_;
  std::uint8_t max_upload_count_;
  std::uint16_t med_freq_interval_secs_;
  std::uint8_t min_download_timeout_secs_;
  std::uint16_t online_check_retry_secs_;
  std::uint16_t orphaned_file_retention_days_;
  std::string preferred_download_type_;
  std::uint8_t read_ahead_count_;
  std::uint8_t remote_client_pool_size_;
  std::string remote_host_name_or_ip_;
  std::uint8_t remote_max_connections_;
  std::uint16_t remote_port_;
  std::uint16_t remote_receive_timeout_secs_;
  std::uint16_t remote_send_timeout_secs_;
  std::string remote_token_;
  std::uint16_t retry_read_count_;
  std::uint16_t ring_buffer_file_size_;
  std::uint16_t task_wait_ms_;
  std::atomic<bool> enable_remote_mount_;
  std::atomic<event_level> event_level_;
  std::atomic<std::uint32_t> eviction_delay_mins_;
  std::atomic<bool> eviction_uses_accessed_time_;
  std::atomic<std::uint16_t> high_freq_interval_secs_;
  std::atomic<bool> is_remote_mount_;
  std::atomic<std::uint16_t> low_freq_interval_secs_;
  std::atomic<std::uint64_t> max_cache_size_bytes_;
  std::atomic<std::uint8_t> max_upload_count_;
  std::atomic<std::uint16_t> med_freq_interval_secs_;
  std::atomic<std::uint8_t> min_download_timeout_secs_;
  std::atomic<std::uint16_t> online_check_retry_secs_;
  std::atomic<std::uint16_t> orphaned_file_retention_days_;
  atomic_string preferred_download_type_;
  std::atomic<std::uint8_t> read_ahead_count_;
  std::atomic<std::uint8_t> remote_client_pool_size_;
  atomic_string remote_host_name_or_ip_;
  std::atomic<std::uint8_t> remote_max_connections_;
  std::atomic<std::uint16_t> remote_port_;
  std::atomic<std::uint16_t> remote_receive_timeout_secs_;
  std::atomic<std::uint16_t> remote_send_timeout_secs_;
  atomic_string remote_token_;
  std::atomic<std::uint16_t> retry_read_count_;
  std::atomic<std::uint16_t> ring_buffer_file_size_;
  std::atomic<std::uint16_t> task_wait_ms_;

private:
  std::string cache_directory_;
  host_config hc_{};
  s3_config s3_config_{};
  sia_config sia_config_{};
  atomic_string cache_directory_;
  atomic_string data_directory_;
  atomic<encrypt_config> encrypt_config_;
  atomic<host_config> hc_;
  atomic<s3_config> s3_config_;
  atomic<sia_config> sia_config_;
  std::uint64_t version_{REPERTORY_CONFIG_VERSION};
  std::string log_directory_;
  atomic_string log_directory_;
  mutable std::recursive_mutex read_write_mutex_;
  mutable std::recursive_mutex remote_mount_mutex_;

private:
  auto get_database_value(const json &json_document, const std::string &name,
                          database_type &dst, bool &success_flag) -> bool;
                          std::atomic<database_type> &dst, bool &success_flag)
      -> bool;

  template <typename dest>
  auto get_value(const json &json_document, const std::string &name, dest &dst,
                 bool &success_flag) -> bool {
  template <typename dest_t>
  auto get_value(const json &json_document, const std::string &name,
                 dest_t &dst, bool &success_flag) -> bool {
    REPERTORY_USES_FUNCTION_NAME();

    auto ret{false};
    try {
      if (json_document.find(name) != json_document.end()) {
        dst = json_document[name].get<dest>();
        dst = json_document[name].get<dest_t>();
        ret = true;
      } else {
        success_flag = false;
@@ -137,8 +195,8 @@ private:

  [[nodiscard]] auto load() -> bool;

  template <typename dest, typename source>
  auto set_value(dest &dst, const source &src) -> bool {
  template <typename dst_t, typename src_t>
  auto set_value(dst_t &dst, const src_t &src) -> bool {
    auto ret{false};
    recur_mutex_lock lock(read_write_mutex_);
    if (dst != src) {
@@ -219,7 +277,8 @@ public:
  }

  [[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint16_t {
    return std::max(static_cast<std::uint16_t>(1U), high_freq_interval_secs_);
    return std::max(static_cast<std::uint16_t>(1U),
                    static_cast<std::uint16_t>(high_freq_interval_secs_));
  }

  [[nodiscard]] auto get_host_config() const -> host_config { return hc_; }
@@ -235,27 +294,32 @@ public:
  }

  [[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint16_t {
    return std::max(static_cast<std::uint16_t>(1U), low_freq_interval_secs_);
    return std::max(static_cast<std::uint16_t>(1U),
                    static_cast<std::uint16_t>(low_freq_interval_secs_));
  }

  [[nodiscard]] auto get_max_cache_size_bytes() const -> std::uint64_t;

  [[nodiscard]] auto get_max_upload_count() const -> std::uint8_t {
    return std::max(std::uint8_t(1U), max_upload_count_);
    return std::max(std::uint8_t(1U),
                    static_cast<std::uint8_t>(max_upload_count_));
  }

  [[nodiscard]] auto get_med_frequency_interval_secs() const -> std::uint16_t {
    return std::max(static_cast<std::uint16_t>(1U), med_freq_interval_secs_);
    return std::max(static_cast<std::uint16_t>(1U),
                    static_cast<std::uint16_t>(med_freq_interval_secs_));
  }

  [[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t {
    return std::max(std::uint16_t(15U), online_check_retry_secs_);
    return std::max(std::uint16_t(15U),
                    static_cast<std::uint16_t>(online_check_retry_secs_));
  }

  [[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t {
    return std::min(static_cast<std::uint16_t>(31U),
                    std::max(static_cast<std::uint16_t>(1U),
                             orphaned_file_retention_days_));
    return std::min(
        static_cast<std::uint16_t>(31U),
        std::max(static_cast<std::uint16_t>(1U),
                 static_cast<std::uint16_t>(orphaned_file_retention_days_)));
  }

  [[nodiscard]] auto get_preferred_download_type() const -> download_type {
@@ -268,11 +332,13 @@ public:
  }

  [[nodiscard]] auto get_read_ahead_count() const -> std::uint8_t {
    return std::max(static_cast<std::uint8_t>(1U), read_ahead_count_);
    return std::max(static_cast<std::uint8_t>(1U),
                    static_cast<std::uint8_t>(read_ahead_count_));
  }

  [[nodiscard]] auto get_remote_client_pool_size() const -> std::uint8_t {
    return std::max(static_cast<std::uint8_t>(5U), remote_client_pool_size_);
    return std::max(static_cast<std::uint8_t>(5U),
                    static_cast<std::uint8_t>(remote_client_pool_size_));
  }

  [[nodiscard]] auto get_remote_host_name_or_ip() const -> std::string {
@@ -280,7 +346,8 @@ public:
  }

  [[nodiscard]] auto get_remote_max_connections() const -> std::uint8_t {
    return std::max(static_cast<std::uint8_t>(1U), remote_max_connections_);
    return std::max(static_cast<std::uint8_t>(1U),
                    static_cast<std::uint8_t>(remote_max_connections_));
  }

  [[nodiscard]] auto get_remote_port() const -> std::uint16_t {
@@ -300,13 +367,15 @@ public:
  }

  [[nodiscard]] auto get_retry_read_count() const -> std::uint16_t {
    return std::max(std::uint16_t(2), retry_read_count_);
    return std::max(std::uint16_t(2),
                    static_cast<std::uint16_t>(retry_read_count_));
  }

  [[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t {
    return std::max(
        static_cast<std::uint16_t>(64U),
        std::min(static_cast<std::uint16_t>(1024U), ring_buffer_file_size_));
        std::min(static_cast<std::uint16_t>(1024U),
                 static_cast<std::uint16_t>(ring_buffer_file_size_)));
  }

  [[nodiscard]] auto get_s3_config() const -> s3_config { return s3_config_; }
@@ -316,7 +385,8 @@ public:
  }

  [[nodiscard]] auto get_task_wait_ms() const -> std::uint16_t {
    return std::max(static_cast<std::uint16_t>(50U), task_wait_ms_);
    return std::max(static_cast<std::uint16_t>(50U),
                    static_cast<std::uint16_t>(task_wait_ms_));
  }

  [[nodiscard]] auto get_value_by_name(const std::string &name) -> std::string;
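The static_cast calls added in the getters above are needed because std::max and std::min deduce a single type from both arguments; a std::atomic<std::uint16_t> member is not a std::uint16_t, so its value has to be loaded (converted) explicitly before the call. A minimal illustration with hypothetical names:

```cpp
#include <algorithm>
#include <atomic>
#include <cstdint>

std::atomic<std::uint16_t> secs{30U};

std::uint16_t clamped() {
  // std::max(static_cast<std::uint16_t>(1U), secs) would not compile:
  // the two arguments must deduce to the same type.
  return std::max(static_cast<std::uint16_t>(1U),
                  static_cast<std::uint16_t>(secs)); // explicit atomic load
}
```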
@@ -370,6 +440,8 @@ public:

  void set_enable_remote_mount(bool enable_remote_mount);

  void set_encrypt_config(encrypt_config cfg);

  void set_event_level(const event_level &level) {
    if (set_value(event_level_, level)) {
      event_system::instance().raise<event_level_changed>(
@@ -390,25 +462,7 @@ public:
    set_value(high_freq_interval_secs_, high_frequency_interval_secs);
  }

#if defined(PROJECT_TESTING)
  void set_host_config(host_config hc) {
    config_changed_ = true;
    hc_ = std::move(hc);
    save();
  }

  void set_s3_config(s3_config s3) {
    config_changed_ = true;
    s3_config_ = std::move(s3);
    save();
  }

  void set_sia_config(sia_config sia) {
    config_changed_ = true;
    sia_config_ = std::move(sia);
    save();
  }
#endif // defined(PROJECT_TESTING)
  void set_host_config(host_config cfg);

  void set_is_remote_mount(bool is_remote_mount);

@@ -439,8 +493,8 @@ public:
    set_value(orphaned_file_retention_days_, orphaned_file_retention_days);
  }

  void set_preferred_download_type(const download_type &dt) {
    set_value(preferred_download_type_, download_type_to_string(dt));
  void set_preferred_download_type(const download_type &type) {
    set_value(preferred_download_type_, download_type_to_string(type));
  }

  void set_read_ahead_count(std::uint8_t read_ahead_count) {
@@ -484,6 +538,10 @@ public:
    set_value(retry_read_count_, retry_read_count);
  }

  void set_s3_config(s3_config cfg);

  void set_sia_config(sia_config cfg);

  void set_task_wait_ms(std::uint16_t task_wait_ms) {
    set_value(task_wait_ms_, task_wait_ms);
  }
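For reference, the call pattern for the get_value template above, as it appears later in load() in this diff, looks roughly like the following (a sketch of the member-function call site, not a standalone compilable snippet):

```cpp
// Inside app_config::load(), after parsing the config file (sketch).
auto json_document = json::parse(json_text);

auto ret = not json_text.empty();
get_value(json_document, "ApiAuth", api_auth_, ret); // atomic_string member
get_value(json_document, "ApiPort", api_port_, ret); // std::atomic<std::uint16_t> member
// get_value clears ret when a requested key is missing.
```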
@@ -42,20 +42,28 @@ private:

private:
  std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
  rocksdb::ColumnFamilyHandle *directory_family_{};
  rocksdb::ColumnFamilyHandle *file_family_{};
  rocksdb::ColumnFamilyHandle *source_family_{};

private:
  void create_or_open(bool clear);

  [[nodiscard]] auto create_iterator() const
  [[nodiscard]] auto create_iterator(rocksdb::ColumnFamilyHandle *family) const
      -> std::shared_ptr<rocksdb::Iterator>;

  [[nodiscard]] static auto
  perform_action(std::string_view function_name,
                 std::function<rocksdb::Status()> action) -> bool;
                 std::function<rocksdb::Status()> action) -> api_error;

  [[nodiscard]] auto perform_action(
      std::string_view function_name,
      std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool;
      std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
      -> api_error;

  [[nodiscard]] auto remove_item(const std::string &api_path,
                                 const std::string &source_path,
                                 rocksdb::Transaction *txn) -> rocksdb::Status;

public:
  [[nodiscard]] auto add_directory(const std::string &api_path,

@@ -43,7 +43,7 @@ private:

private:
  std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
  rocksdb::ColumnFamilyHandle *default_family_{};
  rocksdb::ColumnFamilyHandle *meta_family_{};
  rocksdb::ColumnFamilyHandle *pinned_family_{};
  rocksdb::ColumnFamilyHandle *size_family_{};
  rocksdb::ColumnFamilyHandle *source_family_{};
@@ -0,0 +1,65 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_

#include "types/repertory.hpp"

namespace repertory {
class app_config;

class cache_size_mgr final {
public:
  cache_size_mgr(const cache_size_mgr &) = delete;
  cache_size_mgr(cache_size_mgr &&) = delete;
  auto operator=(const cache_size_mgr &) -> cache_size_mgr & = delete;
  auto operator=(cache_size_mgr &&) -> cache_size_mgr & = delete;

protected:
  cache_size_mgr() = default;

  ~cache_size_mgr() { stop(); }

private:
  static cache_size_mgr instance_;

private:
  app_config *cfg_{nullptr};
  std::uint64_t cache_size_{0U};
  std::mutex mtx_;
  std::condition_variable notify_;
  stop_type stop_requested_{false};

public:
  [[nodiscard]] auto expand(std::uint64_t size) -> api_error;

  void initialize(app_config *cfg);

  [[nodiscard]] static auto instance() -> cache_size_mgr & { return instance_; }

  [[nodiscard]] auto shrink(std::uint64_t size) -> api_error;

  void stop();
};
} // namespace repertory

#endif // REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
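A sketch of how a caller might drive the new cache_size_mgr singleton, based only on the interface declared above (the call sites and the blocking behavior noted in the comments are assumptions, not taken from this diff):

```cpp
// Hypothetical call sites for the interface declared above.
void on_startup(repertory::app_config *cfg) {
  repertory::cache_size_mgr::instance().initialize(cfg);
}

auto before_download(std::uint64_t chunk_size) -> repertory::api_error {
  // expand() presumably waits (or fails) until the requested bytes fit
  // under the configured maximum cache size.
  return repertory::cache_size_mgr::instance().expand(chunk_size);
}

auto after_eviction(std::uint64_t freed_bytes) -> repertory::api_error {
  return repertory::cache_size_mgr::instance().shrink(freed_bytes);
}

void on_shutdown() { repertory::cache_size_mgr::instance().stop(); }
```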
@@ -54,9 +54,9 @@ private:

private:
  app_config &config_;
  std::unique_ptr<i_file_db> db_;

private:
  std::unique_ptr<i_file_db> db_{nullptr};
  i_file_manager *fm_{nullptr};
  std::unordered_map<std::string, std::shared_ptr<reader_info>> reader_lookup_;
  std::recursive_mutex reader_lookup_mtx_;

@@ -24,15 +24,25 @@

#include "types/repertory.hpp"

namespace repertory::utils {
namespace repertory {
class app_config;

namespace utils {
void calculate_allocation_size(bool directory, std::uint64_t file_size,
                               UINT64 allocation_size,
                               std::string &allocation_meta_size);

[[nodiscard]] auto
create_volume_label(const provider_type &prov) -> std::string;
create_rocksdb(const app_config &cfg, const std::string &name,
               const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
               std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
    -> std::unique_ptr<rocksdb::TransactionDB>;

[[nodiscard]] auto create_volume_label(const provider_type &prov)
    -> std::string;

[[nodiscard]] auto get_attributes_from_meta(const api_meta_map &meta) -> DWORD;
} // namespace repertory::utils
} // namespace utils
} // namespace repertory

#endif // REPERTORY_INCLUDE_UTILS_UTILS_HPP_
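The shared utils::create_rocksdb helper declared above replaces the per-file anonymous-namespace copies removed further down in this diff. A condensed sketch of its call pattern, mirroring rdb_file_db::create_or_open below (cfg and clear are assumed to be in scope):

```cpp
// Sketch of a create_or_open() built on the shared helper declared above.
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                      rocksdb::ColumnFamilyOptions());
families.emplace_back("file", rocksdb::ColumnFamilyOptions());
families.emplace_back("source", rocksdb::ColumnFamilyOptions());

auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
auto db = repertory::utils::create_rocksdb(cfg, "file", families, handles, clear);

// Handles come back in the same order as the descriptors; handles.at()
// throws on a size mismatch instead of silently reading out of bounds.
std::size_t idx{};
auto *directory_family = handles.at(idx++); // default column family
auto *file_family = handles.at(idx++);
auto *source_family = handles.at(idx++);
```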
@@ -30,29 +30,30 @@
#include "utils/utils.hpp"

namespace {
constexpr const auto default_api_auth_size = 48U;
constexpr const auto default_download_timeout_ces = 30U;
constexpr const auto default_eviction_delay_mins = 10U;
constexpr const auto default_high_freq_interval_secs = 30U;
constexpr const auto default_low_freq_interval_secs = 60U * 60U;
constexpr const auto default_max_cache_size_bytes =
    20ULL * 1024ULL * 1024ULL * 1024ULL;
constexpr const auto default_max_upload_count = 5U;
constexpr const auto default_med_freq_interval_secs = 2U * 60U;
constexpr const auto default_min_download_timeout_secs = 5U;
constexpr const auto default_online_check_retry_secs = 60U;
constexpr const auto default_orphaned_file_retention_days = 15U;
constexpr const auto default_read_ahead_count = 4U;
constexpr const auto default_remote_client_pool_size = 10U;
constexpr const auto default_remote_host_name_or_ip = "";
constexpr const auto default_remote_max_connections = 20U;
constexpr const auto default_remote_receive_timeout_secs = 120U;
constexpr const auto default_remote_send_timeout_secs = 30U;
constexpr const auto default_remote_token = "";
constexpr const auto default_retry_read_count = 6U;
constexpr const auto default_ring_buffer_file_size = 512U;
constexpr const auto default_task_wait_ms = 100U;
constexpr const auto retry_save_count = 5U;
constexpr const auto default_api_auth_size{48U};
constexpr const auto default_download_timeout_ces{30U};
constexpr const auto default_eviction_delay_mins{10U};
constexpr const auto default_high_freq_interval_secs{30U};
constexpr const auto default_low_freq_interval_secs{60U * 60U};
constexpr const std::uint64_t default_max_cache_size_bytes = {
    20ULL * 1024ULL * 1024ULL * 1024ULL,
};
constexpr const auto default_max_upload_count{5U};
constexpr const auto default_med_freq_interval_secs{2U * 60U};
constexpr const auto default_min_download_timeout_secs{5U};
constexpr const auto default_online_check_retry_secs{60U};
constexpr const auto default_orphaned_file_retention_days{15U};
constexpr const auto default_read_ahead_count{4U};
constexpr const auto default_remote_client_pool_size{10U};
constexpr const auto default_remote_host_name_or_ip{""};
constexpr const auto default_remote_max_connections{20U};
constexpr const auto default_remote_receive_timeout_secs{120U};
constexpr const auto default_remote_send_timeout_secs{30U};
constexpr const auto default_remote_token{""};
constexpr const auto default_retry_read_count{6U};
constexpr const auto default_ring_buffer_file_size{512U};
constexpr const auto default_task_wait_ms{100U};
constexpr const auto retry_save_count{5U};
} // namespace

namespace repertory {
@@ -63,9 +64,6 @@ app_config::app_config(const provider_type &prov,
      api_port_(default_rpc_port(prov)),
      api_user_(std::string{REPERTORY}),
      config_changed_(false),
      data_directory_(data_directory.empty()
                          ? default_data_directory(prov)
                          : utils::path::absolute(data_directory)),
      download_timeout_secs_(default_download_timeout_ces),
      enable_chunk_downloader_timeout_(true),
      enable_comm_duration_events_(false),
@@ -100,11 +98,16 @@ app_config::app_config(const provider_type &prov,
      retry_read_count_(default_retry_read_count),
      ring_buffer_file_size_(default_ring_buffer_file_size),
      task_wait_ms_(default_task_wait_ms) {
  data_directory_ = data_directory.empty()
                        ? default_data_directory(prov)
                        : utils::path::absolute(data_directory);
  cache_directory_ = utils::path::combine(data_directory_, {"cache"});
  log_directory_ = utils::path::combine(data_directory_, {"logs"});

  hc_.agent_string = default_agent_name(prov_);
  hc_.api_port = default_api_port(prov_);
  host_config host_cfg{hc_};
  host_cfg.agent_string = default_agent_name(prov_);
  host_cfg.api_port = default_api_port(prov_);
  hc_ = host_cfg;

  if (not utils::file::directory(data_directory_).create_directory()) {
    throw startup_exception("unable to create: " + data_directory_);
@@ -128,7 +131,8 @@ auto app_config::get_config_file_path() const -> std::string {
}

auto app_config::get_database_value(const json &json_document,
                                    const std::string &name, database_type &dst,
                                    const std::string &name,
                                    std::atomic<database_type> &dst,
                                    bool &success_flag) -> bool {
  REPERTORY_USES_FUNCTION_NAME();

@@ -243,23 +247,12 @@ auto app_config::get_json() const -> json {
      {"EnableMountManager", enable_mount_manager_},
#endif // defined(_WIN32)
      {"EnableMaxCacheSize", enable_max_cache_size_},
      {"EncryptConfig",
       {
           {"EncryptionToken", encrypt_config_.encryption_token},
           {"Path", encrypt_config_.path},
       }},
      {"EncryptConfig", get_encrypt_config()},
      {"EventLevel", event_level_to_string(event_level_)},
      {"EvictionDelayMinutes", eviction_delay_mins_},
      {"EvictionUsesAccessedTime", eviction_uses_accessed_time_},
      {"HighFreqIntervalSeconds", high_freq_interval_secs_},
      {"HostConfig",
       {
           {"AgentString", hc_.agent_string},
           {"ApiPassword", hc_.api_password},
           {"ApiPort", hc_.api_port},
           {"HostNameOrIp", hc_.host_name_or_ip},
           {"TimeoutMs", hc_.timeout_ms},
       }},
      {"HostConfig", get_host_config()},
      {"LowFreqIntervalSeconds", low_freq_interval_secs_},
      {"MaxCacheSizeBytes", max_cache_size_bytes_},
      {"MaxUploadCount", max_upload_count_},
@@ -284,22 +277,8 @@ auto app_config::get_json() const -> json {
      },
      {"RetryReadCount", retry_read_count_},
      {"RingBufferFileSize", ring_buffer_file_size_},
      {"S3Config",
       {
           {"AccessKey", s3_config_.access_key},
           {"Bucket", s3_config_.bucket},
           {"EncryptionToken", s3_config_.encryption_token},
           {"Region", s3_config_.region},
           {"SecretKey", s3_config_.secret_key},
           {"TimeoutMs", s3_config_.timeout_ms},
           {"URL", s3_config_.url},
           {"UsePathStyle", s3_config_.use_path_style},
           {"UseRegionInURL", s3_config_.use_region_in_url},
       }},
      {"SiaConfig",
       {
           {"Bucket", sia_config_.bucket},
       }},
      {"S3Config", get_s3_config()},
      {"SiaConfig", get_sia_config()},
      {"TaskWaitMillis", task_wait_ms_},
      {"Version", version_}};

@@ -357,7 +336,7 @@ auto app_config::get_json() const -> json {
auto app_config::get_max_cache_size_bytes() const -> std::uint64_t {
  auto max_space =
      std::max(static_cast<std::uint64_t>(100ULL * 1024ULL * 1024ULL),
               max_cache_size_bytes_);
               static_cast<std::uint64_t>(max_cache_size_bytes_));
  auto free_space = utils::file::get_free_drive_space(get_cache_directory());
  return free_space.has_value() ? std::min(free_space.value(), max_space)
                                : max_space;
@@ -393,13 +372,13 @@ auto app_config::get_value_by_name(const std::string &name) -> std::string {

  try {
    if (name == "ApiAuth") {
      return api_auth_;
      return get_api_auth();
    }
    if (name == "ApiPort") {
      return std::to_string(get_api_port());
    }
    if (name == "ApiUser") {
      return api_user_;
      return get_api_user();
    }
    if (name == "DatabaseType") {
      return database_type_to_string(get_database_type());
@@ -421,14 +400,14 @@ auto app_config::get_value_by_name(const std::string &name) -> std::string {
#if defined(_WIN32)
    }
    if (name == "EnableMountManager") {
      return std::to_string(get_enable_mount_manager());
      return utils::string::from_bool(get_enable_mount_manager());
#endif // defined(_WIN32)
    }
    if (name == "EncryptConfig.Path") {
      return utils::path::absolute(encrypt_config_.path);
      return utils::path::absolute(get_encrypt_config().path);
    }
    if (name == "EncryptConfig.EncryptionToken") {
      return encrypt_config_.encryption_token;
      return get_encrypt_config().encryption_token;
    }
    if (name == "EventLevel") {
      return event_level_to_string(get_event_level());
@@ -443,19 +422,19 @@ auto app_config::get_value_by_name(const std::string &name) -> std::string {
      return std::to_string(get_high_frequency_interval_secs());
    }
    if (name == "HostConfig.AgentString") {
      return hc_.agent_string;
      return get_host_config().agent_string;
    }
    if (name == "HostConfig.ApiPassword") {
      return hc_.api_password;
      return get_host_config().api_password;
    }
    if (name == "HostConfig.ApiPort") {
      return std::to_string(hc_.api_port);
      return std::to_string(get_host_config().api_port);
    }
    if (name == "HostConfig.HostNameOrIp") {
      return hc_.host_name_or_ip;
      return get_host_config().host_name_or_ip;
    }
    if (name == "HostConfig.TimeoutMs") {
      return std::to_string(hc_.timeout_ms);
      return std::to_string(get_host_config().timeout_ms);
    }
    if (name == "LowFreqIntervalSeconds") {
      return std::to_string(get_low_frequency_interval_secs());
@@ -516,34 +495,34 @@ auto app_config::get_value_by_name(const std::string &name) -> std::string {
      return std::to_string(get_ring_buffer_file_size());
    }
    if (name == "S3Config.AccessKey") {
      return s3_config_.access_key;
      return get_s3_config().access_key;
    }
    if (name == "S3Config.Bucket") {
      return s3_config_.bucket;
      return get_s3_config().bucket;
    }
    if (name == "S3Config.EncryptionToken") {
      return s3_config_.encryption_token;
      return get_s3_config().encryption_token;
    }
    if (name == "S3Config.Region") {
      return s3_config_.region;
      return get_s3_config().region;
    }
    if (name == "S3Config.SecretKey") {
      return s3_config_.secret_key;
      return get_s3_config().secret_key;
    }
    if (name == "S3Config.URL") {
      return s3_config_.url;
      return get_s3_config().url;
    }
    if (name == "S3Config.UsePathStyle") {
      return utils::string::from_bool(s3_config_.use_path_style);
      return utils::string::from_bool(get_s3_config().use_path_style);
    }
    if (name == "S3Config.UseRegionInURL") {
      return utils::string::from_bool(s3_config_.use_region_in_url);
      return utils::string::from_bool(get_s3_config().use_region_in_url);
    }
    if (name == "S3Config.TimeoutMs") {
      return std::to_string(s3_config_.timeout_ms);
      return std::to_string(get_s3_config().timeout_ms);
    }
    if (name == "SiaConfig.Bucket") {
      return sia_config_.bucket;
      return get_sia_config().bucket;
    }
    if (name == "TaskWaitMillis") {
      return std::to_string(get_task_wait_ms());
@@ -559,7 +538,7 @@ auto app_config::load() -> bool {

  auto ret{false};

  const auto config_file_path = get_config_file_path();
  auto config_file_path = get_config_file_path();
  recur_mutex_lock lock(read_write_mutex_);
  if (utils::file::file(config_file_path).exists()) {
    try {
@@ -567,11 +546,11 @@ auto app_config::load() -> bool {
      if (config_file.is_open()) {
        std::stringstream stream;
        stream << config_file.rdbuf();
        const auto json_text = stream.str();
        auto json_text = stream.str();
        config_file.close();
        ret = not json_text.empty();
        if (ret) {
          const auto json_document = json::parse(json_text);
          auto json_document = json::parse(json_text);

          get_value(json_document, "ApiAuth", api_auth_, ret);
          get_value(json_document, "ApiPort", api_port_, ret);
@@ -591,13 +570,8 @@ auto app_config::load() -> bool {
                    ret);

          if (json_document.find("EncryptConfig") != json_document.end()) {
            auto encrypt_config_json = json_document["EncryptConfig"];
            auto encrypt = encrypt_config_;
            get_value(encrypt_config_json, "Path", encrypt.path, ret);
            encrypt_config_.path = utils::path::absolute(encrypt_config_.path);
            get_value(encrypt_config_json, "EncryptionToken",
                      encrypt.encryption_token, ret);
            encrypt_config_ = encrypt;
            encrypt_config_ =
                json_document["EncryptConfig"].get<encrypt_config>();
          } else {
            ret = false;
          }
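The rewritten load() relies on json::get<encrypt_config>() (and, below, get<host_config>(), get<s3_config>(), get<sia_config>()), which requires ADL to_json/from_json overloads for those structs. Those overloads are not part of this diff; a sketch of what they might look like for encrypt_config, assuming only the two fields visible in the old get_json() code:

```cpp
#include <nlohmann/json.hpp>
#include <string>

// Assumed shape; the real struct and serializers live elsewhere in the repository.
struct encrypt_config {
  std::string encryption_token;
  std::string path;
};

inline void to_json(nlohmann::json &data, const encrypt_config &cfg) {
  data = nlohmann::json{
      {"EncryptionToken", cfg.encryption_token},
      {"Path", cfg.path},
  };
}

inline void from_json(const nlohmann::json &data, encrypt_config &cfg) {
  data.at("EncryptionToken").get_to(cfg.encryption_token);
  data.at("Path").get_to(cfg.path);
}
```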
@@ -608,44 +582,19 @@ auto app_config::load() -> bool {
          }

          if (json_document.find("HostConfig") != json_document.end()) {
            auto host_config_json = json_document["HostConfig"];
            auto cfg = hc_;
            get_value(host_config_json, "AgentString", cfg.agent_string, ret);
            get_value(host_config_json, "ApiPassword", cfg.api_password, ret);
            get_value(host_config_json, "ApiPort", cfg.api_port, ret);
            get_value(host_config_json, "HostNameOrIp", cfg.host_name_or_ip,
                      ret);
            get_value(host_config_json, "TimeoutMs", cfg.timeout_ms, ret);
            hc_ = cfg;
            hc_ = json_document["HostConfig"].get<host_config>();
          } else {
            ret = false;
          }

          if (json_document.find("S3Config") != json_document.end()) {
            auto s3_config_json = json_document["S3Config"];
            auto s3_cfg = s3_config_;
            get_value(s3_config_json, "AccessKey", s3_cfg.access_key, ret);
            get_value(s3_config_json, "Bucket", s3_cfg.bucket, ret);
            get_value(s3_config_json, "EncryptionToken",
                      s3_cfg.encryption_token, ret);
            get_value(s3_config_json, "Region", s3_cfg.region, ret);
            get_value(s3_config_json, "SecretKey", s3_cfg.secret_key, ret);
            get_value(s3_config_json, "TimeoutMs", s3_cfg.timeout_ms, ret);
            get_value(s3_config_json, "URL", s3_cfg.url, ret);
            get_value(s3_config_json, "UsePathStyle", s3_cfg.use_path_style,
                      ret);
            get_value(s3_config_json, "UseRegionInURL",
                      s3_cfg.use_region_in_url, ret);
            s3_config_ = s3_cfg;
            s3_config_ = json_document["S3Config"].get<s3_config>();
          } else {
            ret = false;
          }

          if (json_document.find("SiaConfig") != json_document.end()) {
            auto sia_config_json = json_document["SiaConfig"];
            auto sia_cfg = sia_config_;
            get_value(sia_config_json, "Bucket", sia_cfg.bucket, ret);
            sia_config_ = sia_cfg;
            sia_config_ = json_document["SiaConfig"].get<sia_config>();
          } else {
            ret = false;
          }
@@ -727,7 +676,7 @@ auto app_config::load() -> bool {
void app_config::save() {
  REPERTORY_USES_FUNCTION_NAME();

  const auto file_path = get_config_file_path();
  auto file_path = get_config_file_path();
  recur_mutex_lock lock(read_write_mutex_);
  if (config_changed_ || not utils::file::file(file_path).exists()) {
    if (not utils::file::directory(data_directory_).exists()) {
@@ -751,17 +700,21 @@ void app_config::save() {
}

void app_config::set_enable_remote_mount(bool enable_remote_mount) {
  recur_mutex_lock remote_lock(remote_mount_mutex_);
  if (get_is_remote_mount()) {
    set_value(enable_remote_mount_, false);
  } else {
    set_value(enable_remote_mount_, enable_remote_mount);
    return;
  }

  set_value(enable_remote_mount_, enable_remote_mount);
}

void app_config::set_is_remote_mount(bool is_remote_mount) {
  recur_mutex_lock remote_lock(remote_mount_mutex_);
void app_config::set_encrypt_config(encrypt_config cfg) {
  set_value(encrypt_config_, cfg);
}

void app_config::set_host_config(host_config cfg) { set_value(hc_, cfg); }

void app_config::set_is_remote_mount(bool is_remote_mount) {
  if (get_enable_remote_mount()) {
    set_value(is_remote_mount_, false);
    return;
@@ -770,6 +723,10 @@ void app_config::set_is_remote_mount(bool is_remote_mount) {
  set_value(is_remote_mount_, is_remote_mount);
}

void app_config::set_s3_config(s3_config cfg) { set_value(s3_config_, cfg); }

void app_config::set_sia_config(sia_config cfg) { set_value(sia_config_, cfg); }

auto app_config::set_value_by_name(const std::string &name,
                                   const std::string &value) -> std::string {
  REPERTORY_USES_FUNCTION_NAME();
@@ -794,7 +751,7 @@ auto app_config::set_value_by_name(const std::string &name,
  if (name == "DatabaseType") {
    set_database_type(
        database_type_from_string(value, database_type::rocksdb));
    return database_type_to_string(db_type_);
    return database_type_to_string(get_database_type());
  }
  if (name == "EnableChunkDownloaderTimeout") {
    set_enable_chunk_downloader_timeout(utils::string::to_bool(value));
@@ -820,11 +777,11 @@ auto app_config::set_value_by_name(const std::string &name,
  }
  if (name == "EncryptConfig.EncryptionToken") {
    set_value(encrypt_config_.encryption_token, value);
    return encrypt_config_.encryption_token;
    return get_encrypt_config().encryption_token;
  }
  if (name == "EncryptConfig.Path") {
    set_value(encrypt_config_.path, utils::path::absolute(value));
    return encrypt_config_.path;
    return get_encrypt_config().path;
  }
  if (name == "EventLevel") {
    set_event_level(event_level_from_string(value));
@@ -844,23 +801,23 @@ auto app_config::set_value_by_name(const std::string &name,
  }
  if (name == "HostConfig.AgentString") {
    set_value(hc_.agent_string, value);
    return hc_.agent_string;
    return get_host_config().agent_string;
  }
  if (name == "HostConfig.ApiPassword") {
    set_value(hc_.api_password, value);
    return hc_.api_password;
    return get_host_config().api_password;
  }
  if (name == "HostConfig.ApiPort") {
    set_value(hc_.api_port, utils::string::to_uint16(value));
    return std::to_string(hc_.api_port);
    return std::to_string(get_host_config().api_port);
  }
  if (name == "HostConfig.HostNameOrIp") {
    set_value(hc_.host_name_or_ip, value);
    return hc_.host_name_or_ip;
    return get_host_config().host_name_or_ip;
  }
  if (name == "HostConfig.TimeoutMs") {
    set_value(hc_.timeout_ms, utils::string::to_uint32(value));
    return std::to_string(hc_.timeout_ms);
    return std::to_string(get_host_config().timeout_ms);
  }
  if (name == "LowFreqIntervalSeconds") {
    set_low_frequency_interval_secs(utils::string::to_uint16(value));
@@ -941,43 +898,43 @@ auto app_config::set_value_by_name(const std::string &name,
  }
  if (name == "S3Config.AccessKey") {
    set_value(s3_config_.access_key, value);
    return s3_config_.access_key;
    return get_s3_config().access_key;
  }
  if (name == "S3Config.Bucket") {
    set_value(s3_config_.bucket, value);
    return s3_config_.bucket;
    return get_s3_config().bucket;
  }
  if (name == "S3Config.Region") {
    set_value(s3_config_.region, value);
    return s3_config_.region;
    return get_s3_config().region;
  }
  if (name == "S3Config.SecretKey") {
    set_value(s3_config_.secret_key, value);
    return s3_config_.secret_key;
    return get_s3_config().secret_key;
  }
  if (name == "S3Config.URL") {
    set_value(s3_config_.url, value);
    return s3_config_.url;
    return get_s3_config().url;
  }
  if (name == "S3Config.UsePathStyle") {
    set_value(s3_config_.use_path_style, utils::string::to_bool(value));
    return utils::string::from_bool(s3_config_.use_path_style);
    return utils::string::from_bool(get_s3_config().use_path_style);
  }
  if (name == "S3Config.UseRegionInURL") {
    set_value(s3_config_.use_region_in_url, utils::string::to_bool(value));
    return utils::string::from_bool(s3_config_.use_region_in_url);
    return utils::string::from_bool(get_s3_config().use_region_in_url);
  }
  if (name == "S3Config.TimeoutMs") {
    set_value(s3_config_.timeout_ms, utils::string::to_uint32(value));
    return std::to_string(s3_config_.timeout_ms);
    return std::to_string(get_s3_config().timeout_ms);
  }
  if (name == "S3Config.EncryptionToken") {
    set_value(s3_config_.encryption_token, value);
    return s3_config_.encryption_token;
    return get_s3_config().encryption_token;
  }
  if (name == "SiaConfig.Bucket") {
    set_value(sia_config_.bucket, value);
    return sia_config_.bucket;
    return get_sia_config().bucket;
  }
  if (name == "TaskWaitMillis") {
    set_task_wait_ms(utils::string::to_uint16(value));
@@ -28,39 +28,7 @@
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

namespace {
[[nodiscard]] auto
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
               const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
               std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
    -> std::unique_ptr<rocksdb::TransactionDB> {
  REPERTORY_USES_FUNCTION_NAME();

  auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
  if (clear &&
      not repertory::utils::file::directory{path}.remove_recursively()) {
    repertory::utils::error::raise_error(
        function_name, "failed to remove " + name + " db|" + path);
  }

  rocksdb::Options options{};
  options.create_if_missing = true;
  options.create_missing_column_families = true;
  options.db_log_dir = cfg.get_log_directory();
  options.keep_log_file_num = 10;

  rocksdb::TransactionDB *ptr{};
  auto status = rocksdb::TransactionDB::Open(
      options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
  if (not status.ok()) {
    repertory::utils::error::raise_error(function_name, status.ToString());
    throw repertory::startup_exception(status.ToString());
  }

  return std::unique_ptr<rocksdb::TransactionDB>(ptr);
}
} // namespace
#include "utils/utils.hpp"

namespace repertory {
rdb_file_db::rdb_file_db(const app_config &cfg) : cfg_(cfg) {
@@ -75,75 +43,280 @@ void rdb_file_db::create_or_open(bool clear) {
  auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
  families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                        rocksdb::ColumnFamilyOptions());
  families.emplace_back("file", rocksdb::ColumnFamilyOptions());
  families.emplace_back("source", rocksdb::ColumnFamilyOptions());

  auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
  db_ = create_rocksdb(cfg_, "file", families, handles, clear);
  db_ = utils::create_rocksdb(cfg_, "file", families, handles, clear);

  std::size_t idx{};
  directory_family_ = handles.at(idx++);
  file_family_ = handles.at(idx++);
  source_family_ = handles.at(idx++);
}

auto rdb_file_db::add_directory(const std::string &api_path,
                                const std::string &source_path) -> api_error {}
                                const std::string &source_path) -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  std::string existing_source_path;
  auto result = get_directory_source_path(api_path, existing_source_path);
  if (result != api_error::success &&
      result != api_error::directory_not_found) {
    return result;
  }

  return perform_action(
      function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
        if (not existing_source_path.empty()) {
          auto res = remove_item(api_path, existing_source_path, txn);
          if (not res.ok() && not res.IsNotFound()) {
            return res;
          }
        }

        auto res = txn->Put(directory_family_, api_path, source_path);
        if (not res.ok()) {
          return res;
        }

        return txn->Put(source_family_, source_path, api_path);
      });
}

auto rdb_file_db::add_or_update_file(const i_file_db::file_data &data)
    -> api_error {}
    -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  std::string existing_source_path;
  auto result = get_file_source_path(data.api_path, existing_source_path);
  if (result != api_error::success && result != api_error::item_not_found) {
    return result;
  }

  return perform_action(
      function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
        if (not existing_source_path.empty()) {
          auto res = remove_item(data.api_path, existing_source_path, txn);
          if (not res.ok() && not res.IsNotFound()) {
            return res;
          }
        }

        json json_data = {
            {"file_size", data.file_size},
            {"iv", data.iv_list},
            {"source_path", data.source_path},
        };

        auto res = txn->Put(file_family_, data.api_path, json_data.dump());
        if (not res.ok()) {
          return res;
        }

        return txn->Put(source_family_, data.source_path, data.api_path);
      });
}

void rdb_file_db::clear() { create_or_open(true); }

auto rdb_file_db::create_iterator() const
auto rdb_file_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
    -> std::shared_ptr<rocksdb::Iterator> {
  return std::shared_ptr<rocksdb::Iterator>(
      db_->NewIterator(rocksdb::ReadOptions()));
      db_->NewIterator(rocksdb::ReadOptions{}, family));
}

auto rdb_file_db::count() const -> std::uint64_t {}
auto rdb_file_db::count() const -> std::uint64_t {
  std::uint64_t ret{};

  auto iter = create_iterator(source_family_);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ++ret;
  }

  return ret;
}

auto rdb_file_db::get_api_path(const std::string &source_path,
                               std::string &api_path) const -> api_error {}
                               std::string &api_path) const -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  return perform_action(function_name, [&]() -> rocksdb::Status {
    return db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
                    &api_path);
  });
}

auto rdb_file_db::get_directory_api_path(const std::string &source_path,
                                         std::string &api_path) const
    -> api_error {}
    -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  auto result = perform_action(function_name, [&]() -> rocksdb::Status {
    auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
                        &api_path);
    if (not res.ok()) {
      return res;
    }

    std::string value;
    return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
                    &value);
  });

  if (result != api_error::success) {
    api_path.clear();
  }

  return result == api_error::item_not_found ? api_error::directory_not_found
                                             : result;
}

auto rdb_file_db::get_directory_source_path(const std::string &api_path,
                                            std::string &source_path) const
    -> api_error {}
    -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  auto result = perform_action(function_name, [&]() -> rocksdb::Status {
    return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
                    &source_path);
  });

  return result == api_error::item_not_found ? api_error::directory_not_found
                                             : result;
}

auto rdb_file_db::get_file_api_path(const std::string &source_path,
                                    std::string &api_path) const -> api_error {}
                                    std::string &api_path) const -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  auto result = perform_action(function_name, [&]() -> rocksdb::Status {
    auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
                        &api_path);
    if (not res.ok()) {
      return res;
    }

    std::string value;
    return db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
  });

  if (result != api_error::success) {
    api_path.clear();
  }

  return result;
}

auto rdb_file_db::get_file_data(const std::string &api_path,
                                i_file_db::file_data &data) const -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  auto result = perform_action(function_name, [&]() -> rocksdb::Status {
    std::string value;
    auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
    if (not res.ok()) {
      return res;
    }

    auto json_data = json::parse(value);
    data.api_path = api_path;
    data.file_size = json_data.at("file_size").get<std::uint64_t>();
    data.iv_list =
        json_data.at("iv")
            .get<std::vector<
                std::array<unsigned char,
                           crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
    data.source_path = json_data.at("source_path").get<std::string>();

    return res;
  });

  return result;
}

auto rdb_file_db::get_file_source_path(const std::string &api_path,
                                       std::string &source_path) const
    -> api_error {}
    -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

auto rdb_file_db::get_item_list() const -> std::vector<i_file_db::file_info> {}
  auto result = perform_action(function_name, [&]() -> rocksdb::Status {
    std::string value;
    auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
    if (not res.ok()) {
      return res;
    }

    auto json_data = json::parse(value);
    source_path = json_data.at("source_path").get<std::string>();
    return res;
  });

  return result;
}

auto rdb_file_db::get_item_list() const -> std::vector<i_file_db::file_info> {
  std::vector<i_file_db::file_info> ret{};
  {
    auto iter = create_iterator(directory_family_);
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      ret.emplace_back(i_file_db::file_info{
          iter->key().ToString(),
          true,
          iter->value().ToString(),
      });
    }
  }

  {
    auto iter = create_iterator(file_family_);
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      auto json_data = json::parse(iter->value().ToString());
      ret.emplace_back(i_file_db::file_info{
          iter->key().ToString(),
          true,
          json_data.at("source_path").get<std::string>(),
      });
    }
  }

  return ret;
}

auto rdb_file_db::get_source_path(const std::string &api_path,
                                  std::string &source_path) const -> api_error {
  auto iter = create_iterator(source_family_);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    if (iter->value().ToString() != api_path) {
      continue;
    }

    source_path = iter->key().ToString();
    return api_error::success;
  }

  return api_error::item_not_found;
}

auto rdb_file_db::perform_action(std::string_view function_name,
                                 std::function<rocksdb::Status()> action)
    -> bool {
  try {
    auto res = action();
    if (not res.ok()) {
      utils::error::raise_error(function_name, res.ToString());
    }

    return res.ok();
  } catch (const std::exception &ex) {
    utils::error::raise_error(function_name, ex);
    -> api_error {
  auto res = action();
  if (res.ok()) {
    return api_error::success;
  }

  return false;
  if (not res.IsNotFound()) {
    utils::error::raise_error(function_name, res.ToString());
  }

  return res.IsNotFound() ? api_error::item_not_found : api_error::error;
}

auto rdb_file_db::perform_action(
    std::string_view function_name,
    std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool {
    std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
    -> api_error {
  std::unique_ptr<rocksdb::Transaction> txn{
      db_->BeginTransaction(rocksdb::WriteOptions{},
                            rocksdb::TransactionOptions{}),
@@ -154,12 +327,12 @@ auto rdb_file_db::perform_action(
  if (res.ok()) {
    auto commit_res = txn->Commit();
    if (commit_res.ok()) {
      return true;
      return api_error::success;
    }

    utils::error::raise_error(function_name,
                              "rocksdb commit failed|" + res.ToString());
    return false;
    return api_error::error;
  }

  utils::error::raise_error(function_name,
@@ -172,8 +345,37 @@ auto rdb_file_db::perform_action(
  auto rollback_res = txn->Rollback();
  utils::error::raise_error(function_name, "rocksdb rollback failed|" +
                                               rollback_res.ToString());
  return false;
  return api_error::error;
}

auto rdb_file_db::remove_item(const std::string &api_path) -> api_error {}
auto rdb_file_db::remove_item(const std::string &api_path) -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  std::string source_path;
  auto res = get_source_path(api_path, source_path);
  if (res != api_error::success) {
    return res;
  }

  return perform_action(function_name,
                        [&](rocksdb::Transaction *txn) -> rocksdb::Status {
                          return remove_item(api_path, source_path, txn);
                        });
}

auto rdb_file_db::remove_item(const std::string &api_path,
                              const std::string &source_path,
                              rocksdb::Transaction *txn) -> rocksdb::Status {
  auto res = txn->Delete(source_family_, source_path);
  if (not res.ok()) {
    return res;
  }

  res = txn->Delete(directory_family_, api_path);
  if (not res.ok()) {
    return res;
  }

  return txn->Delete(file_family_, api_path);
}
} // namespace repertory
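The perform_action overloads above now map RocksDB statuses onto api_error instead of bool, so callers can distinguish "not found" from a genuine failure. A condensed sketch of the transactional call pattern, mirroring add_directory above (not a standalone function):

```cpp
// Sketch: two Puts performed as one RocksDB transaction, reported as api_error.
auto result = perform_action(
    function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
      auto res = txn->Put(directory_family_, api_path, source_path);
      if (not res.ok()) {
        return res; // transaction is rolled back and api_error::error returned
      }
      return txn->Put(source_family_, source_path, api_path); // committed on ok()
    });
// result is api_error::success once the commit succeeds; otherwise an
// api_error describing the failure.
```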
@@ -28,39 +28,7 @@
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

namespace {
[[nodiscard]] auto
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
               const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
               std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
    -> std::unique_ptr<rocksdb::TransactionDB> {
  REPERTORY_USES_FUNCTION_NAME();

  auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
  if (clear &&
      not repertory::utils::file::directory{path}.remove_recursively()) {
    repertory::utils::error::raise_error(
        function_name, "failed to remove " + name + " db|" + path);
  }

  rocksdb::Options options{};
  options.create_if_missing = true;
  options.create_missing_column_families = true;
  options.db_log_dir = cfg.get_log_directory();
  options.keep_log_file_num = 10;

  rocksdb::TransactionDB *ptr{};
  auto status = rocksdb::TransactionDB::Open(
      options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
  if (not status.ok()) {
    repertory::utils::error::raise_error(function_name, status.ToString());
    throw repertory::startup_exception(status.ToString());
  }

  return std::unique_ptr<rocksdb::TransactionDB>(ptr);
}
} // namespace
#include "utils/utils.hpp"

namespace repertory {
rdb_file_mgr_db::rdb_file_mgr_db(const app_config &cfg) : cfg_(cfg) {
@@ -79,12 +47,12 @@ void rdb_file_mgr_db::create_or_open(bool clear) {
  families.emplace_back("upload", rocksdb::ColumnFamilyOptions());

  auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
  db_ = create_rocksdb(cfg_, "file_mgr", families, handles, clear);
  db_ = utils::create_rocksdb(cfg_, "file_mgr", families, handles, clear);

  std::size_t idx{};
  resume_family_ = handles[idx++];
  upload_active_family_ = handles[idx++];
  upload_family_ = handles[idx++];
  resume_family_ = handles.at(idx++);
  upload_active_family_ = handles.at(idx++);
  upload_family_ = handles.at(idx++);
}

auto rdb_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
@ -27,39 +27,7 @@
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

namespace {
[[nodiscard]] auto
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
-> std::unique_ptr<rocksdb::TransactionDB> {
REPERTORY_USES_FUNCTION_NAME();

auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
if (clear &&
not repertory::utils::file::directory{path}.remove_recursively()) {
repertory::utils::error::raise_error(
function_name, "failed to remove " + name + " db|" + path);
}

rocksdb::Options options{};
options.create_if_missing = true;
options.create_missing_column_families = true;
options.db_log_dir = cfg.get_log_directory();
options.keep_log_file_num = 10;

rocksdb::TransactionDB *ptr{};
auto status = rocksdb::TransactionDB::Open(
options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
if (not status.ok()) {
repertory::utils::error::raise_error(function_name, status.ToString());
throw repertory::startup_exception(status.ToString());
}

return std::unique_ptr<rocksdb::TransactionDB>(ptr);
}
} // namespace
#include "utils/utils.hpp"

namespace repertory {
rdb_meta_db::rdb_meta_db(const app_config &cfg) : cfg_(cfg) {
@ -79,13 +47,13 @@ void rdb_meta_db::create_or_open(bool clear) {
families.emplace_back("source", rocksdb::ColumnFamilyOptions());

auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = create_rocksdb(cfg_, "provider_meta", families, handles, clear);
db_ = utils::create_rocksdb(cfg_, "provider_meta", families, handles, clear);

std::size_t idx{};
default_family_ = handles[idx++];
pinned_family_ = handles[idx++];
size_family_ = handles[idx++];
source_family_ = handles[idx++];
meta_family_ = handles.at(idx++);
pinned_family_ = handles.at(idx++);
size_family_ = handles.at(idx++);
source_family_ = handles.at(idx++);
}

void rdb_meta_db::clear() { create_or_open(true); }
@ -112,7 +80,7 @@ auto rdb_meta_db::get_api_path(const std::string &source_path,

auto rdb_meta_db::get_api_path_list() const -> std::vector<std::string> {
std::vector<std::string> ret;
auto iter = create_iterator(default_family_);
auto iter = create_iterator(meta_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.push_back(iter->key().ToString());
}
@ -130,8 +98,7 @@ auto rdb_meta_db::get_item_meta_json(const std::string &api_path,
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, default_family_, api_path,
&value);
return db_->Get(rocksdb::ReadOptions{}, meta_family_, api_path, &value);
});
if (res != api_error::success) {
return res;
@ -239,7 +206,7 @@ auto rdb_meta_db::get_pinned_files() const -> std::vector<std::string> {

auto rdb_meta_db::get_total_item_count() const -> std::uint64_t {
std::uint64_t ret{};
auto iter = create_iterator(default_family_);
auto iter = create_iterator(meta_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
++ret;
}
@ -349,7 +316,7 @@ auto rdb_meta_db::remove_api_path(const std::string &api_path,
}
}

return txn->Delete(default_family_, api_path);
return txn->Delete(meta_family_, api_path);
}

auto rdb_meta_db::remove_item_meta(const std::string &api_path,
@ -523,7 +490,7 @@ auto rdb_meta_db::update_item_meta(const std::string &api_path, json json_data,
}
}

return set_status(txn->Put(default_family_, api_path, json_data.dump()));
return set_status(txn->Put(meta_family_, api_path, json_data.dump()));
};

if (base_txn == nullptr) {

@ -22,6 +22,7 @@
#include "db/impl/sqlite_file_db.hpp"

#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
@ -29,6 +30,7 @@
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

@ -51,9 +53,14 @@ const std::map<std::string, std::string> sql_create_tables = {

namespace repertory {
sqlite_file_db::sqlite_file_db(const app_config &cfg) {
db_ = utils::db::sqlite::create_db(
utils::path::combine(cfg.get_data_directory(), {"provider_file.db"}),
sql_create_tables);
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|{}", db_dir));
}

db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"file.db"}),
sql_create_tables);
}

sqlite_file_db::~sqlite_file_db() { db_.reset(); }

@ -22,6 +22,7 @@
#include "db/impl/sqlite_file_mgr_db.hpp"

#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
@ -29,6 +30,7 @@
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

@ -75,9 +77,14 @@ const std::map<std::string, std::string> sql_create_tables{

namespace repertory {
sqlite_file_mgr_db::sqlite_file_mgr_db(const app_config &cfg) {
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|{}", db_dir));
}

db_ = utils::db::sqlite::create_db(
utils::path::combine(cfg.get_data_directory(), {"mgr.db"}),
sql_create_tables);
utils::path::combine(db_dir, {"file_mgr.db"}), sql_create_tables);
}

sqlite_file_mgr_db::~sqlite_file_mgr_db() { db_.reset(); }

@ -22,11 +22,13 @@
#include "db/impl/sqlite_meta_db.hpp"

#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

@ -48,9 +50,14 @@ sqlite_meta_db::sqlite_meta_db(const app_config &cfg) {
},
};

db_ = utils::db::sqlite::create_db(
utils::path::combine(cfg.get_data_directory(), {"provider_meta.db"}),
sql_create_tables);
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|{}", db_dir));
}

db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"meta.db"}),
sql_create_tables);
}

sqlite_meta_db::~sqlite_meta_db() { db_.reset(); }

repertory/librepertory/src/file_manager/cache_size_mgr.cpp (new file)
@ -0,0 +1,114 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/cache_size_mgr.hpp"

#include "app_config.hpp"
#include "events/event.hpp"
#include "events/event_system.hpp"
#include "types/startup_exception.hpp"
#include "utils/file_utils.hpp"

namespace repertory {
// clang-format off
E_SIMPLE2(max_cache_size_reached, warn, true,
std::uint64_t, cache_size, sz, E_FROM_UINT64,
std::uint64_t, max_cache_size, max, E_FROM_UINT64
);
// clang-format on

cache_size_mgr cache_size_mgr::instance_{};

auto cache_size_mgr::expand(std::uint64_t size) -> api_error {
if (size == 0U) {
return api_error::success;
}

unique_mutex_lock lock(mtx_);
if (cfg_ == nullptr) {
return api_error::error;
}

cache_size_ += size;

auto max_cache_size = cfg_->get_max_cache_size_bytes();

while (not stop_requested_ && cache_size_ > max_cache_size) {
event_system::instance().raise<max_cache_size_reached>(cache_size_,
max_cache_size);
notify_.notify_all();
notify_.wait(lock);
}

notify_.notify_all();

return stop_requested_ ? api_error::error : api_error::success;
}

void cache_size_mgr::initialize(app_config *cfg) {
if (cfg == nullptr) {
throw startup_exception("app_config must not be null");
}

mutex_lock lock(mtx_);
cfg_ = cfg;

stop_requested_ = false;

auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
if (not cache_dir.create_directory()) {
throw startup_exception(fmt::format("failed to create cache directory|{}",
cache_dir.get_path()));
}

cache_size_ = cache_dir.size(false);

notify_.notify_all();
}

auto cache_size_mgr::shrink(std::uint64_t size) -> api_error {
if (size == 0U) {
return api_error::success;
}

mutex_lock lock(mtx_);
if (cache_size_ >= size) {
cache_size_ -= size;
} else {
cache_size_ = 0U;
}

notify_.notify_all();

return stop_requested_ ? api_error::error : api_error::success;
}

void cache_size_mgr::stop() {
if (stop_requested_) {
return;
}

stop_requested_ = true;

mutex_lock lock(mtx_);
notify_.notify_all();
}
} // namespace repertory

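cache_size_mgr is a process-wide accounting singleton: expand() adds to the tracked cache size and blocks while the total would exceed app_config::get_max_cache_size_bytes(), raising max_cache_size_reached while it waits, and shrink() releases space and wakes any waiters. A minimal caller sketch, assuming the singleton was already initialized by the provider; write_cached_chunk() and the writer callback are hypothetical stand-ins for the real cache I/O:

#include <cstdint>
#include <functional>

#include "file_manager/cache_size_mgr.hpp"

namespace repertory {
// Sketch only: reserve cache space before growing a local cache file and give
// the reservation back if the write fails, so the accounted size matches disk.
auto write_cached_chunk(std::uint64_t chunk_size,
                        const std::function<bool()> &writer) -> api_error {
  auto res = cache_size_mgr::instance().expand(chunk_size);
  if (res != api_error::success) {
    return res; // stop() was requested or initialize() was never called
  }

  if (not writer()) {
    cache_size_mgr::instance().shrink(chunk_size);
    return api_error::error;
  }

  return api_error::success;
}
} // namespace repertory

The file_manager and open_file hunks below follow the same pattern: shrink() after a cached source file is deleted, and expand()/shrink() around resize().
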
@ -23,6 +23,7 @@

#include "app_config.hpp"
#include "db/file_mgr_db.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file.hpp"
#include "file_manager/open_file_base.hpp"
@ -173,8 +174,16 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {

open_file_lookup_.erase(api_path);

auto removed = utils::file::file{source_path}.remove();
auto file = utils::file::file{source_path};
auto file_size = file.size().value_or(0U);
auto removed = file.remove();
if (removed) {
res = cache_size_mgr::instance().shrink(file_size);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, res,
"failed to shrink cache");
}

event_system::instance().raise<filesystem_item_evicted>(api_path,
source_path);
}
@ -464,12 +473,21 @@ auto file_manager::remove_file(const std::string &api_path) -> api_error {
return res;
}

if (not utils::file::file{fsi.source_path}.remove()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
utils::get_last_error_code(), "failed to delete source");
auto file = utils::file::file{fsi.source_path};
auto file_size = file.size().value_or(0U);
if (file.remove()) {
res = cache_size_mgr::instance().shrink(file_size);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, res,
"failed to shrink cache");
}
return api_error::success;
}

utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
utils::get_last_error_code(), "failed to delete source");

return api_error::success;
}

@ -21,6 +21,7 @@
*/
#include "file_manager/open_file.hpp"

#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/events.hpp"
#include "file_manager/i_upload_manager.hpp"
#include "platform/platform.hpp"
@ -419,6 +420,22 @@ auto open_file::resize(std::uint64_t new_file_size) -> api_error {
return api_error::invalid_operation;
}

if (new_file_size == fsi_.size) {
return api_error::success;
}

if (new_file_size > fsi_.size) {
auto res = cache_size_mgr::instance().expand(new_file_size - fsi_.size);
if (res != api_error::success) {
return res;
}
} else {
auto res = cache_size_mgr::instance().shrink(fsi_.size - new_file_size);
if (res != api_error::success) {
return res;
}
}

return native_operation(
new_file_size, [this, &new_file_size](native_handle) -> api_error {
return nf_->truncate(new_file_size) ? api_error::success
@ -475,16 +492,25 @@ auto open_file::close() -> bool {
}

if (err != api_error::success || read_state_.all()) {
mgr_.remove_resume(get_api_path(), get_source_path());
mgr_.remove_resume(fsi_.api_path, get_source_path());
}

if (err == api_error::success) {
return true;
}

if (not utils::file::file(fsi_.source_path).remove()) {
auto file = utils::file::file{fsi_.source_path};
auto file_size = file.size().value_or(0U);
if (file.remove()) {
auto res = cache_size_mgr::instance().shrink(file_size);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, fsi_.api_path,
fsi_.source_path, res,
"failed to shrink cache");
}
} else {
utils::error::raise_api_path_error(
function_name, get_api_path(), fsi_.source_path,
function_name, fsi_.api_path, fsi_.source_path,
utils::get_last_error_code(), "failed to delete source file");
}

@ -494,7 +520,7 @@ auto open_file::close() -> bool {
auto res =
provider_.set_item_meta(fsi_.api_path, META_SOURCE, fsi_.source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
utils::error::raise_api_path_error(function_name, fsi_.api_path,
fsi_.source_path, res,
"failed to set new source path");
}

@ -25,6 +25,7 @@
#include "db/meta_db.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/i_file_manager.hpp"
#include "platform/platform.hpp"
#include "utils/file_utils.hpp"
@ -49,8 +50,8 @@ void base_provider::add_all_items(const stop_type &stop_requested) {
}

auto base_provider::create_api_file(std::string path, std::string key,
std::uint64_t size,
std::uint64_t file_time) -> api_file {
std::uint64_t size, std::uint64_t file_time)
-> api_file {
api_file file{};
file.api_path = utils::path::create_api_path(path);
file.api_parent = utils::path::get_parent_api_path(file.api_path);
@ -82,8 +83,8 @@ auto base_provider::create_api_file(std::string path, std::uint64_t size,
}

auto base_provider::create_directory_clone_source_meta(
const std::string &source_api_path,
const std::string &api_path) -> api_error {
const std::string &source_api_path, const std::string &api_path)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();

bool exists{};
@ -180,8 +181,8 @@ auto base_provider::create_directory(const std::string &api_path,
return set_item_meta(api_path, meta);
}

auto base_provider::create_file(const std::string &api_path,
api_meta_map &meta) -> api_error {
auto base_provider::create_file(const std::string &api_path, api_meta_map &meta)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();

bool exists{};
@ -238,8 +239,9 @@ auto base_provider::create_file(const std::string &api_path,
return api_error::error;
}

auto base_provider::get_api_path_from_source(
const std::string &source_path, std::string &api_path) const -> api_error {
auto base_provider::get_api_path_from_source(const std::string &source_path,
std::string &api_path) const
-> api_error {
REPERTORY_USES_FUNCTION_NAME();

if (source_path.empty()) {
@ -252,8 +254,9 @@ auto base_provider::get_api_path_from_source(
return db3_->get_api_path(source_path, api_path);
}

auto base_provider::get_directory_items(
const std::string &api_path, directory_item_list &list) const -> api_error {
auto base_provider::get_directory_items(const std::string &api_path,
directory_item_list &list) const
-> api_error {
REPERTORY_USES_FUNCTION_NAME();

bool exists{};
@ -317,9 +320,10 @@ auto base_provider::get_file_size(const std::string &api_path,
return api_error::success;
}

auto base_provider::get_filesystem_item(
const std::string &api_path, bool directory,
filesystem_item &fsi) const -> api_error {
auto base_provider::get_filesystem_item(const std::string &api_path,
bool directory,
filesystem_item &fsi) const
-> api_error {
bool exists{};
auto res = is_directory(api_path, exists);
if (res != api_error::success) {
@ -352,9 +356,10 @@ auto base_provider::get_filesystem_item(
return api_error::success;
}

auto base_provider::get_filesystem_item_and_file(
const std::string &api_path, api_file &file,
filesystem_item &fsi) const -> api_error {
auto base_provider::get_filesystem_item_and_file(const std::string &api_path,
api_file &file,
filesystem_item &fsi) const
-> api_error {
auto res = get_file(api_path, file);
if (res != api_error::success) {
return res;
@ -751,6 +756,8 @@ auto base_provider::start(api_item_added_callback api_item_added,
return false;
}

cache_size_mgr::instance().initialize(&config_);

polling::instance().set_callback({
"check_deleted",
polling::frequency::low,
@ -761,6 +768,7 @@ auto base_provider::start(api_item_added_callback api_item_added,
}

void base_provider::stop() {
cache_size_mgr::instance().stop();
polling::instance().remove_callback("check_deleted");
db3_.reset();
}

@ -533,8 +533,14 @@ auto encrypt_provider::is_directory(const std::string &api_path,
try {
std::string source_path;
auto result = db_->get_directory_source_path(api_path, source_path);

if (result != api_error::success) {
return result;
if (result != api_error::directory_not_found) {
return result;
}

exists = false;
return api_error::success;
}

exists = utils::file::directory{source_path}.exists();
@ -555,7 +561,12 @@ auto encrypt_provider::is_file(const std::string &api_path, bool &exists) const
std::string source_path;
auto result = db_->get_file_source_path(api_path, source_path);
if (result != api_error::success) {
return result;
if (result != api_error::item_not_found) {
return result;
}

exists = false;
return api_error::success;
}

exists = utils::file::file{source_path}.exists();
@ -829,9 +840,6 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,

db_ = create_file_db(config_);

auto cfg = config_.get_encrypt_config();
auto cfg_path = utils::path::absolute(cfg.path);

std::string source_path;
auto result = db_->get_directory_source_path("/", source_path);
if (result != api_error::success &&
@ -840,6 +848,7 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
fmt::format("failed to get root|{}", api_error_to_string(result)));
}

auto cfg_path = utils::path::absolute(config_.get_encrypt_config().path);
if (result == api_error::success) {
auto cur_path = utils::path::absolute(source_path);
#if defined(_WIN32)
@ -849,10 +858,10 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
if (cur_path != cfg_path) {
#endif // defined(_WIN32)
throw startup_exception(fmt::format(
"source path has changed|cur|{}|cfg|{}", cur_path, cfg.path));
"source path has changed|cur|{}|cfg|{}", cur_path, cfg_path));
}
} else {
result = db_->add_directory("/", utils::path::absolute(source_path));
result = db_->add_directory("/", utils::path::absolute(cfg_path));
if (result != api_error::success) {
throw startup_exception(
fmt::format("failed to create root|{}", api_error_to_string(result)));

@ -22,7 +22,10 @@
#include "utils/utils.hpp"

#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

namespace repertory::utils {
@ -44,6 +47,42 @@ void calculate_allocation_size(bool directory, std::uint64_t file_size,
allocation_meta_size = std::to_string(allocation_size);
}

auto create_rocksdb(
const app_config &cfg, const std::string &name,
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
-> std::unique_ptr<rocksdb::TransactionDB> {
REPERTORY_USES_FUNCTION_NAME();

auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|{}", db_dir));
}

auto path = utils::path::combine(db_dir, {name});
if (clear && not utils::file::directory{path}.remove_recursively()) {
utils::error::raise_error(function_name,
"failed to remove " + name + " db|" + path);
}

rocksdb::Options options{};
options.create_if_missing = true;
options.create_missing_column_families = true;
options.db_log_dir = cfg.get_log_directory();
options.keep_log_file_num = 10;

rocksdb::TransactionDB *ptr{};
auto status = rocksdb::TransactionDB::Open(
options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
if (not status.ok()) {
throw startup_exception(fmt::format("failed to open rocksdb|path{}|error{}",
path, status.ToString()));
}

return std::unique_ptr<rocksdb::TransactionDB>(ptr);
}

auto create_volume_label(const provider_type &prov) -> std::string {
return "repertory_" + app_config::get_provider_name(prov);
}

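utils::create_rocksdb is the shared helper that replaces the per-file anonymous-namespace copies removed from the rdb_file_mgr_db and rdb_meta_db sources above: it ensures <data_directory>/db exists, optionally wipes <data_directory>/db/<name>, and opens a rocksdb::TransactionDB, filling handles in the same order as families. A minimal caller sketch under those assumptions; the "example" column family and open_example_db() are hypothetical:

#include <memory>
#include <vector>

#include <rocksdb/utilities/transaction_db.h>

#include "app_config.hpp"
#include "utils/utils.hpp"

namespace repertory {
// Sketch only: open an example database with one extra column family besides
// the mandatory "default" family; handles come back in descriptor order.
auto open_example_db(const app_config &cfg)
    -> std::unique_ptr<rocksdb::TransactionDB> {
  std::vector<rocksdb::ColumnFamilyDescriptor> families;
  families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                        rocksdb::ColumnFamilyOptions());
  families.emplace_back("example", rocksdb::ColumnFamilyOptions());

  std::vector<rocksdb::ColumnFamilyHandle *> handles;
  auto db = utils::create_rocksdb(cfg, "example", families, handles,
                                  /* clear= */ false);

  auto *example_family = handles.at(1U); // families[1] == "example"
  static_cast<void>(example_family);     // the real classes store these handles

  return db;
}
} // namespace repertory
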
@ -58,8 +58,7 @@ protected:
}
};

// using file_db_types = ::testing::Types<sqlite_file_db, rdb_file_db>;
using file_db_types = ::testing::Types<sqlite_file_db>;
using file_db_types = ::testing::Types<rdb_file_db, sqlite_file_db>;

template <typename db_t> std::unique_ptr<app_config> file_db_test<db_t>::config;

@ -58,7 +58,7 @@ protected:
}
};

using file_mgr_db_types = ::testing::Types<sqlite_file_mgr_db, rdb_file_mgr_db>;
using file_mgr_db_types = ::testing::Types<rdb_file_mgr_db, sqlite_file_mgr_db>;

template <typename db_t>
std::unique_ptr<app_config> file_mgr_db_test<db_t>::config;

@ -83,7 +83,8 @@ TYPED_TEST(file_db_test, can_get_api_path_for_file) {
EXPECT_STREQ("/file", api_path.c_str());
}

TYPED_TEST(file_db_test, item_not_found_is_returned_for_non_existing_api_path) {
TYPED_TEST(file_db_test,
item_not_found_is_returned_for_non_existing_source_path) {
this->file_db->clear();

std::string api_path;
@ -213,5 +214,119 @@ TYPED_TEST(file_db_test,
EXPECT_EQ(api_error::item_not_found,
this->file_db->get_file_data("/file", data));
}
// test can update file source, iv, size

TYPED_TEST(file_db_test, can_update_existing_file_iv) {
this->file_db->clear();

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
1U,
{{}, {}},
"c:\\test\\file.txt",
}));

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
1U,
{{}, {}, {}},
"c:\\test\\file.txt",
}));

i_file_db::file_data data{};
EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
EXPECT_STREQ("/file", data.api_path.c_str());
EXPECT_EQ(1U, data.file_size);
EXPECT_EQ(3U, data.iv_list.size());
EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());

EXPECT_EQ(1U, this->file_db->count());
}

TYPED_TEST(file_db_test, can_update_existing_file_size) {
this->file_db->clear();

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
1U,
{{}, {}},
"c:\\test\\file.txt",
}));

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
2U,
{{}, {}},
"c:\\test\\file.txt",
}));

i_file_db::file_data data{};
EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
EXPECT_STREQ("/file", data.api_path.c_str());
EXPECT_EQ(2U, data.file_size);
EXPECT_EQ(2U, data.iv_list.size());
EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());

EXPECT_EQ(1U, this->file_db->count());
}

TYPED_TEST(file_db_test, can_update_existing_file_source_path) {
this->file_db->clear();

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
1U,
{{}, {}},
"c:\\test\\file.txt",
}));

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
1U,
{{}, {}},
"c:\\test\\file2.txt",
}));

i_file_db::file_data data{};
EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
EXPECT_STREQ("/file", data.api_path.c_str());
EXPECT_EQ(1U, data.file_size);
EXPECT_EQ(2U, data.iv_list.size());
EXPECT_STREQ("c:\\test\\file2.txt", data.source_path.c_str());

EXPECT_EQ(1U, this->file_db->count());
}

TYPED_TEST(file_db_test, can_get_source_path_for_directory) {
this->file_db->clear();

EXPECT_EQ(api_error::success, this->file_db->add_directory("/", "c:\\test"));
std::string source_path;
EXPECT_EQ(api_error::success,
this->file_db->get_source_path("/", source_path));
EXPECT_STREQ("c:\\test", source_path.c_str());
}

TYPED_TEST(file_db_test, can_get_source_path_for_file) {
this->file_db->clear();

EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
"/file",
0U,
{},
"c:\\test\\file.txt",
}));
std::string source_path;
EXPECT_EQ(api_error::success,
this->file_db->get_source_path("/file", source_path));
EXPECT_STREQ("c:\\test\\file.txt", source_path.c_str());
}

TYPED_TEST(file_db_test, item_not_found_is_returned_for_non_existing_api_path) {
this->file_db->clear();

std::string source_path;
EXPECT_EQ(api_error::item_not_found,
this->file_db->get_source_path("/file", source_path));
EXPECT_TRUE(source_path.empty());
}
} // namespace repertory