Compare commits

1 commit

68e76285fe ... v2.1.0-rc-

Author | SHA1 | Date
---|---|---
 | 8dd46b8ad8 | 
@@ -161,6 +161,7 @@ openssldir
pkgconfig
plarge_integer
plex
println
project_enable_fontconfig
project_enable_gtkmm
project_enable_libdsm
10 CHANGELOG.md

@@ -2,6 +2,10 @@

## v2.0.2-rc

### BREAKING CHANGES

* Refactored `config.json` - will need to verify configuration settings prior to mounting

### Issues

* #12 [Unit Test] Complete all providers unit tests

@@ -16,14 +20,16 @@

* #19 [bug] Rename file is broken for files that are existing
* #23 [bug] Incorrect file size displayed while upload is pending
* #24 RocksDB implementations should be transactional
* #25 Writes should block when maximum cache size is reached
* #26 Complete ring buffer and direct download support

### Changes from v2.0.1-rc

* Ability to choose between RocksDB and SQLite databases
* Added direct reads and implemented download fallback
* Corrected file times on S3 and Sia providers
* Corrected handling of `chown()` and `chmod()`
* Fixed erroneous download of chunks after resize
* Comprehensive WinFSP and FUSE unit tests, including remote testing
* Ability to choose between RocksDB and SQLite databases

## v2.0.1-rc
@@ -1,15 +1,15 @@
set(BINUTILS_VERSION 2.41)
set(BOOST_MAJOR_VERSION 1)
set(BOOST_MINOR_VERSION 87)
set(BOOST_PATCH_VERSION 0)
set(BOOST2_MAJOR_VERSION 1)
set(BOOST2_MINOR_VERSION 76)
set(BOOST2_PATCH_VERSION 0)
set(BOOST_MAJOR_VERSION 1)
set(BOOST_MINOR_VERSION 87)
set(BOOST_PATCH_VERSION 0)
set(CPP_HTTPLIB_VERSION 0.18.1)
set(CURL_VERSION 8.11.0)
set(CURL2_VERSION 8_11_0)
set(EXPAT_VERSION 2.6.4)
set(CURL_VERSION 8.11.0)
set(EXPAT2_VERSION 2_6_4)
set(EXPAT_VERSION 2.6.4)
set(GCC_VERSION 14.2.0)
set(GTEST_VERSION 1.15.2)
set(ICU_VERSION 75-1)

@@ -22,7 +22,7 @@ set(PKG_CONFIG_VERSION 0.29.2)
set(PUGIXML_VERSION 1.14)
set(ROCKSDB_VERSION 9.7.4)
set(SPDLOG_VERSION 1.15.0)
set(SQLITE_VERSION 3460100)
set(SQLITE2_VERSION 3.46.1)
set(SQLITE_VERSION 3460100)
set(STDUUID_VERSION 1.2.3)
set(ZLIB_VERSION 1.3.1)
@ -22,471 +22,233 @@
|
||||
#ifndef REPERTORY_INCLUDE_APP_CONFIG_HPP_
|
||||
#define REPERTORY_INCLUDE_APP_CONFIG_HPP_
|
||||
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "events/event.hpp"
|
||||
#include "types/remote.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
class app_config final {
|
||||
public:
|
||||
[[nodiscard]] static auto default_agent_name(const provider_type &prov)
|
||||
-> std::string;
|
||||
[[nodiscard]] static auto
|
||||
default_agent_name(const provider_type &prov) -> std::string;
|
||||
|
||||
[[nodiscard]] static auto default_api_port(const provider_type &prov)
|
||||
-> std::uint16_t;
|
||||
[[nodiscard]] static auto
|
||||
default_api_port(const provider_type &prov) -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] static auto default_data_directory(const provider_type &prov)
|
||||
-> std::string;
|
||||
[[nodiscard]] static auto
|
||||
default_data_directory(const provider_type &prov) -> std::string;
|
||||
|
||||
[[nodiscard]] static auto default_remote_port(const provider_type &prov)
|
||||
-> std::uint16_t;
|
||||
[[nodiscard]] static auto
|
||||
default_remote_api_port(const provider_type &prov) -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] static auto default_rpc_port(const provider_type &prov)
|
||||
-> std::uint16_t;
|
||||
[[nodiscard]] static auto
|
||||
default_rpc_port(const provider_type &prov) -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] static auto get_provider_display_name(const provider_type &prov)
|
||||
-> std::string;
|
||||
[[nodiscard]] static auto
|
||||
get_provider_display_name(const provider_type &prov) -> std::string;
|
||||
|
||||
[[nodiscard]] static auto get_provider_name(const provider_type &prov)
|
||||
-> std::string;
|
||||
[[nodiscard]] static auto
|
||||
get_provider_name(const provider_type &prov) -> std::string;
|
||||
|
||||
public:
|
||||
app_config(const provider_type &prov, std::string_view data_directory = "");
|
||||
|
||||
app_config() = delete;
|
||||
app_config(app_config &&) = delete;
|
||||
app_config(const app_config &) = delete;
|
||||
|
||||
~app_config() { save(); }
|
||||
|
||||
auto operator=(const app_config &) -> app_config & = delete;
|
||||
auto operator=(app_config &&) -> app_config & = delete;
|
||||
|
||||
private:
|
||||
provider_type prov_;
|
||||
std::string api_auth_;
|
||||
std::uint16_t api_port_;
|
||||
std::string api_user_;
|
||||
bool config_changed_;
|
||||
std::string data_directory_;
|
||||
database_type db_type_{database_type::rocksdb};
|
||||
std::uint8_t download_timeout_secs_;
|
||||
bool enable_chunk_downloader_timeout_;
|
||||
bool enable_comm_duration_events_;
|
||||
bool enable_drive_events_;
|
||||
bool enable_max_cache_size_;
|
||||
atomic<std::string> api_auth_;
|
||||
std::atomic<std::uint16_t> api_port_;
|
||||
atomic<std::string> api_user_;
|
||||
std::atomic<bool> config_changed_;
|
||||
std::atomic<database_type> db_type_{database_type::rocksdb};
|
||||
std::atomic<std::uint8_t> download_timeout_secs_;
|
||||
std::atomic<bool> enable_download_timeout_;
|
||||
std::atomic<bool> enable_drive_events_;
|
||||
#if defined(_WIN32)
|
||||
bool enable_mount_manager_;
|
||||
std::atomic<bool> enable_mount_manager_;
|
||||
#endif // defined(_WIN32)
|
||||
bool enable_remote_mount_;
|
||||
encrypt_config encrypt_config_;
|
||||
event_level event_level_;
|
||||
std::uint32_t eviction_delay_mins_;
|
||||
bool eviction_uses_accessed_time_;
|
||||
std::uint16_t high_freq_interval_secs_;
|
||||
bool is_remote_mount_;
|
||||
std::uint16_t low_freq_interval_secs_;
|
||||
std::uint64_t max_cache_size_bytes_;
|
||||
std::uint8_t max_upload_count_;
|
||||
std::uint16_t med_freq_interval_secs_;
|
||||
std::uint8_t min_download_timeout_secs_;
|
||||
std::uint16_t online_check_retry_secs_;
|
||||
std::uint16_t orphaned_file_retention_days_;
|
||||
std::string preferred_download_type_;
|
||||
std::uint8_t read_ahead_count_;
|
||||
std::uint8_t remote_client_pool_size_;
|
||||
std::string remote_host_name_or_ip_;
|
||||
std::uint8_t remote_max_connections_;
|
||||
std::uint16_t remote_port_;
|
||||
std::uint16_t remote_receive_timeout_secs_;
|
||||
std::uint16_t remote_send_timeout_secs_;
|
||||
std::string remote_token_;
|
||||
std::uint16_t retry_read_count_;
|
||||
std::uint16_t ring_buffer_file_size_;
|
||||
std::uint16_t task_wait_ms_;
|
||||
std::atomic<event_level> event_level_;
|
||||
std::atomic<std::uint32_t> eviction_delay_mins_;
|
||||
std::atomic<bool> eviction_uses_accessed_time_;
|
||||
std::atomic<std::uint16_t> high_freq_interval_secs_;
|
||||
std::atomic<std::uint16_t> low_freq_interval_secs_;
|
||||
std::atomic<std::uint64_t> max_cache_size_bytes_;
|
||||
std::atomic<std::uint8_t> max_upload_count_;
|
||||
std::atomic<std::uint16_t> med_freq_interval_secs_;
|
||||
std::atomic<std::uint16_t> online_check_retry_secs_;
|
||||
std::atomic<std::uint16_t> orphaned_file_retention_days_;
|
||||
std::atomic<download_type> preferred_download_type_;
|
||||
std::atomic<std::uint16_t> retry_read_count_;
|
||||
std::atomic<std::uint16_t> ring_buffer_file_size_;
|
||||
std::atomic<std::uint16_t> task_wait_ms_;
|
||||
|
||||
private:
|
||||
std::string cache_directory_;
|
||||
host_config hc_{};
|
||||
s3_config s3_config_{};
|
||||
sia_config sia_config_{};
|
||||
std::uint64_t version_{REPERTORY_CONFIG_VERSION};
|
||||
std::string data_directory_;
|
||||
atomic<encrypt_config> encrypt_config_;
|
||||
atomic<host_config> host_config_;
|
||||
std::string log_directory_;
|
||||
mutable std::recursive_mutex read_write_mutex_;
|
||||
mutable std::recursive_mutex remote_mount_mutex_;
|
||||
atomic<remote::remote_config> remote_config_;
|
||||
atomic<remote::remote_mount> remote_mount_;
|
||||
atomic<s3_config> s3_config_;
|
||||
atomic<sia_config> sia_config_;
|
||||
std::unordered_map<std::string, std::function<std::string()>>
|
||||
value_get_lookup_;
|
||||
std::unordered_map<std::string,
|
||||
std::function<std::string(const std::string &)>>
|
||||
value_set_lookup_;
|
||||
std::uint64_t version_{REPERTORY_CONFIG_VERSION};
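Throughout this hunk, scalar settings move to std::atomic<...>, while string- and struct-valued settings (api_auth_, encrypt_config_, host_config_, remote_config_, s3_config_, sia_config_) move to a project-local atomic<T> wrapper; std::atomic requires a trivially copyable type, so std::string and the config structs need a lock-based wrapper instead. A minimal sketch of such a wrapper, illustrative only and not repertory's actual utility class:

// Minimal mutex-backed atomic<T> in the spirit of the wrapper used above;
// hypothetical reduced example, not repertory's implementation.
#include <mutex>
#include <string>
#include <utility>

template <typename data_t> class atomic final {
public:
  atomic() = default;
  atomic(data_t value) : value_(std::move(value)) {}

  [[nodiscard]] auto load() const -> data_t {
    std::lock_guard lock(mtx_);
    return value_; // copy out under the lock
  }

  void store(data_t value) {
    std::lock_guard lock(mtx_);
    value_ = std::move(value);
  }

  operator data_t() const { return load(); }

  auto operator=(data_t value) -> atomic & {
    store(std::move(value));
    return *this;
  }

private:
  data_t value_;
  mutable std::mutex mtx_;
};

// usage sketch: atomic<std::string> api_user{"repertory"};
//               std::string current = api_user; // copies under the lock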
|
||||
|
||||
private:
|
||||
auto get_database_value(const json &json_document, const std::string &name,
|
||||
database_type &dst, bool &success_flag) -> bool;
|
||||
|
||||
template <typename dest>
|
||||
auto get_value(const json &json_document, const std::string &name, dest &dst,
|
||||
bool &success_flag) -> bool {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto ret{false};
|
||||
try {
|
||||
if (json_document.find(name) != json_document.end()) {
|
||||
dst = json_document[name].get<dest>();
|
||||
ret = true;
|
||||
} else {
|
||||
success_flag = false;
|
||||
}
|
||||
} catch (const json::exception &ex) {
|
||||
utils::error::raise_error(function_name, ex, "exception occurred");
|
||||
success_flag = false;
|
||||
ret = false;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto load() -> bool;
|
||||
|
||||
template <typename dest, typename source>
|
||||
auto set_value(dest &dst, const source &src) -> bool {
|
||||
auto ret{false};
|
||||
recur_mutex_lock lock(read_write_mutex_);
|
||||
if (dst != src) {
|
||||
dst = src;
|
||||
config_changed_ = true;
|
||||
save();
|
||||
ret = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
auto set_value(dest &dst, const source &src) -> bool;
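The get_value/set_value templates above are the core of the config rework: each setting is copied out of the persisted JSON document into a typed member, and every write goes through one guarded setter that marks the config dirty and saves. A standalone sketch of the same JSON-lookup pattern follows; it uses nlohmann::json directly, and the read_setting helper and key names are illustrative, not repertory's actual API.

// Copy a named key from a JSON document into a typed destination and record
// whether every expected key was present (missing keys keep their defaults).
#include <cstdint>
#include <iostream>
#include <string>

#include <nlohmann/json.hpp>

using json = nlohmann::json;

template <typename dest_t>
auto read_setting(const json &doc, const std::string &name, dest_t &dst,
                  bool &all_found) -> bool {
  try {
    if (doc.find(name) != doc.end()) {
      dst = doc[name].get<dest_t>();
      return true;
    }
    all_found = false; // key missing: keep the default already in dst
  } catch (const json::exception &) {
    all_found = false; // type mismatch or other JSON error
  }
  return false;
}

int main() {
  json doc = json::parse(R"({"ApiPort": 10000, "ApiUser": "repertory"})");

  std::uint16_t api_port{9999};
  std::string api_user{"default"};
  bool all_found{true};

  read_setting(doc, "ApiPort", api_port, all_found);
  read_setting(doc, "ApiUser", api_user, all_found);
  read_setting(doc, "ApiAuth", api_user, all_found); // missing -> all_found=false

  std::cout << api_port << ' ' << api_user << ' ' << all_found << '\n';
}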
|
||||
|
||||
public:
|
||||
[[nodiscard]] auto get_api_auth() const -> std::string { return api_auth_; }
|
||||
[[nodiscard]] auto get_api_auth() const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_api_port() const -> std::uint16_t { return api_port_; }
|
||||
[[nodiscard]] auto get_api_port() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_api_user() const -> std::string { return api_user_; }
|
||||
[[nodiscard]] auto get_api_user() const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_cache_directory() const -> std::string {
|
||||
return cache_directory_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_chunk_downloader_timeout_secs() const -> std::uint8_t {
|
||||
return std::max(min_download_timeout_secs_, download_timeout_secs_);
|
||||
}
|
||||
[[nodiscard]] auto get_cache_directory() const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_config_file_path() const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_database_type() const -> database_type {
|
||||
return db_type_;
|
||||
}
|
||||
[[nodiscard]] auto get_database_type() const -> database_type;
|
||||
|
||||
[[nodiscard]] auto get_data_directory() const -> std::string {
|
||||
return data_directory_;
|
||||
}
|
||||
[[nodiscard]] auto get_data_directory() const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_enable_chunk_download_timeout() const -> bool {
|
||||
return enable_chunk_downloader_timeout_;
|
||||
}
|
||||
[[nodiscard]] auto get_download_timeout_secs() const -> std::uint8_t;
|
||||
|
||||
[[nodiscard]] auto get_enable_comm_duration_events() const -> bool {
|
||||
return enable_comm_duration_events_;
|
||||
}
|
||||
[[nodiscard]] auto get_enable_download_timeout() const -> bool;
|
||||
|
||||
[[nodiscard]] auto get_enable_drive_events() const -> bool {
|
||||
return enable_drive_events_;
|
||||
}
|
||||
[[nodiscard]] auto get_enable_drive_events() const -> bool;
|
||||
|
||||
[[nodiscard]] auto get_encrypt_config() const -> encrypt_config {
|
||||
return encrypt_config_;
|
||||
}
|
||||
[[nodiscard]] auto get_encrypt_config() const -> encrypt_config;
|
||||
|
||||
#if defined(_WIN32)
|
||||
[[nodiscard]] auto get_enable_mount_manager() const -> bool {
|
||||
return enable_mount_manager_;
|
||||
}
|
||||
[[nodiscard]] auto get_enable_mount_manager() const -> bool;
|
||||
#endif // defined(_WIN32)
|
||||
|
||||
[[nodiscard]] auto get_enable_max_cache_size() const -> bool {
|
||||
return enable_max_cache_size_;
|
||||
}
|
||||
[[nodiscard]] auto get_event_level() const -> event_level;
|
||||
|
||||
[[nodiscard]] auto get_enable_remote_mount() const -> bool {
|
||||
return enable_remote_mount_;
|
||||
}
|
||||
[[nodiscard]] auto get_eviction_delay_mins() const -> std::uint32_t;
|
||||
|
||||
[[nodiscard]] auto get_event_level() const -> event_level {
|
||||
return event_level_;
|
||||
}
|
||||
[[nodiscard]] auto get_eviction_uses_accessed_time() const -> bool;
|
||||
|
||||
[[nodiscard]] auto get_eviction_delay_mins() const -> std::uint32_t {
|
||||
return eviction_delay_mins_;
|
||||
}
|
||||
[[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_eviction_uses_accessed_time() const -> bool {
|
||||
return eviction_uses_accessed_time_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint16_t {
|
||||
return std::max(static_cast<std::uint16_t>(1U), high_freq_interval_secs_);
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_host_config() const -> host_config { return hc_; }
|
||||
|
||||
[[nodiscard]] auto get_is_remote_mount() const -> bool {
|
||||
return is_remote_mount_;
|
||||
}
|
||||
[[nodiscard]] auto get_host_config() const -> host_config;
|
||||
|
||||
[[nodiscard]] auto get_json() const -> json;
|
||||
|
||||
[[nodiscard]] auto get_log_directory() const -> std::string {
|
||||
return log_directory_;
|
||||
}
|
||||
[[nodiscard]] auto get_log_directory() const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint16_t {
|
||||
return std::max(static_cast<std::uint16_t>(1U), low_freq_interval_secs_);
|
||||
}
|
||||
[[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_max_cache_size_bytes() const -> std::uint64_t;
|
||||
|
||||
[[nodiscard]] auto get_max_upload_count() const -> std::uint8_t {
|
||||
return std::max(std::uint8_t(1U), max_upload_count_);
|
||||
}
|
||||
[[nodiscard]] auto get_max_upload_count() const -> std::uint8_t;
|
||||
|
||||
[[nodiscard]] auto get_med_frequency_interval_secs() const -> std::uint16_t {
|
||||
return std::max(static_cast<std::uint16_t>(1U), med_freq_interval_secs_);
|
||||
}
|
||||
[[nodiscard]] auto get_med_frequency_interval_secs() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t {
|
||||
return std::max(std::uint16_t(15U), online_check_retry_secs_);
|
||||
}
|
||||
[[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t {
|
||||
return std::min(static_cast<std::uint16_t>(31U),
|
||||
std::max(static_cast<std::uint16_t>(1U),
|
||||
orphaned_file_retention_days_));
|
||||
}
|
||||
[[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_preferred_download_type() const -> download_type {
|
||||
return download_type_from_string(preferred_download_type_,
|
||||
download_type::fallback);
|
||||
}
|
||||
[[nodiscard]] auto get_preferred_download_type() const -> download_type;
|
||||
|
||||
[[nodiscard]] auto get_provider_type() const -> provider_type {
|
||||
return prov_;
|
||||
}
|
||||
[[nodiscard]] auto get_provider_type() const -> provider_type;
|
||||
|
||||
[[nodiscard]] auto get_read_ahead_count() const -> std::uint8_t {
|
||||
return std::max(static_cast<std::uint8_t>(1U), read_ahead_count_);
|
||||
}
|
||||
[[nodiscard]] auto get_remote_config() const -> remote::remote_config;
|
||||
|
||||
[[nodiscard]] auto get_remote_client_pool_size() const -> std::uint8_t {
|
||||
return std::max(static_cast<std::uint8_t>(5U), remote_client_pool_size_);
|
||||
}
|
||||
[[nodiscard]] auto get_remote_mount() const -> remote::remote_mount;
|
||||
|
||||
[[nodiscard]] auto get_remote_host_name_or_ip() const -> std::string {
|
||||
return remote_host_name_or_ip_;
|
||||
}
|
||||
[[nodiscard]] auto get_retry_read_count() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_remote_max_connections() const -> std::uint8_t {
|
||||
return std::max(static_cast<std::uint8_t>(1U), remote_max_connections_);
|
||||
}
|
||||
[[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_remote_port() const -> std::uint16_t {
|
||||
return remote_port_;
|
||||
}
|
||||
[[nodiscard]] auto get_s3_config() const -> s3_config;
|
||||
|
||||
[[nodiscard]] auto get_remote_receive_timeout_secs() const -> std::uint16_t {
|
||||
return remote_receive_timeout_secs_;
|
||||
}
|
||||
[[nodiscard]] auto get_sia_config() const -> sia_config;
|
||||
|
||||
[[nodiscard]] auto get_remote_send_timeout_secs() const -> std::uint16_t {
|
||||
return remote_send_timeout_secs_;
|
||||
}
|
||||
[[nodiscard]] auto get_task_wait_ms() const -> std::uint16_t;
|
||||
|
||||
[[nodiscard]] auto get_remote_token() const -> std::string {
|
||||
return remote_token_;
|
||||
}
|
||||
[[nodiscard]] auto
|
||||
get_value_by_name(const std::string &name) const -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_retry_read_count() const -> std::uint16_t {
|
||||
return std::max(std::uint16_t(2), retry_read_count_);
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t {
|
||||
return std::max(
|
||||
static_cast<std::uint16_t>(64U),
|
||||
std::min(static_cast<std::uint16_t>(1024U), ring_buffer_file_size_));
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_s3_config() const -> s3_config { return s3_config_; }
|
||||
|
||||
[[nodiscard]] auto get_sia_config() const -> sia_config {
|
||||
return sia_config_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_task_wait_ms() const -> std::uint16_t {
|
||||
return std::max(static_cast<std::uint16_t>(50U), task_wait_ms_);
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_value_by_name(const std::string &name) -> std::string;
|
||||
|
||||
[[nodiscard]] auto get_version() const -> std::uint64_t { return version_; }
|
||||
[[nodiscard]] auto get_version() const -> std::uint64_t;
|
||||
|
||||
void save();
|
||||
|
||||
void set_api_auth(const std::string &api_auth) {
|
||||
set_value(api_auth_, api_auth);
|
||||
}
|
||||
void set_api_auth(const std::string &value);
|
||||
|
||||
void set_api_port(std::uint16_t api_port) { set_value(api_port_, api_port); }
|
||||
void set_api_port(std::uint16_t value);
|
||||
|
||||
void set_api_user(const std::string &api_user) {
|
||||
set_value(api_user_, api_user);
|
||||
}
|
||||
void set_api_user(const std::string &value);
|
||||
|
||||
void set_chunk_downloader_timeout_secs(
|
||||
std::uint8_t chunk_downloader_timeout_secs) {
|
||||
set_value(download_timeout_secs_, chunk_downloader_timeout_secs);
|
||||
}
|
||||
void set_download_timeout_secs(std::uint8_t value);
|
||||
|
||||
void set_database_type(const database_type &type) {
|
||||
set_value(db_type_, type);
|
||||
}
|
||||
void set_database_type(const database_type &value);
|
||||
|
||||
void
|
||||
set_enable_chunk_downloader_timeout(bool enable_chunk_downloader_timeout) {
|
||||
set_value(enable_chunk_downloader_timeout_,
|
||||
enable_chunk_downloader_timeout);
|
||||
}
|
||||
void set_enable_download_timeout(bool value);
|
||||
|
||||
void set_enable_comm_duration_events(bool enable_comm_duration_events) {
|
||||
set_value(enable_comm_duration_events_, enable_comm_duration_events);
|
||||
}
|
||||
|
||||
void set_enable_drive_events(bool enable_drive_events) {
|
||||
set_value(enable_drive_events_, enable_drive_events);
|
||||
}
|
||||
|
||||
void set_enable_max_cache_size(bool enable_max_cache_size) {
|
||||
set_value(enable_max_cache_size_, enable_max_cache_size);
|
||||
}
|
||||
void set_enable_drive_events(bool value);
|
||||
|
||||
#if defined(_WIN32)
|
||||
void set_enable_mount_manager(bool enable_mount_manager) {
|
||||
set_value(enable_mount_manager_, enable_mount_manager);
|
||||
}
|
||||
void set_enable_mount_manager(bool value);
|
||||
#endif // defined(_WIN32)
|
||||
|
||||
void set_enable_remote_mount(bool enable_remote_mount);
|
||||
void set_event_level(const event_level &value);
|
||||
|
||||
void set_event_level(const event_level &level) {
|
||||
if (set_value(event_level_, level)) {
|
||||
event_system::instance().raise<event_level_changed>(
|
||||
event_level_to_string(level));
|
||||
}
|
||||
}
|
||||
void set_encrypt_config(encrypt_config value);
|
||||
|
||||
void set_eviction_delay_mins(std::uint32_t eviction_delay_mins) {
|
||||
set_value(eviction_delay_mins_, eviction_delay_mins);
|
||||
}
|
||||
void set_eviction_delay_mins(std::uint32_t value);
|
||||
|
||||
void set_eviction_uses_accessed_time(bool eviction_uses_accessed_time) {
|
||||
set_value(eviction_uses_accessed_time_, eviction_uses_accessed_time);
|
||||
}
|
||||
void set_eviction_uses_accessed_time(bool value);
|
||||
|
||||
void
|
||||
set_high_frequency_interval_secs(std::uint16_t high_frequency_interval_secs) {
|
||||
set_value(high_freq_interval_secs_, high_frequency_interval_secs);
|
||||
}
|
||||
void set_high_frequency_interval_secs(std::uint16_t value);
|
||||
|
||||
#if defined(PROJECT_TESTING)
|
||||
void set_host_config(host_config hc) {
|
||||
config_changed_ = true;
|
||||
hc_ = std::move(hc);
|
||||
save();
|
||||
}
|
||||
void set_host_config(host_config value);
|
||||
|
||||
void set_s3_config(s3_config s3) {
|
||||
config_changed_ = true;
|
||||
s3_config_ = std::move(s3);
|
||||
save();
|
||||
}
|
||||
void set_low_frequency_interval_secs(std::uint16_t value);
|
||||
|
||||
void set_sia_config(sia_config sia) {
|
||||
config_changed_ = true;
|
||||
sia_config_ = std::move(sia);
|
||||
save();
|
||||
}
|
||||
#endif // defined(PROJECT_TESTING)
|
||||
void set_max_cache_size_bytes(std::uint64_t value);
|
||||
|
||||
void set_is_remote_mount(bool is_remote_mount);
|
||||
void set_max_upload_count(std::uint8_t value);
|
||||
|
||||
void
|
||||
set_low_frequency_interval_secs(std::uint16_t low_frequency_interval_secs) {
|
||||
set_value(low_freq_interval_secs_, low_frequency_interval_secs);
|
||||
}
|
||||
void set_med_frequency_interval_secs(std::uint16_t value);
|
||||
|
||||
void set_max_cache_size_bytes(std::uint64_t max_cache_size_bytes) {
|
||||
set_value(max_cache_size_bytes_, max_cache_size_bytes);
|
||||
}
|
||||
void set_online_check_retry_secs(std::uint16_t value);
|
||||
|
||||
void set_max_upload_count(std::uint8_t max_upload_count) {
|
||||
set_value(max_upload_count_, max_upload_count);
|
||||
}
|
||||
void set_orphaned_file_retention_days(std::uint16_t value);
|
||||
|
||||
void
|
||||
set_med_frequency_interval_secs(std::uint16_t med_frequency_interval_secs) {
|
||||
set_value(med_freq_interval_secs_, med_frequency_interval_secs);
|
||||
}
|
||||
void set_preferred_download_type(const download_type &value);
|
||||
|
||||
void set_online_check_retry_secs(std::uint16_t online_check_retry_secs) {
|
||||
set_value(online_check_retry_secs_, online_check_retry_secs);
|
||||
}
|
||||
void set_remote_config(remote::remote_config value);
|
||||
|
||||
void
|
||||
set_orphaned_file_retention_days(std::uint16_t orphaned_file_retention_days) {
|
||||
set_value(orphaned_file_retention_days_, orphaned_file_retention_days);
|
||||
}
|
||||
void set_remote_mount(remote::remote_mount value);
|
||||
|
||||
void set_preferred_download_type(const download_type &dt) {
|
||||
set_value(preferred_download_type_, download_type_to_string(dt));
|
||||
}
|
||||
void set_retry_read_count(std::uint16_t value);
|
||||
|
||||
void set_read_ahead_count(std::uint8_t read_ahead_count) {
|
||||
set_value(read_ahead_count_, read_ahead_count);
|
||||
}
|
||||
void set_ring_buffer_file_size(std::uint16_t value);
|
||||
|
||||
void set_remote_client_pool_size(std::uint8_t remote_client_pool_size) {
|
||||
set_value(remote_client_pool_size_, remote_client_pool_size);
|
||||
}
|
||||
void set_s3_config(s3_config value);
|
||||
|
||||
void set_ring_buffer_file_size(std::uint16_t ring_buffer_file_size) {
|
||||
set_value(ring_buffer_file_size_, ring_buffer_file_size);
|
||||
}
|
||||
void set_sia_config(sia_config value);
|
||||
|
||||
void set_remote_host_name_or_ip(const std::string &remote_host_name_or_ip) {
|
||||
set_value(remote_host_name_or_ip_, remote_host_name_or_ip);
|
||||
}
|
||||
|
||||
void set_remote_max_connections(std::uint8_t remote_max_connections) {
|
||||
set_value(remote_max_connections_, remote_max_connections);
|
||||
}
|
||||
|
||||
void set_remote_port(std::uint16_t remote_port) {
|
||||
set_value(remote_port_, remote_port);
|
||||
}
|
||||
|
||||
void
|
||||
set_remote_receive_timeout_secs(std::uint16_t remote_receive_timeout_secs) {
|
||||
set_value(remote_receive_timeout_secs_, remote_receive_timeout_secs);
|
||||
}
|
||||
|
||||
void set_remote_send_timeout_secs(std::uint16_t remote_send_timeout_secs) {
|
||||
set_value(remote_send_timeout_secs_, remote_send_timeout_secs);
|
||||
}
|
||||
|
||||
void set_remote_token(const std::string &remote_token) {
|
||||
set_value(remote_token_, remote_token);
|
||||
}
|
||||
|
||||
void set_retry_read_count(std::uint16_t retry_read_count) {
|
||||
set_value(retry_read_count_, retry_read_count);
|
||||
}
|
||||
|
||||
void set_task_wait_ms(std::uint16_t task_wait_ms) {
|
||||
set_value(task_wait_ms_, task_wait_ms);
|
||||
}
|
||||
void set_task_wait_ms(std::uint16_t value);
|
||||
|
||||
[[nodiscard]] auto set_value_by_name(const std::string &name,
|
||||
const std::string &value) -> std::string;
|
||||
|
@ -23,6 +23,7 @@
|
||||
#define REPERTORY_INCLUDE_COMM_PACKET_PACKET_CLIENT_HPP_
|
||||
|
||||
#include "comm/packet/packet.hpp"
|
||||
#include "types/remote.hpp"
|
||||
|
||||
using boost::asio::ip::tcp;
|
||||
|
||||
@ -36,9 +37,7 @@ private:
|
||||
};
|
||||
|
||||
public:
|
||||
packet_client(std::string host_name_or_ip, std::uint8_t max_connections,
|
||||
std::uint16_t port, std::uint16_t receive_timeout,
|
||||
std::uint16_t send_timeout, std::string encryption_token);
|
||||
packet_client(remote::remote_config cfg);
|
||||
|
||||
~packet_client();
|
||||
|
||||
@ -49,12 +48,7 @@ public:
|
||||
|
||||
private:
|
||||
boost::asio::io_context io_context_;
|
||||
std::string host_name_or_ip_;
|
||||
std::uint8_t max_connections_;
|
||||
std::uint16_t port_;
|
||||
std::uint16_t receive_timeout_;
|
||||
std::uint16_t send_timeout_;
|
||||
std::string encryption_token_;
|
||||
remote::remote_config cfg_;
|
||||
std::string unique_id_;
|
||||
|
||||
private:
|
||||
@ -75,21 +69,21 @@ private:
|
||||
|
||||
void put_client(std::shared_ptr<client> &cli);
|
||||
|
||||
[[nodiscard]] auto read_packet(client &cli,
|
||||
packet &response) -> packet::error_type;
|
||||
[[nodiscard]] auto read_packet(client &cli, packet &response)
|
||||
-> packet::error_type;
|
||||
|
||||
void resolve();
|
||||
|
||||
public:
|
||||
[[nodiscard]] auto send(std::string_view method,
|
||||
std::uint32_t &service_flags) -> packet::error_type;
|
||||
[[nodiscard]] auto send(std::string_view method, std::uint32_t &service_flags)
|
||||
-> packet::error_type;
|
||||
|
||||
[[nodiscard]] auto send(std::string_view method, packet &request,
|
||||
std::uint32_t &service_flags) -> packet::error_type;
|
||||
|
||||
[[nodiscard]] auto send(std::string_view method, packet &request,
|
||||
packet &response,
|
||||
std::uint32_t &service_flags) -> packet::error_type;
|
||||
packet &response, std::uint32_t &service_flags)
|
||||
-> packet::error_type;
|
||||
};
|
||||
} // namespace repertory
|
||||
|
||||
|
@ -42,66 +42,75 @@ private:
|
||||
|
||||
private:
|
||||
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
|
||||
rocksdb::ColumnFamilyHandle *directory_family_{};
|
||||
rocksdb::ColumnFamilyHandle *file_family_{};
|
||||
rocksdb::ColumnFamilyHandle *path_family_{};
|
||||
rocksdb::ColumnFamilyHandle *source_family_{};
|
||||
|
||||
private:
|
||||
void create_or_open(bool clear);
|
||||
|
||||
[[nodiscard]] auto create_iterator() const
|
||||
[[nodiscard]] auto create_iterator(rocksdb::ColumnFamilyHandle *family) const
|
||||
-> std::shared_ptr<rocksdb::Iterator>;
|
||||
|
||||
[[nodiscard]] static auto
|
||||
perform_action(std::string_view function_name,
|
||||
std::function<rocksdb::Status()> action) -> bool;
|
||||
std::function<rocksdb::Status()> action) -> api_error;
|
||||
|
||||
[[nodiscard]] auto perform_action(
|
||||
std::string_view function_name,
|
||||
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool;
|
||||
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
|
||||
-> api_error;
|
||||
|
||||
[[nodiscard]] auto remove_item(const std::string &api_path,
|
||||
const std::string &source_path,
|
||||
rocksdb::Transaction *txn) -> rocksdb::Status;
|
||||
|
||||
public:
|
||||
[[nodiscard]] auto add_directory(const std::string &api_path,
|
||||
const std::string &source_path)
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
add_directory(const std::string &api_path,
|
||||
const std::string &source_path) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto add_or_update_file(const i_file_db::file_data &data)
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
add_or_update_file(const i_file_db::file_data &data) -> api_error override;
|
||||
|
||||
void clear() override;
|
||||
|
||||
[[nodiscard]] auto count() const -> std::uint64_t override;
|
||||
|
||||
[[nodiscard]] auto get_api_path(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
get_api_path(const std::string &source_path,
|
||||
std::string &api_path) const -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_directory_api_path(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
get_directory_api_path(const std::string &source_path,
|
||||
std::string &api_path) const -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_directory_source_path(const std::string &api_path,
|
||||
std::string &source_path) const
|
||||
-> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_file_api_path(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
get_file_api_path(const std::string &source_path,
|
||||
std::string &api_path) const -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_file_data(const std::string &api_path,
|
||||
i_file_db::file_data &data) const
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
get_file_data(const std::string &api_path,
|
||||
i_file_db::file_data &data) const -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_file_source_path(const std::string &api_path,
|
||||
std::string &source_path) const
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
get_file_source_path(const std::string &api_path,
|
||||
std::string &source_path) const -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_item_list() const
|
||||
-> std::vector<i_file_db::file_info> override;
|
||||
[[nodiscard]] auto
|
||||
get_item_list() const -> std::vector<i_file_db::file_info> override;
|
||||
|
||||
[[nodiscard]] auto get_source_path(const std::string &api_path,
|
||||
std::string &source_path) const
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
get_source_path(const std::string &api_path,
|
||||
std::string &source_path) const -> api_error override;
|
||||
|
||||
[[nodiscard]] auto remove_item(const std::string &api_path)
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
remove_item(const std::string &api_path) -> api_error override;
|
||||
};
|
||||
} // namespace repertory
|
||||
|
||||
|
@ -43,7 +43,7 @@ private:
|
||||
|
||||
private:
|
||||
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
|
||||
rocksdb::ColumnFamilyHandle *default_family_{};
|
||||
rocksdb::ColumnFamilyHandle *meta_family_{};
|
||||
rocksdb::ColumnFamilyHandle *pinned_family_{};
|
||||
rocksdb::ColumnFamilyHandle *size_family_{};
|
||||
rocksdb::ColumnFamilyHandle *source_family_{};
|
||||
|
@ -34,7 +34,7 @@ public:
|
||||
private:
|
||||
struct open_directory final {
|
||||
std::shared_ptr<directory_iterator> iterator;
|
||||
std::vector<std::uint64_t> handles{};
|
||||
std::vector<std::uint64_t> handles;
|
||||
std::chrono::system_clock::time_point last_update{
|
||||
std::chrono::system_clock::now()};
|
||||
};
|
||||
@ -60,8 +60,8 @@ public:
|
||||
void execute_action(const std::string &api_path,
|
||||
const execute_callback &execute);
|
||||
|
||||
[[nodiscard]] auto
|
||||
get_directory(std::uint64_t handle) -> std::shared_ptr<directory_iterator>;
|
||||
[[nodiscard]] auto get_directory(std::uint64_t handle)
|
||||
-> std::shared_ptr<directory_iterator>;
|
||||
|
||||
[[nodiscard]] auto remove_directory(const std::string &api_path)
|
||||
-> std::shared_ptr<directory_iterator>;
|
||||
|
@ -31,22 +31,23 @@ class i_provider;
|
||||
|
||||
class eviction final : public single_thread_service_base {
|
||||
public:
|
||||
eviction(i_provider &provider, const app_config &config, i_file_manager &fm)
|
||||
eviction(i_provider &provider, const app_config &config,
|
||||
i_file_manager &file_mgr)
|
||||
: single_thread_service_base("eviction"),
|
||||
provider_(provider),
|
||||
config_(config),
|
||||
fm_(fm) {}
|
||||
file_mgr_(file_mgr),
|
||||
provider_(provider) {}
|
||||
|
||||
~eviction() override = default;
|
||||
|
||||
private:
|
||||
i_provider &provider_;
|
||||
const app_config &config_;
|
||||
i_file_manager &fm_;
|
||||
i_file_manager &file_mgr_;
|
||||
i_provider &provider_;
|
||||
|
||||
private:
|
||||
[[nodiscard]] auto
|
||||
check_minimum_requirements(const std::string &file_path) -> bool;
|
||||
[[nodiscard]] auto check_minimum_requirements(const std::string &file_path)
|
||||
-> bool;
|
||||
|
||||
[[nodiscard]] auto get_filtered_cached_files() -> std::deque<std::string>;
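The eviction constructor above now lists config_, file_mgr_, provider_ in the same order the members are declared. Members are always initialized in declaration order, regardless of how the initializer list is written, so keeping the two aligned avoids -Wreorder warnings and ordering surprises. A reduced, hypothetical example:

#include <iostream>

struct widget {
  widget(int first, int second)
      : second_(second), // listed first, but runs second
        first_(first) {} // runs first: declaration order wins (-Wreorder warns)

  int first_;
  int second_;
};

int main() {
  widget w(1, 2);
  std::cout << w.first_ << ' ' << w.second_ << '\n'; // prints "1 2"
}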
|
||||
|
||||
|
@ -29,6 +29,8 @@
|
||||
#include "drives/fuse/remotefuse/i_remote_instance.hpp"
|
||||
#include "drives/remote/remote_open_file_table.hpp"
|
||||
#include "drives/winfsp/remotewinfsp/i_remote_instance.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "types/remote.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "utils/base64.hpp"
|
||||
@ -52,7 +54,7 @@ public:
|
||||
: config_(config),
|
||||
drive_(drv),
|
||||
mount_location_(std::move(mount_location)),
|
||||
client_pool_(config.get_remote_client_pool_size()) {
|
||||
client_pool_(config.get_remote_mount().client_pool_size) {
|
||||
event_system::instance().raise<service_started>("remote_server_base");
|
||||
handler_lookup_.insert(
|
||||
{"::winfsp_can_delete",
|
||||
@ -1357,7 +1359,8 @@ public:
|
||||
}});
|
||||
|
||||
packet_server_ = std::make_unique<packet_server>(
|
||||
config_.get_remote_port(), config_.get_remote_token(), 10,
|
||||
config_.get_remote_mount().api_port,
|
||||
config_.get_remote_mount().encryption_token, 10,
|
||||
[this](const std::string &client_id) {
|
||||
return this->closed_handler(client_id);
|
||||
},
|
||||
|
@ -32,9 +32,12 @@ enum class event_level {
|
||||
trace,
|
||||
};
|
||||
|
||||
auto event_level_from_string(std::string level) -> event_level;
|
||||
[[nodiscard]] auto
|
||||
event_level_from_string(std::string level,
|
||||
event_level default_level = event_level::info)
|
||||
-> event_level;
|
||||
|
||||
auto event_level_to_string(event_level level) -> std::string;
|
||||
[[nodiscard]] auto event_level_to_string(event_level level) -> std::string;
|
||||
|
||||
class event {
|
||||
protected:
|
||||
@ -72,4 +75,18 @@ public:
|
||||
};
|
||||
} // namespace repertory
|
||||
|
||||
NLOHMANN_JSON_NAMESPACE_BEGIN
template <> struct adl_serializer<std::atomic<repertory::event_level>> {
  static void to_json(json &data,
                      const std::atomic<repertory::event_level> &value) {
    data = repertory::event_level_to_string(value.load());
  }

  static void from_json(const json &data,
                        std::atomic<repertory::event_level> &value) {
    value.store(repertory::event_level_from_string(data.get<std::string>()));
  }
};
NLOHMANN_JSON_NAMESPACE_END

#endif // REPERTORY_INCLUDE_EVENTS_EVENT_HPP_
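The adl_serializer specialization added here is what lets a std::atomic<event_level> member convert to and from a json value directly, which the new atomic members in app_config rely on. A self-contained sketch of the same technique with a hypothetical enum, assuming nlohmann::json 3.11+ (for NLOHMANN_JSON_NAMESPACE_BEGIN):

#include <atomic>
#include <iostream>
#include <string>

#include <nlohmann/json.hpp>

// Hypothetical enum used only for illustration.
enum class log_level { error, info, debug };

auto log_level_to_string(log_level level) -> std::string {
  switch (level) {
  case log_level::error:
    return "error";
  case log_level::debug:
    return "debug";
  default:
    return "info";
  }
}

auto log_level_from_string(const std::string &level) -> log_level {
  if (level == "error") {
    return log_level::error;
  }
  if (level == "debug") {
    return log_level::debug;
  }
  return log_level::info; // fall back to a sane default
}

NLOHMANN_JSON_NAMESPACE_BEGIN
template <> struct adl_serializer<std::atomic<log_level>> {
  static void to_json(json &data, const std::atomic<log_level> &value) {
    data = log_level_to_string(value.load());
  }

  static void from_json(const json &data, std::atomic<log_level> &value) {
    value.store(log_level_from_string(data.get<std::string>()));
  }
};
NLOHMANN_JSON_NAMESPACE_END

int main() {
  std::atomic<log_level> level{log_level::debug};

  nlohmann::json doc = level; // uses to_json
  doc.get_to(level);          // uses from_json (round trip)
  std::cout << doc.dump() << '\n';
}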
|
||||
|
@ -44,6 +44,7 @@ using event_consumer = event_system::event_consumer;
|
||||
#define E_FROM_STRING(t) t
|
||||
#define E_FROM_UINT16(t) std::to_string(t)
|
||||
#define E_FROM_UINT64(t) std::to_string(t)
|
||||
#define E_FROM_DOWNLOAD_TYPE(t) download_type_to_string(t)
|
||||
|
||||
#define E_PROP(type, name, short_name, ts) \
|
||||
private: \
|
||||
|
@ -0,0 +1,67 @@
|
||||
/*
|
||||
Copyright <2018-2024> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
|
||||
#define REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
|
||||
|
||||
#include "types/repertory.hpp"
|
||||
|
||||
namespace repertory {
|
||||
class app_config;
|
||||
|
||||
class cache_size_mgr final {
|
||||
public:
|
||||
cache_size_mgr(const cache_size_mgr &) = delete;
|
||||
cache_size_mgr(cache_size_mgr &&) = delete;
|
||||
auto operator=(const cache_size_mgr &) -> cache_size_mgr & = delete;
|
||||
auto operator=(cache_size_mgr &&) -> cache_size_mgr & = delete;
|
||||
|
||||
protected:
|
||||
cache_size_mgr() = default;
|
||||
|
||||
~cache_size_mgr() { stop(); }
|
||||
|
||||
private:
|
||||
static cache_size_mgr instance_;
|
||||
|
||||
private:
|
||||
app_config *cfg_{nullptr};
|
||||
std::uint64_t cache_size_{0U};
|
||||
mutable std::mutex mtx_;
|
||||
std::condition_variable notify_;
|
||||
stop_type stop_requested_{false};
|
||||
|
||||
public:
|
||||
[[nodiscard]] auto expand(std::uint64_t size) -> api_error;
|
||||
|
||||
void initialize(app_config *cfg);
|
||||
|
||||
[[nodiscard]] static auto instance() -> cache_size_mgr & { return instance_; }
|
||||
|
||||
[[nodiscard]] auto shrink(std::uint64_t size) -> api_error;
|
||||
|
||||
[[nodiscard]] auto size() const -> std::uint64_t;
|
||||
|
||||
void stop();
|
||||
};
|
||||
} // namespace repertory
|
||||
|
||||
#endif // REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
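cache_size_mgr is a new process-wide singleton; its mutex, condition_variable, and stop flag together with the expand/shrink pair point at the behavior behind issue #25 (writes block when the maximum cache size is reached). Below is a generic sketch of that blocking-budget pattern with a hypothetical cache_budget class, not repertory's actual implementation:

#include <condition_variable>
#include <cstdint>
#include <mutex>

class cache_budget final {
public:
  explicit cache_budget(std::uint64_t max_size) : max_size_(max_size) {}

  // Block until the requested bytes fit under the budget (or stop() is called).
  // Returns false if stopped before space became available.
  [[nodiscard]] auto expand(std::uint64_t size) -> bool {
    std::unique_lock lock(mtx_);
    notify_.wait(lock, [this, size]() {
      return stop_requested_ || (cache_size_ + size <= max_size_);
    });
    if (stop_requested_) {
      return false;
    }
    cache_size_ += size;
    return true;
  }

  // Release bytes (e.g. after a cached chunk is evicted) and wake waiters.
  void shrink(std::uint64_t size) {
    std::unique_lock lock(mtx_);
    cache_size_ -= (size > cache_size_ ? cache_size_ : size);
    notify_.notify_all();
  }

  [[nodiscard]] auto size() const -> std::uint64_t {
    std::unique_lock lock(mtx_);
    return cache_size_;
  }

  void stop() {
    std::unique_lock lock(mtx_);
    stop_requested_ = true;
    notify_.notify_all();
  }

private:
  std::uint64_t max_size_;
  std::uint64_t cache_size_{0U};
  mutable std::mutex mtx_;
  std::condition_variable notify_;
  bool stop_requested_{false};
};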
|
@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright <2018-2024> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
|
||||
#define REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
|
||||
|
||||
#include "file_manager/ring_buffer_base.hpp"
|
||||
|
||||
#include "types/repertory.hpp"
|
||||
|
||||
namespace repertory {
|
||||
class i_provider;
|
||||
class i_upload_manager;
|
||||
|
||||
class direct_open_file final : public ring_buffer_base {
|
||||
public:
|
||||
direct_open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
|
||||
filesystem_item fsi, i_provider &provider);
|
||||
|
||||
~direct_open_file() override;
|
||||
|
||||
public:
|
||||
direct_open_file() = delete;
|
||||
direct_open_file(const direct_open_file &) noexcept = delete;
|
||||
direct_open_file(direct_open_file &&) noexcept = delete;
|
||||
auto operator=(direct_open_file &&) noexcept -> direct_open_file & = delete;
|
||||
auto
|
||||
operator=(const direct_open_file &) noexcept -> direct_open_file & = delete;
|
||||
|
||||
private:
|
||||
std::array<data_buffer, min_ring_size> ring_data_;
|
||||
|
||||
protected:
|
||||
[[nodiscard]] auto on_check_start() -> bool override;
|
||||
|
||||
[[nodiscard]] auto
|
||||
on_chunk_downloaded(std::size_t /* chunk */,
|
||||
const data_buffer & /* buffer */) -> api_error override {
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto
|
||||
on_read_chunk(std::size_t chunk, std::size_t read_size,
|
||||
std::uint64_t read_offset, data_buffer &data,
|
||||
std::size_t &bytes_read) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto use_buffer(std::size_t chunk,
|
||||
std::function<api_error(data_buffer &)> func)
|
||||
-> api_error override;
|
||||
|
||||
public:
|
||||
[[nodiscard]] auto native_operation(native_operation_callback /* callback */)
|
||||
-> api_error override {
|
||||
return api_error::not_supported;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto native_operation(std::uint64_t /* new_file_size */,
|
||||
native_operation_callback /* callback */)
|
||||
-> api_error override {
|
||||
return api_error::not_supported;
|
||||
}
|
||||
};
|
||||
} // namespace repertory
|
||||
|
||||
#endif // REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
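direct_open_file keeps a fixed std::array of min_ring_size data buffers and serves reads straight from the provider (its on_chunk_downloaded is a no-op), while ring_buffer_base, shown further down, tracks a window of chunks via ring_begin_/ring_end_/ring_pos_ and forward/reverse/set. The sketch below only illustrates that sliding-window bookkeeping under assumed semantics; it is a hypothetical helper, not repertory's code:

#include <algorithm>
#include <cstddef>
#include <iostream>

struct chunk_window {
  std::size_t ring_size{5U};  // mirrors min_ring_size
  std::size_t total_chunks{}; // chunks in the whole file
  std::size_t begin{};        // first chunk held in the ring
  std::size_t end{};          // last chunk held in the ring
  std::size_t pos{};          // chunk the reader is currently on

  void set(std::size_t first_chunk, std::size_t current_chunk) {
    begin = first_chunk;
    end = std::min(total_chunks - 1U, first_chunk + ring_size - 1U);
    pos = current_chunk;
  }

  void forward(std::size_t count) {
    pos = std::min(total_chunks - 1U, pos + count);
    if (pos > end) { // slide the window so pos stays inside it
      end = pos;
      begin = end - (ring_size - 1U);
    }
  }
};

int main() {
  chunk_window win{};
  win.total_chunks = 100U;
  win.set(0U, 0U); // window [0,4], pos 0
  win.forward(6U); // pos 6 -> window slides to [2,6]
  std::cout << win.begin << ' ' << win.end << ' ' << win.pos << '\n';
}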
|
@ -32,23 +32,6 @@ E_SIMPLE2(download_begin, info, true,
|
||||
std::string, dest_path, dest, E_FROM_STRING
|
||||
);
|
||||
|
||||
E_SIMPLE5(download_chunk_begin, debug, true,
|
||||
std::string, api_path, ap, E_FROM_STRING,
|
||||
std::string, dest_path, dest, E_FROM_STRING,
|
||||
std::size_t, chunk, chunk, E_FROM_SIZE_T,
|
||||
std::size_t, total, total, E_FROM_SIZE_T,
|
||||
std::size_t, complete, complete, E_FROM_SIZE_T
|
||||
);
|
||||
|
||||
E_SIMPLE6(download_chunk_end, debug, true,
|
||||
std::string, api_path, ap, E_FROM_STRING,
|
||||
std::string, dest_path, dest, E_FROM_STRING,
|
||||
std::size_t, chunk, chunk, E_FROM_SIZE_T,
|
||||
std::size_t, total, total, E_FROM_SIZE_T,
|
||||
std::size_t, complete, complete, E_FROM_SIZE_T,
|
||||
api_error, result, result, E_FROM_API_FILE_ERROR
|
||||
);
|
||||
|
||||
E_SIMPLE3(download_end, info, true,
|
||||
std::string, api_path, ap, E_FROM_STRING,
|
||||
std::string, dest_path, dest, E_FROM_STRING,
|
||||
@ -91,6 +74,12 @@ E_SIMPLE2(download_resume_removed, debug, true,
|
||||
E_SIMPLE1(item_timeout, trace, true,
|
||||
std::string, api_path, ap, E_FROM_STRING
|
||||
);
|
||||
|
||||
E_SIMPLE3(download_type_selected, debug, true,
|
||||
std::string, api_path, ap, E_FROM_STRING,
|
||||
std::string, source, src, E_FROM_STRING,
|
||||
download_type, download_type, type, E_FROM_DOWNLOAD_TYPE
|
||||
);
|
||||
// clang-format on
|
||||
} // namespace repertory
|
||||
|
||||
|
@ -68,7 +68,7 @@ private:
|
||||
std::unique_ptr<std::thread> upload_thread_;
|
||||
|
||||
private:
|
||||
void close_all(const std::string &api_path);
|
||||
[[nodiscard]] auto close_all(const std::string &api_path) -> bool;
|
||||
|
||||
void close_timed_out_files();
|
||||
|
||||
@ -108,6 +108,11 @@ public:
|
||||
void remove_resume(const std::string &api_path,
|
||||
const std::string &source_path) override;
|
||||
|
||||
static auto remove_source_and_shrink_cache(const std::string &api_path,
|
||||
const std::string &source_path,
|
||||
std::uint64_t file_size,
|
||||
bool allocated) -> bool;
|
||||
|
||||
void remove_upload(const std::string &api_path) override;
|
||||
|
||||
void store_resume(const i_open_file &file) override;
|
||||
|
@ -62,8 +62,12 @@ public:
|
||||
|
||||
[[nodiscard]] virtual auto get_source_path() const -> std::string = 0;
|
||||
|
||||
[[nodiscard]] virtual auto is_complete() const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto is_directory() const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto is_write_supported() const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto has_handle(std::uint64_t handle) const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto
|
||||
@ -93,6 +97,8 @@ class i_closeable_open_file : public i_open_file {
|
||||
public:
|
||||
virtual void add(std::uint64_t handle, open_file_data ofd) = 0;
|
||||
|
||||
[[nodiscard]] virtual auto get_allocated() const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto can_close() const -> bool = 0;
|
||||
|
||||
virtual auto close() -> bool = 0;
|
||||
@ -100,12 +106,8 @@ public:
|
||||
[[nodiscard]] virtual auto get_handles() const
|
||||
-> std::vector<std::uint64_t> = 0;
|
||||
|
||||
[[nodiscard]] virtual auto is_complete() const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto is_modified() const -> bool = 0;
|
||||
|
||||
[[nodiscard]] virtual auto is_write_supported() const -> bool = 0;
|
||||
|
||||
virtual void remove(std::uint64_t handle) = 0;
|
||||
|
||||
virtual void remove_all() = 0;
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "file_manager/open_file_base.hpp"
|
||||
|
||||
#include "types/repertory.hpp"
|
||||
#include "utils/types/file/i_file.hpp"
|
||||
|
||||
namespace repertory {
|
||||
class i_provider;
|
||||
@ -67,45 +68,55 @@ private:
|
||||
i_upload_manager &mgr_;
|
||||
|
||||
private:
|
||||
bool notified_ = false;
|
||||
bool allocated{false};
|
||||
std::unique_ptr<utils::file::i_file> nf_;
|
||||
bool notified_{false};
|
||||
std::size_t read_chunk_{};
|
||||
boost::dynamic_bitset<> read_state_;
|
||||
std::unique_ptr<std::thread> reader_thread_;
|
||||
std::unique_ptr<std::thread> download_thread_;
|
||||
stop_type stop_requested_ = false;
|
||||
mutable std::recursive_mutex rw_mtx_;
|
||||
stop_type stop_requested_{false};
|
||||
|
||||
private:
|
||||
[[nodiscard]] auto adjust_cache_size(std::uint64_t file_size,
|
||||
bool shrink) -> api_error;
|
||||
|
||||
[[nodiscard]] auto check_start() -> api_error;
|
||||
|
||||
void download_chunk(std::size_t chunk, bool skip_active, bool should_reset);
|
||||
|
||||
void download_range(std::size_t start_chunk, std::size_t end_chunk,
|
||||
void download_range(std::size_t begin_chunk, std::size_t end_chunk,
|
||||
bool should_reset);
|
||||
|
||||
void set_modified();
|
||||
|
||||
void update_background_reader(std::size_t read_chunk);
|
||||
void set_read_state(std::size_t chunk);
|
||||
|
||||
protected:
|
||||
auto is_download_complete() const -> bool override {
|
||||
return read_state_.all();
|
||||
}
|
||||
void set_read_state(boost::dynamic_bitset<> read_state);
|
||||
|
||||
void update_reader(std::size_t chunk);
|
||||
|
||||
public:
|
||||
auto close() -> bool override;
|
||||
|
||||
[[nodiscard]] auto get_allocated() const -> bool override;
|
||||
|
||||
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
|
||||
|
||||
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
|
||||
|
||||
[[nodiscard]] auto is_complete() const -> bool override;
|
||||
|
||||
auto is_write_supported() const -> bool override { return true; }
|
||||
[[nodiscard]] auto is_write_supported() const -> bool override {
|
||||
return true;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto native_operation(native_operation_callback callback)
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
native_operation(native_operation_callback callback) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto native_operation(std::uint64_t new_file_size,
|
||||
native_operation_callback callback)
|
||||
-> api_error override;
|
||||
[[nodiscard]] auto
|
||||
native_operation(std::uint64_t new_file_size,
|
||||
native_operation_callback callback) -> api_error override;
|
||||
|
||||
void remove(std::uint64_t handle) override;
|
||||
|
||||
|
@ -24,20 +24,18 @@
|
||||
|
||||
#include "file_manager/i_open_file.hpp"
|
||||
|
||||
#include "utils/types/file/i_file.hpp"
|
||||
|
||||
namespace repertory {
|
||||
class i_provider;
|
||||
|
||||
class open_file_base : public i_closeable_open_file {
|
||||
public:
|
||||
open_file_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
|
||||
filesystem_item fsi, i_provider &provider);
|
||||
filesystem_item fsi, i_provider &provider, bool disable_io);
|
||||
|
||||
open_file_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
|
||||
filesystem_item fsi,
|
||||
std::map<std::uint64_t, open_file_data> open_data,
|
||||
i_provider &provider);
|
||||
i_provider &provider, bool disable_io);
|
||||
|
||||
~open_file_base() override = default;
|
||||
|
||||
@ -98,7 +96,7 @@ public:
|
||||
[[nodiscard]] auto get_result() -> api_error;
|
||||
};
|
||||
|
||||
protected:
|
||||
private:
|
||||
std::uint64_t chunk_size_;
|
||||
std::uint8_t chunk_timeout_;
|
||||
filesystem_item fsi_;
|
||||
@ -107,21 +105,19 @@ protected:
|
||||
i_provider &provider_;
|
||||
|
||||
private:
|
||||
std::unordered_map<std::size_t, std::shared_ptr<download>> active_downloads_;
|
||||
api_error error_{api_error::success};
|
||||
mutable std::mutex error_mtx_;
|
||||
mutable std::recursive_mutex file_mtx_;
|
||||
stop_type io_stop_requested_{false};
|
||||
std::unique_ptr<std::thread> io_thread_;
|
||||
|
||||
protected:
|
||||
std::unordered_map<std::size_t, std::shared_ptr<download>> active_downloads_;
|
||||
mutable std::recursive_mutex file_mtx_;
|
||||
std::atomic<std::chrono::system_clock::time_point> last_access_{
|
||||
std::chrono::system_clock::now()};
|
||||
bool modified_{false};
|
||||
std::unique_ptr<utils::file::i_file> nf_;
|
||||
mutable std::mutex io_thread_mtx_;
|
||||
std::condition_variable io_thread_notify_;
|
||||
std::deque<std::shared_ptr<io_item>> io_thread_queue_;
|
||||
std::atomic<std::chrono::system_clock::time_point> last_access_{
|
||||
std::chrono::system_clock::now(),
|
||||
};
|
||||
bool modified_{false};
|
||||
bool removed_{false};
|
||||
|
||||
private:
|
||||
@ -130,11 +126,42 @@ private:
|
||||
protected:
|
||||
[[nodiscard]] auto do_io(std::function<api_error()> action) -> api_error;
|
||||
|
||||
virtual auto is_download_complete() const -> bool = 0;
|
||||
[[nodiscard]] auto get_active_downloads()
|
||||
-> std::unordered_map<std::size_t, std::shared_ptr<download>> & {
|
||||
return active_downloads_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_mutex() const -> std::recursive_mutex & {
|
||||
return file_mtx_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_last_chunk_size() const -> std::size_t;
|
||||
|
||||
[[nodiscard]] auto get_provider() -> i_provider & { return provider_; }
|
||||
|
||||
[[nodiscard]] auto get_provider() const -> const i_provider & {
|
||||
return provider_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto is_removed() const -> bool;
|
||||
|
||||
void notify_io();
|
||||
|
||||
void reset_timeout();
|
||||
|
||||
auto set_api_error(const api_error &e) -> api_error;
|
||||
auto set_api_error(const api_error &err) -> api_error;
|
||||
|
||||
void set_file_size(std::uint64_t size);
|
||||
|
||||
void set_last_chunk_size(std::size_t size);
|
||||
|
||||
void set_modified(bool modified);
|
||||
|
||||
void set_removed(bool removed);
|
||||
|
||||
void set_source_path(std::string source_path);
|
||||
|
||||
void wait_for_io(stop_type &stop_requested);
|
||||
|
||||
public:
|
||||
void add(std::uint64_t handle, open_file_data ofd) override;
|
||||
@ -143,6 +170,8 @@ public:
|
||||
|
||||
auto close() -> bool override;
|
||||
|
||||
[[nodiscard]] auto get_allocated() const -> bool override { return false; }
|
||||
|
||||
[[nodiscard]] auto get_api_error() const -> api_error;
|
||||
|
||||
[[nodiscard]] auto get_api_path() const -> std::string override;
|
||||
@ -157,27 +186,23 @@ public:
|
||||
|
||||
[[nodiscard]] auto get_handles() const -> std::vector<std::uint64_t> override;
|
||||
|
||||
[[nodiscard]] auto get_open_data()
|
||||
-> std::map<std::uint64_t, open_file_data> & override;
|
||||
[[nodiscard]] auto
|
||||
get_open_data() -> std::map<std::uint64_t, open_file_data> & override;
|
||||
|
||||
[[nodiscard]] auto get_open_data() const
|
||||
-> const std::map<std::uint64_t, open_file_data> & override;
|
||||
|
||||
[[nodiscard]] auto get_open_data(std::uint64_t handle)
|
||||
-> open_file_data & override;
|
||||
[[nodiscard]] auto
|
||||
get_open_data(std::uint64_t handle) -> open_file_data & override;
|
||||
|
||||
[[nodiscard]] auto get_open_data(std::uint64_t handle) const
|
||||
-> const open_file_data & override;
|
||||
[[nodiscard]] auto
|
||||
get_open_data(std::uint64_t handle) const -> const open_file_data & override;
|
||||
|
||||
[[nodiscard]] auto get_open_file_count() const -> std::size_t override;
|
||||
|
||||
[[nodiscard]] auto get_source_path() const -> std::string override {
|
||||
return fsi_.source_path;
|
||||
}
|
||||
[[nodiscard]] auto get_source_path() const -> std::string override;
|
||||
|
||||
[[nodiscard]] auto has_handle(std::uint64_t handle) const -> bool override {
|
||||
return open_data_.find(handle) != open_data_.end();
|
||||
}
|
||||
[[nodiscard]] auto has_handle(std::uint64_t handle) const -> bool override;
|
||||
|
||||
[[nodiscard]] auto is_directory() const -> bool override {
|
||||
return fsi_.directory;
|
||||
|
150 repertory/librepertory/include/file_manager/ring_buffer_base.hpp (Normal file)

@@ -0,0 +1,150 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

#ifndef REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_

#include "file_manager/open_file_base.hpp"

#include "types/repertory.hpp"
#include "utils/file.hpp"

namespace repertory {
class i_provider;
class i_upload_manager;

class ring_buffer_base : public open_file_base {
public:
ring_buffer_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider,
std::size_t ring_size, bool disable_io);

~ring_buffer_base() override = default;

public:
ring_buffer_base() = delete;
ring_buffer_base(const ring_buffer_base &) noexcept = delete;
ring_buffer_base(ring_buffer_base &&) noexcept = delete;
auto operator=(ring_buffer_base &&) noexcept -> ring_buffer_base & = delete;
auto
operator=(const ring_buffer_base &) noexcept -> ring_buffer_base & = delete;

public:
static constexpr const auto min_ring_size{5U};

private:
boost::dynamic_bitset<> read_state_;
std::size_t total_chunks_;

private:
std::condition_variable chunk_notify_;
mutable std::mutex chunk_mtx_;
std::mutex read_mtx_;
std::unique_ptr<std::thread> reader_thread_;
std::size_t ring_begin_{};
std::size_t ring_end_{};
std::size_t ring_pos_{};
stop_type stop_requested_{false};
private:
[[nodiscard]] auto check_start() -> api_error;

auto download_chunk(std::size_t chunk, bool skip_active) -> api_error;

void reader_thread();

void update_position(std::size_t count, bool is_forward);

protected:
[[nodiscard]] auto has_reader_thread() const -> bool {
return reader_thread_ != nullptr;
}

[[nodiscard]] auto get_ring_size() const -> std::size_t {
return read_state_.size();
}

[[nodiscard]] virtual auto on_check_start() -> bool = 0;

[[nodiscard]] virtual auto
on_chunk_downloaded(std::size_t chunk,
const data_buffer &buffer) -> api_error = 0;

[[nodiscard]] virtual auto
on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset, data_buffer &data,
std::size_t &bytes_read) -> api_error = 0;

[[nodiscard]] virtual auto
use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func) -> api_error = 0;

public:
auto close() -> bool override;

void forward(std::size_t count);

[[nodiscard]] auto get_current_chunk() const -> std::size_t {
return ring_pos_;
}

[[nodiscard]] auto get_first_chunk() const -> std::size_t {
return ring_begin_;
}

[[nodiscard]] auto get_last_chunk() const -> std::size_t { return ring_end_; }

[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;

[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;

[[nodiscard]] auto get_total_chunks() const -> std::size_t {
return total_chunks_;
}

[[nodiscard]] auto is_complete() const -> bool override { return false; }

[[nodiscard]] auto is_write_supported() const -> bool override {
return false;
}

[[nodiscard]] auto read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error override;

[[nodiscard]] auto resize(std::uint64_t /* size */) -> api_error override {
return api_error::not_supported;
}

void reverse(std::size_t count);

void set(std::size_t first_chunk, std::size_t current_chunk);

void set_api_path(const std::string &api_path) override;

[[nodiscard]] auto
write(std::uint64_t /* write_offset */, const data_buffer & /* data */,
std::size_t & /* bytes_written */) -> api_error override {
return api_error::not_supported;
}
};
} // namespace repertory

#endif // REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
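The pure-virtual hooks above (on_check_start, on_chunk_downloaded, on_read_chunk, use_buffer) are the only extension points ring_buffer_base adds over open_file_base. The fragment below is a minimal, hypothetical sketch of a derived class that keeps each downloaded chunk in memory; the class and member names are invented for illustration, data_buffer is assumed to be a contiguous byte container, and a real subclass would still have to supply the remaining open_file_base overrides that ring_buffer_open_file provides later in this diff.

// Illustrative sketch only; not part of this change set.
class memory_ring_buffer_file final : public ring_buffer_base {
public:
  memory_ring_buffer_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
                          filesystem_item fsi, i_provider &provider,
                          std::size_t ring_size)
      : ring_buffer_base(chunk_size, chunk_timeout, std::move(fsi), provider,
                         ring_size, /* disable_io= */ true) {}

private:
  std::map<std::size_t, data_buffer> chunks_; // assumes <map> is available

protected:
  [[nodiscard]] auto on_check_start() -> bool override {
    // the exact meaning of the return value is defined in ring_buffer_base.cpp,
    // which is not part of this hunk; this guess is for illustration only
    return has_reader_thread();
  }

  [[nodiscard]] auto
  on_chunk_downloaded(std::size_t chunk,
                      const data_buffer &buffer) -> api_error override {
    chunks_[chunk] = buffer; // retain the chunk for later reads
    return api_error::success;
  }

  [[nodiscard]] auto
  on_read_chunk(std::size_t chunk, std::size_t read_size,
                std::uint64_t read_offset, data_buffer &data,
                std::size_t &bytes_read) -> api_error override {
    const auto &src = chunks_.at(chunk);
    data.insert(data.end(),
                src.begin() + static_cast<std::ptrdiff_t>(read_offset),
                src.begin() + static_cast<std::ptrdiff_t>(read_offset) +
                    static_cast<std::ptrdiff_t>(read_size));
    bytes_read = read_size;
    return api_error::success;
  }

  [[nodiscard]] auto
  use_buffer(std::size_t chunk,
             std::function<api_error(data_buffer &)> func) -> api_error override {
    return func(chunks_.at(chunk)); // hand the stored chunk to the caller
  }
};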
@ -22,20 +22,17 @@
|
||||
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_OPEN_FILE_HPP_
|
||||
#define REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_OPEN_FILE_HPP_
|
||||
|
||||
#include "file_manager/open_file_base.hpp"
|
||||
#include "file_manager/ring_buffer_base.hpp"
|
||||
|
||||
#include "types/repertory.hpp"
|
||||
#include "utils/file.hpp"
|
||||
|
||||
namespace repertory {
|
||||
class i_provider;
|
||||
class i_upload_manager;
|
||||
|
||||
class ring_buffer_open_file final : public open_file_base {
|
||||
class ring_buffer_open_file final : public ring_buffer_base {
|
||||
public:
|
||||
ring_buffer_open_file(std::string buffer_directory, std::uint64_t chunk_size,
|
||||
std::uint8_t chunk_timeout, filesystem_item fsi,
|
||||
i_provider &provider);
|
||||
|
||||
ring_buffer_open_file(std::string buffer_directory, std::uint64_t chunk_size,
|
||||
std::uint8_t chunk_timeout, filesystem_item fsi,
|
||||
i_provider &provider, std::size_t ring_size);
|
||||
@ -46,85 +43,49 @@ public:
|
||||
ring_buffer_open_file() = delete;
|
||||
ring_buffer_open_file(const ring_buffer_open_file &) noexcept = delete;
|
||||
ring_buffer_open_file(ring_buffer_open_file &&) noexcept = delete;
|
||||
auto operator=(ring_buffer_open_file &&) noexcept
|
||||
-> ring_buffer_open_file & = delete;
|
||||
auto operator=(ring_buffer_open_file &&) noexcept -> ring_buffer_open_file & =
|
||||
delete;
|
||||
auto operator=(const ring_buffer_open_file &) noexcept
|
||||
-> ring_buffer_open_file & = delete;
|
||||
|
||||
private:
|
||||
boost::dynamic_bitset<> ring_state_;
|
||||
std::size_t total_chunks_;
|
||||
std::string source_path_;
|
||||
|
||||
private:
|
||||
std::unique_ptr<std::thread> chunk_forward_thread_;
|
||||
std::unique_ptr<std::thread> chunk_reverse_thread_;
|
||||
std::condition_variable chunk_notify_;
|
||||
mutable std::mutex chunk_mtx_;
|
||||
std::size_t current_chunk_{};
|
||||
std::size_t first_chunk_{};
|
||||
std::size_t last_chunk_;
|
||||
|
||||
private:
|
||||
auto download_chunk(std::size_t chunk) -> api_error;
|
||||
|
||||
void forward_reader_thread(std::size_t count);
|
||||
|
||||
void reverse_reader_thread(std::size_t count);
|
||||
std::unique_ptr<utils::file::i_file> nf_;
|
||||
|
||||
protected:
|
||||
auto is_download_complete() const -> bool override;
|
||||
[[nodiscard]] auto on_check_start() -> bool override;
|
||||
|
||||
public:
|
||||
void forward(std::size_t count);
|
||||
[[nodiscard]] auto
|
||||
on_chunk_downloaded(std::size_t chunk,
|
||||
const data_buffer &buffer) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_current_chunk() const -> std::size_t {
|
||||
return current_chunk_;
|
||||
}
|
||||
[[nodiscard]] auto
|
||||
on_read_chunk(std::size_t chunk, std::size_t read_size,
|
||||
std::uint64_t read_offset, data_buffer &data,
|
||||
std::size_t &bytes_read) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto get_first_chunk() const -> std::size_t {
|
||||
return first_chunk_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_last_chunk() const -> std::size_t {
|
||||
return last_chunk_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
|
||||
|
||||
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
|
||||
|
||||
[[nodiscard]] auto get_total_chunks() const -> std::size_t {
|
||||
return total_chunks_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto is_complete() const -> bool override { return true; }
|
||||
|
||||
auto is_write_supported() const -> bool override { return false; }
|
||||
|
||||
[[nodiscard]] auto native_operation(native_operation_callback callback)
|
||||
[[nodiscard]] auto use_buffer(std::size_t chunk,
|
||||
std::function<api_error(data_buffer &)> func)
|
||||
-> api_error override;
|
||||
|
||||
[[nodiscard]] auto native_operation(std::uint64_t, native_operation_callback)
|
||||
public:
|
||||
[[nodiscard]] static auto can_handle_file(std::uint64_t file_size,
|
||||
std::size_t chunk_size,
|
||||
std::size_t ring_size) -> bool;
|
||||
|
||||
[[nodiscard]] auto
|
||||
native_operation(native_operation_callback callback) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto native_operation(std::uint64_t /* new_file_size */,
|
||||
native_operation_callback /* callback */)
|
||||
-> api_error override {
|
||||
return api_error::not_supported;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto read(std::size_t read_size, std::uint64_t read_offset,
|
||||
data_buffer &data) -> api_error override;
|
||||
|
||||
[[nodiscard]] auto resize(std::uint64_t) -> api_error override {
|
||||
return api_error::not_supported;
|
||||
}
|
||||
|
||||
void reverse(std::size_t count);
|
||||
|
||||
void set(std::size_t first_chunk, std::size_t current_chunk);
|
||||
|
||||
void set_api_path(const std::string &api_path) override;
|
||||
|
||||
[[nodiscard]] auto write(std::uint64_t, const data_buffer &, std::size_t &)
|
||||
-> api_error override {
|
||||
return api_error::not_supported;
|
||||
[[nodiscard]] auto get_source_path() const -> std::string override {
|
||||
return source_path_;
|
||||
}
|
||||
};
|
||||
} // namespace repertory
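The new static can_handle_file gives callers a cheap check for whether a file is large enough for the ring-buffer path. The snippet below is a hedged sketch of such a decision; the sizes and the surrounding selection logic are illustrative and not taken from this change.

// Illustrative only: sizes are made up for the example.
std::uint64_t file_size{512ULL * 1024ULL * 1024ULL}; // 512 MiB
std::size_t chunk_size{8U * 1024U * 1024U};          // 8 MiB
std::size_t ring_size{64U};

if (ring_buffer_open_file::can_handle_file(file_size, chunk_size, ring_size)) {
  // stream the file through the ring buffer
} else {
  // fall back to a fully cached open file
}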
|
||||
|
@ -96,7 +96,9 @@ protected:
|
||||
return api_item_added_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto get_comm() const -> i_http_comm & { return comm_; }
|
||||
[[nodiscard]] auto get_comm() -> i_http_comm & { return comm_; }
|
||||
|
||||
[[nodiscard]] auto get_comm() const -> const i_http_comm & { return comm_; }
|
||||
|
||||
[[nodiscard]] auto get_config() -> app_config & { return config_; }
|
||||
|
||||
|
@ -54,9 +54,10 @@ private:
|
||||
|
||||
private:
|
||||
app_config &config_;
|
||||
std::unique_ptr<i_file_db> db_;
|
||||
encrypt_config encrypt_config_;
|
||||
|
||||
private:
|
||||
std::unique_ptr<i_file_db> db_{nullptr};
|
||||
i_file_manager *fm_{nullptr};
|
||||
std::unordered_map<std::string, std::shared_ptr<reader_info>> reader_lookup_;
|
||||
std::recursive_mutex reader_lookup_mtx_;
|
||||
@ -73,6 +74,10 @@ private:
|
||||
const std::string &source_path)>
|
||||
callback) const -> api_error;
|
||||
|
||||
[[nodiscard]] auto get_encrypt_config() const -> const encrypt_config & {
|
||||
return encrypt_config_;
|
||||
}
|
||||
|
||||
auto process_directory_entry(const utils::file::i_fs_item &dir_entry,
|
||||
const encrypt_config &cfg,
|
||||
std::string &api_path) const -> bool;
|
||||
|
@ -46,6 +46,9 @@ public:
|
||||
auto operator=(const s3_provider &) -> s3_provider & = delete;
|
||||
auto operator=(s3_provider &&) -> s3_provider & = delete;
|
||||
|
||||
private:
|
||||
s3_config s3_config_;
|
||||
|
||||
private:
|
||||
[[nodiscard]] auto add_if_not_found(api_file &file,
|
||||
const std::string &object_name) const
|
||||
@ -78,6 +81,10 @@ private:
|
||||
std::optional<std::string> token = std::nullopt) const
|
||||
-> bool;
|
||||
|
||||
[[nodiscard]] auto get_s3_config() const -> const s3_config & {
|
||||
return s3_config_;
|
||||
}
|
||||
|
||||
protected:
|
||||
[[nodiscard]] auto create_directory_impl(const std::string &api_path,
|
||||
api_meta_map &meta)
|
||||
|
@ -45,6 +45,9 @@ public:
|
||||
auto operator=(const sia_provider &) -> sia_provider & = delete;
|
||||
auto operator=(sia_provider &&) -> sia_provider & = delete;
|
||||
|
||||
private:
|
||||
sia_config sia_config_;
|
||||
|
||||
private:
|
||||
[[nodiscard]] auto get_object_info(const std::string &api_path,
|
||||
json &object_info) const -> api_error;
|
||||
@ -52,6 +55,10 @@ private:
|
||||
[[nodiscard]] auto get_object_list(const std::string &api_path,
|
||||
nlohmann::json &object_list) const -> bool;
|
||||
|
||||
[[nodiscard]] auto get_sia_config() const -> const auto & {
|
||||
return sia_config_;
|
||||
}
|
||||
|
||||
protected:
|
||||
[[nodiscard]] auto create_directory_impl(const std::string &api_path,
|
||||
api_meta_map &meta)
|
||||
|
@ -22,6 +22,8 @@
|
||||
#ifndef REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
|
||||
#define REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
|
||||
|
||||
#include "types/repertory.hpp"
|
||||
|
||||
inline constexpr const auto PACKET_SERVICE_FUSE{1U};
|
||||
inline constexpr const auto PACKET_SERVICE_WINFSP{2U};
|
||||
|
||||
@ -31,7 +33,67 @@ inline constexpr const auto PACKET_SERVICE_FLAGS{PACKET_SERVICE_WINFSP};
|
||||
inline constexpr const auto PACKET_SERVICE_FLAGS{PACKET_SERVICE_FUSE};
|
||||
#endif // defined(_WIN32)

constexpr const auto default_remote_client_pool_size{20U};
constexpr const auto default_remote_max_connections{20U};
constexpr const auto default_remote_receive_timeout_ms{120U * 1000U};
constexpr const auto default_remote_send_timeout_ms{30U * 1000U};

namespace repertory::remote {
struct remote_config final {
std::uint16_t api_port{};
std::string encryption_token;
std::string host_name_or_ip;
std::uint8_t max_connections{default_remote_max_connections};
std::uint32_t recv_timeout_ms{default_remote_receive_timeout_ms};
std::uint32_t send_timeout_ms{default_remote_send_timeout_ms};

auto operator==(const remote_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return api_port == cfg.api_port &&
encryption_token == cfg.encryption_token &&
host_name_or_ip == cfg.host_name_or_ip &&
max_connections == cfg.max_connections &&
recv_timeout_ms == cfg.recv_timeout_ms &&
send_timeout_ms == cfg.send_timeout_ms;
}

return true;
}

auto operator!=(const remote_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return not(cfg == *this);
}

return false;
}
};

struct remote_mount final {
|
||||
std::uint16_t api_port{};
|
||||
std::uint8_t client_pool_size{default_remote_client_pool_size};
|
||||
bool enable{false};
|
||||
std::string encryption_token;
|
||||
|
||||
auto operator==(const remote_mount &cfg) const noexcept -> bool {
|
||||
if (&cfg == this) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return api_port == cfg.api_port &&
|
||||
client_pool_size == cfg.client_pool_size && enable == cfg.enable &&
|
||||
encryption_token == cfg.encryption_token;
|
||||
}
|
||||
|
||||
auto operator!=(const remote_mount &cfg) const noexcept -> bool {
|
||||
if (&cfg == this) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return not(cfg == *this);
|
||||
}
|
||||
};
|
||||
|
||||
using block_count = std::uint64_t;
|
||||
using block_size = std::uint32_t;
|
||||
using file_handle = std::uint64_t;
|
||||
@ -160,4 +222,46 @@ create_os_open_flags(const open_flags &flags) -> std::uint32_t;
|
||||
#endif // !defined(_WIN32)
|
||||
} // namespace repertory::remote
|
||||
|
||||
NLOHMANN_JSON_NAMESPACE_BEGIN
|
||||
template <> struct adl_serializer<repertory::remote::remote_config> {
|
||||
static void to_json(json &data,
|
||||
const repertory::remote::remote_config &value) {
|
||||
data[repertory::JSON_API_PORT] = value.api_port;
|
||||
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
|
||||
data[repertory::JSON_HOST_NAME_OR_IP] = value.host_name_or_ip;
|
||||
data[repertory::JSON_MAX_CONNECTIONS] = value.max_connections;
|
||||
data[repertory::JSON_RECV_TIMEOUT_MS] = value.recv_timeout_ms;
|
||||
data[repertory::JSON_SEND_TIMEOUT_MS] = value.send_timeout_ms;
|
||||
}
|
||||
|
||||
static void from_json(const json &data,
|
||||
repertory::remote::remote_config &value) {
|
||||
data.at(repertory::JSON_API_PORT).get_to(value.api_port);
|
||||
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
|
||||
data.at(repertory::JSON_HOST_NAME_OR_IP).get_to(value.host_name_or_ip);
|
||||
data.at(repertory::JSON_MAX_CONNECTIONS).get_to(value.max_connections);
|
||||
data.at(repertory::JSON_RECV_TIMEOUT_MS).get_to(value.recv_timeout_ms);
|
||||
data.at(repertory::JSON_SEND_TIMEOUT_MS).get_to(value.send_timeout_ms);
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::remote::remote_mount> {
|
||||
static void to_json(json &data,
|
||||
const repertory::remote::remote_mount &value) {
|
||||
data[repertory::JSON_API_PORT] = value.api_port;
|
||||
data[repertory::JSON_CLIENT_POOL_SIZE] = value.client_pool_size;
|
||||
data[repertory::JSON_ENABLE_REMOTE_MOUNT] = value.enable;
|
||||
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
|
||||
}
|
||||
|
||||
static void from_json(const json &data,
|
||||
repertory::remote::remote_mount &value) {
|
||||
data.at(repertory::JSON_API_PORT).get_to(value.api_port);
|
||||
data.at(repertory::JSON_CLIENT_POOL_SIZE).get_to(value.client_pool_size);
|
||||
data.at(repertory::JSON_ENABLE_REMOTE_MOUNT).get_to(value.enable);
|
||||
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
|
||||
}
|
||||
};
|
||||
NLOHMANN_JSON_NAMESPACE_END
|
||||
|
||||
#endif // REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
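With the adl_serializer specializations above, remote_config and remote_mount round-trip through nlohmann::json under the JSON_* keys. A minimal sketch follows; the field values are examples and it assumes nlohmann/json.hpp is already included.

repertory::remote::remote_config cfg{};
cfg.api_port = 20000U;
cfg.host_name_or_ip = "fs01.example.local";
cfg.encryption_token = "secret";

nlohmann::json data = cfg;                                    // to_json
auto restored = data.get<repertory::remote::remote_config>(); // from_json
bool same = (restored == cfg); // operator== compares every field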
|
||||
|
@ -23,6 +23,122 @@
|
||||
#define REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
|
||||
|
||||
namespace repertory {
|
||||
constexpr const auto default_api_auth_size{48U};
|
||||
constexpr const auto default_download_timeout_ces{30U};
|
||||
constexpr const auto default_eviction_delay_mins{1U};
|
||||
constexpr const auto default_high_freq_interval_secs{30U};
|
||||
constexpr const auto default_low_freq_interval_secs{0U * 60U};
|
||||
constexpr const auto default_max_cache_size_bytes{
|
||||
std::uint64_t(20UL * 1024UL * 1024UL * 1024UL),
|
||||
};
|
||||
constexpr const auto default_max_upload_count{5U};
|
||||
constexpr const auto default_med_freq_interval_secs{2U * 60U};
|
||||
constexpr const auto default_online_check_retry_secs{60U};
|
||||
constexpr const auto default_orphaned_file_retention_days{15U};
|
||||
constexpr const auto default_retry_read_count{6U};
|
||||
constexpr const auto default_ring_buffer_file_size{512U};
|
||||
constexpr const auto default_task_wait_ms{100U};
|
||||
constexpr const auto default_timeout_ms{60000U};
|
||||
constexpr const auto max_orphaned_file_retention_days{std::uint16_t(31U)};
|
||||
constexpr const auto max_ring_buffer_file_size{std::uint16_t(1024U)};
|
||||
constexpr const auto min_cache_size_bytes{
|
||||
std::uint64_t(100UL * 1024UL * 1024UL)};
|
||||
constexpr const auto min_download_timeout_secs{std::uint8_t(5U)};
|
||||
constexpr const auto min_online_check_retry_secs{std::uint16_t(15U)};
|
||||
constexpr const auto min_orphaned_file_retention_days{std::uint16_t(1U)};
|
||||
constexpr const auto min_retry_read_count{std::uint16_t(2U)};
|
||||
constexpr const auto min_ring_buffer_file_size{std::uint16_t(64U)};
|
||||
constexpr const auto min_task_wait_ms{std::uint16_t(50U)};
|
||||
|
||||
template <typename data_t> class atomic final {
|
||||
public:
|
||||
atomic() : mtx_(std::make_shared<std::mutex>()) {}
|
||||
|
||||
atomic(const atomic &at_data)
|
||||
: data_(at_data.load()), mtx_(std::make_shared<std::mutex>()) {}
|
||||
|
||||
atomic(data_t data)
|
||||
: data_(std::move(data)), mtx_(std::make_shared<std::mutex>()) {}
|
||||
|
||||
atomic(atomic &&) = default;
|
||||
|
||||
~atomic() = default;
|
||||
|
||||
private:
|
||||
data_t data_;
|
||||
std::shared_ptr<std::mutex> mtx_;
|
||||
|
||||
public:
|
||||
[[nodiscard]] auto load() const -> data_t {
|
||||
mutex_lock lock(*mtx_);
|
||||
return data_;
|
||||
}
|
||||
|
||||
auto store(data_t data) -> data_t {
|
||||
mutex_lock lock(*mtx_);
|
||||
data_ = std::move(data);
|
||||
return data_;
|
||||
}
|
||||
|
||||
auto operator=(const atomic &at_data) -> atomic & {
|
||||
if (&at_data == this) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
store(at_data.load());
|
||||
return *this;
|
||||
}
|
||||
|
||||
auto operator=(atomic &&) -> atomic & = default;
|
||||
|
||||
auto operator=(data_t data) -> atomic & {
|
||||
if (&data == &data_) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
store(std::move(data));
|
||||
return *this;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto operator==(const atomic &at_data) const -> bool {
|
||||
if (&at_data == this) {
|
||||
return true;
|
||||
}
|
||||
|
||||
mutex_lock lock(*mtx_);
|
||||
return at_data.load() == data_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto operator==(const data_t &data) const -> bool {
|
||||
if (&data == &data_) {
|
||||
return true;
|
||||
}
|
||||
|
||||
mutex_lock lock(*mtx_);
|
||||
return data == data_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto operator!=(const atomic &at_data) const -> bool {
|
||||
if (&at_data == this) {
|
||||
return false;
|
||||
}
|
||||
|
||||
mutex_lock lock(*mtx_);
|
||||
return at_data.load() != data_;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto operator!=(const data_t &data) const -> bool {
|
||||
if (&data == &data_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
mutex_lock lock(*mtx_);
|
||||
return data != data_;
|
||||
}
|
||||
|
||||
[[nodiscard]] operator data_t() const { return load(); }
|
||||
};
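The atomic<data_t> wrapper above serializes access to a copyable value behind a shared mutex, which plain std::atomic cannot provide for non-trivially-copyable types such as the config structs in this header. A short usage sketch; the stored type is only an example.

repertory::atomic<repertory::encrypt_config> cfg_store{repertory::encrypt_config{}};

auto snapshot = cfg_store.load(); // copy out under the internal mutex
snapshot.path = "/mnt/encrypted";
cfg_store.store(snapshot);        // publish the modified copy

if (cfg_store == snapshot) {
  // comparisons also lock before reading
}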
|
||||
|
||||
inline constexpr const auto max_time{
|
||||
std::numeric_limits<std::uint64_t>::max(),
|
||||
};
|
||||
@ -59,6 +175,7 @@ enum class api_error {
|
||||
bad_address,
|
||||
buffer_overflow,
|
||||
buffer_too_small,
|
||||
cache_not_initialized,
|
||||
comm_error,
|
||||
decryption_error,
|
||||
directory_end_of_files,
|
||||
@ -77,6 +194,7 @@ enum class api_error {
|
||||
invalid_handle,
|
||||
invalid_operation,
|
||||
invalid_ring_buffer_multiple,
|
||||
invalid_ring_buffer_position,
|
||||
invalid_ring_buffer_size,
|
||||
invalid_version,
|
||||
item_exists,
|
||||
@ -98,34 +216,34 @@ enum class api_error {
|
||||
|
||||
[[nodiscard]] auto api_error_from_string(std::string_view str) -> api_error;
|
||||
|
||||
[[nodiscard]] auto api_error_to_string(const api_error &error)
|
||||
-> const std::string &;
|
||||
[[nodiscard]] auto
|
||||
api_error_to_string(const api_error &error) -> const std::string &;
|
||||
|
||||
enum class database_type {
|
||||
rocksdb,
|
||||
sqlite,
|
||||
};
|
||||
[[nodiscard]] auto database_type_from_string(std::string type,
|
||||
const database_type &default_type)
|
||||
-> database_type;
|
||||
[[nodiscard]] auto database_type_from_string(
|
||||
std::string type,
|
||||
database_type default_type = database_type::rocksdb) -> database_type;
|
||||
|
||||
[[nodiscard]] auto database_type_to_string(const database_type &type)
|
||||
-> std::string;
|
||||
[[nodiscard]] auto
|
||||
database_type_to_string(const database_type &type) -> std::string;
|
||||
|
||||
enum class download_type {
|
||||
default_,
|
||||
direct,
|
||||
fallback,
|
||||
ring_buffer,
|
||||
};
|
||||
[[nodiscard]] auto download_type_from_string(std::string type,
|
||||
const download_type &default_type)
|
||||
-> download_type;
|
||||
[[nodiscard]] auto download_type_from_string(
|
||||
std::string type,
|
||||
download_type default_type = download_type::default_) -> download_type;
|
||||
|
||||
[[nodiscard]] auto download_type_to_string(const download_type &type)
|
||||
-> std::string;
|
||||
[[nodiscard]] auto
|
||||
download_type_to_string(const download_type &type) -> std::string;
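The *_from_string helpers now carry default arguments, so callers can omit the fallback. Presumably an unrecognized string yields the supplied default, and the string spellings mirror the corresponding *_to_string output; the parsing rules themselves live in the .cpp files, which are not shown here. Sketch:

auto db_type = repertory::database_type_from_string("sqlite");
auto dl_type = repertory::download_type_from_string(
    "ring_buffer", repertory::download_type::fallback);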
|
||||
|
||||
enum class exit_code : std::int32_t {
|
||||
success,
|
||||
success = 0,
|
||||
communication_error = -1,
|
||||
file_creation_failed = -2,
|
||||
incompatible_version = -3,
|
||||
@ -193,28 +311,27 @@ struct directory_item final {
|
||||
std::uint64_t size{};
|
||||
api_meta_map meta;
|
||||
bool resolved{false};
|
||||
|
||||
[[nodiscard]] static auto from_json(const json &item) -> directory_item {
|
||||
directory_item ret{};
|
||||
ret.api_path = item["path"].get<std::string>();
|
||||
ret.api_parent = item["parent"].get<std::string>();
|
||||
ret.directory = item["directory"].get<bool>();
|
||||
ret.size = item["size"].get<std::uint64_t>();
|
||||
ret.meta = item["meta"].get<api_meta_map>();
|
||||
return ret;
|
||||
}
|
||||
|
||||
[[nodiscard]] auto to_json() const -> json {
|
||||
return {
|
||||
{"path", api_path}, {"parent", api_parent}, {"size", size},
|
||||
{"directory", directory}, {"meta", meta},
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
struct encrypt_config final {
|
||||
std::string encryption_token;
|
||||
std::string path;
|
||||
|
||||
auto operator==(const encrypt_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
return encryption_token == cfg.encryption_token && path == cfg.path;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto operator!=(const encrypt_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
return not(cfg == *this);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
struct filesystem_item final {
|
||||
@ -233,7 +350,7 @@ struct host_config final {
|
||||
std::string host_name_or_ip{"localhost"};
|
||||
std::string path;
|
||||
std::string protocol{"http"};
|
||||
std::uint32_t timeout_ms{60000U};
|
||||
std::uint32_t timeout_ms{default_timeout_ms};
|
||||
|
||||
auto operator==(const host_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
@ -243,6 +360,7 @@ struct host_config final {
|
||||
host_name_or_ip == cfg.host_name_or_ip && path == cfg.path &&
|
||||
protocol == cfg.protocol && timeout_ms == cfg.timeout_ms;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -250,56 +368,61 @@ struct host_config final {
|
||||
if (&cfg != this) {
|
||||
return not(cfg == *this);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(__GNUG__)
|
||||
__attribute__((unused))
|
||||
#endif
|
||||
static void
|
||||
to_json(json &data, const host_config &cfg) {
|
||||
data = json{
|
||||
{"AgentString", cfg.agent_string},
|
||||
{"ApiPassword", cfg.api_password},
|
||||
{"ApiPort", cfg.api_port},
|
||||
{"ApiUser", cfg.api_user},
|
||||
{"HostNameOrIp", cfg.host_name_or_ip},
|
||||
{"Path", cfg.path},
|
||||
{"Protocol", cfg.protocol},
|
||||
{"TimeoutMs", cfg.timeout_ms},
|
||||
};
|
||||
}
|
||||
|
||||
#if defined(__GNUG__)
|
||||
__attribute__((unused))
|
||||
#endif
|
||||
static void
|
||||
from_json(const json &data, host_config &cfg) {
|
||||
data.at("AgentString").get_to(cfg.agent_string);
|
||||
data.at("ApiPassword").get_to(cfg.api_password);
|
||||
data.at("ApiPort").get_to(cfg.api_port);
|
||||
data.at("AuthUser").get_to(cfg.api_user);
|
||||
data.at("HostNameOrIp").get_to(cfg.host_name_or_ip);
|
||||
data.at("Path").get_to(cfg.path);
|
||||
data.at("Protocol").get_to(cfg.protocol);
|
||||
data.at("TimeoutMs").get_to(cfg.timeout_ms);
|
||||
}
|
||||
|
||||
struct s3_config final {
|
||||
std::string access_key;
|
||||
std::string bucket;
|
||||
std::string encryption_token;
|
||||
std::string region{"any"};
|
||||
std::string secret_key;
|
||||
std::uint32_t timeout_ms{60000U};
|
||||
std::uint32_t timeout_ms{default_timeout_ms};
|
||||
std::string url;
|
||||
bool use_path_style{false};
|
||||
bool use_region_in_url{false};
|
||||
|
||||
auto operator==(const s3_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
return access_key == cfg.access_key && bucket == cfg.bucket &&
|
||||
encryption_token == cfg.encryption_token && region == cfg.region &&
|
||||
secret_key == cfg.secret_key && timeout_ms == cfg.timeout_ms &&
|
||||
url == cfg.url && use_path_style == cfg.use_path_style &&
|
||||
use_region_in_url == cfg.use_region_in_url;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto operator!=(const s3_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
return not(cfg == *this);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
struct sia_config final {
|
||||
std::string bucket;
|
||||
|
||||
auto operator==(const sia_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
return bucket == cfg.bucket;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto operator!=(const sia_config &cfg) const noexcept -> bool {
|
||||
if (&cfg != this) {
|
||||
return not(cfg == *this);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
using api_file_list = std::vector<api_file>;
|
||||
@ -307,6 +430,226 @@ using api_file_provider_callback = std::function<void(api_file &)>;
|
||||
using api_item_added_callback = std::function<api_error(bool, api_file &)>;
|
||||
using directory_item_list = std::vector<directory_item>;
|
||||
using meta_provider_callback = std::function<void(directory_item &)>;
|
||||
|
||||
inline constexpr const auto JSON_ACCESS_KEY{"AccessKey"};
|
||||
inline constexpr const auto JSON_AGENT_STRING{"AgentString"};
|
||||
inline constexpr const auto JSON_API_AUTH{"ApiAuth"};
|
||||
inline constexpr const auto JSON_API_PARENT{"ApiParent"};
|
||||
inline constexpr const auto JSON_API_PASSWORD{"ApiPassword"};
|
||||
inline constexpr const auto JSON_API_PATH{"ApiPath"};
|
||||
inline constexpr const auto JSON_API_PORT{"ApiPort"};
|
||||
inline constexpr const auto JSON_API_USER{"ApiUser"};
|
||||
inline constexpr const auto JSON_BUCKET{"Bucket"};
|
||||
inline constexpr const auto JSON_CLIENT_POOL_SIZE{"ClientPoolSize"};
|
||||
inline constexpr const auto JSON_DATABASE_TYPE{"DatabaseType"};
|
||||
inline constexpr const auto JSON_DIRECTORY{"Directory"};
|
||||
inline constexpr const auto JSON_DOWNLOAD_TIMEOUT_SECS{
|
||||
"DownloadTimeoutSeconds"};
|
||||
inline constexpr const auto JSON_ENABLE_DRIVE_EVENTS{"EnableDriveEvents"};
|
||||
inline constexpr const auto JSON_ENABLE_DOWNLOAD_TIMEOUT{
|
||||
"EnableDownloadTimeout"};
|
||||
inline constexpr const auto JSON_ENABLE_MOUNT_MANAGER{"EnableMountManager"};
|
||||
inline constexpr const auto JSON_ENABLE_REMOTE_MOUNT{"Enable"};
|
||||
inline constexpr const auto JSON_ENCRYPTION_TOKEN{"EncryptionToken"};
|
||||
inline constexpr const auto JSON_ENCRYPT_CONFIG{"EncryptConfig"};
|
||||
inline constexpr const auto JSON_EVENT_LEVEL{"EventLevel"};
|
||||
inline constexpr const auto JSON_EVICTION_DELAY_MINS{"EvictionDelayMinutes"};
|
||||
inline constexpr const auto JSON_EVICTION_USE_ACCESS_TIME{
|
||||
"EvictionUseAccessedTime"};
|
||||
inline constexpr const auto JSON_HIGH_FREQ_INTERVAL_SECS{
|
||||
"HighFreqIntervalSeconds"};
|
||||
inline constexpr const auto JSON_HOST_CONFIG{"HostConfig"};
|
||||
inline constexpr const auto JSON_HOST_NAME_OR_IP{"HostNameOrIp"};
|
||||
inline constexpr const auto JSON_LOW_FREQ_INTERVAL_SECS{
|
||||
"LowFreqIntervalSeconds"};
|
||||
inline constexpr const auto JSON_MAX_CACHE_SIZE_BYTES{"MaxCacheSizeBytes"};
|
||||
inline constexpr const auto JSON_MAX_CONNECTIONS{"MaxConnections"};
|
||||
inline constexpr const auto JSON_MAX_UPLOAD_COUNT{"MaxUploadCount"};
|
||||
inline constexpr const auto JSON_MED_FREQ_INTERVAL_SECS{
|
||||
"MedFreqIntervalSeconds"};
|
||||
inline constexpr const auto JSON_META{"Meta"};
|
||||
inline constexpr const auto JSON_ONLINE_CHECK_RETRY_SECS{
|
||||
"OnlineCheckRetrySeconds"};
|
||||
inline constexpr const auto JSON_ORPHANED_FILE_RETENTION_DAYS{
|
||||
"OrphanedFileRetentionDays"};
|
||||
inline constexpr const auto JSON_PATH{"Path"};
|
||||
inline constexpr const auto JSON_PREFERRED_DOWNLOAD_TYPE{
|
||||
"PreferredDownloadType"};
|
||||
inline constexpr const auto JSON_PROTOCOL{"Protocol"};
|
||||
inline constexpr const auto JSON_RECV_TIMEOUT_MS{"ReceiveTimeoutMs"};
|
||||
inline constexpr const auto JSON_REGION{"Region"};
|
||||
inline constexpr const auto JSON_REMOTE_CONFIG{"RemoteConfig"};
|
||||
inline constexpr const auto JSON_REMOTE_MOUNT{"RemoteMount"};
|
||||
inline constexpr const auto JSON_RETRY_READ_COUNT{"RetryReadCount"};
|
||||
inline constexpr const auto JSON_RING_BUFFER_FILE_SIZE{"RingBufferFileSize"};
|
||||
inline constexpr const auto JSON_S3_CONFIG{"S3Config"};
|
||||
inline constexpr const auto JSON_SECRET_KEY{"SecretKey"};
|
||||
inline constexpr const auto JSON_SEND_TIMEOUT_MS{"SendTimeoutMs"};
|
||||
inline constexpr const auto JSON_SIA_CONFIG{"SiaConfig"};
|
||||
inline constexpr const auto JSON_SIZE{"Size"};
|
||||
inline constexpr const auto JSON_TASK_WAIT_MS{"TaskWaitMs"};
|
||||
inline constexpr const auto JSON_TIMEOUT_MS{"TimeoutMs"};
|
||||
inline constexpr const auto JSON_URL{"URL"};
|
||||
inline constexpr const auto JSON_USE_PATH_STYLE{"UsePathStyle"};
|
||||
inline constexpr const auto JSON_USE_REGION_IN_URL{"UseRegionInURL"};
|
||||
inline constexpr const auto JSON_VERSION{"Version"};
|
||||
} // namespace repertory
|
||||
|
||||
NLOHMANN_JSON_NAMESPACE_BEGIN
|
||||
template <> struct adl_serializer<repertory::directory_item> {
|
||||
static void to_json(json &data, const repertory::directory_item &value) {
|
||||
data[repertory::JSON_API_PARENT] = value.api_parent;
|
||||
data[repertory::JSON_API_PATH] = value.api_path;
|
||||
data[repertory::JSON_DIRECTORY] = value.directory;
|
||||
data[repertory::JSON_META] = value.meta;
|
||||
data[repertory::JSON_SIZE] = value.size;
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::directory_item &value) {
|
||||
data.at(repertory::JSON_API_PARENT).get_to<std::string>(value.api_parent);
|
||||
data.at(repertory::JSON_API_PATH).get_to<std::string>(value.api_path);
|
||||
data.at(repertory::JSON_DIRECTORY).get_to<bool>(value.directory);
|
||||
data.at(repertory::JSON_META).get_to<repertory::api_meta_map>(value.meta);
|
||||
data.at(repertory::JSON_SIZE).get_to<std::uint64_t>(value.size);
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::encrypt_config> {
|
||||
static void to_json(json &data, const repertory::encrypt_config &value) {
|
||||
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
|
||||
data[repertory::JSON_PATH] = value.path;
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::encrypt_config &value) {
|
||||
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
|
||||
data.at(repertory::JSON_PATH).get_to(value.path);
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::host_config> {
|
||||
static void to_json(json &data, const repertory::host_config &value) {
|
||||
data[repertory::JSON_AGENT_STRING] = value.agent_string;
|
||||
data[repertory::JSON_API_PASSWORD] = value.api_password;
|
||||
data[repertory::JSON_API_PORT] = value.api_port;
|
||||
data[repertory::JSON_API_USER] = value.api_user;
|
||||
data[repertory::JSON_HOST_NAME_OR_IP] = value.host_name_or_ip;
|
||||
data[repertory::JSON_PATH] = value.path;
|
||||
data[repertory::JSON_PROTOCOL] = value.protocol;
|
||||
data[repertory::JSON_TIMEOUT_MS] = value.timeout_ms;
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::host_config &value) {
|
||||
data.at(repertory::JSON_AGENT_STRING).get_to(value.agent_string);
|
||||
data.at(repertory::JSON_API_PASSWORD).get_to(value.api_password);
|
||||
data.at(repertory::JSON_API_PORT).get_to(value.api_port);
|
||||
data.at(repertory::JSON_API_USER).get_to(value.api_user);
|
||||
data.at(repertory::JSON_HOST_NAME_OR_IP).get_to(value.host_name_or_ip);
|
||||
data.at(repertory::JSON_PATH).get_to(value.path);
|
||||
data.at(repertory::JSON_PROTOCOL).get_to(value.protocol);
|
||||
data.at(repertory::JSON_TIMEOUT_MS).get_to(value.timeout_ms);
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::s3_config> {
|
||||
static void to_json(json &data, const repertory::s3_config &value) {
|
||||
data[repertory::JSON_ACCESS_KEY] = value.access_key;
|
||||
data[repertory::JSON_BUCKET] = value.bucket;
|
||||
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
|
||||
data[repertory::JSON_REGION] = value.region;
|
||||
data[repertory::JSON_SECRET_KEY] = value.secret_key;
|
||||
data[repertory::JSON_TIMEOUT_MS] = value.timeout_ms;
|
||||
data[repertory::JSON_URL] = value.url;
|
||||
data[repertory::JSON_USE_PATH_STYLE] = value.use_path_style;
|
||||
data[repertory::JSON_USE_REGION_IN_URL] = value.use_region_in_url;
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::s3_config &value) {
|
||||
data.at(repertory::JSON_ACCESS_KEY).get_to(value.access_key);
|
||||
data.at(repertory::JSON_BUCKET).get_to(value.bucket);
|
||||
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
|
||||
data.at(repertory::JSON_REGION).get_to(value.region);
|
||||
data.at(repertory::JSON_SECRET_KEY).get_to(value.secret_key);
|
||||
data.at(repertory::JSON_TIMEOUT_MS).get_to(value.timeout_ms);
|
||||
data.at(repertory::JSON_URL).get_to(value.url);
|
||||
data.at(repertory::JSON_USE_PATH_STYLE).get_to(value.use_path_style);
|
||||
data.at(repertory::JSON_USE_REGION_IN_URL).get_to(value.use_region_in_url);
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::sia_config> {
|
||||
static void to_json(json &data, const repertory::sia_config &value) {
|
||||
data[repertory::JSON_BUCKET] = value.bucket;
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::sia_config &value) {
|
||||
data.at(repertory::JSON_BUCKET).get_to(value.bucket);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename data_t> struct adl_serializer<repertory::atomic<data_t>> {
|
||||
static void to_json(json &data, const repertory::atomic<data_t> &value) {
|
||||
data = value.load();
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::atomic<data_t> &value) {
|
||||
value.store(data.get<data_t>());
|
||||
}
|
||||
};
|
||||
|
||||
template <typename primitive_t>
|
||||
struct adl_serializer<std::atomic<primitive_t>> {
|
||||
static void to_json(json &data, const std::atomic<primitive_t> &value) {
|
||||
data = value.load();
|
||||
}
|
||||
|
||||
static void from_json(const json &data, std::atomic<primitive_t> &value) {
|
||||
value.store(data.get<primitive_t>());
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<std::atomic<repertory::database_type>> {
|
||||
static void to_json(json &data,
|
||||
const std::atomic<repertory::database_type> &value) {
|
||||
data = repertory::database_type_to_string(value.load());
|
||||
}
|
||||
|
||||
static void from_json(const json &data,
|
||||
std::atomic<repertory::database_type> &value) {
|
||||
value.store(repertory::database_type_from_string(data.get<std::string>()));
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<std::atomic<repertory::download_type>> {
|
||||
static void to_json(json &data,
|
||||
const std::atomic<repertory::download_type> &value) {
|
||||
data = repertory::download_type_to_string(value.load());
|
||||
}
|
||||
|
||||
static void from_json(const json &data,
|
||||
std::atomic<repertory::download_type> &value) {
|
||||
value.store(repertory::download_type_from_string(data.get<std::string>()));
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::database_type> {
|
||||
static void to_json(json &data, const repertory::database_type &value) {
|
||||
data = repertory::database_type_to_string(value);
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::database_type &value) {
|
||||
value = repertory::database_type_from_string(data.get<std::string>());
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct adl_serializer<repertory::download_type> {
|
||||
static void to_json(json &data, const repertory::download_type &value) {
|
||||
data = repertory::download_type_to_string(value);
|
||||
}
|
||||
|
||||
static void from_json(const json &data, repertory::download_type &value) {
|
||||
value = repertory::download_type_from_string(data.get<std::string>());
|
||||
}
|
||||
};
|
||||
NLOHMANN_JSON_NAMESPACE_END
|
||||
|
||||
#endif // REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
|
||||
|
@ -24,15 +24,25 @@
|
||||
|
||||
#include "types/repertory.hpp"
|
||||
|
||||
namespace repertory::utils {
|
||||
namespace repertory {
|
||||
class app_config;
|
||||
|
||||
namespace utils {
|
||||
void calculate_allocation_size(bool directory, std::uint64_t file_size,
|
||||
UINT64 allocation_size,
|
||||
std::string &allocation_meta_size);
|
||||
|
||||
[[nodiscard]] auto
|
||||
create_volume_label(const provider_type &prov) -> std::string;
|
||||
create_rocksdb(const app_config &cfg, const std::string &name,
|
||||
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
|
||||
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
|
||||
-> std::unique_ptr<rocksdb::TransactionDB>;
|
||||
|
||||
[[nodiscard]] auto create_volume_label(const provider_type &prov)
|
||||
-> std::string;
|
||||
|
||||
[[nodiscard]] auto get_attributes_from_meta(const api_meta_map &meta) -> DWORD;
|
||||
} // namespace repertory::utils
|
||||
} // namespace utils
|
||||
} // namespace repertory
|
||||
|
||||
#endif // REPERTORY_INCLUDE_UTILS_UTILS_HPP_
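create_rocksdb is now declared here so the individual RocksDB-backed databases share one open/clear routine (compare rdb_file_db::create_or_open further down). The sketch below mirrors that call site; it assumes an app_config named cfg is in scope and uses an illustrative column-family name.

std::vector<rocksdb::ColumnFamilyDescriptor> families;
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                      rocksdb::ColumnFamilyOptions());
families.emplace_back("example", rocksdb::ColumnFamilyOptions());

std::vector<rocksdb::ColumnFamilyHandle *> handles;
auto db = repertory::utils::create_rocksdb(cfg, "example", families, handles,
                                           /* clear= */ false);
// handles.at(0) is the default family, handles.at(1) the "example" family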
|
||||
|
File diff suppressed because it is too large
@ -38,18 +38,8 @@ E_SIMPLE2(packet_client_timeout, error, true,
|
||||
);
|
||||
// clang-format on
|
||||
|
||||
packet_client::packet_client(std::string host_name_or_ip,
|
||||
std::uint8_t max_connections, std::uint16_t port,
|
||||
std::uint16_t receive_timeout,
|
||||
std::uint16_t send_timeout,
|
||||
std::string encryption_token)
|
||||
: host_name_or_ip_(std::move(host_name_or_ip)),
|
||||
max_connections_(max_connections == 0U ? 20U : max_connections),
|
||||
port_(port),
|
||||
receive_timeout_(receive_timeout),
|
||||
send_timeout_(send_timeout),
|
||||
encryption_token_(std::move(encryption_token)),
|
||||
unique_id_(utils::create_uuid_string()) {}
|
||||
packet_client::packet_client(remote::remote_config cfg)
|
||||
: cfg_(std::move(cfg)), unique_id_(utils::create_uuid_string()) {}
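The six-argument constructor is replaced by a single remote::remote_config, so the connection settings travel as one value. An illustrative construction, with example field values:

repertory::remote::remote_config cfg{};
cfg.api_port = 20000U;
cfg.host_name_or_ip = "server.local";
cfg.encryption_token = "secret";
// max_connections and the send/receive timeouts keep their defaults

repertory::packet_client client{cfg};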
|
||||
|
||||
packet_client::~packet_client() {
|
||||
allow_connections_ = false;
|
||||
@ -115,7 +105,7 @@ auto packet_client::get_client() -> std::shared_ptr<packet_client::client> {
|
||||
|
||||
void packet_client::put_client(std::shared_ptr<client> &cli) {
|
||||
mutex_lock clientsLock(clients_mutex_);
|
||||
if (clients_.size() < max_connections_) {
|
||||
if (clients_.size() < cfg_.max_connections) {
|
||||
clients_.emplace_back(cli);
|
||||
}
|
||||
}
|
||||
@ -144,7 +134,7 @@ auto packet_client::read_packet(client &cli, packet &response)
|
||||
read_buffer();
|
||||
response = std::move(buffer);
|
||||
|
||||
auto ret = response.decrypt(encryption_token_);
|
||||
auto ret = response.decrypt(cfg_.encryption_token);
|
||||
if (ret == 0) {
|
||||
ret = response.decode(cli.nonce);
|
||||
}
|
||||
@ -157,8 +147,9 @@ void packet_client::resolve() {
|
||||
return;
|
||||
}
|
||||
|
||||
resolve_results_ = tcp::resolver(io_context_)
|
||||
.resolve(host_name_or_ip_, std::to_string(port_));
|
||||
resolve_results_ =
|
||||
tcp::resolver(io_context_)
|
||||
.resolve(cfg_.host_name_or_ip, std::to_string(cfg_.api_port));
|
||||
}
|
||||
|
||||
auto packet_client::send(std::string_view method, std::uint32_t &service_flags)
|
||||
@ -193,7 +184,7 @@ auto packet_client::send(std::string_view method, packet &request,
|
||||
if (current_client) {
|
||||
try {
|
||||
request.encode_top(current_client->nonce);
|
||||
request.encrypt(encryption_token_);
|
||||
request.encrypt(cfg_.encryption_token);
|
||||
|
||||
timeout request_timeout(
|
||||
[method, current_client]() {
|
||||
@ -201,7 +192,7 @@ auto packet_client::send(std::string_view method, packet &request,
|
||||
"request", std::string{method});
|
||||
packet_client::close(*current_client);
|
||||
},
|
||||
std::chrono::seconds(send_timeout_));
|
||||
std::chrono::milliseconds(cfg_.send_timeout_ms));
|
||||
|
||||
std::uint32_t offset{};
|
||||
while (offset < request.get_size()) {
|
||||
@ -223,7 +214,7 @@ auto packet_client::send(std::string_view method, packet &request,
|
||||
"response", std::string{method});
|
||||
packet_client::close(*current_client);
|
||||
},
|
||||
std::chrono::seconds(receive_timeout_));
|
||||
std::chrono::milliseconds(cfg_.recv_timeout_ms));
|
||||
|
||||
ret = read_packet(*current_client, response);
|
||||
response_timeout.disable();
|
||||
|
@ -28,39 +28,7 @@
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace {
|
||||
[[nodiscard]] auto
|
||||
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
|
||||
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
|
||||
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
|
||||
-> std::unique_ptr<rocksdb::TransactionDB> {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
|
||||
if (clear &&
|
||||
not repertory::utils::file::directory{path}.remove_recursively()) {
|
||||
repertory::utils::error::raise_error(
|
||||
function_name, "failed to remove " + name + " db|" + path);
|
||||
}
|
||||
|
||||
rocksdb::Options options{};
|
||||
options.create_if_missing = true;
|
||||
options.create_missing_column_families = true;
|
||||
options.db_log_dir = cfg.get_log_directory();
|
||||
options.keep_log_file_num = 10;
|
||||
|
||||
rocksdb::TransactionDB *ptr{};
|
||||
auto status = rocksdb::TransactionDB::Open(
|
||||
options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
|
||||
if (not status.ok()) {
|
||||
repertory::utils::error::raise_error(function_name, status.ToString());
|
||||
throw repertory::startup_exception(status.ToString());
|
||||
}
|
||||
|
||||
return std::unique_ptr<rocksdb::TransactionDB>(ptr);
|
||||
}
|
||||
} // namespace
|
||||
#include "utils/utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
rdb_file_db::rdb_file_db(const app_config &cfg) : cfg_(cfg) {
|
||||
@ -75,75 +43,284 @@ void rdb_file_db::create_or_open(bool clear) {
|
||||
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
|
||||
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
|
||||
rocksdb::ColumnFamilyOptions());
|
||||
families.emplace_back("file", rocksdb::ColumnFamilyOptions());
|
||||
families.emplace_back("path", rocksdb::ColumnFamilyOptions());
|
||||
families.emplace_back("source", rocksdb::ColumnFamilyOptions());
|
||||
|
||||
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
|
||||
db_ = create_rocksdb(cfg_, "file", families, handles, clear);
|
||||
db_ = utils::create_rocksdb(cfg_, "file", families, handles, clear);
|
||||
|
||||
std::size_t idx{};
|
||||
directory_family_ = handles.at(idx++);
|
||||
file_family_ = handles.at(idx++);
|
||||
path_family_ = handles.at(idx++);
|
||||
source_family_ = handles.at(idx++);
|
||||
}
|
||||
|
||||
auto rdb_file_db::add_directory(const std::string &api_path,
|
||||
const std::string &source_path) -> api_error {}
|
||||
const std::string &source_path) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
std::string existing_source_path;
|
||||
auto result = get_directory_source_path(api_path, existing_source_path);
|
||||
if (result != api_error::success &&
|
||||
result != api_error::directory_not_found) {
|
||||
return result;
|
||||
}
|
||||
|
||||
return perform_action(
|
||||
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
|
||||
if (not existing_source_path.empty()) {
|
||||
auto res = remove_item(api_path, existing_source_path, txn);
|
||||
if (not res.ok() && not res.IsNotFound()) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
auto res = txn->Put(directory_family_, api_path, source_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
res = txn->Put(path_family_, api_path, source_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
return txn->Put(source_family_, source_path, api_path);
|
||||
});
|
||||
}
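Because perform_action now returns api_error rather than bool, the result of the transactional lambda above propagates directly to callers. An illustrative call site only, where file_db stands for an rdb_file_db instance:

auto res = file_db.add_directory("/photos", "/cache/photos");
if (res != repertory::api_error::success) {
  // directory_not_found, item_not_found and error now surface here unchanged
  return res;
}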
|
||||
|
||||
auto rdb_file_db::add_or_update_file(const i_file_db::file_data &data)
|
||||
-> api_error {}
|
||||
-> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
std::string existing_source_path;
|
||||
auto result = get_file_source_path(data.api_path, existing_source_path);
|
||||
if (result != api_error::success && result != api_error::item_not_found) {
|
||||
return result;
|
||||
}
|
||||
|
||||
return perform_action(
|
||||
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
|
||||
if (not existing_source_path.empty()) {
|
||||
auto res = remove_item(data.api_path, existing_source_path, txn);
|
||||
if (not res.ok() && not res.IsNotFound()) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
json json_data = {
|
||||
{"file_size", data.file_size},
|
||||
{"iv", data.iv_list},
|
||||
{"source_path", data.source_path},
|
||||
};
|
||||
|
||||
auto res = txn->Put(file_family_, data.api_path, json_data.dump());
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
res = txn->Put(path_family_, data.api_path, data.source_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
return txn->Put(source_family_, data.source_path, data.api_path);
|
||||
});
|
||||
}
|
||||
|
||||
void rdb_file_db::clear() { create_or_open(true); }
|
||||
|
||||
auto rdb_file_db::create_iterator() const
|
||||
auto rdb_file_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
|
||||
-> std::shared_ptr<rocksdb::Iterator> {
|
||||
return std::shared_ptr<rocksdb::Iterator>(
|
||||
db_->NewIterator(rocksdb::ReadOptions()));
|
||||
db_->NewIterator(rocksdb::ReadOptions{}, family));
|
||||
}
|
||||
|
||||
auto rdb_file_db::count() const -> std::uint64_t {}
|
||||
auto rdb_file_db::count() const -> std::uint64_t {
|
||||
std::uint64_t ret{};
|
||||
|
||||
auto iter = create_iterator(source_family_);
|
||||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
||||
++ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_api_path(const std::string &source_path,
|
||||
std::string &api_path) const -> api_error {}
|
||||
std::string &api_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto rdb_file_db::get_directory_api_path(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error {}
|
||||
return perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
return db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
|
||||
&api_path);
|
||||
});
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_directory_source_path(const std::string &api_path,
|
||||
std::string &source_path) const
|
||||
-> api_error {}
|
||||
auto rdb_file_db::get_directory_api_path(
|
||||
const std::string &source_path, std::string &api_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
|
||||
&api_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
std::string value;
|
||||
return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
|
||||
&value);
|
||||
});
|
||||
|
||||
if (result != api_error::success) {
|
||||
api_path.clear();
|
||||
}
|
||||
|
||||
return result == api_error::item_not_found ? api_error::directory_not_found
|
||||
: result;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_directory_source_path(
|
||||
const std::string &api_path, std::string &source_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
|
||||
&source_path);
|
||||
});
|
||||
|
||||
return result == api_error::item_not_found ? api_error::directory_not_found
|
||||
: result;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_file_api_path(const std::string &source_path,
|
||||
std::string &api_path) const -> api_error {}
|
||||
std::string &api_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
|
||||
&api_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
std::string value;
|
||||
return db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
|
||||
});
|
||||
|
||||
if (result != api_error::success) {
|
||||
api_path.clear();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_file_data(const std::string &api_path,
|
||||
i_file_db::file_data &data) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
std::string value;
|
||||
auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
auto json_data = json::parse(value);
|
||||
data.api_path = api_path;
|
||||
data.file_size = json_data.at("file_size").get<std::uint64_t>();
|
||||
data.iv_list =
|
||||
json_data.at("iv")
|
||||
.get<std::vector<
|
||||
std::array<unsigned char,
|
||||
crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
|
||||
data.source_path = json_data.at("source_path").get<std::string>();
|
||||
|
||||
return res;
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_file_source_path(const std::string &api_path,
|
||||
std::string &source_path) const
|
||||
-> api_error {}
|
||||
auto rdb_file_db::get_file_source_path(
|
||||
const std::string &api_path, std::string &source_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto rdb_file_db::get_item_list() const -> std::vector<i_file_db::file_info> {}
|
||||
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
std::string value;
|
||||
auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
auto json_data = json::parse(value);
|
||||
source_path = json_data.at("source_path").get<std::string>();
|
||||
return res;
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_item_list() const -> std::vector<i_file_db::file_info> {
|
||||
std::vector<i_file_db::file_info> ret{};
|
||||
{
|
||||
auto iter = create_iterator(directory_family_);
|
||||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
||||
ret.emplace_back(i_file_db::file_info{
|
||||
iter->key().ToString(),
|
||||
true,
|
||||
iter->value().ToString(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
auto iter = create_iterator(file_family_);
|
||||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
||||
auto json_data = json::parse(iter->value().ToString());
|
||||
ret.emplace_back(i_file_db::file_info{
|
||||
iter->key().ToString(),
|
||||
true,
|
||||
json_data.at("source_path").get<std::string>(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto rdb_file_db::get_source_path(const std::string &api_path,
|
||||
std::string &source_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
return perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
return db_->Get(rocksdb::ReadOptions{}, path_family_, api_path,
|
||||
&source_path);
|
||||
});
|
||||
}
|
||||
|
||||
auto rdb_file_db::perform_action(std::string_view function_name,
|
||||
std::function<rocksdb::Status()> action)
|
||||
-> bool {
|
||||
try {
|
||||
auto res = action();
|
||||
if (not res.ok()) {
|
||||
utils::error::raise_error(function_name, res.ToString());
|
||||
}
|
||||
|
||||
return res.ok();
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(function_name, ex);
|
||||
-> api_error {
|
||||
auto res = action();
|
||||
if (res.ok()) {
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
return false;
|
||||
if (not res.IsNotFound()) {
|
||||
utils::error::raise_error(function_name, res.ToString());
|
||||
}
|
||||
|
||||
return res.IsNotFound() ? api_error::item_not_found : api_error::error;
|
||||
}
|
||||
|
||||
auto rdb_file_db::perform_action(
|
||||
std::string_view function_name,
|
||||
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool {
|
||||
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
|
||||
-> api_error {
|
||||
std::unique_ptr<rocksdb::Transaction> txn{
|
||||
db_->BeginTransaction(rocksdb::WriteOptions{},
|
||||
rocksdb::TransactionOptions{}),
|
||||
@ -154,12 +331,12 @@ auto rdb_file_db::perform_action(
|
||||
if (res.ok()) {
|
||||
auto commit_res = txn->Commit();
|
||||
if (commit_res.ok()) {
|
||||
return true;
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
utils::error::raise_error(function_name,
|
||||
"rocksdb commit failed|" + res.ToString());
|
||||
return false;
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
utils::error::raise_error(function_name,
|
||||
@ -172,8 +349,42 @@ auto rdb_file_db::perform_action(
|
||||
auto rollback_res = txn->Rollback();
|
||||
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
|
||||
rollback_res.ToString());
|
||||
return false;
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto rdb_file_db::remove_item(const std::string &api_path) -> api_error {}
|
||||
auto rdb_file_db::remove_item(const std::string &api_path) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
std::string source_path;
|
||||
auto res = get_source_path(api_path, source_path);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
return perform_action(function_name,
|
||||
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
|
||||
return remove_item(api_path, source_path, txn);
|
||||
});
|
||||
}
|
||||
|
||||
auto rdb_file_db::remove_item(const std::string &api_path,
|
||||
const std::string &source_path,
|
||||
rocksdb::Transaction *txn) -> rocksdb::Status {
|
||||
auto res = txn->Delete(source_family_, source_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
res = txn->Delete(path_family_, api_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
res = txn->Delete(directory_family_, api_path);
|
||||
if (not res.ok()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
return txn->Delete(file_family_, api_path);
|
||||
}
|
||||
} // namespace repertory
|
||||
|
@ -28,39 +28,7 @@
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace {
|
||||
[[nodiscard]] auto
|
||||
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
|
||||
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
|
||||
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
|
||||
-> std::unique_ptr<rocksdb::TransactionDB> {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
|
||||
if (clear &&
|
||||
not repertory::utils::file::directory{path}.remove_recursively()) {
|
||||
repertory::utils::error::raise_error(
|
||||
function_name, "failed to remove " + name + " db|" + path);
|
||||
}
|
||||
|
||||
rocksdb::Options options{};
|
||||
options.create_if_missing = true;
|
||||
options.create_missing_column_families = true;
|
||||
options.db_log_dir = cfg.get_log_directory();
|
||||
options.keep_log_file_num = 10;
|
||||
|
||||
rocksdb::TransactionDB *ptr{};
|
||||
auto status = rocksdb::TransactionDB::Open(
|
||||
options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
|
||||
if (not status.ok()) {
|
||||
repertory::utils::error::raise_error(function_name, status.ToString());
|
||||
throw repertory::startup_exception(status.ToString());
|
||||
}
|
||||
|
||||
return std::unique_ptr<rocksdb::TransactionDB>(ptr);
|
||||
}
|
||||
} // namespace
|
||||
#include "utils/utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
rdb_file_mgr_db::rdb_file_mgr_db(const app_config &cfg) : cfg_(cfg) {
|
||||
@ -79,12 +47,12 @@ void rdb_file_mgr_db::create_or_open(bool clear) {
|
||||
families.emplace_back("upload", rocksdb::ColumnFamilyOptions());
|
||||
|
||||
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
|
||||
db_ = create_rocksdb(cfg_, "file_mgr", families, handles, clear);
|
||||
db_ = utils::create_rocksdb(cfg_, "file_mgr", families, handles, clear);
|
||||
|
||||
std::size_t idx{};
|
||||
resume_family_ = handles[idx++];
|
||||
upload_active_family_ = handles[idx++];
|
||||
upload_family_ = handles[idx++];
|
||||
resume_family_ = handles.at(idx++);
|
||||
upload_active_family_ = handles.at(idx++);
|
||||
upload_family_ = handles.at(idx++);
|
||||
}
|
||||
|
||||
auto rdb_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
|
||||
@ -273,9 +241,8 @@ auto rdb_file_mgr_db::remove_resume(const std::string &api_path) -> bool {
|
||||
});
|
||||
}
|
||||
|
||||
auto rdb_file_mgr_db::remove_resume(const std::string &api_path,
|
||||
rocksdb::Transaction *txn)
|
||||
-> rocksdb::Status {
|
||||
auto rdb_file_mgr_db::remove_resume(
|
||||
const std::string &api_path, rocksdb::Transaction *txn) -> rocksdb::Status {
|
||||
return txn->Delete(resume_family_, api_path);
|
||||
}
|
||||
|
||||
@ -316,17 +283,25 @@ auto rdb_file_mgr_db::rename_resume(const std::string &from_api_path,
|
||||
const std::string &to_api_path) -> bool {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
bool not_found{false};
|
||||
std::string value;
|
||||
auto res = perform_action(
|
||||
function_name, [this, &from_api_path, &value]() -> rocksdb::Status {
|
||||
return db_->Get(rocksdb::ReadOptions{}, from_api_path, &value);
|
||||
function_name,
|
||||
[this, &from_api_path, ¬_found, &value]() -> rocksdb::Status {
|
||||
auto result = db_->Get(rocksdb::ReadOptions{}, from_api_path, &value);
|
||||
not_found = result.IsNotFound();
|
||||
return result;
|
||||
});
|
||||
if (not_found) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (not res) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (value.empty()) {
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
auto data = json::parse(value);
|
||||
|
@ -27,39 +27,7 @@
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace {
|
||||
[[nodiscard]] auto
|
||||
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
|
||||
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
|
||||
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
|
||||
-> std::unique_ptr<rocksdb::TransactionDB> {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
|
||||
if (clear &&
|
||||
not repertory::utils::file::directory{path}.remove_recursively()) {
|
||||
repertory::utils::error::raise_error(
|
||||
function_name, "failed to remove " + name + " db|" + path);
|
||||
}
|
||||
|
||||
rocksdb::Options options{};
|
||||
options.create_if_missing = true;
|
||||
options.create_missing_column_families = true;
|
||||
options.db_log_dir = cfg.get_log_directory();
|
||||
options.keep_log_file_num = 10;
|
||||
|
||||
rocksdb::TransactionDB *ptr{};
|
||||
auto status = rocksdb::TransactionDB::Open(
|
||||
options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
|
||||
if (not status.ok()) {
|
||||
repertory::utils::error::raise_error(function_name, status.ToString());
|
||||
throw repertory::startup_exception(status.ToString());
|
||||
}
|
||||
|
||||
return std::unique_ptr<rocksdb::TransactionDB>(ptr);
|
||||
}
|
||||
} // namespace
|
||||
#include "utils/utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
rdb_meta_db::rdb_meta_db(const app_config &cfg) : cfg_(cfg) {
|
||||
@ -79,13 +47,13 @@ void rdb_meta_db::create_or_open(bool clear) {
|
||||
families.emplace_back("source", rocksdb::ColumnFamilyOptions());
|
||||
|
||||
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
|
||||
db_ = create_rocksdb(cfg_, "provider_meta", families, handles, clear);
|
||||
db_ = utils::create_rocksdb(cfg_, "provider_meta", families, handles, clear);
|
||||
|
||||
std::size_t idx{};
|
||||
default_family_ = handles[idx++];
|
||||
pinned_family_ = handles[idx++];
|
||||
size_family_ = handles[idx++];
|
||||
source_family_ = handles[idx++];
|
||||
meta_family_ = handles.at(idx++);
|
||||
pinned_family_ = handles.at(idx++);
|
||||
size_family_ = handles.at(idx++);
|
||||
source_family_ = handles.at(idx++);
|
||||
}
|
||||
|
||||
void rdb_meta_db::clear() { create_or_open(true); }
|
||||
@ -112,7 +80,7 @@ auto rdb_meta_db::get_api_path(const std::string &source_path,
|
||||
|
||||
auto rdb_meta_db::get_api_path_list() const -> std::vector<std::string> {
|
||||
std::vector<std::string> ret;
|
||||
auto iter = create_iterator(default_family_);
|
||||
auto iter = create_iterator(meta_family_);
|
||||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
||||
ret.push_back(iter->key().ToString());
|
||||
}
|
||||
@ -130,8 +98,7 @@ auto rdb_meta_db::get_item_meta_json(const std::string &api_path,
|
||||
{
|
||||
std::string value;
|
||||
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
|
||||
return db_->Get(rocksdb::ReadOptions{}, default_family_, api_path,
|
||||
&value);
|
||||
return db_->Get(rocksdb::ReadOptions{}, meta_family_, api_path, &value);
|
||||
});
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
@ -239,7 +206,7 @@ auto rdb_meta_db::get_pinned_files() const -> std::vector<std::string> {
|
||||
|
||||
auto rdb_meta_db::get_total_item_count() const -> std::uint64_t {
|
||||
std::uint64_t ret{};
|
||||
auto iter = create_iterator(default_family_);
|
||||
auto iter = create_iterator(meta_family_);
|
||||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
||||
++ret;
|
||||
}
|
||||
@ -349,7 +316,7 @@ auto rdb_meta_db::remove_api_path(const std::string &api_path,
|
||||
}
|
||||
}
|
||||
|
||||
return txn->Delete(default_family_, api_path);
|
||||
return txn->Delete(meta_family_, api_path);
|
||||
}
|
||||
|
||||
auto rdb_meta_db::remove_item_meta(const std::string &api_path,
|
||||
@ -523,7 +490,7 @@ auto rdb_meta_db::update_item_meta(const std::string &api_path, json json_data,
|
||||
}
|
||||
}
|
||||
|
||||
return set_status(txn->Put(default_family_, api_path, json_data.dump()));
|
||||
return set_status(txn->Put(meta_family_, api_path, json_data.dump()));
|
||||
};
|
||||
|
||||
if (base_txn == nullptr) {
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "db/impl/sqlite_file_db.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/config.hpp"
|
||||
#include "utils/db/sqlite/db_common.hpp"
|
||||
#include "utils/db/sqlite/db_delete.hpp"
|
||||
@ -29,6 +30,7 @@
|
||||
#include "utils/db/sqlite/db_select.hpp"
|
||||
#include "utils/db/sqlite/db_update.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
@ -51,9 +53,14 @@ const std::map<std::string, std::string> sql_create_tables = {
|
||||
|
||||
namespace repertory {
|
||||
sqlite_file_db::sqlite_file_db(const app_config &cfg) {
|
||||
db_ = utils::db::sqlite::create_db(
|
||||
utils::path::combine(cfg.get_data_directory(), {"provider_file.db"}),
|
||||
sql_create_tables);
|
||||
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
|
||||
if (not utils::file::directory{db_dir}.create_directory()) {
|
||||
throw startup_exception(
|
||||
fmt::format("failed to create db directory|", db_dir));
|
||||
}
|
||||
|
||||
db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"file.db"}),
|
||||
sql_create_tables);
|
||||
}
|
||||
|
||||
sqlite_file_db::~sqlite_file_db() { db_.reset(); }
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "db/impl/sqlite_file_mgr_db.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/config.hpp"
|
||||
#include "utils/db/sqlite/db_common.hpp"
|
||||
#include "utils/db/sqlite/db_delete.hpp"
|
||||
@ -29,6 +30,7 @@
|
||||
#include "utils/db/sqlite/db_select.hpp"
|
||||
#include "utils/db/sqlite/db_update.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
@ -75,9 +77,14 @@ const std::map<std::string, std::string> sql_create_tables{
|
||||
|
||||
namespace repertory {
|
||||
sqlite_file_mgr_db::sqlite_file_mgr_db(const app_config &cfg) {
|
||||
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
|
||||
if (not utils::file::directory{db_dir}.create_directory()) {
|
||||
throw startup_exception(
|
||||
fmt::format("failed to create db directory|", db_dir));
|
||||
}
|
||||
|
||||
db_ = utils::db::sqlite::create_db(
|
||||
utils::path::combine(cfg.get_data_directory(), {"mgr.db"}),
|
||||
sql_create_tables);
|
||||
utils::path::combine(db_dir, {"file_mgr.db"}), sql_create_tables);
|
||||
}
|
||||
|
||||
sqlite_file_mgr_db::~sqlite_file_mgr_db() { db_.reset(); }
|
||||
|
@ -22,11 +22,13 @@
|
||||
#include "db/impl/sqlite_meta_db.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/db/sqlite/db_common.hpp"
|
||||
#include "utils/db/sqlite/db_delete.hpp"
|
||||
#include "utils/db/sqlite/db_insert.hpp"
|
||||
#include "utils/db/sqlite/db_select.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
@ -48,9 +50,14 @@ sqlite_meta_db::sqlite_meta_db(const app_config &cfg) {
|
||||
},
|
||||
};
|
||||
|
||||
db_ = utils::db::sqlite::create_db(
|
||||
utils::path::combine(cfg.get_data_directory(), {"provider_meta.db"}),
|
||||
sql_create_tables);
|
||||
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
|
||||
if (not utils::file::directory{db_dir}.create_directory()) {
|
||||
throw startup_exception(
|
||||
fmt::format("failed to create db directory|", db_dir));
|
||||
}
|
||||
|
||||
db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"meta.db"}),
|
||||
sql_create_tables);
|
||||
}
|
||||
|
||||
sqlite_meta_db::~sqlite_meta_db() { db_.reset(); }
|
||||
@ -272,6 +279,12 @@ void sqlite_meta_db::remove_api_path(const std::string &api_path) {
|
||||
|
||||
auto sqlite_meta_db::remove_item_meta(const std::string &api_path,
|
||||
const std::string &key) -> api_error {
|
||||
if (key == META_DIRECTORY || key == META_PINNED || key == META_SIZE ||
|
||||
key == META_SOURCE) {
|
||||
// TODO log warning for unsupported attributes
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
api_meta_map meta{};
|
||||
auto res = get_item_meta(api_path, meta);
|
||||
if (res != api_error::success) {
|
||||
|
@ -115,7 +115,7 @@ auto directory_iterator::get_directory_item(const std::string &api_path,
|
||||
|
||||
auto directory_iterator::get_json(std::size_t offset, json &item) -> int {
|
||||
if (offset < items_.size()) {
|
||||
item = items_[offset].to_json();
|
||||
item = json(items_.at(offset));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -30,18 +30,17 @@
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/time.hpp"
|
||||
#include "utils/utils.hpp"
|
||||
#include <spdlog/fmt/bundled/base.h>
|
||||
|
||||
namespace repertory {
|
||||
auto eviction::check_minimum_requirements(const std::string &file_path)
|
||||
-> bool {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto check_file = utils::file::file{file_path};
|
||||
|
||||
auto reference_time =
|
||||
check_file.get_time(config_.get_eviction_uses_accessed_time()
|
||||
? utils::file::time_type::accessed
|
||||
: utils::file::time_type::modified);
|
||||
auto file = utils::file::file{file_path};
|
||||
auto reference_time = file.get_time(config_.get_eviction_uses_accessed_time()
|
||||
? utils::file::time_type::accessed
|
||||
: utils::file::time_type::modified);
|
||||
|
||||
if (not reference_time.has_value()) {
|
||||
utils::error::raise_error(function_name, utils::get_last_error_code(),
|
||||
@ -49,18 +48,17 @@ auto eviction::check_minimum_requirements(const std::string &file_path)
|
||||
return false;
|
||||
}
|
||||
|
||||
auto delay = (config_.get_eviction_delay_mins() * 60UL) *
|
||||
utils::time::NANOS_PER_SECOND;
|
||||
|
||||
return ((reference_time.value() + static_cast<std::uint64_t>(delay)) <=
|
||||
utils::time::get_time_now());
|
||||
auto delay =
|
||||
static_cast<std::uint64_t>(config_.get_eviction_delay_mins() * 60U) *
|
||||
utils::time::NANOS_PER_SECOND;
|
||||
return (reference_time.value() + delay) <= utils::time::get_time_now();
|
||||
}
|
||||
|
||||
auto eviction::get_filtered_cached_files() -> std::deque<std::string> {
|
||||
auto list =
|
||||
utils::file::get_directory_files(config_.get_cache_directory(), true);
|
||||
list.erase(std::remove_if(list.begin(), list.end(),
|
||||
[this](const std::string &path) -> bool {
|
||||
[this](auto &&path) -> bool {
|
||||
return not this->check_minimum_requirements(path);
|
||||
}),
|
||||
list.end());
|
||||
@ -70,65 +68,38 @@ auto eviction::get_filtered_cached_files() -> std::deque<std::string> {
|
||||
void eviction::service_function() {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto should_evict = true;
|
||||
auto cached_files_list = get_filtered_cached_files();
|
||||
auto was_file_evicted{false};
|
||||
while (not get_stop_requested() && not cached_files_list.empty()) {
|
||||
auto file_path = cached_files_list.front();
|
||||
cached_files_list.pop_front();
|
||||
|
||||
// Handle maximum cache size eviction
|
||||
auto used_bytes =
|
||||
utils::file::directory{config_.get_cache_directory()}.size();
|
||||
if (config_.get_enable_max_cache_size()) {
|
||||
should_evict = (used_bytes > config_.get_max_cache_size_bytes());
|
||||
}
|
||||
|
||||
if (should_evict) {
|
||||
// Remove cached source files that don't meet minimum requirements
|
||||
auto cached_files_list = get_filtered_cached_files();
|
||||
while (not get_stop_requested() && should_evict &&
|
||||
not cached_files_list.empty()) {
|
||||
try {
|
||||
std::string api_path;
|
||||
if (provider_.get_api_path_from_source(
|
||||
cached_files_list.front(), api_path) == api_error::success) {
|
||||
api_file file{};
|
||||
filesystem_item fsi{};
|
||||
if (provider_.get_filesystem_item_and_file(api_path, file, fsi) ==
|
||||
api_error::success) {
|
||||
// Only evict files that match expected size
|
||||
auto opt_size = utils::file::file{cached_files_list.front()}.size();
|
||||
if (opt_size.has_value()) {
|
||||
auto file_size{opt_size.value()};
|
||||
if (file_size == fsi.size) {
|
||||
// Try to evict file
|
||||
if (fm_.evict_file(fsi.api_path) &&
|
||||
config_.get_enable_max_cache_size()) {
|
||||
// Restrict number of items evicted if maximum cache size is
|
||||
// enabled
|
||||
used_bytes -= file_size;
|
||||
should_evict =
|
||||
(used_bytes > config_.get_max_cache_size_bytes());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
utils::error::raise_api_path_error(
|
||||
function_name, file.api_path, file.source_path,
|
||||
utils::get_last_error_code(), "failed to get file size");
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(function_name, ex,
|
||||
"failed to process cached file|sp|" +
|
||||
cached_files_list.front());
|
||||
try {
|
||||
std::string api_path;
|
||||
auto res = provider_.get_api_path_from_source(file_path, api_path);
|
||||
if (res != api_error::success) {
|
||||
continue;
|
||||
}
|
||||
|
||||
cached_files_list.pop_front();
|
||||
if (file_mgr_.evict_file(api_path)) {
|
||||
was_file_evicted = true;
|
||||
}
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(
|
||||
function_name, ex,
|
||||
fmt::format("failed to process cached file|sp|{}", file_path));
|
||||
}
|
||||
}
|
||||
|
||||
if (not get_stop_requested()) {
|
||||
unique_mutex_lock lock(get_mutex());
|
||||
if (not get_stop_requested()) {
|
||||
get_notify().wait_for(lock, 30s);
|
||||
}
|
||||
if (get_stop_requested() || was_file_evicted) {
|
||||
return;
|
||||
}
|
||||
|
||||
unique_mutex_lock lock(get_mutex());
|
||||
if (get_stop_requested()) {
|
||||
return;
|
||||
}
|
||||
|
||||
get_notify().wait_for(lock, 30s);
|
||||
}
|
||||
} // namespace repertory
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include "initialize.hpp"
|
||||
#include "platform/platform.hpp"
|
||||
#include "utils/collection.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
@ -612,7 +612,7 @@ void *fuse_drive::init_impl(struct fuse_conn_info *conn) {
|
||||
eviction_->start();
|
||||
}
|
||||
|
||||
if (config_.get_enable_remote_mount()) {
|
||||
if (config_.get_remote_mount().enable) {
|
||||
remote_server_ = std::make_unique<remote_fuse::remote_server>(
|
||||
config_, *this, get_mount_location());
|
||||
}
|
||||
@ -1315,6 +1315,10 @@ auto fuse_drive::truncate_impl(std::string api_path, off_t size) -> api_error {
|
||||
return res;
|
||||
}
|
||||
|
||||
if (not fm_->get_open_file(handle, true, open_file)) {
|
||||
return api_error::invalid_handle;
|
||||
}
|
||||
|
||||
res = open_file->resize(static_cast<std::uint64_t>(size));
|
||||
}
|
||||
|
||||
|
@ -27,12 +27,7 @@
|
||||
|
||||
namespace repertory::remote_fuse {
|
||||
remote_client::remote_client(const app_config &config)
|
||||
: config_(config),
|
||||
packet_client_(
|
||||
config.get_remote_host_name_or_ip(),
|
||||
config.get_remote_max_connections(), config.get_remote_port(),
|
||||
config.get_remote_receive_timeout_secs(),
|
||||
config.get_remote_send_timeout_secs(), config.get_remote_token()) {}
|
||||
: config_(config), packet_client_(config.get_remote_config()) {}
|
||||
|
||||
auto remote_client::fuse_access(const char *path, const std::int32_t &mask)
|
||||
-> packet::error_type {
|
||||
|
@ -47,12 +47,7 @@ E_SIMPLE3(remote_winfsp_client_event, debug, true,
|
||||
// clang-format on
|
||||
|
||||
remote_client::remote_client(const app_config &config)
|
||||
: config_(config),
|
||||
packet_client_(
|
||||
config.get_remote_host_name_or_ip(),
|
||||
config.get_remote_max_connections(), config.get_remote_port(),
|
||||
config.get_remote_receive_timeout_secs(),
|
||||
config.get_remote_send_timeout_secs(), config.get_remote_token()) {}
|
||||
: config_(config), packet_client_(config.get_remote_config()) {}
|
||||
|
||||
auto remote_client::winfsp_can_delete(PVOID file_desc, PWSTR file_name)
|
||||
-> packet::error_type {
|
||||
|
@ -302,27 +302,27 @@ auto remote_winfsp_drive::Overwrite(PVOID /*file_node*/, PVOID file_desc,
|
||||
BOOLEAN replace_attributes,
|
||||
UINT64 allocation_size, FileInfo *file_info)
|
||||
-> NTSTATUS {
|
||||
remote::file_info fi{};
|
||||
remote::file_info info{};
|
||||
auto ret = remote_instance_->winfsp_overwrite(
|
||||
file_desc, attributes, replace_attributes, allocation_size, &fi);
|
||||
set_file_info(*file_info, fi);
|
||||
file_desc, attributes, replace_attributes, allocation_size, &info);
|
||||
set_file_info(*file_info, info);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void remote_winfsp_drive::populate_file_info(const json &item,
|
||||
FSP_FSCTL_FILE_INFO &file_info) {
|
||||
auto di = directory_item::from_json(item);
|
||||
file_info.FileSize = di.directory ? 0 : di.size;
|
||||
auto dir_item = item.get<directory_item>();
|
||||
file_info.FileSize = dir_item.directory ? 0 : dir_item.size;
|
||||
file_info.AllocationSize =
|
||||
utils::divide_with_ceiling(file_info.FileSize, WINFSP_ALLOCATION_UNIT) *
|
||||
WINFSP_ALLOCATION_UNIT;
|
||||
file_info.ChangeTime = utils::get_changed_time_from_meta(di.meta);
|
||||
file_info.CreationTime = utils::get_creation_time_from_meta(di.meta);
|
||||
file_info.FileAttributes = utils::get_attributes_from_meta(di.meta);
|
||||
file_info.ChangeTime = utils::get_changed_time_from_meta(dir_item.meta);
|
||||
file_info.CreationTime = utils::get_creation_time_from_meta(dir_item.meta);
|
||||
file_info.FileAttributes = utils::get_attributes_from_meta(dir_item.meta);
|
||||
file_info.HardLinks = 0;
|
||||
file_info.IndexNumber = 0;
|
||||
file_info.LastAccessTime = utils::get_accessed_time_from_meta(di.meta);
|
||||
file_info.LastWriteTime = utils::get_written_time_from_meta(di.meta);
|
||||
file_info.LastAccessTime = utils::get_accessed_time_from_meta(dir_item.meta);
|
||||
file_info.LastWriteTime = utils::get_written_time_from_meta(dir_item.meta);
|
||||
file_info.ReparseTag = 0;
|
||||
file_info.EaSize = 0;
|
||||
}
|
||||
|
@ -89,8 +89,8 @@ auto winfsp_drive::handle_error(std::string_view function_name,
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto winfsp_drive::winfsp_service::OnStart(ULONG /*Argc*/,
|
||||
PWSTR * /*Argv*/) -> NTSTATUS {
|
||||
auto winfsp_drive::winfsp_service::OnStart(ULONG /*Argc*/, PWSTR * /*Argv*/)
|
||||
-> NTSTATUS {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto mount_location = utils::string::to_lower(
|
||||
@ -457,9 +457,10 @@ auto winfsp_drive::get_item_meta(const std::string &api_path,
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto winfsp_drive::get_security_by_name(
|
||||
PWSTR file_name, PUINT32 attributes, PSECURITY_DESCRIPTOR descriptor,
|
||||
std::uint64_t *descriptor_size) -> NTSTATUS {
|
||||
auto winfsp_drive::get_security_by_name(PWSTR file_name, PUINT32 attributes,
|
||||
PSECURITY_DESCRIPTOR descriptor,
|
||||
std::uint64_t *descriptor_size)
|
||||
-> NTSTATUS {
|
||||
auto api_path =
|
||||
utils::path::create_api_path(utils::string::to_utf8(file_name));
|
||||
|
||||
@ -640,7 +641,7 @@ auto winfsp_drive::Mounted(PVOID host) -> NTSTATUS {
|
||||
}
|
||||
|
||||
auto mount_location = parse_mount_location(file_system_host->MountPoint());
|
||||
if (config_.get_enable_remote_mount()) {
|
||||
if (config_.get_remote_mount().enable) {
|
||||
remote_server_ = std::make_unique<remote_winfsp::remote_server>(
|
||||
config_, *this, mount_location);
|
||||
}
|
||||
@ -720,8 +721,8 @@ auto winfsp_drive::Open(PWSTR file_name, UINT32 create_options,
|
||||
|
||||
auto winfsp_drive::Overwrite(PVOID /*file_node*/, PVOID file_desc,
|
||||
UINT32 attributes, BOOLEAN replace_attributes,
|
||||
UINT64 /*allocation_size*/,
|
||||
FileInfo *file_info) -> NTSTATUS {
|
||||
UINT64 /*allocation_size*/, FileInfo *file_info)
|
||||
-> NTSTATUS {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
std::string api_path;
|
||||
@ -827,8 +828,8 @@ void winfsp_drive::populate_file_info(std::uint64_t file_size,
|
||||
}
|
||||
|
||||
auto winfsp_drive::Read(PVOID /*file_node*/, PVOID file_desc, PVOID buffer,
|
||||
UINT64 offset, ULONG length,
|
||||
PULONG bytes_transferred) -> NTSTATUS {
|
||||
UINT64 offset, ULONG length, PULONG bytes_transferred)
|
||||
-> NTSTATUS {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
*bytes_transferred = 0U;
|
||||
@ -883,8 +884,8 @@ auto winfsp_drive::Read(PVOID /*file_node*/, PVOID file_desc, PVOID buffer,
|
||||
|
||||
auto winfsp_drive::ReadDirectory(PVOID /*file_node*/, PVOID file_desc,
|
||||
PWSTR /*pattern*/, PWSTR marker, PVOID buffer,
|
||||
ULONG buffer_length,
|
||||
PULONG bytes_transferred) -> NTSTATUS {
|
||||
ULONG buffer_length, PULONG bytes_transferred)
|
||||
-> NTSTATUS {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
std::string api_path;
|
||||
@ -1046,8 +1047,8 @@ auto winfsp_drive::Rename(PVOID /*file_node*/, PVOID /*file_desc*/,
|
||||
auto winfsp_drive::SetBasicInfo(PVOID /*file_node*/, PVOID file_desc,
|
||||
UINT32 attributes, UINT64 creation_time,
|
||||
UINT64 last_access_time, UINT64 last_write_time,
|
||||
UINT64 change_time,
|
||||
FileInfo *file_info) -> NTSTATUS {
|
||||
UINT64 change_time, FileInfo *file_info)
|
||||
-> NTSTATUS {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
std::string api_path;
|
||||
|
@ -24,7 +24,8 @@
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace repertory {
|
||||
auto event_level_from_string(std::string level) -> event_level {
|
||||
auto event_level_from_string(std::string level, event_level default_level)
|
||||
-> event_level {
|
||||
level = utils::string::to_lower(level);
|
||||
if (level == "critical" || level == "event_level::critical") {
|
||||
return event_level::critical;
|
||||
@ -50,7 +51,7 @@ auto event_level_from_string(std::string level) -> event_level {
|
||||
return event_level::trace;
|
||||
}
|
||||
|
||||
return event_level::info;
|
||||
return default_level;
|
||||
}
|
||||
|
||||
auto event_level_to_string(event_level level) -> std::string {
|
||||
|
repertory/librepertory/src/file_manager/cache_size_mgr.cpp (new file, 128 lines)
@ -0,0 +1,128 @@
|
||||
/*
|
||||
Copyright <2018-2024> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#include "file_manager/cache_size_mgr.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "events/event.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
// clang-format off
|
||||
E_SIMPLE2(invalid_cache_size, warn, true,
|
||||
std::uint64_t, cache_size, sz, E_FROM_UINT64,
|
||||
std::uint64_t, by, by, E_FROM_UINT64
|
||||
);
|
||||
|
||||
E_SIMPLE2(max_cache_size_reached, warn, true,
|
||||
std::uint64_t, cache_size, sz, E_FROM_UINT64,
|
||||
std::uint64_t, max_cache_size, max, E_FROM_UINT64
|
||||
);
|
||||
// clang-format on
|
||||
|
||||
cache_size_mgr cache_size_mgr::instance_{};
|
||||
|
||||
// TODO add timeout
|
||||
auto cache_size_mgr::expand(std::uint64_t size) -> api_error {
|
||||
if (size == 0U) {
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
unique_mutex_lock lock(mtx_);
|
||||
if (cfg_ == nullptr) {
|
||||
return api_error::cache_not_initialized;
|
||||
}
|
||||
|
||||
cache_size_ += size;
|
||||
|
||||
auto max_cache_size = cfg_->get_max_cache_size_bytes();
|
||||
|
||||
auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
|
||||
while (not stop_requested_ && cache_size_ > max_cache_size &&
|
||||
cache_dir.count() > 1U) {
|
||||
event_system::instance().raise<max_cache_size_reached>(cache_size_,
|
||||
max_cache_size);
|
||||
notify_.wait(lock);
|
||||
}
|
||||
|
||||
notify_.notify_all();
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
void cache_size_mgr::initialize(app_config *cfg) {
|
||||
if (cfg == nullptr) {
|
||||
throw startup_exception("app_config must not be null");
|
||||
}
|
||||
|
||||
mutex_lock lock(mtx_);
|
||||
cfg_ = cfg;
|
||||
|
||||
stop_requested_ = false;
|
||||
|
||||
auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
|
||||
if (not cache_dir.create_directory()) {
|
||||
throw startup_exception(fmt::format("failed to create cache directory|{}",
|
||||
cache_dir.get_path()));
|
||||
}
|
||||
|
||||
cache_size_ = cache_dir.size(false);
|
||||
|
||||
notify_.notify_all();
|
||||
}
|
||||
|
||||
auto cache_size_mgr::shrink(std::uint64_t size) -> api_error {
|
||||
mutex_lock lock(mtx_);
|
||||
if (size == 0U) {
|
||||
notify_.notify_all();
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
if (cache_size_ >= size) {
|
||||
cache_size_ -= size;
|
||||
} else {
|
||||
event_system::instance().raise<invalid_cache_size>(cache_size_, size);
|
||||
cache_size_ = 0U;
|
||||
}
|
||||
|
||||
notify_.notify_all();
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto cache_size_mgr::size() const -> std::uint64_t {
|
||||
mutex_lock lock(mtx_);
|
||||
return cache_size_;
|
||||
}
|
||||
|
||||
void cache_size_mgr::stop() {
|
||||
if (stop_requested_) {
|
||||
return;
|
||||
}
|
||||
|
||||
stop_requested_ = true;
|
||||
|
||||
mutex_lock lock(mtx_);
|
||||
notify_.notify_all();
|
||||
}
|
||||
} // namespace repertory
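For orientation only (not part of the change set): a minimal sketch of how this new cache size manager appears to be driven elsewhere in this commit — expand() reserves bytes before a cached file grows and may block while the configured maximum is exceeded, shrink() releases them after the source file is removed. The cache_a_chunk function, the cfg argument, and the 4 MiB figure are invented for illustration.

// Illustrative sketch only; not part of the diff.
#include "app_config.hpp"
#include "file_manager/cache_size_mgr.hpp"

namespace example {
void cache_a_chunk(repertory::app_config &cfg) {
  using repertory::api_error;
  using repertory::cache_size_mgr;

  cache_size_mgr::instance().initialize(&cfg);

  // Reserve space before writing a 4 MiB chunk; this can block until other
  // cached files are evicted when the maximum cache size is reached.
  if (cache_size_mgr::instance().expand(4ULL * 1024ULL * 1024ULL) ==
      api_error::success) {
    // ... write the chunk to the cache directory ...
  }

  // Release the accounted bytes once the cached source file is deleted.
  cache_size_mgr::instance().shrink(4ULL * 1024ULL * 1024ULL);
  cache_size_mgr::instance().stop();
}
} // namespace example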
|
repertory/librepertory/src/file_manager/direct_open_file.cpp (new file, 63 lines)
@ -0,0 +1,63 @@
/*
  Copyright <2018-2024> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all
  copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
*/
#include "file_manager/direct_open_file.hpp"

#include "file_manager/open_file_base.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"

namespace repertory {
direct_open_file::direct_open_file(std::uint64_t chunk_size,
                                   std::uint8_t chunk_timeout,
                                   filesystem_item fsi, i_provider &provider)
    : ring_buffer_base(chunk_size, chunk_timeout, fsi, provider,
                       min_ring_size, true) {}

direct_open_file::~direct_open_file() {
  REPERTORY_USES_FUNCTION_NAME();

  close();
}

auto direct_open_file::on_check_start() -> bool {
  return (get_file_size() == 0U || has_reader_thread());
}

auto direct_open_file::on_read_chunk(std::size_t chunk, std::size_t read_size,
                                     std::uint64_t read_offset,
                                     data_buffer &data,
                                     std::size_t &bytes_read) -> api_error {
  auto &buffer = ring_data_.at(chunk % get_ring_size());
  auto begin =
      std::next(buffer.begin(), static_cast<std::int64_t>(read_offset));
  auto end = std::next(begin, static_cast<std::int64_t>(read_size));
  data.insert(data.end(), begin, end);
  bytes_read = read_size;
  return api_error::success;
}

auto direct_open_file::use_buffer(std::size_t chunk,
                                  std::function<api_error(data_buffer &)> func)
    -> api_error {
  return func(ring_data_.at(chunk % get_ring_size()));
}
} // namespace repertory
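A small, self-contained illustration of the indexing used in on_read_chunk and use_buffer above: absolute chunk numbers wrap onto a fixed ring of buffers via chunk % ring_size, so once a chunk leaves the ring its slot is reused by a later chunk. The ring size of 8 and the dummy data below are invented for the example; this is not project code.

// Standalone illustration; not part of the diff.
#include <array>
#include <cstddef>
#include <vector>

int main() {
  constexpr std::size_t ring_size{8U}; // assumed ring size for this example
  std::array<std::vector<char>, ring_size> ring_data{};

  for (std::size_t chunk = 0U; chunk < 20U; ++chunk) {
    auto &slot = ring_data.at(chunk % ring_size); // chunk 8 reuses slot 0
    slot.assign(4U, static_cast<char>(chunk));    // stand-in for downloaded bytes
  }
  return 0;
}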
@ -23,6 +23,8 @@
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "db/file_mgr_db.hpp"
|
||||
#include "file_manager/cache_size_mgr.hpp"
|
||||
#include "file_manager/direct_open_file.hpp"
|
||||
#include "file_manager/events.hpp"
|
||||
#include "file_manager/open_file.hpp"
|
||||
#include "file_manager/open_file_base.hpp"
|
||||
@ -37,7 +39,6 @@
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/polling.hpp"
|
||||
#include "utils/time.hpp"
|
||||
|
||||
namespace repertory {
|
||||
file_manager::file_manager(app_config &config, i_provider &provider)
|
||||
@ -72,13 +73,13 @@ void file_manager::close(std::uint64_t handle) {
|
||||
closeable_file->remove(handle);
|
||||
}
|
||||
|
||||
void file_manager::close_all(const std::string &api_path) {
|
||||
auto file_manager::close_all(const std::string &api_path) -> bool {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
unique_recur_mutex_lock file_lock(open_file_mtx_);
|
||||
auto file_iter = open_file_lookup_.find(api_path);
|
||||
if (file_iter == open_file_lookup_.end()) {
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
auto closeable_file = file_iter->second;
|
||||
@ -87,6 +88,8 @@ void file_manager::close_all(const std::string &api_path) {
|
||||
|
||||
closeable_file->remove_all();
|
||||
closeable_file->close();
|
||||
|
||||
return closeable_file->get_allocated();
|
||||
}
|
||||
|
||||
void file_manager::close_timed_out_files() {
|
||||
@ -101,12 +104,12 @@ void file_manager::close_timed_out_files() {
|
||||
}
|
||||
return items;
|
||||
});
|
||||
for (auto &&closeable_file : closeable_list) {
|
||||
for (const auto &closeable_file : closeable_list) {
|
||||
open_file_lookup_.erase(closeable_file->get_api_path());
|
||||
}
|
||||
file_lock.unlock();
|
||||
|
||||
for (auto &&closeable_file : closeable_list) {
|
||||
for (auto &closeable_file : closeable_list) {
|
||||
closeable_file->close();
|
||||
event_system::instance().raise<item_timeout>(
|
||||
closeable_file->get_api_path());
|
||||
@ -139,7 +142,7 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
|
||||
return false;
|
||||
}
|
||||
|
||||
recur_mutex_lock open_lock(open_file_mtx_);
|
||||
unique_recur_mutex_lock open_lock(open_file_mtx_);
|
||||
if (is_processing(api_path)) {
|
||||
return false;
|
||||
}
|
||||
@ -148,8 +151,18 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
|
||||
return false;
|
||||
}
|
||||
|
||||
filesystem_item fsi{};
|
||||
auto res = provider_.get_filesystem_item(api_path, false, fsi);
|
||||
if (res != api_error::success) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (fsi.source_path.empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string pinned;
|
||||
auto res = provider_.get_item_meta(api_path, META_PINNED, pinned);
|
||||
res = provider_.get_item_meta(api_path, META_PINNED, pinned);
|
||||
if (res != api_error::success && res != api_error::item_not_found) {
|
||||
utils::error::raise_api_path_error(std::string{function_name}, api_path,
|
||||
res, "failed to get pinned status");
|
||||
@ -160,23 +173,22 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string source_path{};
|
||||
res = provider_.get_item_meta(api_path, META_SOURCE, source_path);
|
||||
if (res != api_error::success) {
|
||||
utils::error::raise_api_path_error(std::string{function_name}, api_path,
|
||||
res, "failed to get source path");
|
||||
return false;
|
||||
}
|
||||
if (source_path.empty()) {
|
||||
return false;
|
||||
std::shared_ptr<i_closeable_open_file> closeable_file;
|
||||
if (open_file_lookup_.contains(api_path)) {
|
||||
closeable_file = open_file_lookup_.at(api_path);
|
||||
}
|
||||
|
||||
open_file_lookup_.erase(api_path);
|
||||
open_lock.unlock();
|
||||
|
||||
auto removed = utils::file::file{source_path}.remove();
|
||||
auto allocated = closeable_file ? closeable_file->get_allocated() : true;
|
||||
closeable_file.reset();
|
||||
|
||||
auto removed = remove_source_and_shrink_cache(api_path, fsi.source_path,
|
||||
fsi.size, allocated);
|
||||
if (removed) {
|
||||
event_system::instance().raise<filesystem_item_evicted>(api_path,
|
||||
source_path);
|
||||
fsi.source_path);
|
||||
}
|
||||
|
||||
return removed;
|
||||
@ -207,7 +219,7 @@ auto file_manager::get_open_file_by_handle(std::uint64_t handle) const
|
||||
-> std::shared_ptr<i_closeable_open_file> {
|
||||
auto file_iter =
|
||||
std::find_if(open_file_lookup_.begin(), open_file_lookup_.end(),
|
||||
[&handle](const auto &item) -> bool {
|
||||
[&handle](auto &&item) -> bool {
|
||||
return item.second->has_handle(handle);
|
||||
});
|
||||
return (file_iter == open_file_lookup_.end()) ? nullptr : file_iter->second;
|
||||
@ -223,7 +235,7 @@ auto file_manager::get_open_file_count(const std::string &api_path) const
|
||||
|
||||
auto file_manager::get_open_file(std::uint64_t handle, bool write_supported,
|
||||
std::shared_ptr<i_open_file> &file) -> bool {
|
||||
recur_mutex_lock open_lock(open_file_mtx_);
|
||||
unique_recur_mutex_lock open_lock(open_file_mtx_);
|
||||
auto file_ptr = get_open_file_by_handle(handle);
|
||||
if (not file_ptr) {
|
||||
return false;
|
||||
@ -232,8 +244,8 @@ auto file_manager::get_open_file(std::uint64_t handle, bool write_supported,
|
||||
if (write_supported && not file_ptr->is_write_supported()) {
|
||||
auto writeable_file = std::make_shared<open_file>(
|
||||
utils::encryption::encrypting_reader::get_data_chunk_size(),
|
||||
config_.get_enable_chunk_download_timeout()
|
||||
? config_.get_chunk_downloader_timeout_secs()
|
||||
config_.get_enable_download_timeout()
|
||||
? config_.get_download_timeout_secs()
|
||||
: 0U,
|
||||
file_ptr->get_filesystem_item(), file_ptr->get_open_data(), provider_,
|
||||
*this);
|
||||
@ -256,7 +268,7 @@ auto file_manager::get_open_files() const
|
||||
std::unordered_map<std::string, std::size_t> ret;
|
||||
|
||||
recur_mutex_lock open_lock(open_file_mtx_);
|
||||
for (auto &&item : open_file_lookup_) {
|
||||
for (const auto &item : open_file_lookup_) {
|
||||
ret[item.first] = item.second->get_open_file_count();
|
||||
}
|
||||
|
||||
@ -346,12 +358,19 @@ auto file_manager::is_processing(const std::string &api_path) const -> bool {
|
||||
return true;
|
||||
};
|
||||
|
||||
recur_mutex_lock open_lock(open_file_mtx_);
|
||||
unique_recur_mutex_lock open_lock(open_file_mtx_);
|
||||
auto file_iter = open_file_lookup_.find(api_path);
|
||||
return (file_iter == open_file_lookup_.end())
|
||||
? false
|
||||
: file_iter->second->is_modified() ||
|
||||
not file_iter->second->is_complete();
|
||||
if (file_iter == open_file_lookup_.end()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto closeable_file = file_iter->second;
|
||||
open_lock.unlock();
|
||||
|
||||
return closeable_file->is_write_supported()
|
||||
? closeable_file->is_modified() ||
|
||||
not closeable_file->is_complete()
|
||||
: false;
|
||||
}
|
||||
|
||||
auto file_manager::open(const std::string &api_path, bool directory,
|
||||
@ -361,11 +380,12 @@ auto file_manager::open(const std::string &api_path, bool directory,
|
||||
return open(api_path, directory, ofd, handle, file, nullptr);
|
||||
}
|
||||
|
||||
auto file_manager::open(const std::string &api_path, bool directory,
|
||||
const open_file_data &ofd, std::uint64_t &handle,
|
||||
std::shared_ptr<i_open_file> &file,
|
||||
std::shared_ptr<i_closeable_open_file> closeable_file)
|
||||
-> api_error {
|
||||
auto file_manager::open(
|
||||
const std::string &api_path, bool directory, const open_file_data &ofd,
|
||||
std::uint64_t &handle, std::shared_ptr<i_open_file> &file,
|
||||
std::shared_ptr<i_closeable_open_file> closeable_file) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
const auto create_and_add_handle =
|
||||
[&](std::shared_ptr<i_closeable_open_file> cur_file) {
|
||||
handle = get_next_handle();
|
||||
@ -395,21 +415,99 @@ auto file_manager::open(const std::string &api_path, bool directory,
|
||||
}
|
||||
|
||||
if (not closeable_file) {
|
||||
closeable_file = std::make_shared<open_file>(
|
||||
auto buffer_directory{
|
||||
utils::path::combine(config_.get_data_directory(), {"buffer"}),
|
||||
};
|
||||
|
||||
auto chunk_size{
|
||||
utils::encryption::encrypting_reader::get_data_chunk_size(),
|
||||
config_.get_enable_chunk_download_timeout()
|
||||
? config_.get_chunk_downloader_timeout_secs()
|
||||
: 0U,
|
||||
fsi, provider_, *this);
|
||||
};
|
||||
|
||||
auto chunk_timeout = config_.get_enable_download_timeout()
|
||||
? config_.get_download_timeout_secs()
|
||||
: 0U;
|
||||
|
||||
auto ring_buffer_file_size{
|
||||
static_cast<std::uint64_t>(config_.get_ring_buffer_file_size()) *
|
||||
1024UL * 1024UL,
|
||||
};
|
||||
|
||||
auto ring_size{ring_buffer_file_size / chunk_size};
|
||||
|
||||
const auto get_download_type = [&](download_type type) -> download_type {
|
||||
if (directory || fsi.size == 0U || is_processing(api_path)) {
|
||||
return download_type::default_;
|
||||
}
|
||||
|
||||
if (type == download_type::direct) {
|
||||
return type;
|
||||
}
|
||||
|
||||
if (type == download_type::default_) {
|
||||
auto free_space =
|
||||
utils::file::get_free_drive_space(config_.get_cache_directory());
|
||||
if (fsi.size < free_space) {
|
||||
return download_type::default_;
|
||||
}
|
||||
}
|
||||
|
||||
if (not ring_buffer_open_file::can_handle_file(fsi.size, chunk_size,
|
||||
ring_size)) {
|
||||
return download_type::direct;
|
||||
}
|
||||
|
||||
if (not utils::file::directory{buffer_directory}.create_directory()) {
|
||||
utils::error::raise_error(
|
||||
function_name, utils::get_last_error_code(),
|
||||
fmt::format("failed to create buffer directory|sp|{}",
|
||||
buffer_directory));
|
||||
return download_type::direct;
|
||||
}
|
||||
|
||||
auto free_space = utils::file::get_free_drive_space(buffer_directory);
|
||||
if (ring_buffer_file_size < free_space) {
|
||||
return download_type::ring_buffer;
|
||||
}
|
||||
|
||||
return download_type::direct;
|
||||
};
|
||||
|
||||
auto preferred_type = config_.get_preferred_download_type();
|
||||
auto type = get_download_type(directory ? download_type::default_
|
||||
: preferred_type == download_type::default_
|
||||
? download_type::ring_buffer
|
||||
: preferred_type);
|
||||
if (not directory) {
|
||||
event_system::instance().raise<download_type_selected>(
|
||||
fsi.api_path, fsi.source_path, type);
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case repertory::download_type::direct: {
|
||||
closeable_file = std::make_shared<direct_open_file>(
|
||||
chunk_size, chunk_timeout, fsi, provider_);
|
||||
} break;
|
||||
|
||||
case repertory::download_type::ring_buffer: {
|
||||
closeable_file = std::make_shared<ring_buffer_open_file>(
|
||||
buffer_directory, chunk_size, chunk_timeout, fsi, provider_,
|
||||
ring_size);
|
||||
} break;
|
||||
|
||||
default: {
|
||||
closeable_file = std::make_shared<open_file>(chunk_size, chunk_timeout,
|
||||
fsi, provider_, *this);
|
||||
} break;
|
||||
}
|
||||
}
|
||||
|
||||
open_file_lookup_[api_path] = closeable_file;
|
||||
create_and_add_handle(closeable_file);
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
void file_manager::queue_upload(const i_open_file &file) {
|
||||
return queue_upload(file.get_api_path(), file.get_source_path(), false);
|
||||
queue_upload(file.get_api_path(), file.get_source_path(), false);
|
||||
}
|
||||
|
||||
void file_manager::queue_upload(const std::string &api_path,
|
||||
@ -418,9 +516,9 @@ void file_manager::queue_upload(const std::string &api_path,
|
||||
return;
|
||||
}
|
||||
|
||||
std::unique_ptr<mutex_lock> lock;
|
||||
std::unique_ptr<mutex_lock> upload_lock;
|
||||
if (not no_lock) {
|
||||
lock = std::make_unique<mutex_lock>(upload_mtx_);
|
||||
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
|
||||
}
|
||||
|
||||
remove_upload(api_path, true);
|
||||
@ -444,38 +542,35 @@ void file_manager::queue_upload(const std::string &api_path,
|
||||
auto file_manager::remove_file(const std::string &api_path) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
recur_mutex_lock open_lock(open_file_mtx_);
|
||||
|
||||
filesystem_item fsi{};
|
||||
auto res = provider_.get_filesystem_item(api_path, false, fsi);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
close_all(api_path);
|
||||
auto allocated = close_all(api_path);
|
||||
|
||||
mutex_lock lock(upload_mtx_);
|
||||
unique_mutex_lock upload_lock(upload_mtx_);
|
||||
remove_upload(api_path, true);
|
||||
remove_resume(api_path, fsi.source_path, true);
|
||||
upload_notify_.notify_all();
|
||||
upload_lock.unlock();
|
||||
|
||||
recur_mutex_lock open_lock(open_file_mtx_);
|
||||
|
||||
res = provider_.remove_file(api_path);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
if (not utils::file::file{fsi.source_path}.remove()) {
|
||||
utils::error::raise_api_path_error(
|
||||
function_name, fsi.api_path, fsi.source_path,
|
||||
utils::get_last_error_code(), "failed to delete source");
|
||||
}
|
||||
|
||||
remove_source_and_shrink_cache(api_path, fsi.source_path, fsi.size,
|
||||
allocated);
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
void file_manager::remove_resume(const std::string &api_path,
|
||||
const std::string &source_path) {
|
||||
return remove_resume(api_path, source_path, false);
|
||||
remove_resume(api_path, source_path, false);
|
||||
}
|
||||
|
||||
void file_manager::remove_resume(const std::string &api_path,
|
||||
@ -484,9 +579,9 @@ void file_manager::remove_resume(const std::string &api_path,
|
||||
return;
|
||||
}
|
||||
|
||||
std::unique_ptr<mutex_lock> lock;
|
||||
std::unique_ptr<mutex_lock> upload_lock;
|
||||
if (not no_lock) {
|
||||
lock = std::make_unique<mutex_lock>(upload_mtx_);
|
||||
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
|
||||
}
|
||||
|
||||
if (mgr_db_->remove_resume(api_path)) {
|
||||
@ -499,6 +594,40 @@ void file_manager::remove_resume(const std::string &api_path,
|
||||
}
|
||||
}
|
||||
|
||||
auto file_manager::remove_source_and_shrink_cache(
|
||||
const std::string &api_path, const std::string &source_path,
|
||||
std::uint64_t file_size, bool allocated) -> bool {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto file = utils::file::file{source_path};
|
||||
auto source_size = file.exists() ? file.size().value_or(0U) : 0U;
|
||||
|
||||
if (not file.remove()) {
|
||||
utils::error::raise_api_path_error(function_name, api_path, source_path,
|
||||
utils::get_last_error_code(),
|
||||
"failed to delete source");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (not allocated || source_size == 0U) {
|
||||
auto res = cache_size_mgr::instance().shrink(0U);
|
||||
if (res != api_error::success) {
|
||||
utils::error::raise_api_path_error(function_name, api_path, source_path,
|
||||
res, "failed to shrink cache");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto res = cache_size_mgr::instance().shrink(file_size);
|
||||
if (res != api_error::success) {
|
||||
utils::error::raise_api_path_error(function_name, api_path, source_path,
|
||||
res, "failed to shrink cache");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void file_manager::remove_upload(const std::string &api_path) {
|
||||
remove_upload(api_path, false);
|
||||
}
|
||||
@ -510,9 +639,9 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::unique_ptr<mutex_lock> lock;
|
||||
std::unique_ptr<mutex_lock> upload_lock;
|
||||
if (not no_lock) {
|
||||
lock = std::make_unique<mutex_lock>(upload_mtx_);
|
||||
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
|
||||
}
|
||||
|
||||
if (not mgr_db_->remove_upload(api_path)) {
|
||||
@ -618,8 +747,8 @@ auto file_manager::rename_directory(const std::string &from_api_path,
|
||||
}
|
||||
|
||||
auto file_manager::rename_file(const std::string &from_api_path,
|
||||
const std::string &to_api_path, bool overwrite)
|
||||
-> api_error {
|
||||
const std::string &to_api_path,
|
||||
bool overwrite) -> api_error {
|
||||
if (not provider_.is_rename_supported()) {
|
||||
return api_error::not_implemented;
|
||||
}
|
||||
@ -751,12 +880,12 @@ void file_manager::start() {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto closeable_file = std::make_shared<open_file>(
|
||||
entry.chunk_size,
|
||||
config_.get_enable_chunk_download_timeout()
|
||||
? config_.get_chunk_downloader_timeout_secs()
|
||||
: 0U,
|
||||
fsi, provider_, entry.read_state, *this);
|
||||
auto closeable_file =
|
||||
std::make_shared<open_file>(entry.chunk_size,
|
||||
config_.get_enable_download_timeout()
|
||||
? config_.get_download_timeout_secs()
|
||||
: 0U,
|
||||
fsi, provider_, entry.read_state, *this);
|
||||
open_file_lookup_[entry.api_path] = closeable_file;
|
||||
event_system::instance().raise<download_restored>(fsi.api_path,
|
||||
fsi.source_path);
|
||||
@ -778,7 +907,6 @@ void file_manager::stop() {
|
||||
|
||||
stop_requested_ = true;
|
||||
|
||||
polling::instance().remove_callback("db_cleanup");
|
||||
polling::instance().remove_callback("timed_out_close");
|
||||
|
||||
unique_mutex_lock upload_lock(upload_mtx_);
|
||||
@ -792,7 +920,7 @@ void file_manager::stop() {
|
||||
open_file_lookup_.clear();
|
||||
|
||||
upload_lock.lock();
|
||||
for (auto &&item : upload_lookup_) {
|
||||
for (auto &item : upload_lookup_) {
|
||||
item.second->stop();
|
||||
}
|
||||
upload_notify_.notify_all();
|
||||
@ -838,10 +966,10 @@ void file_manager::swap_renamed_items(std::string from_api_path,
|
||||
|
||||
auto file_iter = open_file_lookup_.find(from_api_path);
|
||||
if (file_iter != open_file_lookup_.end()) {
|
||||
auto ptr = std::move(open_file_lookup_[from_api_path]);
|
||||
auto closeable_file = std::move(open_file_lookup_[from_api_path]);
|
||||
open_file_lookup_.erase(from_api_path);
|
||||
ptr->set_api_path(to_api_path);
|
||||
open_file_lookup_[to_api_path] = std::move(ptr);
|
||||
closeable_file->set_api_path(to_api_path);
|
||||
open_file_lookup_[to_api_path] = std::move(closeable_file);
|
||||
}
|
||||
|
||||
if (directory) {
|
||||
|
@ -21,18 +21,17 @@
|
||||
*/
|
||||
#include "file_manager/open_file.hpp"
|
||||
|
||||
#include "file_manager/cache_size_mgr.hpp"
|
||||
#include "file_manager/events.hpp"
|
||||
#include "file_manager/file_manager.hpp"
|
||||
#include "file_manager/i_upload_manager.hpp"
|
||||
#include "platform/platform.hpp"
|
||||
#include "providers/i_provider.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/common.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/time.hpp"
|
||||
#include "utils/utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
open_file::open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
|
||||
@ -61,75 +60,246 @@ open_file::open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
|
||||
i_provider &provider,
|
||||
std::optional<boost::dynamic_bitset<>> read_state,
|
||||
i_upload_manager &mgr)
|
||||
: open_file_base(chunk_size, chunk_timeout, fsi, open_data, provider),
|
||||
: open_file_base(chunk_size, chunk_timeout, fsi, open_data, provider,
|
||||
false),
|
||||
mgr_(mgr) {
|
||||
if (fsi_.directory && read_state.has_value()) {
|
||||
throw startup_exception("cannot resume a directory|" + fsi.api_path);
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
if (fsi.directory) {
|
||||
if (read_state.has_value()) {
|
||||
utils::error::raise_api_path_error(
|
||||
function_name, fsi.api_path, fsi.source_path,
|
||||
fmt::format("cannot resume a directory|sp|", fsi.api_path));
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (not fsi.directory) {
|
||||
nf_ = utils::file::file::open_or_create_file(fsi.source_path,
|
||||
provider_.is_read_only());
|
||||
set_api_error(*nf_ ? api_error::success : api_error::os_error);
|
||||
if (get_api_error() == api_error::success) {
|
||||
if (read_state.has_value()) {
|
||||
read_state_ = read_state.value();
|
||||
set_modified();
|
||||
} else if (fsi_.size > 0U) {
|
||||
read_state_.resize(static_cast<std::size_t>(utils::divide_with_ceiling(
|
||||
fsi_.size, chunk_size)),
|
||||
false);
|
||||
nf_ = utils::file::file::open_or_create_file(fsi.source_path,
|
||||
get_provider().is_read_only());
|
||||
set_api_error(*nf_ ? api_error::success : api_error::os_error);
|
||||
if (get_api_error() != api_error::success) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto file_size = nf_->size();
|
||||
if (provider_.is_read_only() || file_size == fsi.size) {
|
||||
read_state_.set(0U, read_state_.size(), true);
|
||||
} else if (not nf_->truncate(fsi.size)) {
|
||||
set_api_error(api_error::os_error);
|
||||
}
|
||||
}
|
||||
if (read_state.has_value()) {
|
||||
read_state_ = read_state.value();
|
||||
set_modified();
|
||||
allocated = true;
|
||||
return;
|
||||
}
|
||||
|
||||
if (get_api_error() != api_error::success && *nf_) {
|
||||
nf_->close();
|
||||
}
|
||||
}
|
||||
if (fsi.size == 0U) {
|
||||
return;
|
||||
}
|
||||
|
||||
read_state_.resize(static_cast<std::size_t>(
|
||||
utils::divide_with_ceiling(fsi.size, chunk_size)),
|
||||
false);
|
||||
|
||||
auto file_size = nf_->size();
|
||||
if (not file_size.has_value()) {
|
||||
utils::error::raise_api_path_error(
|
||||
function_name, fsi.api_path, fsi.source_path,
|
||||
utils::get_last_error_code(), "failed to get file size");
|
||||
set_api_error(api_error::os_error);
|
||||
return;
|
||||
}
|
||||
|
||||
if (get_provider().is_read_only() || file_size.value() == fsi.size) {
|
||||
read_state_.set(0U, read_state_.size(), true);
|
||||
allocated = true;
|
||||
}
|
||||
|
||||
if (get_api_error() != api_error::success && *nf_) {
|
||||
nf_->close();
|
||||
}
|
||||
}
|
||||
|
||||
open_file::~open_file() { close(); }
|
||||
|
||||
auto open_file::adjust_cache_size(std::uint64_t file_size,
bool shrink) -> api_error {
REPERTORY_USES_FUNCTION_NAME();

if (file_size == get_file_size()) {
return api_error::success;
}

if (file_size > get_file_size()) {
auto size = file_size - get_file_size();
auto res = shrink ? cache_size_mgr::instance().shrink(size)
: cache_size_mgr::instance().expand(size);
if (res == api_error::success) {
return res;
}

utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(), res,
fmt::format("failed to {} cache|size|{}",
(shrink ? "shrink" : "expand"), size));
return set_api_error(res);
}

auto size = get_file_size() - file_size;
auto res = shrink ? cache_size_mgr::instance().expand(size)
: cache_size_mgr::instance().shrink(size);
if (res == api_error::success) {
return res;
}

utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(), res,
fmt::format("failed to {} cache|size|{}", (shrink ? "expand" : "shrink"),
size));
return set_api_error(res);
}

auto open_file::check_start() -> api_error {
REPERTORY_USES_FUNCTION_NAME();

unique_recur_mutex_lock file_lock(get_mutex());
if (allocated) {
return api_error::success;
}

auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(),
utils::get_last_error_code(), "failed to get file size");
return set_api_error(api_error::os_error);
}

if (file_size.value() == get_file_size()) {
allocated = true;
return api_error::success;
}

file_lock.unlock();
auto res = adjust_cache_size(file_size.value(), true);
if (res != api_error::success) {
return res;
}
file_lock.lock();

if (not nf_->truncate(get_file_size())) {
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(),
utils::get_last_error_code(),
fmt::format("failed to truncate file|size|{}", get_file_size()));
return set_api_error(res);
}

allocated = true;
return api_error::success;
}

auto open_file::close() -> bool {
REPERTORY_USES_FUNCTION_NAME();

if (is_directory() || stop_requested_) {
return false;
}

stop_requested_ = true;

notify_io();

if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}

if (not open_file_base::close()) {
return false;
}

auto read_state = get_read_state();
auto err = get_api_error();
if (err == api_error::success || err == api_error::download_incomplete ||
err == api_error::download_stopped) {
if (is_modified() && not read_state.all()) {
set_api_error(api_error::download_incomplete);
} else if (not is_modified() && (get_file_size() > 0U) &&
not read_state.all()) {
set_api_error(api_error::download_stopped);
}

err = get_api_error();
}

nf_->close();

if (is_modified()) {
if (err == api_error::success) {
mgr_.queue_upload(*this);
return true;
}

if (err == api_error::download_incomplete) {
mgr_.store_resume(*this);
return true;
}
}

if (err != api_error::success || read_state.all()) {
mgr_.remove_resume(get_api_path(), get_source_path());
}

if (err == api_error::success) {
return true;
}

file_manager::remove_source_and_shrink_cache(
get_api_path(), get_source_path(), get_file_size(), allocated);

auto parent = utils::path::get_parent_path(get_source_path());
set_source_path(utils::path::combine(parent, {utils::create_uuid_string()}));

auto res = get_provider().set_item_meta(get_api_path(), META_SOURCE,
get_source_path());
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
get_source_path(), res,
"failed to set new source path");
}

return true;
}

void open_file::download_chunk(std::size_t chunk, bool skip_active,
bool should_reset) {
if (should_reset) {
reset_timeout();
}

unique_recur_mutex_lock download_lock(file_mtx_);
if ((get_api_error() == api_error::success) && (chunk < read_state_.size()) &&
not read_state_[chunk]) {
if (active_downloads_.find(chunk) != active_downloads_.end()) {
if (not skip_active) {
auto active_download = active_downloads_.at(chunk);
download_lock.unlock();

active_download->wait();
unique_recur_mutex_lock rw_lock(rw_mtx_);
auto read_state = get_read_state();
if ((get_api_error() == api_error::success) && (chunk < read_state.size()) &&
not read_state[chunk]) {
if (get_active_downloads().find(chunk) != get_active_downloads().end()) {
if (skip_active) {
return;
}

auto active_download = get_active_downloads().at(chunk);
rw_lock.unlock();

active_download->wait();
return;
}

auto data_offset = chunk * chunk_size_;
auto data_size =
(chunk == read_state_.size() - 1U) ? last_chunk_size_ : chunk_size_;
if (active_downloads_.empty() && (read_state_.count() == 0U)) {
event_system::instance().raise<download_begin>(fsi_.api_path,
fsi_.source_path);
auto data_offset = chunk * get_chunk_size();
auto data_size = (chunk == read_state.size() - 1U) ? get_last_chunk_size()
: get_chunk_size();
if (get_active_downloads().empty() && (read_state.count() == 0U)) {
event_system::instance().raise<download_begin>(get_api_path(),
get_source_path());
}
event_system::instance().raise<download_chunk_begin>(
fsi_.api_path, fsi_.source_path, chunk, read_state_.size(),
read_state_.count());

active_downloads_[chunk] = std::make_shared<download>();
download_lock.unlock();
get_active_downloads()[chunk] = std::make_shared<download>();
rw_lock.unlock();

if (should_reset) {
reset_timeout();
@ -138,28 +308,28 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
std::async(std::launch::async, [this, chunk, data_size, data_offset,
should_reset]() {
const auto notify_complete = [this, chunk, should_reset]() {
unique_recur_mutex_lock file_lock(file_mtx_);
auto active_download = active_downloads_.at(chunk);
active_downloads_.erase(chunk);
event_system::instance().raise<download_chunk_end>(
fsi_.api_path, fsi_.source_path, chunk, read_state_.size(),
read_state_.count(), get_api_error());
auto state = get_read_state();

unique_recur_mutex_lock lock(rw_mtx_);
auto active_download = get_active_downloads().at(chunk);
get_active_downloads().erase(chunk);
if (get_api_error() == api_error::success) {
auto progress = (static_cast<double>(read_state_.count()) /
static_cast<double>(read_state_.size()) * 100.0);
auto progress = (static_cast<double>(state.count()) /
static_cast<double>(state.size())) *
100.0;
event_system::instance().raise<download_progress>(
fsi_.api_path, fsi_.source_path, progress);
if (read_state_.all() && not notified_) {
get_api_path(), get_source_path(), progress);
if (state.all() && not notified_) {
notified_ = true;
event_system::instance().raise<download_end>(
fsi_.api_path, fsi_.source_path, get_api_error());
get_api_path(), get_source_path(), get_api_error());
}
} else if (not notified_) {
notified_ = true;
event_system::instance().raise<download_end>(
fsi_.api_path, fsi_.source_path, get_api_error());
get_api_path(), get_source_path(), get_api_error());
}
file_lock.unlock();
lock.unlock();

active_download->notify(get_api_error());

@ -168,9 +338,9 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
}
};

data_buffer data;
auto res = provider_.read_file_bytes(get_api_path(), data_size,
data_offset, data, stop_requested_);
data_buffer buffer;
auto res = get_provider().read_file_bytes(
get_api_path(), data_size, data_offset, buffer, stop_requested_);
if (res != api_error::success) {
set_api_error(res);
notify_complete();
@ -183,7 +353,7 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,

res = do_io([&]() -> api_error {
std::size_t bytes_written{};
if (not nf_->write(data, data_offset, &bytes_written)) {
if (not nf_->write(buffer, data_offset, &bytes_written)) {
return api_error::os_error;
}

@ -198,48 +368,50 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
return;
}

unique_recur_mutex_lock file_lock(file_mtx_);
read_state_.set(chunk);
file_lock.unlock();
set_read_state(chunk);

notify_complete();
}).wait();
}
}

void open_file::download_range(std::size_t start_chunk, std::size_t end_chunk,
void open_file::download_range(std::size_t begin_chunk, std::size_t end_chunk,
bool should_reset) {
for (std::size_t chunk = start_chunk; chunk <= end_chunk; ++chunk) {
for (std::size_t chunk = begin_chunk;
(get_api_error() == api_error::success) && (chunk <= end_chunk);
++chunk) {
download_chunk(chunk, false, should_reset);
if (get_api_error() != api_error::success) {
return;
}
}
}

auto open_file::get_allocated() const -> bool {
recur_mutex_lock file_lock(get_mutex());
return allocated;
}

auto open_file::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(file_mtx_);
recur_mutex_lock file_lock(get_mutex());
return read_state_;
}

auto open_file::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return read_state_[chunk];
return get_read_state()[chunk];
}

auto open_file::is_complete() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return read_state_.all();
}
auto open_file::is_complete() const -> bool { return get_read_state().all(); }

auto open_file::native_operation(
i_open_file::native_operation_callback callback) -> api_error {
unique_recur_mutex_lock file_lock(file_mtx_);
if (stop_requested_) {
return api_error::download_stopped;
return set_api_error(api_error::download_stopped);
}
file_lock.unlock();

auto res = check_start();
if (res != api_error::success) {
return res;
}

unique_recur_mutex_lock rw_lock(rw_mtx_);
return do_io([&]() -> api_error { return callback(nf_->get_handle()); });
}

@ -248,38 +420,48 @@ auto open_file::native_operation(
i_open_file::native_operation_callback callback) -> api_error {
REPERTORY_USES_FUNCTION_NAME();

if (fsi_.directory) {
return api_error::invalid_operation;
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}

unique_recur_mutex_lock file_lock(file_mtx_);
if (stop_requested_) {
return api_error::download_stopped;
return set_api_error(api_error::download_stopped);
}

auto res = check_start();
if (res != api_error::success) {
return res;
}

res = adjust_cache_size(new_file_size, false);
if (res != api_error::success) {
return res;
}
file_lock.unlock();

auto is_empty_file = new_file_size == 0U;
auto last_chunk = is_empty_file
? std::size_t(0U)
: static_cast<std::size_t>(utils::divide_with_ceiling(
new_file_size, chunk_size_)) -
new_file_size, get_chunk_size())) -
1U;

file_lock.lock();
if (not is_empty_file && (last_chunk < read_state_.size())) {
file_lock.unlock();
update_background_reader(0U);
unique_recur_mutex_lock rw_lock(rw_mtx_);
auto read_state = get_read_state();
if (not is_empty_file && (last_chunk < read_state.size())) {
rw_lock.unlock();
update_reader(0U);

download_chunk(last_chunk, false, true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
file_lock.lock();
rw_lock.lock();
}

read_state = get_read_state();
auto original_file_size = get_file_size();

auto res = do_io([&]() -> api_error { return callback(nf_->get_handle()); });
res = do_io([&]() -> api_error { return callback(nf_->get_handle()); });
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
utils::get_last_error_code(),
@ -288,59 +470,73 @@ auto open_file::native_operation(
}

{
auto file_size = nf_->size().value_or(0U);
if (file_size != new_file_size) {
auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), api_error::file_size_mismatch,
"allocated file size mismatch|expected|" +
std::to_string(new_file_size) + "|actual|" +
std::to_string(file_size));
fmt::format("failed to get file size|error|{}",
utils::get_last_error_code()));
return set_api_error(api_error::error);
}

if (file_size.value() != new_file_size) {
utils::error::raise_api_path_error(
function_name, get_api_path(), api_error::file_size_mismatch,
fmt::format("file size mismatch|expected|{}|actual|{}", new_file_size,
file_size.value()));
return set_api_error(api_error::error);
}
}

if (is_empty_file || (read_state_.size() != (last_chunk + 1U))) {
auto old_size = read_state_.size();
read_state_.resize(is_empty_file ? 0U : last_chunk + 1U);
if (is_empty_file || (read_state.size() != (last_chunk + 1U))) {
auto old_size = read_state.size();
read_state.resize(is_empty_file ? 0U : last_chunk + 1U);

if (not is_empty_file) {
for (std::size_t chunk = old_size; chunk <= last_chunk; ++chunk) {
read_state_.set(chunk);
read_state.set(chunk);
}
}
set_read_state(read_state);

last_chunk_size_ = static_cast<std::size_t>(
new_file_size <= chunk_size_ ? new_file_size
: (new_file_size % chunk_size_) == 0U ? chunk_size_
: new_file_size % chunk_size_);
set_last_chunk_size(static_cast<std::size_t>(
new_file_size <= get_chunk_size() ? new_file_size
: (new_file_size % get_chunk_size()) == 0U
? get_chunk_size()
: new_file_size % get_chunk_size()));
}

if (original_file_size != new_file_size) {
set_modified();
if (original_file_size == new_file_size) {
return res;
}
set_modified();

fsi_.size = new_file_size;
auto now = std::to_string(utils::time::get_time_now());
res = provider_.set_item_meta(
fsi_.api_path, {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_SIZE, std::to_string(new_file_size)},
{META_WRITTEN, now},
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
return set_api_error(res);
}
set_file_size(new_file_size);
auto now = std::to_string(utils::time::get_time_now());
res = get_provider().set_item_meta(
get_api_path(), {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_SIZE, std::to_string(new_file_size)},
{META_WRITTEN, now},
});
if (res == api_error::success) {
return res;
}

return res;
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
return set_api_error(res);
}

auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error {
if (fsi_.directory) {
return api_error::invalid_operation;
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}

if (stop_requested_) {
return set_api_error(api_error::download_stopped);
}

read_size =
@ -349,12 +545,17 @@ auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
return api_error::success;
}

auto res = check_start();
if (res != api_error::success) {
return res;
}

const auto read_from_source = [this, &data, &read_offset,
&read_size]() -> api_error {
return do_io([this, &data, &read_offset, &read_size]() -> api_error {
if (provider_.is_read_only()) {
return provider_.read_file_bytes(fsi_.api_path, read_size, read_offset,
data, stop_requested_);
if (get_provider().is_read_only()) {
return get_provider().read_file_bytes(
get_api_path(), read_size, read_offset, data, stop_requested_);
}

data.resize(read_size);
@ -365,49 +566,48 @@ auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
});
};

unique_recur_mutex_lock file_lock(file_mtx_);
if (read_state_.all()) {
if (get_read_state().all()) {
reset_timeout();
return read_from_source();
}
file_lock.unlock();

auto start_chunk = static_cast<std::size_t>(read_offset / chunk_size_);
auto begin_chunk = static_cast<std::size_t>(read_offset / get_chunk_size());
auto end_chunk =
static_cast<std::size_t>((read_size + read_offset) / chunk_size_);
static_cast<std::size_t>((read_size + read_offset) / get_chunk_size());

update_background_reader(start_chunk);
update_reader(begin_chunk);

download_range(start_chunk, end_chunk, true);
download_range(begin_chunk, end_chunk, true);
if (get_api_error() != api_error::success) {
return get_api_error();
}

file_lock.lock();
unique_recur_mutex_lock rw_lock(rw_mtx_);
return get_api_error() == api_error::success ? read_from_source()
: get_api_error();
}

void open_file::remove(std::uint64_t handle) {
recur_mutex_lock file_lock(file_mtx_);
open_file_base::remove(handle);
if (modified_ && read_state_.all() &&

recur_mutex_lock rw_lock(rw_mtx_);
if (is_modified() && get_read_state().all() &&
(get_api_error() == api_error::success)) {
mgr_.queue_upload(*this);
modified_ = false;
open_file_base::set_modified(false);
}

if (removed_ && (get_open_file_count() == 0U)) {
removed_ = false;
if (is_removed() && (get_open_file_count() == 0U)) {
open_file_base::set_removed(false);
}
}

void open_file::remove_all() {
recur_mutex_lock file_lock(file_mtx_);
open_file_base::remove_all();

modified_ = false;
removed_ = true;
recur_mutex_lock rw_lock(rw_mtx_);
open_file_base::set_modified(false);
open_file_base::set_removed(true);

mgr_.remove_upload(get_api_path());

@ -415,8 +615,12 @@ void open_file::remove_all() {
}

auto open_file::resize(std::uint64_t new_file_size) -> api_error {
if (fsi_.directory) {
return api_error::invalid_operation;
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}

if (new_file_size == get_file_size()) {
return api_error::success;
}

return native_operation(
@ -426,126 +630,62 @@ auto open_file::resize(std::uint64_t new_file_size) -> api_error {
});
}

auto open_file::close() -> bool {
REPERTORY_USES_FUNCTION_NAME();

if (fsi_.directory || stop_requested_) {
return false;
}

stop_requested_ = true;

unique_mutex_lock reader_lock(io_thread_mtx_);
io_thread_notify_.notify_all();
reader_lock.unlock();

if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}

if (not open_file_base::close()) {
return false;
}

auto err = get_api_error();
if (err == api_error::success || err == api_error::download_incomplete ||
err == api_error::download_stopped) {
if (modified_ && not read_state_.all()) {
set_api_error(api_error::download_incomplete);
} else if (not modified_ && (fsi_.size > 0U) && not read_state_.all()) {
set_api_error(api_error::download_stopped);
}

err = get_api_error();
}

nf_->close();

if (modified_) {
if (err == api_error::success) {
mgr_.queue_upload(*this);
return true;
}

if (err == api_error::download_incomplete) {
mgr_.store_resume(*this);
return true;
}
}

if (err != api_error::success || read_state_.all()) {
mgr_.remove_resume(get_api_path(), get_source_path());
}

if (err == api_error::success) {
return true;
}

if (not utils::file::file(fsi_.source_path).remove()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), fsi_.source_path,
utils::get_last_error_code(), "failed to delete source file");
}

auto parent = utils::path::get_parent_path(fsi_.source_path);
fsi_.source_path =
utils::path::combine(parent, {utils::create_uuid_string()});
auto res =
provider_.set_item_meta(fsi_.api_path, META_SOURCE, fsi_.source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
fsi_.source_path, res,
"failed to set new source path");
}

return true;
}

void open_file::set_modified() {
if (not modified_) {
modified_ = true;
if (not is_modified()) {
open_file_base::set_modified(true);
mgr_.store_resume(*this);
}

if (not removed_) {
removed_ = true;
if (not is_removed()) {
open_file_base::set_removed(true);
mgr_.remove_upload(get_api_path());
}
}

void open_file::update_background_reader(std::size_t read_chunk) {
recur_mutex_lock reader_lock(file_mtx_);
read_chunk_ = read_chunk;
void open_file::set_read_state(std::size_t chunk) {
recur_mutex_lock file_lock(get_mutex());
read_state_.set(chunk);
}

if (not reader_thread_ && not stop_requested_) {
reader_thread_ = std::make_unique<std::thread>([this]() {
std::size_t next_chunk{};
while (not stop_requested_) {
unique_recur_mutex_lock file_lock(file_mtx_);
if ((fsi_.size == 0U) || read_state_.all()) {
file_lock.unlock();
void open_file::set_read_state(boost::dynamic_bitset<> read_state) {
recur_mutex_lock file_lock(get_mutex());
read_state_ = std::move(read_state);
}

unique_mutex_lock io_lock(io_thread_mtx_);
if (not stop_requested_ && io_thread_queue_.empty()) {
io_thread_notify_.wait(io_lock);
}
io_thread_notify_.notify_all();
io_lock.unlock();
} else {
do {
next_chunk = read_chunk_ =
((read_chunk_ + 1U) >= read_state_.size()) ? 0U
: read_chunk_ + 1U;
} while ((next_chunk != 0U) && (active_downloads_.find(next_chunk) !=
active_downloads_.end()));
void open_file::update_reader(std::size_t chunk) {
recur_mutex_lock rw_lock(rw_mtx_);
read_chunk_ = chunk;

file_lock.unlock();
download_chunk(next_chunk, true, false);
}
}
});
if (reader_thread_ || stop_requested_) {
return;
}

reader_thread_ = std::make_unique<std::thread>([this]() {
unique_recur_mutex_lock lock(rw_mtx_);
auto next_chunk{read_chunk_};
auto read_chunk{read_chunk_};
lock.unlock();

while (not stop_requested_) {
lock.lock();

auto read_state = get_read_state();
if ((get_file_size() == 0U) || read_state.all()) {
lock.unlock();
wait_for_io(stop_requested_);
continue;
}

if (read_chunk != read_chunk_) {
next_chunk = read_chunk = read_chunk_;
}

next_chunk = next_chunk + 1U >= read_state.size() ? 0U : next_chunk + 1U;
lock.unlock();

download_chunk(next_chunk, true, false);
}
});
}

auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
@ -554,41 +694,44 @@ auto open_file::write(std::uint64_t write_offset, const data_buffer &data,

bytes_written = 0U;

if (fsi_.directory || provider_.is_read_only()) {
return api_error::invalid_operation;
if (is_directory() || get_provider().is_read_only()) {
return set_api_error(api_error::invalid_operation);
}

if (data.empty()) {
return api_error::success;
}

unique_recur_mutex_lock write_lock(file_mtx_);
if (stop_requested_) {
return api_error::download_stopped;
return set_api_error(api_error::download_stopped);
}
write_lock.unlock();

auto start_chunk = static_cast<std::size_t>(write_offset / chunk_size_);
auto res = check_start();
if (res != api_error::success) {
return res;
}

auto begin_chunk = static_cast<std::size_t>(write_offset / get_chunk_size());
auto end_chunk =
static_cast<std::size_t>((write_offset + data.size()) / chunk_size_);
static_cast<std::size_t>((write_offset + data.size()) / get_chunk_size());

update_background_reader(start_chunk);
update_reader(begin_chunk);

download_range(start_chunk, std::min(read_state_.size() - 1U, end_chunk),
download_range(begin_chunk, std::min(get_read_state().size() - 1U, end_chunk),
true);
if (get_api_error() != api_error::success) {
return get_api_error();
}

write_lock.lock();
if ((write_offset + data.size()) > fsi_.size) {
auto res = resize(write_offset + data.size());
unique_recur_mutex_lock rw_lock(rw_mtx_);
if ((write_offset + data.size()) > get_file_size()) {
res = resize(write_offset + data.size());
if (res != api_error::success) {
return res;
}
}

auto res = do_io([&]() -> api_error {
res = do_io([&]() -> api_error {
if (not nf_->write(data, write_offset, &bytes_written)) {
return api_error::os_error;
}
@ -601,11 +744,11 @@ auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
}

auto now = std::to_string(utils::time::get_time_now());
res = provider_.set_item_meta(fsi_.api_path, {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_WRITTEN, now},
});
res = get_provider().set_item_meta(get_api_path(), {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_WRITTEN, now},
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
@ -35,14 +35,16 @@ void open_file_base::download::notify(const api_error &err) {
}

auto open_file_base::download::wait() -> api_error {
if (not complete_) {
unique_mutex_lock lock(mtx_);
if (not complete_) {
notify_.wait(lock);
}
notify_.notify_all();
if (complete_) {
return error_;
}

unique_mutex_lock lock(mtx_);
if (not complete_) {
notify_.wait(lock);
}
notify_.notify_all();

return error_;
}

@ -65,12 +67,14 @@ auto open_file_base::io_item::get_result() -> api_error {

open_file_base::open_file_base(std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider)
: open_file_base(chunk_size, chunk_timeout, fsi, {}, provider) {}
i_provider &provider, bool disable_io)
: open_file_base(chunk_size, chunk_timeout, fsi, {}, provider, disable_io) {
}

open_file_base::open_file_base(
std::uint64_t chunk_size, std::uint8_t chunk_timeout, filesystem_item fsi,
std::map<std::uint64_t, open_file_data> open_data, i_provider &provider)
std::map<std::uint64_t, open_file_data> open_data, i_provider &provider,
bool disable_io)
: chunk_size_(chunk_size),
chunk_timeout_(chunk_timeout),
fsi_(std::move(fsi)),
@ -80,7 +84,7 @@ open_file_base::open_file_base(
: fsi.size % chunk_size)),
open_data_(std::move(open_data)),
provider_(provider) {
if (not fsi.directory) {
if (not fsi.directory && not disable_io) {
io_thread_ = std::make_unique<std::thread>([this] { file_io_thread(); });
}
}
@ -115,7 +119,7 @@ auto open_file_base::can_close() const -> bool {
return true;
}

if (is_download_complete()) {
if (is_complete()) {
return true;
}

@ -123,12 +127,30 @@ auto open_file_base::can_close() const -> bool {
return true;
}

const std::chrono::system_clock::time_point last_access = last_access_;
const auto duration = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::time_point last_access{last_access_};
auto duration = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - last_access);
return (duration.count() >= chunk_timeout_);
}

auto open_file_base::close() -> bool {
unique_mutex_lock io_lock(io_thread_mtx_);
if (io_stop_requested_ || not io_thread_) {
io_thread_notify_.notify_all();
io_lock.unlock();
return false;
}

io_stop_requested_ = true;
io_thread_notify_.notify_all();
io_lock.unlock();

io_thread_->join();
io_thread_.reset();

return true;
}

auto open_file_base::do_io(std::function<api_error()> action) -> api_error {
unique_mutex_lock io_lock(io_thread_mtx_);
auto item = std::make_shared<io_item>(action);
@ -187,6 +209,36 @@ auto open_file_base::get_file_size() const -> std::uint64_t {
return fsi_.size;
}

[[nodiscard]] auto open_file_base::get_last_chunk_size() const -> std::size_t {
recur_mutex_lock file_lock(file_mtx_);
return last_chunk_size_;
}

void open_file_base::set_file_size(std::uint64_t size) {
recur_mutex_lock file_lock(file_mtx_);
fsi_.size = size;
}

void open_file_base::set_last_chunk_size(std::size_t size) {
recur_mutex_lock file_lock(file_mtx_);
last_chunk_size_ = size;
}

void open_file_base::set_modified(bool modified) {
recur_mutex_lock file_lock(file_mtx_);
modified_ = modified;
}

void open_file_base::set_removed(bool removed) {
recur_mutex_lock file_lock(file_mtx_);
removed_ = removed;
}

void open_file_base::set_source_path(std::string source_path) {
recur_mutex_lock file_lock(file_mtx_);
fsi_.source_path = std::move(source_path);
}

auto open_file_base::get_filesystem_item() const -> filesystem_item {
recur_mutex_lock file_lock(file_mtx_);
return fsi_;
@ -194,8 +246,9 @@ auto open_file_base::get_filesystem_item() const -> filesystem_item {

auto open_file_base::get_handles() const -> std::vector<std::uint64_t> {
recur_mutex_lock file_lock(file_mtx_);

std::vector<std::uint64_t> ret;
for (auto &&item : open_data_) {
for (const auto &item : open_data_) {
ret.emplace_back(item.first);
}

@ -230,11 +283,31 @@ auto open_file_base::get_open_file_count() const -> std::size_t {
return open_data_.size();
}

auto open_file_base::get_source_path() const -> std::string {
recur_mutex_lock file_lock(file_mtx_);
return fsi_.source_path;
}

auto open_file_base::has_handle(std::uint64_t handle) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return open_data_.find(handle) != open_data_.end();
}

auto open_file_base::is_modified() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return modified_;
}

auto open_file_base::is_removed() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return removed_;
}

void open_file_base::notify_io() {
mutex_lock io_lock(io_thread_mtx_);
io_thread_notify_.notify_all();
}

void open_file_base::remove(std::uint64_t handle) {
recur_mutex_lock file_lock(file_mtx_);
if (open_data_.find(handle) == open_data_.end()) {
@ -261,7 +334,7 @@ void open_file_base::remove_all() {
auto open_data = open_data_;
open_data_.clear();

for (auto &&data : open_data) {
for (const auto &data : open_data) {
event_system::instance().raise<filesystem_item_handle_closed>(
fsi_.api_path, data.first, fsi_.source_path, fsi_.directory, modified_);
}
@ -276,15 +349,15 @@ void open_file_base::reset_timeout() {

auto open_file_base::set_api_error(const api_error &err) -> api_error {
mutex_lock error_lock(error_mtx_);
if (error_ != err) {
return ((error_ = (error_ == api_error::success ||
error_ == api_error::download_incomplete ||
error_ == api_error::download_stopped
? err
: error_)));
if (error_ == err) {
return error_;
}

return error_;
return ((error_ = (error_ == api_error::success ||
error_ == api_error::download_incomplete ||
error_ == api_error::download_stopped
? err
: error_)));
}

void open_file_base::set_api_path(const std::string &api_path) {
@ -293,24 +366,12 @@ void open_file_base::set_api_path(const std::string &api_path) {
fsi_.api_parent = utils::path::get_parent_api_path(api_path);
}

auto open_file_base::close() -> bool {
void open_file_base::wait_for_io(stop_type &stop_requested) {
unique_mutex_lock io_lock(io_thread_mtx_);
if (not fsi_.directory && not io_stop_requested_) {
io_stop_requested_ = true;
io_thread_notify_.notify_all();
io_lock.unlock();

if (io_thread_) {
io_thread_->join();
io_thread_.reset();
return true;
}

return false;
if (not stop_requested && io_thread_queue_.empty()) {
io_thread_notify_.wait(io_lock);
}

io_thread_notify_.notify_all();
io_lock.unlock();
return false;
}
} // namespace repertory
367
repertory/librepertory/src/file_manager/ring_buffer_base.cpp
Normal file
@ -0,0 +1,367 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/ring_buffer_base.hpp"

#include "events/event_system.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file_base.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"

namespace repertory {
ring_buffer_base::ring_buffer_base(std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider,
std::size_t ring_size, bool disable_io)
: open_file_base(chunk_size, chunk_timeout, fsi, provider, disable_io),
read_state_(ring_size),
total_chunks_(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size))) {
if (disable_io) {
if (fsi.size > 0U) {
read_state_.resize(std::min(total_chunks_, read_state_.size()));

ring_end_ =
std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
read_state_.set(0U, read_state_.size(), false);
}
} else {
if (ring_size < min_ring_size) {
throw std::runtime_error("ring size must be greater than or equal to 5");
}

ring_end_ = std::min(total_chunks_ - 1U, ring_begin_ + ring_size - 1U);
read_state_.set(0U, ring_size, false);
}
}

auto ring_buffer_base::check_start() -> api_error {
REPERTORY_USES_FUNCTION_NAME();

try {
if (on_check_start()) {
return api_error::success;
}

event_system::instance().raise<download_begin>(get_api_path(),
get_source_path());
reader_thread_ =
std::make_unique<std::thread>([this]() { reader_thread(); });
return api_error::success;
} catch (const std::exception &ex) {
utils::error::raise_api_path_error(function_name, get_api_path(),
get_source_path(), ex,
"failed to start");
return api_error::error;
}
}

auto ring_buffer_base::close() -> bool {
stop_requested_ = true;

unique_mutex_lock chunk_lock(chunk_mtx_);
chunk_notify_.notify_all();
chunk_lock.unlock();

auto res = open_file_base::close();

if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}

return res;
}

auto ring_buffer_base::download_chunk(std::size_t chunk,
bool skip_active) -> api_error {
unique_mutex_lock chunk_lock(chunk_mtx_);
const auto unlock_and_notify = [this, &chunk_lock]() {
chunk_notify_.notify_all();
chunk_lock.unlock();
};

const auto unlock_and_return =
[&unlock_and_notify](api_error res) -> api_error {
unlock_and_notify();
return res;
};

if (chunk < ring_begin_ || chunk > ring_end_) {
return unlock_and_return(api_error::invalid_ring_buffer_position);
}

if (get_active_downloads().find(chunk) != get_active_downloads().end()) {
if (skip_active) {
return unlock_and_return(api_error::success);
}

auto active_download = get_active_downloads().at(chunk);
unlock_and_notify();

return active_download->wait();
}

if (read_state_[chunk % read_state_.size()]) {
return unlock_and_return(api_error::success);
}

auto active_download{std::make_shared<download>()};
get_active_downloads()[chunk] = active_download;

return use_buffer(chunk, [&](data_buffer &buffer) -> api_error {
auto data_offset{chunk * get_chunk_size()};
auto data_size{
chunk == (total_chunks_ - 1U) ? get_last_chunk_size()
: get_chunk_size(),
};
unlock_and_notify();

auto result{
get_provider().read_file_bytes(get_api_path(), data_size, data_offset,
buffer, stop_requested_),
};

chunk_lock.lock();
if (chunk < ring_begin_ || chunk > ring_end_) {
result = api_error::invalid_ring_buffer_position;
}

if (result == api_error::success) {
result = on_chunk_downloaded(chunk, buffer);
if (result == api_error::success) {
read_state_[chunk % read_state_.size()] = true;
auto progress = (static_cast<double>(chunk + 1U) /
static_cast<double>(total_chunks_)) *
100.0;
event_system::instance().raise<download_progress>(
get_api_path(), get_source_path(), progress);
}
}

get_active_downloads().erase(chunk);
unlock_and_notify();

active_download->notify(result);
return result;
});
}

void ring_buffer_base::forward(std::size_t count) {
update_position(count, true);
}

auto ring_buffer_base::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(get_mutex());
return read_state_;
}

auto ring_buffer_base::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(get_mutex());
return read_state_[chunk % read_state_.size()];
}

auto ring_buffer_base::read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error {
if (is_directory()) {
return api_error::invalid_operation;
}

reset_timeout();

read_size =
utils::calculate_read_size(get_file_size(), read_size, read_offset);
if (read_size == 0U) {
return api_error::success;
}

auto begin_chunk{static_cast<std::size_t>(read_offset / get_chunk_size())};
read_offset = read_offset - (begin_chunk * get_chunk_size());

unique_mutex_lock read_lock(read_mtx_);
auto res = check_start();
if (res != api_error::success) {
return res;
}

for (std::size_t chunk = begin_chunk;
not stop_requested_ && (res == api_error::success) && (read_size > 0U);
++chunk) {
reset_timeout();

if (chunk > ring_pos_) {
forward(chunk - ring_pos_);
} else if (chunk < ring_pos_) {
reverse(ring_pos_ - chunk);
}

res = download_chunk(chunk, false);
if (res != api_error::success) {
if (res == api_error::invalid_ring_buffer_position) {
read_lock.unlock();

// TODO limit retry
return read(read_size, read_offset, data);
}

return res;
}

reset_timeout();

std::size_t bytes_read{};
res = on_read_chunk(
chunk,
std::min(static_cast<std::size_t>(get_chunk_size() - read_offset),
read_size),
read_offset, data, bytes_read);
if (res != api_error::success) {
return res;
}

reset_timeout();

read_size -= bytes_read;
read_offset = 0U;
}

return stop_requested_ ? api_error::download_stopped : res;
}

void ring_buffer_base::reader_thread() {
unique_mutex_lock chunk_lock(chunk_mtx_);
auto next_chunk{ring_pos_};
chunk_notify_.notify_all();
chunk_lock.unlock();

while (not stop_requested_) {
chunk_lock.lock();

next_chunk = next_chunk + 1U > ring_end_ ? ring_begin_ : next_chunk + 1U;
const auto check_and_wait = [this, &chunk_lock, &next_chunk]() {
if (stop_requested_) {
chunk_notify_.notify_all();
chunk_lock.unlock();
return;
}

if (get_read_state().all()) {
chunk_notify_.wait(chunk_lock);
next_chunk = ring_pos_;
}

chunk_notify_.notify_all();
chunk_lock.unlock();
};

if (read_state_[next_chunk % read_state_.size()]) {
check_and_wait();
continue;
}

chunk_notify_.notify_all();
chunk_lock.unlock();

download_chunk(next_chunk, true);
}

event_system::instance().raise<download_end>(
get_api_path(), get_source_path(), api_error::download_stopped);
}

void ring_buffer_base::reverse(std::size_t count) {
update_position(count, false);
}

void ring_buffer_base::set(std::size_t first_chunk, std::size_t current_chunk) {
mutex_lock chunk_lock(chunk_mtx_);
if (first_chunk >= total_chunks_) {
chunk_notify_.notify_all();
throw std::runtime_error("first chunk must be less than total chunks");
}

ring_begin_ = first_chunk;
ring_end_ =
std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);

if (current_chunk > ring_end_) {
chunk_notify_.notify_all();
throw std::runtime_error(
"current chunk must be less than or equal to last chunk");
}

ring_pos_ = current_chunk;
read_state_.set(0U, read_state_.size(), true);

chunk_notify_.notify_all();
}

void ring_buffer_base::set_api_path(const std::string &api_path) {
mutex_lock chunk_lock(chunk_mtx_);
open_file_base::set_api_path(api_path);
chunk_notify_.notify_all();
}

void ring_buffer_base::update_position(std::size_t count, bool is_forward) {
mutex_lock chunk_lock(chunk_mtx_);

if (is_forward) {
if ((ring_pos_ + count) > (total_chunks_ - 1U)) {
count = (total_chunks_ - 1U) - ring_pos_;
}
} else {
count = std::min(ring_pos_, count);
}

if (is_forward ? (ring_pos_ + count) <= ring_end_
: (ring_pos_ - count) >= ring_begin_) {
ring_pos_ += is_forward ? count : -count;
} else {
auto delta = is_forward ? count - (ring_end_ - ring_pos_)
: count - (ring_pos_ - ring_begin_);

if (delta >= read_state_.size()) {
read_state_.set(0U, read_state_.size(), false);
ring_pos_ += is_forward ? count : -count;
ring_begin_ += is_forward ? delta : -delta;
} else {
for (std::size_t idx = 0U; idx < delta; ++idx) {
if (is_forward) {
read_state_[(ring_begin_ + idx) % read_state_.size()] = false;
} else {
read_state_[(ring_end_ - idx) % read_state_.size()] = false;
}
}
ring_begin_ += is_forward ? delta : -delta;
ring_pos_ += is_forward ? count : -count;
}

ring_end_ =
std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
}

chunk_notify_.notify_all();
}
} // namespace repertory
@ -21,73 +21,30 @@
*/
#include "file_manager/ring_buffer_open_file.hpp"

#include "app_config.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file_base.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/encrypting_reader.hpp"
#include "utils/file_utils.hpp"
#include "utils/error_utils.hpp"
#include "utils/path.hpp"
#include "utils/utils.hpp"

namespace repertory {
ring_buffer_open_file::ring_buffer_open_file(std::string buffer_directory,
std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi,
i_provider &provider)
: ring_buffer_open_file(std::move(buffer_directory), chunk_size,
chunk_timeout, std::move(fsi), provider,
(1024ULL * 1024ULL * 1024ULL) / chunk_size) {}

ring_buffer_open_file::ring_buffer_open_file(std::string buffer_directory,
std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi,
i_provider &provider,
std::size_t ring_size)
: open_file_base(chunk_size, chunk_timeout, fsi, provider),
ring_state_(ring_size),
total_chunks_(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size_))) {
if ((ring_size % 2U) != 0U) {
throw std::runtime_error("ring size must be a multiple of 2");
}

if (ring_size < 4U) {
throw std::runtime_error("ring size must be greater than or equal to 4");
}

if (fsi.size < (ring_state_.size() * chunk_size)) {
: ring_buffer_base(chunk_size, chunk_timeout, fsi, provider, ring_size,
false),
source_path_(utils::path::combine(buffer_directory,
{
utils::create_uuid_string(),
})) {
if (not can_handle_file(fsi.size, chunk_size, ring_size)) {
throw std::runtime_error("file size is less than ring buffer size");
}

last_chunk_ = ring_state_.size() - 1U;
ring_state_.set(0U, ring_state_.size(), true);

buffer_directory = utils::path::absolute(buffer_directory);
if (not utils::file::directory(buffer_directory).create_directory()) {
throw std::runtime_error("failed to create buffer directory|path|" +
buffer_directory + "|err|" +
std::to_string(utils::get_last_error_code()));
}

fsi_.source_path =
utils::path::combine(buffer_directory, {utils::create_uuid_string()});
nf_ = utils::file::file::open_or_create_file(fsi_.source_path);
if (not*nf_) {
throw std::runtime_error("failed to create buffer file|err|" +
std::to_string(utils::get_last_error_code()));
}

if (not nf_->truncate(ring_state_.size() * chunk_size)) {
nf_->close();
throw std::runtime_error("failed to resize buffer file|err|" +
std::to_string(utils::get_last_error_code()));
}
}

ring_buffer_open_file::~ring_buffer_open_file() {
@ -95,107 +52,24 @@ ring_buffer_open_file::~ring_buffer_open_file() {

close();

if (not nf_) {
return;
}

nf_->close();
if (not utils::file::file(fsi_.source_path).remove()) {
nf_.reset();

if (not utils::file::file(source_path_).remove()) {
utils::error::raise_api_path_error(
function_name, fsi_.api_path, fsi_.source_path,
function_name, get_api_path(), source_path_,
utils::get_last_error_code(), "failed to delete file");
}
}

auto ring_buffer_open_file::download_chunk(std::size_t chunk) -> api_error {
unique_mutex_lock chunk_lock(chunk_mtx_);
if (active_downloads_.find(chunk) != active_downloads_.end()) {
auto active_download = active_downloads_.at(chunk);
chunk_notify_.notify_all();
chunk_lock.unlock();

return active_download->wait();
}

if (ring_state_[chunk % ring_state_.size()]) {
auto active_download = std::make_shared<download>();
active_downloads_[chunk] = active_download;
ring_state_[chunk % ring_state_.size()] = false;
chunk_notify_.notify_all();
chunk_lock.unlock();

data_buffer buffer((chunk == (total_chunks_ - 1U)) ? last_chunk_size_
: chunk_size_);

stop_type stop_requested = !!ring_state_[chunk % ring_state_.size()];
auto res =
provider_.read_file_bytes(fsi_.api_path, buffer.size(),
chunk * chunk_size_, buffer, stop_requested);
if (res == api_error::success) {
res = do_io([&]() -> api_error {
std::size_t bytes_written{};
if (not nf_->write(buffer, (chunk % ring_state_.size()) * chunk_size_,
&bytes_written)) {
return api_error::os_error;
}

return api_error::success;
});
}

active_download->notify(res);

chunk_lock.lock();
active_downloads_.erase(chunk);
chunk_notify_.notify_all();
return res;
}

chunk_notify_.notify_all();
chunk_lock.unlock();

return api_error::success;
}

void ring_buffer_open_file::forward(std::size_t count) {
mutex_lock chunk_lock(chunk_mtx_);
if ((current_chunk_ + count) > (total_chunks_ - 1U)) {
count = (total_chunks_ - 1U) - current_chunk_;
}

if ((current_chunk_ + count) <= last_chunk_) {
current_chunk_ += count;
} else {
const auto added = count - (last_chunk_ - current_chunk_);
if (added >= ring_state_.size()) {
ring_state_.set(0U, ring_state_.size(), true);
current_chunk_ += count;
first_chunk_ += added;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
} else {
for (std::size_t idx = 0U; idx < added; ++idx) {
ring_state_[(first_chunk_ + idx) % ring_state_.size()] = true;
}
first_chunk_ += added;
current_chunk_ += count;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
}
}

chunk_notify_.notify_all();
}

auto ring_buffer_open_file::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(file_mtx_);
auto read_state = ring_state_;
return read_state.flip();
}

auto ring_buffer_open_file::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return not ring_state_[chunk % ring_state_.size()];
}

auto ring_buffer_open_file::is_download_complete() const -> bool {
return false;
auto ring_buffer_open_file::can_handle_file(std::uint64_t file_size,
std::size_t chunk_size,
std::size_t ring_size) -> bool {
return file_size >= (static_cast<std::uint64_t>(ring_size) * chunk_size);
}

auto ring_buffer_open_file::native_operation(
@ -203,121 +77,75 @@ auto ring_buffer_open_file::native_operation(
return do_io([&]() -> api_error { return callback(nf_->get_handle()); });
}

void ring_buffer_open_file::reverse(std::size_t count) {
mutex_lock chunk_lock(chunk_mtx_);
if (current_chunk_ < count) {
count = current_chunk_;
auto ring_buffer_open_file::on_check_start() -> bool {
REPERTORY_USES_FUNCTION_NAME();

if (nf_) {
return true;
}

if ((current_chunk_ - count) >= first_chunk_) {
current_chunk_ -= count;
} else {
const auto removed = count - (current_chunk_ - first_chunk_);
if (removed >= ring_state_.size()) {
ring_state_.set(0U, ring_state_.size(), true);
current_chunk_ -= count;
first_chunk_ = current_chunk_;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
} else {
for (std::size_t idx = 0U; idx < removed; ++idx) {
ring_state_[(last_chunk_ - idx) % ring_state_.size()] = true;
}
first_chunk_ -= removed;
current_chunk_ -= count;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
}
}

chunk_notify_.notify_all();
}

auto ring_buffer_open_file::read(std::size_t read_size,
                                 std::uint64_t read_offset, data_buffer &data)
    -> api_error {
  if (fsi_.directory) {
    return api_error::invalid_operation;
  }

  reset_timeout();

  read_size = utils::calculate_read_size(fsi_.size, read_size, read_offset);
  if (read_size == 0U) {
    return api_error::success;
  }

  const auto start_chunk_index =
      static_cast<std::size_t>(read_offset / chunk_size_);
  read_offset = read_offset - (start_chunk_index * chunk_size_);
  data_buffer buffer(chunk_size_);

  auto res = api_error::success;
  for (std::size_t chunk = start_chunk_index;
       (res == api_error::success) && (read_size > 0U); ++chunk) {
    if (chunk > current_chunk_) {
      forward(chunk - current_chunk_);
    } else if (chunk < current_chunk_) {
      reverse(current_chunk_ - chunk);
    }

    reset_timeout();
    res = download_chunk(chunk);
    if (res == api_error::success) {
      const auto to_read = std::min(
          static_cast<std::size_t>(chunk_size_ - read_offset), read_size);
      res = do_io([this, &buffer, &chunk, &data, read_offset,
                   &to_read]() -> api_error {
        std::size_t bytes_read{};
        auto ret =
            nf_->read(buffer, ((chunk % ring_state_.size()) * chunk_size_),
                      &bytes_read)
                ? api_error::success
                : api_error::os_error;
        if (ret == api_error::success) {
          data.insert(data.end(),
                      buffer.begin() + static_cast<std::int64_t>(read_offset),
                      buffer.begin() +
                          static_cast<std::int64_t>(read_offset + to_read));
          reset_timeout();
        }

        return ret;
      });
      read_offset = 0U;
      read_size -= to_read;
    }
  }

  return res;
}
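The read path above converts a byte range into a sequence of chunk reads: the starting chunk is `read_offset / chunk_size_`, the offset is rebased inside that chunk, and each iteration consumes at most `chunk_size_ - read_offset` bytes before moving to the next chunk. A small standalone sketch of that arithmetic; the sizes and names are illustrative only:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  constexpr std::size_t chunk_size{8U * 1024U * 1024U}; // assumed chunk size
  std::uint64_t read_offset{20ULL * 1024ULL * 1024ULL}; // start at 20 MiB
  std::size_t read_size{10U * 1024U * 1024U};           // read 10 MiB

  auto chunk = static_cast<std::size_t>(read_offset / chunk_size);
  auto offset_in_chunk = static_cast<std::size_t>(read_offset % chunk_size);

  while (read_size > 0U) {
    auto to_read = std::min(chunk_size - offset_in_chunk, read_size);
    std::cout << "chunk " << chunk << ": read " << to_read << " bytes at "
              << offset_in_chunk << '\n';
    read_size -= to_read;
    offset_in_chunk = 0U; // subsequent chunks are read from their start
    ++chunk;
  }
  return 0;
}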
void ring_buffer_open_file::set(std::size_t first_chunk,
                                std::size_t current_chunk) {
  mutex_lock chunk_lock(chunk_mtx_);
  if (first_chunk >= total_chunks_) {
    chunk_notify_.notify_all();
    throw std::runtime_error("first chunk must be less than total chunks");
  }

  first_chunk_ = first_chunk;
  last_chunk_ = first_chunk_ + ring_state_.size() - 1U;

  if (current_chunk > last_chunk_) {
    chunk_notify_.notify_all();
  auto buffer_directory{utils::path::get_parent_path(source_path_)};
  if (not utils::file::directory(buffer_directory).create_directory()) {
    throw std::runtime_error(
        "current chunk must be less than or equal to last chunk");
        fmt::format("failed to create buffer directory|path|{}|err|{}",
                    buffer_directory, utils::get_last_error_code()));
  }

  current_chunk_ = current_chunk;
  ring_state_.set(0U, ring_state_.size(), false);
  nf_ = utils::file::file::open_or_create_file(source_path_);
  if (not nf_ || not *nf_) {
    throw std::runtime_error(fmt::format("failed to create buffer file|err|{}",
                                         utils::get_last_error_code()));
  }

  chunk_notify_.notify_all();
  if (not nf_->truncate(get_ring_size() * get_chunk_size())) {
    nf_->close();
    nf_.reset();

    throw std::runtime_error(fmt::format("failed to resize buffer file|err|{}",
                                         utils::get_last_error_code()));
  }

  return false;
}
void ring_buffer_open_file::set_api_path(const std::string &api_path) {
  mutex_lock chunk_lock(chunk_mtx_);
  open_file_base::set_api_path(api_path);
  chunk_notify_.notify_all();
auto ring_buffer_open_file::on_chunk_downloaded(
    std::size_t chunk, const data_buffer &buffer) -> api_error {
  return do_io([&]() -> api_error {
    std::size_t bytes_written{};
    if (nf_->write(buffer, (chunk % get_ring_size()) * get_chunk_size(),
                   &bytes_written)) {
      return api_error::success;
    }

    return api_error::os_error;
  });
}

auto ring_buffer_open_file::on_read_chunk(
    std::size_t chunk, std::size_t read_size, std::uint64_t read_offset,
    data_buffer &data, std::size_t &bytes_read) -> api_error {
  data_buffer buffer(read_size);
  auto res = do_io([&]() -> api_error {
    return nf_->read(
               buffer,
               (((chunk % get_ring_size()) * get_chunk_size()) + read_offset),
               &bytes_read)
               ? api_error::success
               : api_error::os_error;
  });

  if (res != api_error::success) {
    return res;
  }

  data.insert(data.end(), buffer.begin(), buffer.end());
  return api_error::success;
}

auto ring_buffer_open_file::use_buffer(
    std::size_t /* chunk */,
    std::function<api_error(data_buffer &)> func) -> api_error {
  data_buffer buffer;
  return func(buffer);
}
} // namespace repertory
@ -53,7 +53,8 @@ void upload::upload_thread() {

  error_ =
      provider_.upload_file(fsi_.api_path, fsi_.source_path, stop_requested_);
  if (not utils::file::reset_modified_time(fsi_.source_path)) {
  if (error_ == api_error::success &&
      not utils::file::reset_modified_time(fsi_.source_path)) {
    utils::error::raise_api_path_error(
        function_name, fsi_.api_path, fsi_.source_path,
        utils::get_last_error_code(), "failed to reset modified time");
@ -28,9 +28,8 @@
|
||||
#endif // defined(PROJECT_ENABLE_OPENSSL)
|
||||
|
||||
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
|
||||
#include <filesystem>
|
||||
#include <cstdlib>
|
||||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
#endif // defined(PROJECT_REQUIRE_ALPINE) && !defined (PROJECT_IS_MINGW)
|
||||
|
||||
#if defined(PROJECT_ENABLE_LIBSODIUM)
|
||||
@ -44,6 +43,7 @@
|
||||
#include "spdlog/spdlog.h"
|
||||
|
||||
#include "initialize.hpp"
|
||||
|
||||
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
|
||||
#include "utils/path.hpp"
|
||||
#endif // defined(PROJECT_REQUIRE_ALPINE) && !defined (PROJECT_IS_MINGW)
|
||||
|
@ -24,6 +24,8 @@
|
||||
#include "platform/unix_platform.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "providers/i_provider.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/common.hpp"
|
||||
|
@ -23,12 +23,14 @@
|
||||
|
||||
#include "platform/win32_platform.hpp"
|
||||
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "providers/i_provider.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
auto lock_data::get_mount_state(const provider_type & /*pt*/, json &mount_state)
|
||||
-> bool {
|
||||
auto lock_data::get_mount_state(const provider_type & /*pt*/,
|
||||
json &mount_state) -> bool {
|
||||
const auto ret = get_mount_state(mount_state);
|
||||
if (ret) {
|
||||
const auto mount_id =
|
||||
|
@ -25,8 +25,10 @@
|
||||
#include "db/meta_db.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "file_manager/cache_size_mgr.hpp"
|
||||
#include "file_manager/i_file_manager.hpp"
|
||||
#include "platform/platform.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/polling.hpp"
|
||||
@ -49,8 +51,8 @@ void base_provider::add_all_items(const stop_type &stop_requested) {
|
||||
}
|
||||
|
||||
auto base_provider::create_api_file(std::string path, std::string key,
|
||||
std::uint64_t size,
|
||||
std::uint64_t file_time) -> api_file {
|
||||
std::uint64_t size, std::uint64_t file_time)
|
||||
-> api_file {
|
||||
api_file file{};
|
||||
file.api_path = utils::path::create_api_path(path);
|
||||
file.api_parent = utils::path::get_parent_api_path(file.api_path);
|
||||
@ -82,8 +84,8 @@ auto base_provider::create_api_file(std::string path, std::uint64_t size,
|
||||
}
|
||||
|
||||
auto base_provider::create_directory_clone_source_meta(
|
||||
const std::string &source_api_path,
|
||||
const std::string &api_path) -> api_error {
|
||||
const std::string &source_api_path, const std::string &api_path)
|
||||
-> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
bool exists{};
|
||||
@ -180,8 +182,8 @@ auto base_provider::create_directory(const std::string &api_path,
|
||||
return set_item_meta(api_path, meta);
|
||||
}
|
||||
|
||||
auto base_provider::create_file(const std::string &api_path,
|
||||
api_meta_map &meta) -> api_error {
|
||||
auto base_provider::create_file(const std::string &api_path, api_meta_map &meta)
|
||||
-> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
bool exists{};
|
||||
@ -238,8 +240,9 @@ auto base_provider::create_file(const std::string &api_path,
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto base_provider::get_api_path_from_source(
|
||||
const std::string &source_path, std::string &api_path) const -> api_error {
|
||||
auto base_provider::get_api_path_from_source(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
if (source_path.empty()) {
|
||||
@ -252,8 +255,9 @@ auto base_provider::get_api_path_from_source(
|
||||
return db3_->get_api_path(source_path, api_path);
|
||||
}
|
||||
|
||||
auto base_provider::get_directory_items(
|
||||
const std::string &api_path, directory_item_list &list) const -> api_error {
|
||||
auto base_provider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const
|
||||
-> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
bool exists{};
|
||||
@ -317,9 +321,10 @@ auto base_provider::get_file_size(const std::string &api_path,
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto base_provider::get_filesystem_item(
|
||||
const std::string &api_path, bool directory,
|
||||
filesystem_item &fsi) const -> api_error {
|
||||
auto base_provider::get_filesystem_item(const std::string &api_path,
|
||||
bool directory,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
bool exists{};
|
||||
auto res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
@ -352,9 +357,10 @@ auto base_provider::get_filesystem_item(
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto base_provider::get_filesystem_item_and_file(
|
||||
const std::string &api_path, api_file &file,
|
||||
filesystem_item &fsi) const -> api_error {
|
||||
auto base_provider::get_filesystem_item_and_file(const std::string &api_path,
|
||||
api_file &file,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
auto res = get_file(api_path, file);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
@ -451,7 +457,7 @@ void base_provider::process_removed_files(std::deque<removed_item> removed_list,
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto orphaned_directory =
|
||||
utils::path::combine(config_.get_data_directory(), {"orphaned"});
|
||||
utils::path::combine(get_config().get_data_directory(), {"orphaned"});
|
||||
for (const auto &item : removed_list) {
|
||||
if (stop_requested) {
|
||||
return;
|
||||
@ -665,8 +671,10 @@ void base_provider::remove_unmatched_source_files(
|
||||
return;
|
||||
}
|
||||
|
||||
const auto &cfg = get_config();
|
||||
|
||||
auto source_list =
|
||||
utils::file::directory{config_.get_cache_directory()}.get_files();
|
||||
utils::file::directory{cfg.get_cache_directory()}.get_files();
|
||||
for (const auto &source_file : source_list) {
|
||||
if (stop_requested) {
|
||||
return;
|
||||
@ -679,15 +687,15 @@ void base_provider::remove_unmatched_source_files(
|
||||
}
|
||||
|
||||
auto reference_time =
|
||||
source_file->get_time(config_.get_eviction_uses_accessed_time()
|
||||
source_file->get_time(cfg.get_eviction_uses_accessed_time()
|
||||
? utils::file::time_type::accessed
|
||||
: utils::file::time_type::modified);
|
||||
if (not reference_time.has_value()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto delay = (config_.get_eviction_delay_mins() * 60UL) *
|
||||
utils::time::NANOS_PER_SECOND;
|
||||
auto delay =
|
||||
(cfg.get_eviction_delay_mins() * 60UL) * utils::time::NANOS_PER_SECOND;
|
||||
if ((reference_time.value() + static_cast<std::uint64_t>(delay)) >=
|
||||
utils::time::get_time_now()) {
|
||||
continue;
|
||||
@ -731,17 +739,19 @@ auto base_provider::start(api_item_added_callback api_item_added,
|
||||
auto online{false};
|
||||
auto unmount_requested{false};
|
||||
{
|
||||
const auto &cfg = get_config();
|
||||
|
||||
repertory::event_consumer consumer(
|
||||
"unmount_requested",
|
||||
[&unmount_requested](const event &) { unmount_requested = true; });
|
||||
for (std::uint16_t idx = 0U; not online && not unmount_requested &&
|
||||
(idx < config_.get_online_check_retry_secs());
|
||||
(idx < cfg.get_online_check_retry_secs());
|
||||
++idx) {
|
||||
online = is_online();
|
||||
if (not online) {
|
||||
event_system::instance().raise<provider_offline>(
|
||||
config_.get_host_config().host_name_or_ip,
|
||||
config_.get_host_config().api_port);
|
||||
cfg.get_host_config().host_name_or_ip,
|
||||
cfg.get_host_config().api_port);
|
||||
std::this_thread::sleep_for(1s);
|
||||
}
|
||||
}
|
||||
@ -751,6 +761,8 @@ auto base_provider::start(api_item_added_callback api_item_added,
|
||||
return false;
|
||||
}
|
||||
|
||||
cache_size_mgr::instance().initialize(&config_);
|
||||
|
||||
polling::instance().set_callback({
|
||||
"check_deleted",
|
||||
polling::frequency::low,
|
||||
@ -761,6 +773,7 @@ auto base_provider::start(api_item_added_callback api_item_added,
|
||||
}
|
||||
|
||||
void base_provider::stop() {
|
||||
cache_size_mgr::instance().stop();
|
||||
polling::instance().remove_callback("check_deleted");
|
||||
db3_.reset();
|
||||
}
|
||||
|
@ -29,21 +29,18 @@
|
||||
#include "utils/collection.hpp"
|
||||
#include "utils/encrypting_reader.hpp"
|
||||
#include "utils/encryption.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/path.hpp"
|
||||
#include "utils/polling.hpp"
|
||||
|
||||
namespace {
|
||||
const std::string file_table = "file";
|
||||
} // namespace
|
||||
|
||||
namespace repertory {
|
||||
encrypt_provider::encrypt_provider(app_config &config) : config_(config) {}
|
||||
encrypt_provider::encrypt_provider(app_config &config)
|
||||
: config_(config), encrypt_config_(config.get_encrypt_config()) {}
|
||||
|
||||
auto encrypt_provider::create_api_file(const std::string &api_path,
|
||||
bool directory,
|
||||
const std::string &source_path)
|
||||
-> api_file {
|
||||
auto encrypt_provider::create_api_file(
|
||||
const std::string &api_path, bool directory,
|
||||
const std::string &source_path) -> api_file {
|
||||
auto times = utils::file::get_times(source_path);
|
||||
if (not times.has_value()) {
|
||||
throw std::runtime_error("failed to get file times");
|
||||
@ -69,10 +66,10 @@ auto encrypt_provider::create_api_file(const std::string &api_path,
|
||||
void encrypt_provider::create_item_meta(api_meta_map &meta, bool directory,
|
||||
const api_file &file) {
|
||||
#if defined(_WIN32)
|
||||
struct _stat64 buf{};
|
||||
struct _stat64 buf {};
|
||||
_stat64(file.source_path.c_str(), &buf);
|
||||
#else // !defined(_WIN32)
|
||||
struct stat buf{};
|
||||
struct stat buf {};
|
||||
stat(file.source_path.c_str(), &buf);
|
||||
#endif // defined(_WIN32)
|
||||
|
||||
@ -115,7 +112,8 @@ auto encrypt_provider::do_fs_operation(
|
||||
std::function<api_error(const encrypt_config &cfg,
|
||||
const std::string &source_path)>
|
||||
callback) const -> api_error {
|
||||
auto cfg = config_.get_encrypt_config();
|
||||
const auto &cfg = get_encrypt_config();
|
||||
|
||||
std::string source_path{api_path};
|
||||
if (api_path != "/" && not utils::encryption::decrypt_file_path(
|
||||
cfg.encryption_token, source_path)) {
|
||||
@ -151,9 +149,8 @@ auto encrypt_provider::do_fs_operation(
|
||||
return callback(cfg, source_path);
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_api_path_from_source(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::get_api_path_from_source(
|
||||
const std::string &source_path, std::string &api_path) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
@ -192,9 +189,8 @@ auto encrypt_provider::get_directory_item_count(
|
||||
return count;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::get_directory_items(
|
||||
const std::string &api_path, directory_item_list &list) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
return do_fs_operation(
|
||||
@ -322,12 +318,11 @@ auto encrypt_provider::get_file(const std::string &api_path,
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_file_list(api_file_list &list,
|
||||
std::string & /* marker */) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::get_file_list(
|
||||
api_file_list &list, std::string & /* marker */) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto cfg = config_.get_encrypt_config();
|
||||
const auto &cfg = get_encrypt_config();
|
||||
|
||||
try {
|
||||
for (const auto &dir_entry : utils::file::directory{cfg.path}.get_items()) {
|
||||
@ -347,9 +342,8 @@ auto encrypt_provider::get_file_list(api_file_list &list,
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_file_size(const std::string &api_path,
|
||||
std::uint64_t &file_size) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::get_file_size(
|
||||
const std::string &api_path, std::uint64_t &file_size) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
@ -370,10 +364,9 @@ auto encrypt_provider::get_file_size(const std::string &api_path,
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_filesystem_item(const std::string &api_path,
|
||||
bool directory,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::get_filesystem_item(
|
||||
const std::string &api_path, bool directory,
|
||||
filesystem_item &fsi) const -> api_error {
|
||||
std::string source_path;
|
||||
if (directory) {
|
||||
auto result = db_->get_directory_source_path(api_path, source_path);
|
||||
@ -424,10 +417,9 @@ auto encrypt_provider::get_filesystem_item_from_source_path(
|
||||
return get_filesystem_item(api_path, false, fsi);
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_filesystem_item_and_file(const std::string &api_path,
|
||||
api_file &file,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::get_filesystem_item_and_file(
|
||||
const std::string &api_path, api_file &file,
|
||||
filesystem_item &fsi) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
@ -501,9 +493,8 @@ auto encrypt_provider::get_item_meta(const std::string &api_path,
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_total_drive_space() const -> std::uint64_t {
|
||||
auto total_space =
|
||||
utils::file::get_total_drive_space(config_.get_encrypt_config().path);
|
||||
return total_space.value_or(0U);
|
||||
return utils::file::get_total_drive_space(get_encrypt_config().path)
|
||||
.value_or(0U);
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_total_item_count() const -> std::uint64_t {
|
||||
@ -521,7 +512,7 @@ auto encrypt_provider::get_total_item_count() const -> std::uint64_t {
|
||||
|
||||
auto encrypt_provider::get_used_drive_space() const -> std::uint64_t {
|
||||
auto free_space =
|
||||
utils::file::get_free_drive_space(config_.get_encrypt_config().path);
|
||||
utils::file::get_free_drive_space(get_encrypt_config().path);
|
||||
return free_space.has_value() ? get_total_drive_space() - free_space.value()
|
||||
: 0U;
|
||||
}
|
||||
@ -533,8 +524,14 @@ auto encrypt_provider::is_directory(const std::string &api_path,
|
||||
try {
|
||||
std::string source_path;
|
||||
auto result = db_->get_directory_source_path(api_path, source_path);
|
||||
|
||||
if (result != api_error::success) {
|
||||
return result;
|
||||
if (result != api_error::directory_not_found) {
|
||||
return result;
|
||||
}
|
||||
|
||||
exists = false;
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
exists = utils::file::directory{source_path}.exists();
|
||||
@ -547,15 +544,20 @@ auto encrypt_provider::is_directory(const std::string &api_path,
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto encrypt_provider::is_file(const std::string &api_path, bool &exists) const
|
||||
-> api_error {
|
||||
auto encrypt_provider::is_file(const std::string &api_path,
|
||||
bool &exists) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
std::string source_path;
|
||||
auto result = db_->get_file_source_path(api_path, source_path);
|
||||
if (result != api_error::success) {
|
||||
return result;
|
||||
if (result != api_error::item_not_found) {
|
||||
return result;
|
||||
}
|
||||
|
||||
exists = false;
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
exists = utils::file::file{source_path}.exists();
|
||||
@ -574,10 +576,7 @@ auto encrypt_provider::is_file_writeable(const std::string & /*api_path*/) const
|
||||
}
|
||||
|
||||
auto encrypt_provider::is_online() const -> bool {
|
||||
return utils::file::directory{
|
||||
config_.get_encrypt_config().path,
|
||||
}
|
||||
.exists();
|
||||
return utils::file::directory{get_encrypt_config().path}.exists();
|
||||
}
|
||||
|
||||
auto encrypt_provider::process_directory_entry(
|
||||
@ -733,7 +732,7 @@ auto encrypt_provider::read_file_bytes(const std::string &api_path,
|
||||
|
||||
auto file_size{opt_size.value()};
|
||||
|
||||
auto cfg = config_.get_encrypt_config();
|
||||
const auto &cfg = get_encrypt_config();
|
||||
|
||||
unique_recur_mutex_lock reader_lookup_lock(reader_lookup_mtx_);
|
||||
|
||||
@ -829,9 +828,6 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
|
||||
|
||||
db_ = create_file_db(config_);
|
||||
|
||||
auto cfg = config_.get_encrypt_config();
|
||||
auto cfg_path = utils::path::absolute(cfg.path);
|
||||
|
||||
std::string source_path;
|
||||
auto result = db_->get_directory_source_path("/", source_path);
|
||||
if (result != api_error::success &&
|
||||
@ -840,6 +836,7 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
|
||||
fmt::format("failed to get root|{}", api_error_to_string(result)));
|
||||
}
|
||||
|
||||
auto cfg_path = utils::path::absolute(get_encrypt_config().path);
|
||||
if (result == api_error::success) {
|
||||
auto cur_path = utils::path::absolute(source_path);
|
||||
#if defined(_WIN32)
|
||||
@ -849,10 +846,10 @@ auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
|
||||
if (cur_path != cfg_path) {
|
||||
#endif // defined(_WIN32)
|
||||
throw startup_exception(fmt::format(
|
||||
"source path has changed|cur|{}|cfg|{}", cur_path, cfg.path));
|
||||
"source path has changed|cur|{}|cfg|{}", cur_path, cfg_path));
|
||||
}
|
||||
} else {
|
||||
result = db_->add_directory("/", utils::path::absolute(source_path));
|
||||
result = db_->add_directory("/", utils::path::absolute(cfg_path));
|
||||
if (result != api_error::success) {
|
||||
throw startup_exception(
|
||||
fmt::format("failed to create root|{}", api_error_to_string(result)));
|
||||
|
@ -23,6 +23,8 @@
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "comm/i_http_comm.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "file_manager/i_file_manager.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "types/s3.hpp"
|
||||
@ -39,9 +41,7 @@
|
||||
|
||||
namespace repertory {
|
||||
s3_provider::s3_provider(app_config &config, i_http_comm &comm)
|
||||
: base_provider(config, comm) {
|
||||
get_comm().enable_s3_path_style(config.get_s3_config().use_path_style);
|
||||
}
|
||||
: base_provider(config, comm) {}
|
||||
|
||||
auto s3_provider::add_if_not_found(
|
||||
api_file &file, const std::string &object_name) const -> api_error {
|
||||
@ -85,7 +85,7 @@ auto s3_provider::create_directory_impl(const std::string &api_path,
|
||||
api_meta_map &meta) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
stop_type stop_requested{false};
|
||||
|
||||
@ -138,7 +138,8 @@ auto s3_provider::create_file_extra(const std::string &api_path,
|
||||
api_meta_map &meta) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
if (not get_config().get_s3_config().encryption_token.empty()) {
|
||||
const auto &cfg = get_s3_config();
|
||||
if (not cfg.encryption_token.empty()) {
|
||||
std::string encrypted_file_path;
|
||||
auto res = get_item_meta(utils::path::get_parent_api_path(api_path),
|
||||
META_KEY, encrypted_file_path);
|
||||
@ -150,7 +151,7 @@ auto s3_provider::create_file_extra(const std::string &api_path,
|
||||
|
||||
data_buffer result;
|
||||
utils::encryption::encrypt_data(
|
||||
get_config().get_s3_config().encryption_token,
|
||||
cfg.encryption_token,
|
||||
*(utils::string::split(api_path, '/', false).end() - 1U), result);
|
||||
|
||||
meta[META_KEY] = utils::path::create_api_path(
|
||||
@ -169,7 +170,8 @@ auto s3_provider::create_path_directories(
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto encryption_token = get_config().get_s3_config().encryption_token;
|
||||
const auto &cfg = get_s3_config();
|
||||
auto encryption_token = cfg.encryption_token;
|
||||
auto is_encrypted = not encryption_token.empty();
|
||||
|
||||
auto path_parts = utils::string::split(api_path, '/', false);
|
||||
@ -179,8 +181,6 @@ auto s3_provider::create_path_directories(
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto cfg = get_config().get_s3_config();
|
||||
|
||||
std::string cur_key{'/'};
|
||||
std::string cur_path{'/'};
|
||||
for (std::size_t idx = 0U; idx < path_parts.size(); ++idx) {
|
||||
@ -242,9 +242,9 @@ auto s3_provider::create_path_directories(
|
||||
auto s3_provider::decrypt_object_name(std::string &object_name) const
|
||||
-> api_error {
|
||||
auto parts = utils::string::split(object_name, '/', false);
|
||||
for (auto &&part : parts) {
|
||||
for (auto &part : parts) {
|
||||
if (not utils::encryption::decrypt_file_name(
|
||||
get_config().get_s3_config().encryption_token, part)) {
|
||||
get_s3_config().encryption_token, part)) {
|
||||
return api_error::decryption_error;
|
||||
}
|
||||
}
|
||||
@ -258,7 +258,7 @@ auto s3_provider::get_directory_item_count(const std::string &api_path) const
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
std::string key;
|
||||
if (is_encrypted) {
|
||||
@ -334,7 +334,7 @@ auto s3_provider::get_directory_items_impl(
|
||||
const std::string &api_path, directory_item_list &list) const -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
|
||||
auto ret = api_error::success;
|
||||
@ -439,14 +439,14 @@ auto s3_provider::get_directory_items_impl(
|
||||
|
||||
auto node_list =
|
||||
doc.select_nodes("/ListBucketResult/CommonPrefixes/Prefix");
|
||||
for (auto &&node : node_list) {
|
||||
for (const auto &node : node_list) {
|
||||
add_directory_item(
|
||||
true, node.node().text().as_string(), 0U,
|
||||
[](const directory_item &) -> std::uint64_t { return 0U; });
|
||||
}
|
||||
|
||||
node_list = doc.select_nodes("/ListBucketResult/Contents");
|
||||
for (auto &&node : node_list) {
|
||||
for (const auto &node : node_list) {
|
||||
auto child_object_name = utils::path::create_api_path(
|
||||
node.node().select_node("Key").node().text().as_string());
|
||||
if (child_object_name == utils::path::create_api_path(prefix)) {
|
||||
@ -551,7 +551,7 @@ auto s3_provider::get_file_list(api_file_list &list,
|
||||
}
|
||||
|
||||
auto node_list = doc.select_nodes("/ListBucketResult/Contents");
|
||||
for (auto &&node : node_list) {
|
||||
for (const auto &node : node_list) {
|
||||
auto object_name =
|
||||
std::string{node.node().select_node("Key").node().text().as_string()};
|
||||
auto api_path{object_name};
|
||||
@ -559,8 +559,7 @@ auto s3_provider::get_file_list(api_file_list &list,
|
||||
continue;
|
||||
}
|
||||
|
||||
auto is_encrypted =
|
||||
not get_config().get_s3_config().encryption_token.empty();
|
||||
auto is_encrypted = not get_s3_config().encryption_token.empty();
|
||||
if (is_encrypted) {
|
||||
auto err = decrypt_object_name(api_path);
|
||||
if (err != api_error::success) {
|
||||
@ -610,7 +609,7 @@ auto s3_provider::get_object_info(
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
is_encrypted = not cfg.encryption_token.empty();
|
||||
|
||||
std::string key;
|
||||
@ -661,7 +660,7 @@ auto s3_provider::get_object_list(
|
||||
std::optional<std::string> token) const -> bool {
|
||||
curl::requests::http_get get{};
|
||||
get.allow_timeout = true;
|
||||
get.aws_service = "aws:amz:" + get_config().get_s3_config().region + ":s3";
|
||||
get.aws_service = "aws:amz:" + get_s3_config().region + ":s3";
|
||||
get.path = '/';
|
||||
get.query["list-type"] = "2";
|
||||
if (delimiter.has_value() && not delimiter.value().empty()) {
|
||||
@ -753,7 +752,7 @@ auto s3_provider::read_file_bytes(const std::string &api_path, std::size_t size,
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
try {
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
std::string key;
|
||||
if (is_encrypted) {
|
||||
@ -859,7 +858,7 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
|
||||
-> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
|
||||
std::string key;
|
||||
@ -901,7 +900,7 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
|
||||
auto s3_provider::remove_file_impl(const std::string &api_path) -> api_error {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
|
||||
std::string key;
|
||||
@ -949,6 +948,8 @@ auto s3_provider::rename_file(const std::string & /* from_api_path */,
|
||||
auto s3_provider::start(api_item_added_callback api_item_added,
|
||||
i_file_manager *mgr) -> bool {
|
||||
event_system::instance().raise<service_started>("s3_provider");
|
||||
s3_config_ = get_config().get_s3_config();
|
||||
get_comm().enable_s3_path_style(s3_config_.use_path_style);
|
||||
return base_provider::start(api_item_added, mgr);
|
||||
}
|
||||
|
||||
@ -972,7 +973,7 @@ auto s3_provider::upload_file_impl(const std::string &api_path,
|
||||
file_size = opt_size.value();
|
||||
}
|
||||
|
||||
auto cfg = get_config().get_s3_config();
|
||||
const auto &cfg = get_s3_config();
|
||||
auto is_encrypted = not cfg.encryption_token.empty();
|
||||
|
||||
std::string key;
|
||||
|
@ -37,8 +37,7 @@
#include "utils/utils.hpp"

namespace {
[[nodiscard]] auto get_bucket(repertory::sia_config cfg) -> std::string {
  repertory::utils::string::trim(cfg.bucket);
[[nodiscard]] auto get_bucket(const repertory::sia_config &cfg) -> std::string {
  if (cfg.bucket.empty()) {
    return "default";
  }
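The change above passes the Sia config by const reference and falls back to a "default" bucket name when none is configured. A minimal stand-in illustrating the fallback; the struct below is a placeholder, not the real `repertory::sia_config`:

#include <iostream>
#include <string>

struct fake_sia_config { // stand-in for repertory::sia_config
  std::string bucket;
};

auto get_bucket(const fake_sia_config &cfg) -> std::string {
  return cfg.bucket.empty() ? "default" : cfg.bucket;
}

int main() {
  std::cout << get_bucket({}) << ' ' << get_bucket({"backups"}) << '\n';
  return 0;
}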
@ -68,7 +67,7 @@ auto sia_provider::create_directory_impl(const std::string &api_path,
|
||||
curl::requests::http_put_file put_file{};
|
||||
put_file.allow_timeout = true;
|
||||
put_file.path = "/api/worker/objects" + api_path + "/";
|
||||
put_file.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
put_file.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
long response_code{};
|
||||
stop_type stop_requested{};
|
||||
@ -100,7 +99,7 @@ auto sia_provider::get_directory_item_count(const std::string &api_path) const
|
||||
|
||||
std::uint64_t item_count{};
|
||||
if (object_list.contains("entries")) {
|
||||
for (auto &&entry : object_list.at("entries")) {
|
||||
for (const auto &entry : object_list.at("entries")) {
|
||||
try {
|
||||
auto name = entry.at("name").get<std::string>();
|
||||
auto entry_api_path = utils::path::create_api_path(name);
|
||||
@ -137,7 +136,7 @@ auto sia_provider::get_directory_items_impl(const std::string &api_path,
|
||||
}
|
||||
|
||||
if (object_list.contains("entries")) {
|
||||
for (auto &&entry : object_list.at("entries")) {
|
||||
for (const auto &entry : object_list.at("entries")) {
|
||||
try {
|
||||
auto name = entry.at("name").get<std::string>();
|
||||
auto entry_api_path = utils::path::create_api_path(name);
|
||||
@ -228,7 +227,7 @@ auto sia_provider::get_file_list(api_file_list &list,
|
||||
}
|
||||
|
||||
if (object_list.contains("entries")) {
|
||||
for (auto &&entry : object_list.at("entries")) {
|
||||
for (const auto &entry : object_list.at("entries")) {
|
||||
auto name = entry.at("name").get<std::string>();
|
||||
auto entry_api_path = utils::path::create_api_path(name);
|
||||
|
||||
@ -289,7 +288,7 @@ auto sia_provider::get_object_info(const std::string &api_path,
|
||||
curl::requests::http_get get{};
|
||||
get.allow_timeout = true;
|
||||
get.path = "/api/bus/objects" + api_path;
|
||||
get.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
get.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
get.response_handler = [&object_info](const data_buffer &data,
|
||||
long response_code) {
|
||||
@ -330,7 +329,7 @@ auto sia_provider::get_object_list(const std::string &api_path,
|
||||
curl::requests::http_get get{};
|
||||
get.allow_timeout = true;
|
||||
get.path = "/api/bus/objects" + api_path + "/";
|
||||
get.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
get.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
get.response_handler = [&object_list](const data_buffer &data,
|
||||
long response_code) {
|
||||
@ -364,7 +363,7 @@ auto sia_provider::get_total_drive_space() const -> std::uint64_t {
|
||||
curl::requests::http_get get{};
|
||||
get.allow_timeout = true;
|
||||
get.path = "/api/autopilot/config";
|
||||
get.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
get.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
json config_data{};
|
||||
get.response_handler = [&config_data](const data_buffer &data,
|
||||
@ -465,7 +464,7 @@ auto sia_provider::is_online() const -> bool {
|
||||
curl::requests::http_get get{};
|
||||
get.allow_timeout = true;
|
||||
get.path = "/api/bus/consensus/state";
|
||||
get.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
get.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
json state_data{};
|
||||
get.response_handler = [&state_data](const data_buffer &data,
|
||||
@ -506,7 +505,7 @@ auto sia_provider::read_file_bytes(const std::string &api_path,
|
||||
|
||||
curl::requests::http_get get{};
|
||||
get.path = "/api/worker/objects" + api_path;
|
||||
get.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
get.query["bucket"] = get_bucket(get_sia_config());
|
||||
get.range = {{
|
||||
offset,
|
||||
offset + size - 1U,
|
||||
@ -561,7 +560,7 @@ auto sia_provider::remove_directory_impl(const std::string &api_path)
|
||||
curl::requests::http_delete del{};
|
||||
del.allow_timeout = true;
|
||||
del.path = "/api/bus/objects" + api_path + "/";
|
||||
del.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
del.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
long response_code{};
|
||||
stop_type stop_requested{};
|
||||
@ -587,7 +586,7 @@ auto sia_provider::remove_file_impl(const std::string &api_path) -> api_error {
|
||||
curl::requests::http_delete del{};
|
||||
del.allow_timeout = true;
|
||||
del.path = "/api/bus/objects" + api_path;
|
||||
del.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
del.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
long response_code{};
|
||||
stop_type stop_requested{};
|
||||
@ -619,7 +618,7 @@ auto sia_provider::rename_file(const std::string &from_api_path,
|
||||
{"mode", "single"},
|
||||
});
|
||||
post.path = "/api/bus/objects/rename";
|
||||
post.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
post.query["bucket"] = get_bucket(get_sia_config());
|
||||
|
||||
long response_code{};
|
||||
stop_type stop_requested{};
|
||||
@ -644,6 +643,7 @@ auto sia_provider::rename_file(const std::string &from_api_path,
|
||||
auto sia_provider::start(api_item_added_callback api_item_added,
|
||||
i_file_manager *mgr) -> bool {
|
||||
event_system::instance().raise<service_started>("sia_provider");
|
||||
sia_config_ = get_config().get_sia_config();
|
||||
return base_provider::start(api_item_added, mgr);
|
||||
}
|
||||
|
||||
@ -660,7 +660,7 @@ auto sia_provider::upload_file_impl(const std::string &api_path,
|
||||
|
||||
curl::requests::http_put_file put_file{};
|
||||
put_file.path = "/api/worker/objects" + api_path;
|
||||
put_file.query["bucket"] = get_bucket(get_config().get_sia_config());
|
||||
put_file.query["bucket"] = get_bucket(get_sia_config());
|
||||
put_file.source_path = source_path;
|
||||
|
||||
long response_code{};
|
||||
|
@ -22,10 +22,14 @@
|
||||
#include "rpc/server/full_server.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "file_manager/cache_size_mgr.hpp"
|
||||
#include "file_manager/i_file_manager.hpp"
|
||||
#include "providers/i_provider.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "types/rpc.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file.hpp"
|
||||
#include "utils/path.hpp"
|
||||
|
||||
@ -36,25 +40,20 @@ full_server::full_server(app_config &config, i_provider &provider,
|
||||
|
||||
void full_server::handle_get_directory_items(const httplib::Request &req,
|
||||
httplib::Response &res) {
|
||||
const auto api_path =
|
||||
utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
const auto list = fm_.get_directory_items(api_path);
|
||||
|
||||
json items = {{"items", std::vector<json>()}};
|
||||
for (const auto &item : list) {
|
||||
items["items"].emplace_back(item.to_json());
|
||||
}
|
||||
res.set_content(items.dump(), "application/json");
|
||||
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
res.set_content(json({
|
||||
{"items", fm_.get_directory_items(api_path)},
|
||||
})
|
||||
.dump(),
|
||||
"application/json");
|
||||
res.status = 200;
|
||||
}
|
||||
|
||||
void full_server::handle_get_drive_information(const httplib::Request & /*req*/,
|
||||
httplib::Response &res) {
|
||||
auto dir_size =
|
||||
utils::file::directory(get_config().get_cache_directory()).size();
|
||||
res.set_content(
|
||||
json({
|
||||
{"cache_space_used", dir_size},
|
||||
{"cache_space_used", cache_size_mgr::instance().size()},
|
||||
{"drive_space_total", provider_.get_total_drive_space()},
|
||||
{"drive_space_used", provider_.get_used_drive_space()},
|
||||
{"item_count", provider_.get_total_item_count()},
|
||||
@ -66,9 +65,9 @@ void full_server::handle_get_drive_information(const httplib::Request & /*req*/,
|
||||
|
||||
void full_server::handle_get_open_files(const httplib::Request & /*req*/,
|
||||
httplib::Response &res) {
|
||||
const auto list = fm_.get_open_files();
|
||||
auto list = fm_.get_open_files();
|
||||
|
||||
json open_files = {{"items", std::vector<json>()}};
|
||||
json open_files;
|
||||
for (const auto &kv : list) {
|
||||
open_files["items"].emplace_back(json({
|
||||
{"path", kv.first},
|
||||
@ -81,7 +80,10 @@ void full_server::handle_get_open_files(const httplib::Request & /*req*/,
|
||||
|
||||
void full_server::handle_get_pinned_files(const httplib::Request & /*req*/,
|
||||
httplib::Response &res) {
|
||||
res.set_content(json({{"items", provider_.get_pinned_files()}}).dump(),
|
||||
res.set_content(json({
|
||||
{"items", provider_.get_pinned_files()},
|
||||
})
|
||||
.dump(),
|
||||
"application/json");
|
||||
res.status = 200;
|
||||
}
|
||||
@ -90,11 +92,10 @@ void full_server::handle_get_pinned_status(const httplib::Request &req,
|
||||
httplib::Response &res) {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
const auto api_path =
|
||||
utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
|
||||
std::string pinned;
|
||||
const auto result = provider_.get_item_meta(api_path, META_PINNED, pinned);
|
||||
auto result = provider_.get_item_meta(api_path, META_PINNED, pinned);
|
||||
if (result != api_error::success) {
|
||||
utils::error::raise_api_path_error(function_name, api_path, result,
|
||||
"failed to get pinned status");
|
||||
@ -103,8 +104,10 @@ void full_server::handle_get_pinned_status(const httplib::Request &req,
|
||||
}
|
||||
|
||||
res.set_content(
|
||||
json(
|
||||
{{"pinned", pinned.empty() ? false : utils::string::to_bool(pinned)}})
|
||||
json({
|
||||
{"pinned",
|
||||
pinned.empty() ? false : utils::string::to_bool(pinned)},
|
||||
})
|
||||
.dump(),
|
||||
"application/json");
|
||||
res.status = 200;
|
||||
@ -114,8 +117,7 @@ void full_server::handle_pin_file(const httplib::Request &req,
|
||||
httplib::Response &res) {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
const auto api_path =
|
||||
utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
|
||||
bool exists{};
|
||||
auto result = provider_.is_file(api_path, exists);
|
||||
@ -143,8 +145,7 @@ void full_server::handle_unpin_file(const httplib::Request &req,
|
||||
httplib::Response &res) {
|
||||
REPERTORY_USES_FUNCTION_NAME();
|
||||
|
||||
const auto api_path =
|
||||
utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
|
||||
|
||||
bool exists{};
|
||||
auto result = provider_.is_file(api_path, exists);
|
||||
|
@ -22,6 +22,8 @@
|
||||
#include "rpc/server/server.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "utils/base64.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
|
||||
|
@ -26,8 +26,7 @@

namespace repertory {
auto database_type_from_string(std::string type,
                               const database_type &default_type)
    -> database_type {
                               database_type default_type) -> database_type {
  type = utils::string::to_lower(utils::string::trim(type));
  if (type == "rocksdb") {
    return database_type::rocksdb;
@ -52,15 +51,14 @@ auto database_type_to_string(const database_type &type) -> std::string {
}

auto download_type_from_string(std::string type,
                               const download_type &default_type)
    -> download_type {
                               download_type default_type) -> download_type {
  type = utils::string::to_lower(utils::string::trim(type));
  if (type == "direct") {
    return download_type::direct;
  if (type == "default") {
    return download_type::default_;
  }

  if (type == "fallback") {
    return download_type::fallback;
  if (type == "direct") {
    return download_type::direct;
  }

  if (type == "ring_buffer") {
@ -72,14 +70,14 @@ auto download_type_from_string(std::string type,

auto download_type_to_string(const download_type &type) -> std::string {
  switch (type) {
  case download_type::default_:
    return "default";
  case download_type::direct:
    return "direct";
  case download_type::fallback:
    return "fallback";
  case download_type::ring_buffer:
    return "ring_buffer";
  default:
    return "fallback";
    return "default";
  }
}

@ -89,6 +87,7 @@ static const std::unordered_map<api_error, std::string> LOOKUP = {
    {api_error::bad_address, "bad_address"},
    {api_error::buffer_overflow, "buffer_overflow"},
    {api_error::buffer_too_small, "buffer_too_small"},
    {api_error::cache_not_initialized, "cache_not_initialized"},
    {api_error::comm_error, "comm_error"},
    {api_error::decryption_error, "decryption_error"},
    {api_error::directory_end_of_files, "directory_end_of_files"},
@ -107,6 +106,7 @@ static const std::unordered_map<api_error, std::string> LOOKUP = {
    {api_error::invalid_handle, "invalid_handle"},
    {api_error::invalid_operation, "invalid_operation"},
    {api_error::invalid_ring_buffer_multiple, "invalid_ring_buffer_multiple"},
    {api_error::invalid_ring_buffer_position, "invalid_ring_buffer_position"},
    {api_error::invalid_ring_buffer_size, "invalid_ring_buffer_size"},
    {api_error::invalid_version, "invalid_version"},
    {api_error::item_exists, "item_exists"},
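The hunks above add a `default_` download type, reorder the string parsing, and make unknown enum values stringify to "default". A hedged sketch of how such helpers round-trip; the enum and functions are re-declared locally for illustration (trimming/lowercasing omitted), not taken from the repertory headers:

#include <iostream>
#include <string>

// Local stand-ins mirroring the behavior shown in the hunks above.
enum class download_type { default_, direct, fallback, ring_buffer };

auto download_type_from_string(std::string type, download_type default_type)
    -> download_type {
  if (type == "default") return download_type::default_;
  if (type == "direct") return download_type::direct;
  if (type == "fallback") return download_type::fallback;
  if (type == "ring_buffer") return download_type::ring_buffer;
  return default_type; // unknown strings keep the caller's default
}

auto download_type_to_string(download_type type) -> std::string {
  switch (type) {
  case download_type::direct: return "direct";
  case download_type::fallback: return "fallback";
  case download_type::ring_buffer: return "ring_buffer";
  case download_type::default_:
  default: return "default";
  }
}

int main() {
  auto type = download_type_from_string("bogus", download_type::fallback);
  std::cout << download_type_to_string(type) << '\n'; // prints "fallback"
  return 0;
}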
@ -45,15 +45,15 @@ void get_api_authentication_data(std::string &user, std::string &password,
|
||||
|
||||
if (success) {
|
||||
if (user.empty() && password.empty()) {
|
||||
password = data["ApiAuth"].get<std::string>();
|
||||
user = data["ApiUser"].get<std::string>();
|
||||
password = data[JSON_API_AUTH].get<std::string>();
|
||||
user = data[JSON_API_USER].get<std::string>();
|
||||
}
|
||||
port = data["ApiPort"].get<std::uint16_t>();
|
||||
port = data[JSON_API_PORT].get<std::uint16_t>();
|
||||
}
|
||||
}
|
||||
|
||||
[[nodiscard]] auto
|
||||
get_provider_type_from_args(std::vector<const char *> args) -> provider_type {
|
||||
[[nodiscard]] auto get_provider_type_from_args(std::vector<const char *> args)
|
||||
-> provider_type {
|
||||
if (has_option(args, options::s3_option)) {
|
||||
return provider_type::s3;
|
||||
}
|
||||
@ -67,8 +67,8 @@ get_provider_type_from_args(std::vector<const char *> args) -> provider_type {
|
||||
return provider_type::sia;
|
||||
}
|
||||
|
||||
auto has_option(std::vector<const char *> args,
|
||||
const std::string &option_name) -> bool {
|
||||
auto has_option(std::vector<const char *> args, const std::string &option_name)
|
||||
-> bool {
|
||||
return std::find_if(args.begin(), args.end(),
|
||||
[&option_name](const auto &value) -> bool {
|
||||
return option_name == value;
|
||||
@ -80,8 +80,8 @@ auto has_option(std::vector<const char *> args, const option &opt) -> bool {
|
||||
}
|
||||
|
||||
auto parse_option(std::vector<const char *> args,
|
||||
const std::string &option_name,
|
||||
std::uint8_t count) -> std::vector<std::string> {
|
||||
const std::string &option_name, std::uint8_t count)
|
||||
-> std::vector<std::string> {
|
||||
std::vector<std::string> ret;
|
||||
auto found{false};
|
||||
for (std::size_t i = 0U; not found && (i < args.size()); i++) {
|
||||
@ -119,9 +119,10 @@ auto parse_string_option(std::vector<const char *> args, const option &opt,
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto parse_drive_options(
|
||||
std::vector<const char *> args, [[maybe_unused]] provider_type &prov,
|
||||
[[maybe_unused]] std::string &data_directory) -> std::vector<std::string> {
|
||||
auto parse_drive_options(std::vector<const char *> args,
|
||||
[[maybe_unused]] provider_type &prov,
|
||||
[[maybe_unused]] std::string &data_directory)
|
||||
-> std::vector<std::string> {
|
||||
// Strip out options from command line
|
||||
const auto &option_list = options::option_list;
|
||||
std::vector<std::string> drive_args;
|
||||
|
@ -22,6 +22,8 @@
|
||||
#include "utils/polling.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "utils/tasks.hpp"
|
||||
|
||||
namespace repertory {
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "utils/tasks.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
tasks tasks::instance_;
|
||||
|
@ -52,6 +52,8 @@ auto from_api_error(const api_error &err) -> int {
|
||||
return -EEXIST;
|
||||
case api_error::file_in_use:
|
||||
return -EBUSY;
|
||||
case api_error::invalid_handle:
|
||||
return -EBADF;
|
||||
case api_error::invalid_operation:
|
||||
return -EINVAL;
|
||||
case api_error::item_not_found:
|
||||
|
@ -22,7 +22,11 @@
#include "utils/utils.hpp"

#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

namespace repertory::utils {
@ -44,6 +48,42 @@ void calculate_allocation_size(bool directory, std::uint64_t file_size,
  allocation_meta_size = std::to_string(allocation_size);
}

auto create_rocksdb(
    const app_config &cfg, const std::string &name,
    const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
    std::vector<rocksdb::ColumnFamilyHandle *> &handles,
    bool clear) -> std::unique_ptr<rocksdb::TransactionDB> {
  REPERTORY_USES_FUNCTION_NAME();

  auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
  if (not utils::file::directory{db_dir}.create_directory()) {
    throw startup_exception(
        fmt::format("failed to create db directory|", db_dir));
  }

  auto path = utils::path::combine(db_dir, {name});
  if (clear && not utils::file::directory{path}.remove_recursively()) {
    utils::error::raise_error(function_name,
                              "failed to remove " + name + " db|" + path);
  }

  rocksdb::Options options{};
  options.create_if_missing = true;
  options.create_missing_column_families = true;
  options.db_log_dir = cfg.get_log_directory();
  options.keep_log_file_num = 10;

  rocksdb::TransactionDB *ptr{};
  auto status = rocksdb::TransactionDB::Open(
      options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
  if (not status.ok()) {
    throw startup_exception(fmt::format("failed to open rocksdb|path{}|error{}",
                                        path, status.ToString()));
  }

  return std::unique_ptr<rocksdb::TransactionDB>(ptr);
}

auto create_volume_label(const provider_type &prov) -> std::string {
  return "repertory_" + app_config::get_provider_name(prov);
}
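Given the helper above, a caller would pass the column families it needs and receive a transactional database handle. A hedged usage sketch; the family name "meta", the database name, and the assumption that `create_rocksdb` is declared in `utils/utils.hpp` are illustrative, not confirmed by this diff:

// Sketch only: assumes the repertory headers and RocksDB are on the include path.
#include "app_config.hpp"
#include "utils/utils.hpp"

#include "rocksdb/utilities/transaction_db.h"

#include <vector>

void open_example_db(repertory::app_config &cfg) {
  std::vector<rocksdb::ColumnFamilyDescriptor> families{
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions()},
      {"meta", rocksdb::ColumnFamilyOptions()}, // assumed family name
  };
  std::vector<rocksdb::ColumnFamilyHandle *> handles;

  // Opens (or creates) <data_dir>/db/example_db and returns a TransactionDB.
  auto db = repertory::utils::create_rocksdb(cfg, "example_db", families,
                                             handles, /* clear= */ false);

  // ... use db->BeginTransaction(...) and handles[1] for the "meta" family ...
}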
@ -71,11 +71,10 @@ mount(std::vector<const char *> args, std::string data_directory,
  if (generate_config) {
    app_config config(prov, data_directory);
    if (prov == provider_type::remote) {
      config.set_enable_remote_mount(false);
      config.set_is_remote_mount(true);
      config.set_remote_host_name_or_ip(remote_host);
      config.set_remote_port(remote_port);
      config.save();
      auto cfg = config.get_remote_config();
      cfg.host_name_or_ip = remote_host;
      cfg.api_port = remote_port;
      config.set_remote_config(cfg);
    } else if (prov == provider_type::sia &&
               config.get_sia_config().bucket.empty()) {
      config.set_value_by_name("SiaConfig.Bucket", unique_id);
@ -128,12 +127,12 @@ mount(std::vector<const char *> args, std::string data_directory,
  if (prov == provider_type::remote) {
    std::uint16_t port{0U};
    if (utils::get_next_available_port(config.get_api_port(), port)) {
      config.set_remote_host_name_or_ip(remote_host);
      config.set_remote_port(remote_port);
      auto cfg = config.get_remote_config();
      cfg.host_name_or_ip = remote_host;
      cfg.api_port = remote_port;
      config.set_remote_config(cfg);
      config.set_api_port(port);
      config.set_is_remote_mount(true);
      config.set_enable_remote_mount(false);
      config.save();

      try {
        remote_drive drive(
            config,
@ -161,8 +160,6 @@ mount(std::vector<const char *> args, std::string data_directory,
      config.set_value_by_name("SiaConfig.Bucket", unique_id);
    }

    config.set_is_remote_mount(false);

    try {
      auto provider = create_provider(prov, config);
      repertory_drive drive(config, lock, *provider);
@ -58,8 +58,7 @@ protected:
  }
};

// using file_db_types = ::testing::Types<sqlite_file_db, rdb_file_db>;
using file_db_types = ::testing::Types<sqlite_file_db>;
using file_db_types = ::testing::Types<rdb_file_db, sqlite_file_db>;

template <typename db_t> std::unique_ptr<app_config> file_db_test<db_t>::config;
|
@ -58,7 +58,7 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
using file_mgr_db_types = ::testing::Types<sqlite_file_mgr_db, rdb_file_mgr_db>;
|
||||
using file_mgr_db_types = ::testing::Types<rdb_file_mgr_db, sqlite_file_mgr_db>;
|
||||
|
||||
template <typename db_t>
|
||||
std::unique_ptr<app_config> file_mgr_db_test<db_t>::config;
|
||||
|
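The two hunks above flip the typed-test parameter lists so the RocksDB-backed databases are exercised alongside SQLite. For reference, GoogleTest binds such a type list to a fixture with `TYPED_TEST_SUITE`; a hedged sketch of the pattern, where the fixture and the two database stand-ins are placeholders rather than the project's real classes:

#include "gtest/gtest.h"

// Placeholder types standing in for rdb_file_db / sqlite_file_db.
struct fake_rdb_db { static constexpr const char *name = "rocksdb"; };
struct fake_sqlite_db { static constexpr const char *name = "sqlite"; };

template <typename db_t> class file_db_test_example : public ::testing::Test {};

using example_db_types = ::testing::Types<fake_rdb_db, fake_sqlite_db>;
TYPED_TEST_SUITE(file_db_test_example, example_db_types);

// Each TYPED_TEST below runs once per type in example_db_types.
TYPED_TEST(file_db_test_example, reports_backend_name) {
  EXPECT_NE(TypeParam::name, nullptr);
}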
@ -109,8 +109,11 @@ protected:
|
||||
config->set_enable_drive_events(true);
|
||||
config->set_event_level(event_level::trace);
|
||||
config->set_s3_config(src_cfg.get_s3_config());
|
||||
config->set_enable_remote_mount(true);
|
||||
config->set_remote_port(30000U);
|
||||
|
||||
auto r_cfg = config->get_remote_mount();
|
||||
r_cfg.enable = true;
|
||||
r_cfg.api_port = 30000U;
|
||||
config->set_remote_mount(r_cfg);
|
||||
}
|
||||
|
||||
drive_args = std::vector<std::string>({
|
||||
@ -152,8 +155,11 @@ protected:
|
||||
config->set_event_level(event_level::trace);
|
||||
config->set_host_config(src_cfg.get_host_config());
|
||||
config->set_sia_config(src_cfg.get_sia_config());
|
||||
config->set_enable_remote_mount(true);
|
||||
config->set_remote_port(30000U);
|
||||
|
||||
auto r_cfg = config->get_remote_mount();
|
||||
r_cfg.enable = true;
|
||||
r_cfg.api_port = 30000U;
|
||||
config->set_remote_mount(r_cfg);
|
||||
}
|
||||
|
||||
drive_args = std::vector<std::string>({
|
||||
|
@ -99,8 +99,11 @@ protected:
|
||||
config->set_enable_drive_events(true);
|
||||
config->set_event_level(event_level::trace);
|
||||
config->set_s3_config(src_cfg.get_s3_config());
|
||||
config->set_enable_remote_mount(true);
|
||||
config->set_remote_port(30000U);
|
||||
|
||||
auto r_cfg = config->get_remote_mount();
|
||||
r_cfg.enable = true;
|
||||
r_cfg.api_port = 30000U;
|
||||
config->set_remote_mount(r_cfg);
|
||||
}
|
||||
|
||||
drive_args = std::vector<std::string>({
|
||||
@ -138,8 +141,11 @@ protected:
|
||||
config->set_event_level(event_level::trace);
|
||||
config->set_host_config(src_cfg.get_host_config());
|
||||
config->set_sia_config(src_cfg.get_sia_config());
|
||||
config->set_enable_remote_mount(true);
|
||||
config->set_remote_port(30000U);
|
||||
|
||||
auto r_cfg = config->get_remote_mount();
|
||||
r_cfg.enable = true;
|
||||
r_cfg.api_port = 30000U;
|
||||
config->set_remote_mount(r_cfg);
|
||||
}
|
||||
|
||||
drive_args = std::vector<std::string>({
|
||||
|
@ -29,6 +29,13 @@
|
||||
namespace repertory {
|
||||
class mock_open_file : public virtual i_closeable_open_file {
|
||||
public:
|
||||
MOCK_METHOD(void, add, (std::uint64_t handle, open_file_data ofd),
|
||||
(override));
|
||||
|
||||
MOCK_METHOD(bool, can_close, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, close, (), (override));
|
||||
|
||||
MOCK_METHOD(std::string, get_api_path, (), (const, override));
|
||||
|
||||
MOCK_METHOD(std::size_t, get_chunk_size, (), (const, override));
|
||||
@ -47,14 +54,30 @@ public:
|
||||
|
||||
MOCK_METHOD(boost::dynamic_bitset<>, get_read_state, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, get_allocated, (), (const, override));
|
||||
|
||||
MOCK_METHOD(std::vector<std::uint64_t>, get_handles, (), (const, override));
|
||||
|
||||
MOCK_METHOD((std::map<std::uint64_t, open_file_data> &), get_open_data, (),
|
||||
(override));
|
||||
|
||||
MOCK_METHOD((const std::map<std::uint64_t, open_file_data> &), get_open_data,
|
||||
(), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, get_read_state, (std::size_t chunk), (const, override));
|
||||
|
||||
MOCK_METHOD(std::string, get_source_path, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, has_handle, (std::uint64_t handle), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_complete, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_directory, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_modified, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_write_supported, (), (const, override));
|
||||
|
||||
MOCK_METHOD(api_error, native_operation, (native_operation_callback callback),
|
||||
(override));
|
||||
|
||||
@ -67,6 +90,10 @@ public:
|
||||
data_buffer &data),
|
||||
(override));
|
||||
|
||||
MOCK_METHOD(void, remove, (std::uint64_t handle), (override));
|
||||
|
||||
MOCK_METHOD(void, remove_all, (), (override));
|
||||
|
||||
MOCK_METHOD(api_error, resize, (std::uint64_t new_file_size), (override));
|
||||
|
||||
MOCK_METHOD(void, set_api_path, (const std::string &api_path), (override));
|
||||
@ -75,31 +102,6 @@ public:
|
||||
(std::uint64_t write_offset, const data_buffer &data,
|
||||
std::size_t &bytes_written),
|
||||
(override));
|
||||
|
||||
MOCK_METHOD(void, add, (std::uint64_t handle, open_file_data ofd),
|
||||
(override));
|
||||
|
||||
MOCK_METHOD(bool, can_close, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, close, (), (override));
|
||||
|
||||
MOCK_METHOD(std::vector<std::uint64_t>, get_handles, (), (const, override));
|
||||
|
||||
MOCK_METHOD((std::map<std::uint64_t, open_file_data> &), get_open_data, (),
|
||||
(override));
|
||||
|
||||
MOCK_METHOD((const std::map<std::uint64_t, open_file_data> &), get_open_data,
|
||||
(), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_complete, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_modified, (), (const, override));
|
||||
|
||||
MOCK_METHOD(bool, is_write_supported, (), (const, override));
|
||||
|
||||
MOCK_METHOD(void, remove, (std::uint64_t handle), (override));
|
||||
|
||||
MOCK_METHOD(void, remove_all, (), (override));
|
||||
};
|
||||
} // namespace repertory
|
||||
|
||||
|
68 repertory/repertory_test/src/atomic_test.cpp Normal file
@ -0,0 +1,68 @@
/*
  Copyright <2018-2024> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all
  copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
*/
#include "test_common.hpp"

#include "types/repertory.hpp"

namespace repertory {
TEST(atomic, atomic_primitive) {
  atomic<std::uint16_t> value;
  value = 5U;
  EXPECT_EQ(5U, static_cast<std::uint16_t>(value));
  EXPECT_EQ(5U, value.load());

  value.store(6U);
  EXPECT_EQ(6U, static_cast<std::uint16_t>(value));
  EXPECT_EQ(6U, value.load());
}

TEST(atomic, atomic_primitive_equality) {
  atomic<std::uint16_t> value1{5U};
  atomic<std::uint16_t> value2{5U};
  EXPECT_EQ(value1, value1);
  EXPECT_EQ(value2, value2);
  EXPECT_EQ(value1, value2);
  EXPECT_EQ(static_cast<std::uint16_t>(value1), 5U);
  EXPECT_EQ(static_cast<std::uint16_t>(value2), 5U);
}

TEST(atomic, atomic_primitive_inequality) {
  atomic<std::uint16_t> value1{5U};
  atomic<std::uint16_t> value2{6U};
  EXPECT_NE(value1, value2);
  EXPECT_NE(static_cast<std::uint16_t>(value1), 6U);
  EXPECT_NE(static_cast<std::uint16_t>(value2), 5U);
}

TEST(atomic, atomic_struct) {
  atomic<encrypt_config> value{
      encrypt_config{
          .encryption_token = "token",
          .path = "path",
      },
  };

  auto data = static_cast<encrypt_config>(value);
  EXPECT_STREQ("token", data.encryption_token.c_str());
  EXPECT_STREQ("path", data.path.c_str());
}
} // namespace repertory
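The new atomic_test.cpp above exercises a project-provided atomic<T> wrapper that supports load(), store(), assignment, explicit conversion, and equality for both primitives and structs such as encrypt_config. Below is a minimal sketch of a wrapper that satisfies exactly those operations, assuming a mutex-guarded implementation; the real type declared in types/repertory.hpp may differ in locking strategy and member set.

// Hedged sketch only: a mutex-guarded atomic<T> matching the operations used
// by atomic_test.cpp; not the project's actual implementation.
#include <mutex>
#include <utility>

template <typename data_t> class atomic_sketch final {
public:
  atomic_sketch() = default;
  atomic_sketch(data_t data) : data_(std::move(data)) {}

  [[nodiscard]] auto load() const -> data_t {
    std::lock_guard lock(mtx_);
    return data_;
  }

  auto store(data_t data) -> data_t {
    std::lock_guard lock(mtx_);
    data_ = std::move(data);
    return data_;
  }

  auto operator=(const data_t &data) -> atomic_sketch & {
    store(data);
    return *this;
  }

  explicit operator data_t() const { return load(); }

  auto operator==(const atomic_sketch &other) const -> bool {
    return load() == other.load();
  }

  auto operator!=(const atomic_sketch &other) const -> bool {
    return not(*this == other);
  }

private:
  data_t data_{};
  mutable std::mutex mtx_;
};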
@ -1,5 +1,5 @@
/*
  Copyright <2018-2024> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
@ -30,178 +30,34 @@
namespace repertory {
class config_test : public ::testing::Test {
public:
  static console_consumer cs;
  console_consumer cs;

  std::string s3_directory{
      utils::path::combine(test::get_test_output_dir(), {"config_test", "s3"})};
  static std::atomic<std::uint64_t> idx;

  std::string sia_directory{utils::path::combine(test::get_test_output_dir(),
                                                 {"config_test", "sia"})};
  std::string s3_directory;
  std::string sia_directory;

  void SetUp() override {
    s3_directory = utils::path::combine(test::get_test_output_dir(),
                                        {
                                            "config_test",
                                            "s3",
                                            std::to_string(++idx),
                                        });

    sia_directory = utils::path::combine(test::get_test_output_dir(),
                                         {
                                             "config_test",
                                             "sia",
                                             std::to_string(++idx),
                                         });
    event_system::instance().start();
    ASSERT_TRUE(
        utils::file::directory(
            utils::path::combine(test::get_test_output_dir(), {"config_test"}))
            .remove_recursively());
  }

  void TearDown() override {
    ASSERT_TRUE(
        utils::file::directory(
            utils::path::combine(test::get_test_output_dir(), {"config_test"}))
            .remove_recursively());
    event_system::instance().stop();
  }
  void TearDown() override { event_system::instance().stop(); }
};
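The fixture refactor above stops wiping a shared "config_test" directory in SetUp/TearDown and instead generates a fresh, numbered output directory per test from a static atomic counter, so tests cannot disturb each other's on-disk state. A small illustration of that pattern follows; the helper name and counter are hypothetical, not repertory APIs.

// Illustrative sketch of the per-test unique-directory pattern used above.
#include <atomic>
#include <cstdint>
#include <filesystem>
#include <string>

inline std::atomic<std::uint64_t> test_dir_idx{0U};

inline auto make_unique_test_dir(const std::filesystem::path &root,
                                 const std::string &name)
    -> std::filesystem::path {
  // Each call yields a distinct directory, e.g. <root>/config_test/s3/1
  auto dir = root / "config_test" / name / std::to_string(++test_dir_idx);
  std::filesystem::create_directories(dir);
  return dir;
}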
const auto DEFAULT_SIA_CONFIG = "{\n"
|
||||
" \"ApiAuth\": \"\",\n"
|
||||
" \"ApiPort\": 10000,\n"
|
||||
" \"ApiUser\": \"repertory\",\n"
|
||||
" \"ChunkDownloaderTimeoutSeconds\": 30,\n"
|
||||
" \"DatabaseType\": \"rocksdb\",\n"
|
||||
" \"EnableChunkDownloaderTimeout\": true,\n"
|
||||
" \"EnableCommDurationEvents\": false,\n"
|
||||
" \"EnableDriveEvents\": false,\n"
|
||||
" \"EnableMaxCacheSize\": false,\n"
|
||||
#if defined(_WIN32)
|
||||
" \"EnableMountManager\": false,\n"
|
||||
#endif
|
||||
" \"EventLevel\": \"info\",\n"
|
||||
" \"EvictionDelayMinutes\": 10,\n"
|
||||
" \"EvictionUsesAccessedTime\": false,\n"
|
||||
" \"HighFreqIntervalSeconds\": 30,\n"
|
||||
" \"HostConfig\": {\n"
|
||||
" \"AgentString\": \"Sia-Agent\",\n"
|
||||
" \"ApiPassword\": \"\",\n"
|
||||
" \"ApiPort\": 9980,\n"
|
||||
" \"HostNameOrIp\": \"localhost\",\n"
|
||||
" \"TimeoutMs\": 60000\n"
|
||||
" },\n"
|
||||
" \"LowFreqIntervalSeconds\": 3600,\n"
|
||||
" \"MaxCacheSizeBytes\": 21474836480,\n"
|
||||
" \"MaxUploadCount\": 5,\n"
|
||||
" \"MedFreqIntervalSeconds\": 120,\n"
|
||||
" \"OnlineCheckRetrySeconds\": 60,\n"
|
||||
" \"OrphanedFileRetentionDays\": 15,\n"
|
||||
" \"PreferredDownloadType\": \"fallback\",\n"
|
||||
" \"ReadAheadCount\": 4,\n"
|
||||
" \"RemoteMount\": {\n"
|
||||
" \"EnableRemoteMount\": false,\n"
|
||||
" \"IsRemoteMount\": false,\n"
|
||||
" \"RemoteClientPoolSize\": 10,\n"
|
||||
" \"RemoteHostNameOrIp\": \"\",\n"
|
||||
" \"RemoteMaxConnections\": 20,\n"
|
||||
" \"RemotePort\": 20000,\n"
|
||||
" \"RemoteReceiveTimeoutSeconds\": 120,\n"
|
||||
" \"RemoteSendTimeoutSeconds\": 30,\n"
|
||||
" \"RemoteToken\": \"\"\n"
|
||||
" },\n"
|
||||
" \"RetryReadCount\": 6,\n"
|
||||
" \"RingBufferFileSize\": 512,\n"
|
||||
" \"SiaConfig\": {\n"
|
||||
" \"Bucket\": \"\"\n"
|
||||
" },\n"
|
||||
" \"TaskWaitMillis\": 100,\n"
|
||||
" \"Version\": " +
|
||||
std::to_string(REPERTORY_CONFIG_VERSION) +
|
||||
"\n"
|
||||
"}";
|
||||
|
||||
const auto DEFAULT_S3_CONFIG = "{\n"
|
||||
" \"ApiAuth\": \"\",\n"
|
||||
" \"ApiPort\": 10100,\n"
|
||||
" \"ApiUser\": \"repertory\",\n"
|
||||
" \"ChunkDownloaderTimeoutSeconds\": 30,\n"
|
||||
" \"DatabaseType\": \"rocksdb\",\n"
|
||||
" \"EnableChunkDownloaderTimeout\": true,\n"
|
||||
" \"EnableCommDurationEvents\": false,\n"
|
||||
" \"EnableDriveEvents\": false,\n"
|
||||
" \"EnableMaxCacheSize\": false,\n"
|
||||
#if defined(_WIN32)
|
||||
" \"EnableMountManager\": false,\n"
|
||||
#endif
|
||||
" \"EventLevel\": \"info\",\n"
|
||||
" \"EvictionDelayMinutes\": 10,\n"
|
||||
" \"EvictionUsesAccessedTime\": false,\n"
|
||||
" \"HighFreqIntervalSeconds\": 30,\n"
|
||||
" \"LowFreqIntervalSeconds\": 3600,\n"
|
||||
" \"MaxCacheSizeBytes\": 21474836480,\n"
|
||||
" \"MaxUploadCount\": 5,\n"
|
||||
" \"MedFreqIntervalSeconds\": 120,\n"
|
||||
" \"OnlineCheckRetrySeconds\": 60,\n"
|
||||
" \"OrphanedFileRetentionDays\": 15,\n"
|
||||
" \"PreferredDownloadType\": \"fallback\",\n"
|
||||
" \"ReadAheadCount\": 4,\n"
|
||||
" \"RemoteMount\": {\n"
|
||||
" \"EnableRemoteMount\": false,\n"
|
||||
" \"IsRemoteMount\": false,\n"
|
||||
" \"RemoteClientPoolSize\": 10,\n"
|
||||
" \"RemoteHostNameOrIp\": \"\",\n"
|
||||
" \"RemoteMaxConnections\": 20,\n"
|
||||
" \"RemotePort\": 20100,\n"
|
||||
" \"RemoteReceiveTimeoutSeconds\": 120,\n"
|
||||
" \"RemoteSendTimeoutSeconds\": 30,\n"
|
||||
" \"RemoteToken\": \"\"\n"
|
||||
" },\n"
|
||||
" \"RetryReadCount\": 6,\n"
|
||||
" \"RingBufferFileSize\": 512,\n"
|
||||
" \"S3Config\": {\n"
|
||||
" \"AccessKey\": \"\",\n"
|
||||
" \"Bucket\": \"\",\n"
|
||||
" \"EncryptionToken\": \"\",\n"
|
||||
" \"Region\": \"any\",\n"
|
||||
" \"SecretKey\": \"\",\n"
|
||||
" \"TimeoutMs\": 60000,\n"
|
||||
" \"URL\": \"\",\n"
|
||||
" \"UsePathStyle\": false,\n"
|
||||
" \"UseRegionInURL\": false\n"
|
||||
" },\n"
|
||||
" \"TaskWaitMillis\": 100,\n"
|
||||
" \"Version\": " +
|
||||
std::to_string(REPERTORY_CONFIG_VERSION) +
|
||||
"\n"
|
||||
"}";
|
||||
|
||||
TEST_F(config_test, sia_default_settings) {
  const auto config_file = utils::path::combine(sia_directory, {"config.json"});

  for (int i = 0; i < 2; i++) {
    app_config config(provider_type::sia, sia_directory);
    config.set_remote_token("");
    config.set_api_auth("");
    EXPECT_TRUE(config.set_value_by_name("HostConfig.ApiPassword", "").empty());
    json data;
    EXPECT_TRUE(utils::file::read_json_file(config_file, data));
    EXPECT_STREQ(DEFAULT_SIA_CONFIG.c_str(), data.dump(2).c_str());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(sia_directory, {"cache"}))
            .exists());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(sia_directory, {"logs"}))
            .exists());
  }
}

TEST_F(config_test, s3_default_settings) {
  const auto config_file = utils::path::combine(s3_directory, {"config.json"});

  for (int i = 0; i < 2; i++) {
    app_config config(provider_type::s3, s3_directory);
    config.set_remote_token("");
    config.set_api_auth("");
    json data;
    EXPECT_TRUE(utils::file::read_json_file(config_file, data));
    EXPECT_STREQ(DEFAULT_S3_CONFIG.c_str(), data.dump(2).c_str());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(s3_directory, {"cache"}))
            .exists());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(s3_directory, {"logs"}))
            .exists());
  }
}
std::atomic<std::uint64_t> config_test::idx{0U};

TEST_F(config_test, api_path) {
  std::string original_value;
@ -254,45 +110,31 @@ TEST_F(config_test, api_user) {
  }
}

TEST_F(config_test, chunk_downloader_timeout_secs) {
TEST_F(config_test, download_timeout_secs) {
  std::uint8_t original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_chunk_downloader_timeout_secs();
    config.set_chunk_downloader_timeout_secs(original_value + 5);
    EXPECT_EQ(original_value + 5, config.get_chunk_downloader_timeout_secs());
    original_value = config.get_download_timeout_secs();
    config.set_download_timeout_secs(original_value + 5);
    EXPECT_EQ(original_value + 5, config.get_download_timeout_secs());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(original_value + 5, config.get_chunk_downloader_timeout_secs());
    EXPECT_EQ(original_value + 5, config.get_download_timeout_secs());
  }
}

TEST_F(config_test, enable_chunk_download_timeout) {
TEST_F(config_test, enable_download_timeout) {
  bool original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_enable_chunk_download_timeout();
    config.set_enable_chunk_downloader_timeout(not original_value);
    EXPECT_EQ(not original_value, config.get_enable_chunk_download_timeout());
    original_value = config.get_enable_download_timeout();
    config.set_enable_download_timeout(not original_value);
    EXPECT_EQ(not original_value, config.get_enable_download_timeout());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(not original_value, config.get_enable_chunk_download_timeout());
  }
}

TEST_F(config_test, enable_comm_duration_events) {
  bool original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_enable_comm_duration_events();
    config.set_enable_comm_duration_events(not original_value);
    EXPECT_EQ(not original_value, config.get_enable_comm_duration_events());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(not original_value, config.get_enable_comm_duration_events());
    EXPECT_EQ(not original_value, config.get_enable_download_timeout());
  }
}
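The hunks above rename the chunk-downloader timeout settings to plain download-timeout accessors (get/set_download_timeout_secs, get/set_enable_download_timeout). A minimal hedged sketch of what that accessor pair might look like is shown below; the member names match the tests, but the storage, JSON keys, and any minimum-value clamping are assumptions about app_config, not its actual code.

// Hedged sketch of the renamed accessors exercised by the tests above.
#include <cstdint>

class app_config_sketch {
public:
  [[nodiscard]] auto get_download_timeout_secs() const -> std::uint8_t {
    return download_timeout_secs_;
  }

  void set_download_timeout_secs(std::uint8_t value) {
    download_timeout_secs_ = value; // real code presumably persists to config.json
  }

  [[nodiscard]] auto get_enable_download_timeout() const -> bool {
    return enable_download_timeout_;
  }

  void set_enable_download_timeout(bool value) {
    enable_download_timeout_ = value;
  }

private:
  std::uint8_t download_timeout_secs_{30U};
  bool enable_download_timeout_{true};
};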
@ -310,19 +152,6 @@ TEST_F(config_test, enable_drive_events) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(config_test, enable_max_cache_size) {
|
||||
bool original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_enable_max_cache_size();
|
||||
config.set_enable_max_cache_size(not original_value);
|
||||
EXPECT_EQ(not original_value, config.get_enable_max_cache_size());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(not original_value, config.get_enable_max_cache_size());
|
||||
}
|
||||
}
|
||||
#if defined(_WIN32)
|
||||
TEST_F(config_test, enable_mount_manager) {
|
||||
bool original_value;
|
||||
@ -504,20 +333,6 @@ TEST_F(config_test, orphaned_file_retention_days_maximum_value) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(config_test, read_ahead_count) {
|
||||
std::uint8_t original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_read_ahead_count();
|
||||
config.set_read_ahead_count(original_value + 5);
|
||||
EXPECT_EQ(original_value + 5, config.get_read_ahead_count());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(original_value + 5, config.get_read_ahead_count());
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(config_test, get_cache_directory) {
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
@ -654,167 +469,170 @@ TEST_F(config_test, get_version) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(config_test, enable_remote_mount) {
|
||||
bool original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_enable_remote_mount();
|
||||
config.set_enable_remote_mount(not original_value);
|
||||
EXPECT_EQ(not original_value, config.get_enable_remote_mount());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(not original_value, config.get_enable_remote_mount());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, enable_remote_mount) {
|
||||
// bool original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_enable_remote_mount();
|
||||
// config.set_enable_remote_mount(not original_value);
|
||||
// EXPECT_EQ(not original_value, config.get_enable_remote_mount());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(not original_value, config.get_enable_remote_mount());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, is_remote_mount) {
|
||||
bool original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_is_remote_mount();
|
||||
config.set_is_remote_mount(not original_value);
|
||||
EXPECT_EQ(not original_value, config.get_is_remote_mount());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(not original_value, config.get_is_remote_mount());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, is_remote_mount) {
|
||||
// bool original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_is_remote_mount();
|
||||
// config.set_is_remote_mount(not original_value);
|
||||
// EXPECT_EQ(not original_value, config.get_is_remote_mount());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(not original_value, config.get_is_remote_mount());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, enable_remote_mount_fails_if_remote_mount_is_true) {
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
config.set_is_remote_mount(true);
|
||||
config.set_enable_remote_mount(true);
|
||||
EXPECT_FALSE(config.get_enable_remote_mount());
|
||||
EXPECT_TRUE(config.get_is_remote_mount());
|
||||
}
|
||||
// TEST_F(config_test, enable_remote_mount_fails_if_remote_mount_is_true) {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// config.set_is_remote_mount(true);
|
||||
// config.set_enable_remote_mount(true);
|
||||
// EXPECT_FALSE(config.get_enable_remote_mount());
|
||||
// EXPECT_TRUE(config.get_is_remote_mount());
|
||||
// }
|
||||
|
||||
TEST_F(config_test, set_is_remote_mount_fails_if_enable_remote_mount_is_true) {
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
config.set_enable_remote_mount(true);
|
||||
config.set_is_remote_mount(true);
|
||||
EXPECT_FALSE(config.get_is_remote_mount());
|
||||
EXPECT_TRUE(config.get_enable_remote_mount());
|
||||
}
|
||||
// TEST_F(config_test, set_is_remote_mount_fails_if_enable_remote_mount_is_true)
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// config.set_enable_remote_mount(true);
|
||||
// config.set_is_remote_mount(true);
|
||||
// EXPECT_FALSE(config.get_is_remote_mount());
|
||||
// EXPECT_TRUE(config.get_enable_remote_mount());
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_host_name_or_ip) {
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
config.set_remote_host_name_or_ip("my.host.name");
|
||||
EXPECT_STREQ("my.host.name", config.get_remote_host_name_or_ip().c_str());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_STREQ("my.host.name", config.get_remote_host_name_or_ip().c_str());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_host_name_or_ip) {
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// config.set_remote_host_name_or_ip("my.host.name");
|
||||
// EXPECT_STREQ("my.host.name",
|
||||
// config.get_remote_host_name_or_ip().c_str());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_STREQ("my.host.name",
|
||||
// config.get_remote_host_name_or_ip().c_str());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_port) {
|
||||
std::uint16_t original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_remote_port();
|
||||
config.set_remote_port(original_value + 5);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_port());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_port());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_api_port) {
|
||||
// std::uint16_t original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_remote_api_port();
|
||||
// config.set_remote_api_port(original_value + 5);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_api_port());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_api_port());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_receive_timeout_secs) {
|
||||
std::uint16_t original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_remote_receive_timeout_secs();
|
||||
config.set_remote_receive_timeout_secs(original_value + 5);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_receive_timeout_secs) {
|
||||
// std::uint16_t original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_remote_receive_timeout_secs();
|
||||
// config.set_remote_receive_timeout_secs(original_value + 5);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_send_timeout_secs) {
|
||||
std::uint16_t original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_remote_send_timeout_secs();
|
||||
config.set_remote_send_timeout_secs(original_value + 5);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_send_timeout_secs) {
|
||||
// std::uint16_t original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_remote_send_timeout_secs();
|
||||
// config.set_remote_send_timeout_secs(original_value + 5);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_token) {
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
config.set_remote_token("myToken");
|
||||
EXPECT_STREQ("myToken", config.get_remote_token().c_str());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_STREQ("myToken", config.get_remote_token().c_str());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_encryption_token) {
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// config.set_remote_encryption_token("myToken");
|
||||
// EXPECT_STREQ("myToken", config.get_remote_encryption_token().c_str());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_STREQ("myToken", config.get_remote_encryption_token().c_str());
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// TEST_F(config_test, remote_client_pool_size) {
|
||||
// std::uint8_t original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_remote_client_pool_size();
|
||||
// config.set_remote_client_pool_size(original_value + 5);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// TEST_F(config_test, remote_client_pool_size_minimum_value) {
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// config.set_remote_client_pool_size(0);
|
||||
// EXPECT_EQ(5, config.get_remote_client_pool_size());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(5, config.get_remote_client_pool_size());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_client_pool_size) {
|
||||
std::uint8_t original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_remote_client_pool_size();
|
||||
config.set_remote_client_pool_size(original_value + 5);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_max_connections) {
|
||||
// std::uint8_t original_value{};
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// original_value = config.get_remote_max_connections();
|
||||
// config.set_remote_max_connections(original_value + 5);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, remote_client_pool_size_minimum_value) {
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
config.set_remote_client_pool_size(0);
|
||||
EXPECT_EQ(5, config.get_remote_client_pool_size());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(5, config.get_remote_client_pool_size());
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(config_test, remote_max_connections) {
|
||||
std::uint8_t original_value{};
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
original_value = config.get_remote_max_connections();
|
||||
config.set_remote_max_connections(original_value + 5);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(config_test, remote_max_connections_minimum_value) {
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
config.set_remote_max_connections(0);
|
||||
EXPECT_EQ(1, config.get_remote_max_connections());
|
||||
}
|
||||
{
|
||||
app_config config(provider_type::sia, sia_directory);
|
||||
EXPECT_EQ(1, config.get_remote_max_connections());
|
||||
}
|
||||
}
|
||||
// TEST_F(config_test, remote_max_connections_minimum_value) {
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// config.set_remote_max_connections(0);
|
||||
// EXPECT_EQ(1, config.get_remote_max_connections());
|
||||
// }
|
||||
// {
|
||||
// app_config config(provider_type::sia, sia_directory);
|
||||
// EXPECT_EQ(1, config.get_remote_max_connections());
|
||||
// }
|
||||
// }
|
||||
|
||||
TEST_F(config_test, retry_read_count) {
|
||||
std::uint16_t original_value{};
|
||||
|
292 repertory/repertory_test/src/direct_open_file_test.cpp Normal file
@ -0,0 +1,292 @@
|
||||
/*
|
||||
Copyright <2018-2024> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#include "test_common.hpp"
|
||||
|
||||
#include "file_manager/direct_open_file.hpp"
|
||||
#include "mocks/mock_provider.hpp"
|
||||
|
||||
namespace {
|
||||
constexpr const std::size_t test_chunk_size{1024U};
|
||||
} // namespace
|
||||
|
||||
namespace repertory {
|
||||
class direct_open_file_test : public ::testing::Test {
|
||||
public:
|
||||
console_consumer con_consumer;
|
||||
mock_provider provider;
|
||||
|
||||
protected:
|
||||
void SetUp() override { event_system::instance().start(); }
|
||||
|
||||
void TearDown() override { event_system::instance().stop(); }
|
||||
};
|
||||
|
||||
TEST_F(direct_open_file_test, read_full_file) {
|
||||
auto &source_file = test::create_random_file(test_chunk_size * 32U);
|
||||
|
||||
auto dest_path = test::generate_test_file_name("direct_open_file");
|
||||
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.directory = false;
|
||||
fsi.size = test_chunk_size * 32U;
|
||||
|
||||
std::mutex read_mtx;
|
||||
EXPECT_CALL(provider, read_file_bytes)
|
||||
.WillRepeatedly([&read_mtx, &source_file](
|
||||
const std::string & /* api_path */, std::size_t size,
|
||||
std::uint64_t offset, data_buffer &data,
|
||||
stop_type &stop_requested) -> api_error {
|
||||
mutex_lock lock(read_mtx);
|
||||
|
||||
EXPECT_FALSE(stop_requested);
|
||||
std::size_t bytes_read{};
|
||||
data.resize(size);
|
||||
auto ret = source_file.read(data, offset, &bytes_read)
|
||||
? api_error::success
|
||||
: api_error::os_error;
|
||||
EXPECT_EQ(bytes_read, data.size());
|
||||
return ret;
|
||||
});
|
||||
{
|
||||
direct_open_file file(test_chunk_size, 30U, fsi, provider);
|
||||
|
||||
auto dest_file = utils::file::file::open_or_create_file(dest_path);
|
||||
EXPECT_TRUE(dest_file);
|
||||
|
||||
auto to_read{fsi.size};
|
||||
std::size_t chunk{0U};
|
||||
while (to_read > 0U) {
|
||||
data_buffer data{};
|
||||
EXPECT_EQ(api_error::success,
|
||||
file.read(test_chunk_size, chunk * test_chunk_size, data));
|
||||
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_TRUE(
|
||||
dest_file->write(data, chunk * test_chunk_size, &bytes_written));
|
||||
++chunk;
|
||||
to_read -= data.size();
|
||||
}
|
||||
dest_file->close();
|
||||
source_file.close();
|
||||
|
||||
auto hash1 = utils::file::file(source_file.get_path()).sha256();
|
||||
auto hash2 = utils::file::file(dest_path).sha256();
|
||||
|
||||
EXPECT_TRUE(hash1.has_value());
|
||||
EXPECT_TRUE(hash2.has_value());
|
||||
if (hash1.has_value() && hash2.has_value()) {
|
||||
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(direct_open_file_test, read_full_file_in_reverse) {
|
||||
auto &source_file = test::create_random_file(test_chunk_size * 32U);
|
||||
|
||||
auto dest_path = test::generate_test_file_name("direct_open_file");
|
||||
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.directory = false;
|
||||
fsi.size = test_chunk_size * 32U;
|
||||
|
||||
std::mutex read_mtx;
|
||||
EXPECT_CALL(provider, read_file_bytes)
|
||||
.WillRepeatedly([&read_mtx, &source_file](
|
||||
const std::string & /* api_path */, std::size_t size,
|
||||
std::uint64_t offset, data_buffer &data,
|
||||
stop_type &stop_requested) -> api_error {
|
||||
mutex_lock lock(read_mtx);
|
||||
|
||||
EXPECT_FALSE(stop_requested);
|
||||
std::size_t bytes_read{};
|
||||
data.resize(size);
|
||||
auto ret = source_file.read(data, offset, &bytes_read)
|
||||
? api_error::success
|
||||
: api_error::os_error;
|
||||
EXPECT_EQ(bytes_read, data.size());
|
||||
return ret;
|
||||
});
|
||||
{
|
||||
direct_open_file file(test_chunk_size, 30U, fsi, provider);
|
||||
|
||||
auto dest_file = utils::file::file::open_or_create_file(dest_path);
|
||||
EXPECT_TRUE(dest_file);
|
||||
|
||||
auto to_read{fsi.size};
|
||||
std::size_t chunk{file.get_total_chunks() - 1U};
|
||||
while (to_read > 0U) {
|
||||
data_buffer data{};
|
||||
EXPECT_EQ(api_error::success,
|
||||
file.read(test_chunk_size, chunk * test_chunk_size, data));
|
||||
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_TRUE(
|
||||
dest_file->write(data, chunk * test_chunk_size, &bytes_written));
|
||||
--chunk;
|
||||
to_read -= data.size();
|
||||
}
|
||||
dest_file->close();
|
||||
source_file.close();
|
||||
|
||||
auto hash1 = utils::file::file(source_file.get_path()).sha256();
|
||||
auto hash2 = utils::file::file(dest_path).sha256();
|
||||
|
||||
EXPECT_TRUE(hash1.has_value());
|
||||
EXPECT_TRUE(hash2.has_value());
|
||||
if (hash1.has_value() && hash2.has_value()) {
|
||||
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(direct_open_file_test, read_full_file_in_partial_chunks) {
|
||||
auto &source_file = test::create_random_file(test_chunk_size * 32U);
|
||||
|
||||
auto dest_path = test::generate_test_file_name("test");
|
||||
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size * 32U;
|
||||
|
||||
std::mutex read_mtx;
|
||||
EXPECT_CALL(provider, read_file_bytes)
|
||||
.WillRepeatedly([&read_mtx, &source_file](
|
||||
const std::string & /* api_path */, std::size_t size,
|
||||
std::uint64_t offset, data_buffer &data,
|
||||
stop_type &stop_requested) -> api_error {
|
||||
mutex_lock lock(read_mtx);
|
||||
|
||||
EXPECT_FALSE(stop_requested);
|
||||
std::size_t bytes_read{};
|
||||
data.resize(size);
|
||||
auto ret = source_file.read(data, offset, &bytes_read)
|
||||
? api_error::success
|
||||
: api_error::os_error;
|
||||
EXPECT_EQ(bytes_read, data.size());
|
||||
return ret;
|
||||
});
|
||||
{
|
||||
direct_open_file file(test_chunk_size, 30U, fsi, provider);
|
||||
|
||||
auto dest_file = utils::file::file::open_or_create_file(dest_path);
|
||||
EXPECT_TRUE(dest_file);
|
||||
|
||||
auto total_read{std::uint64_t(0U)};
|
||||
while (total_read < fsi.size) {
|
||||
data_buffer data{};
|
||||
EXPECT_EQ(api_error::success, file.read(3U, total_read, data));
|
||||
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_TRUE(dest_file->write(data.data(), data.size(), total_read,
|
||||
&bytes_written));
|
||||
total_read += data.size();
|
||||
}
|
||||
dest_file->close();
|
||||
source_file.close();
|
||||
|
||||
auto hash1 = utils::file::file(source_file.get_path()).sha256();
|
||||
auto hash2 = utils::file::file(dest_path).sha256();
|
||||
|
||||
EXPECT_TRUE(hash1.has_value());
|
||||
EXPECT_TRUE(hash2.has_value());
|
||||
if (hash1.has_value() && hash2.has_value()) {
|
||||
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(direct_open_file_test, read_full_file_in_partial_chunks_in_reverse) {
|
||||
auto &source_file = test::create_random_file(test_chunk_size * 32U);
|
||||
|
||||
auto dest_path = test::generate_test_file_name("direct_open_file");
|
||||
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size * 32U;
|
||||
|
||||
std::mutex read_mtx;
|
||||
EXPECT_CALL(provider, read_file_bytes)
|
||||
.WillRepeatedly([&read_mtx, &source_file](
|
||||
const std::string & /* api_path */, std::size_t size,
|
||||
std::uint64_t offset, data_buffer &data,
|
||||
stop_type &stop_requested) -> api_error {
|
||||
mutex_lock lock(read_mtx);
|
||||
|
||||
EXPECT_FALSE(stop_requested);
|
||||
std::size_t bytes_read{};
|
||||
data.resize(size);
|
||||
auto ret = source_file.read(data, offset, &bytes_read)
|
||||
? api_error::success
|
||||
: api_error::os_error;
|
||||
EXPECT_EQ(bytes_read, data.size());
|
||||
return ret;
|
||||
});
|
||||
{
|
||||
direct_open_file file(test_chunk_size, 30U, fsi, provider);
|
||||
|
||||
auto dest_file = utils::file::file::open_or_create_file(dest_path);
|
||||
EXPECT_TRUE(dest_file);
|
||||
|
||||
std::uint64_t total_read{0U};
|
||||
auto read_size{3U};
|
||||
|
||||
while (total_read < fsi.size) {
|
||||
auto offset = fsi.size - total_read - read_size;
|
||||
auto remain = fsi.size - total_read;
|
||||
|
||||
data_buffer data{};
|
||||
EXPECT_EQ(api_error::success,
|
||||
file.read(static_cast<std::size_t>(
|
||||
std::min(remain, std::uint64_t(read_size))),
|
||||
(remain >= read_size) ? offset : 0U, data));
|
||||
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_TRUE(dest_file->write(data, (remain >= read_size) ? offset : 0U,
|
||||
&bytes_written));
|
||||
total_read += data.size();
|
||||
}
|
||||
dest_file->close();
|
||||
source_file.close();
|
||||
|
||||
auto hash1 = utils::file::file(source_file.get_path()).sha256();
|
||||
auto hash2 = utils::file::file(dest_path).sha256();
|
||||
|
||||
EXPECT_TRUE(hash1.has_value());
|
||||
EXPECT_TRUE(hash2.has_value());
|
||||
if (hash1.has_value() && hash2.has_value()) {
|
||||
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace repertory
|
@ -83,7 +83,8 @@ TYPED_TEST(file_db_test, can_get_api_path_for_file) {
  EXPECT_STREQ("/file", api_path.c_str());
}

TYPED_TEST(file_db_test, item_not_found_is_returned_for_non_existing_api_path) {
TYPED_TEST(file_db_test,
           item_not_found_is_returned_for_non_existing_source_path) {
  this->file_db->clear();

  std::string api_path;
@ -199,10 +200,10 @@ TYPED_TEST(file_db_test, can_get_file_data) {

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
  EXPECT_STREQ("/file", data.api_path);
  EXPECT_STREQ("/file", data.api_path.c_str());
  EXPECT_EQ(1U, data.file_size);
  EXPECT_EQ(2U, data.iv_list.size());
  EXPECT_STREQ("c:\\test\\file.txt", data.source_path);
  EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());
}

TYPED_TEST(file_db_test,
@ -213,5 +214,119 @@ TYPED_TEST(file_db_test,
  EXPECT_EQ(api_error::item_not_found,
            this->file_db->get_file_data("/file", data));
}
// test can update file source, iv, size

TYPED_TEST(file_db_test, can_update_existing_file_iv) {
  this->file_db->clear();

  EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
                                    "/file",
                                    1U,
                                    {{}, {}},
                                    "c:\\test\\file.txt",
                                }));

  EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
                                    "/file",
                                    1U,
                                    {{}, {}, {}},
                                    "c:\\test\\file.txt",
                                }));

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
  EXPECT_STREQ("/file", data.api_path.c_str());
  EXPECT_EQ(1U, data.file_size);
  EXPECT_EQ(3U, data.iv_list.size());
  EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());

  EXPECT_EQ(1U, this->file_db->count());
}
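The update tests above pass a brace-initialized i_file_db::file_data aggregate to add_or_update_file and read the same fields back. From what the assertions touch, the aggregate roughly has the shape sketched below; the IV entry type and container are assumptions, since the tests only show nested brace initializers and iv_list.size().

// Hedged reconstruction of i_file_db::file_data from the tests above.
#include <array>
#include <cstdint>
#include <string>
#include <vector>

struct file_data_sketch {
  std::string api_path;      // e.g. "/file"
  std::uint64_t file_size{}; // e.g. 1U, 2U
  std::vector<std::array<unsigned char, 16U>> iv_list; // one IV per chunk (assumed type)
  std::string source_path;   // e.g. "c:\\test\\file.txt"
};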
TYPED_TEST(file_db_test, can_update_existing_file_size) {
|
||||
this->file_db->clear();
|
||||
|
||||
EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
|
||||
"/file",
|
||||
1U,
|
||||
{{}, {}},
|
||||
"c:\\test\\file.txt",
|
||||
}));
|
||||
|
||||
EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
|
||||
"/file",
|
||||
2U,
|
||||
{{}, {}},
|
||||
"c:\\test\\file.txt",
|
||||
}));
|
||||
|
||||
i_file_db::file_data data{};
|
||||
EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
|
||||
EXPECT_STREQ("/file", data.api_path.c_str());
|
||||
EXPECT_EQ(2U, data.file_size);
|
||||
EXPECT_EQ(2U, data.iv_list.size());
|
||||
EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());
|
||||
|
||||
EXPECT_EQ(1U, this->file_db->count());
|
||||
}
|
||||
|
||||
TYPED_TEST(file_db_test, can_update_existing_file_source_path) {
|
||||
this->file_db->clear();
|
||||
|
||||
EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
|
||||
"/file",
|
||||
1U,
|
||||
{{}, {}},
|
||||
"c:\\test\\file.txt",
|
||||
}));
|
||||
|
||||
EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
|
||||
"/file",
|
||||
1U,
|
||||
{{}, {}},
|
||||
"c:\\test\\file2.txt",
|
||||
}));
|
||||
|
||||
i_file_db::file_data data{};
|
||||
EXPECT_EQ(api_error::success, this->file_db->get_file_data("/file", data));
|
||||
EXPECT_STREQ("/file", data.api_path.c_str());
|
||||
EXPECT_EQ(1U, data.file_size);
|
||||
EXPECT_EQ(2U, data.iv_list.size());
|
||||
EXPECT_STREQ("c:\\test\\file2.txt", data.source_path.c_str());
|
||||
|
||||
EXPECT_EQ(1U, this->file_db->count());
|
||||
}
|
||||
|
||||
TYPED_TEST(file_db_test, can_get_source_path_for_directory) {
|
||||
this->file_db->clear();
|
||||
|
||||
EXPECT_EQ(api_error::success, this->file_db->add_directory("/", "c:\\test"));
|
||||
std::string source_path;
|
||||
EXPECT_EQ(api_error::success,
|
||||
this->file_db->get_source_path("/", source_path));
|
||||
EXPECT_STREQ("c:\\test", source_path.c_str());
|
||||
}
|
||||
|
||||
TYPED_TEST(file_db_test, can_get_source_path_for_file) {
|
||||
this->file_db->clear();
|
||||
|
||||
EXPECT_EQ(api_error::success, this->file_db->add_or_update_file({
|
||||
"/file",
|
||||
0U,
|
||||
{},
|
||||
"c:\\test\\file.txt",
|
||||
}));
|
||||
std::string source_path;
|
||||
EXPECT_EQ(api_error::success,
|
||||
this->file_db->get_source_path("/file", source_path));
|
||||
EXPECT_STREQ("c:\\test\\file.txt", source_path.c_str());
|
||||
}
|
||||
|
||||
TYPED_TEST(file_db_test, item_not_found_is_returned_for_non_existing_api_path) {
|
||||
this->file_db->clear();
|
||||
|
||||
std::string source_path;
|
||||
EXPECT_EQ(api_error::item_not_found,
|
||||
this->file_db->get_source_path("/file", source_path));
|
||||
EXPECT_TRUE(source_path.empty());
|
||||
}
|
||||
} // namespace repertory
|
||||
|
@ -22,6 +22,7 @@
#include "test_common.hpp"

#include "app_config.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/events.hpp"
#include "file_manager/file_manager.hpp"
#include "file_manager/i_open_file.hpp"
@ -51,7 +52,7 @@ auto file_manager::open(std::shared_ptr<i_closeable_open_file> of,

class file_manager_test : public ::testing::Test {
public:
  console_consumer c;
  console_consumer con_consumer;
  std::unique_ptr<app_config> cfg;
  mock_provider mp;
  static std::atomic<std::size_t> inst;
@ -66,7 +67,9 @@ protected:
                           {"file_manager_test" + std::to_string(++inst)});

    cfg = std::make_unique<app_config>(provider_type::sia, file_manager_dir);
    cfg->set_enable_chunk_downloader_timeout(false);
    cfg->set_enable_download_timeout(false);

    cache_size_mgr::instance().initialize(cfg.get());
  }

  void TearDown() override { event_system::instance().stop(); }
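The fixture now initializes a cache_size_mgr singleton before the file_manager tests run. Only the call shown in the diff, cache_size_mgr::instance().initialize(cfg.get()), is evidenced; the sketch below is a speculative illustration of how such a manager could track cached bytes and make writers wait when the cache is full, with all names and behavior beyond initialize() assumed rather than taken from the project.

// Speculative sketch of a cache-size manager singleton; only initialize()
// is evidenced by the diff, the rest is assumed for illustration.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class app_config; // real type lives in app_config.hpp

class cache_size_mgr_sketch final {
public:
  static auto instance() -> cache_size_mgr_sketch & {
    static cache_size_mgr_sketch inst;
    return inst;
  }

  void initialize(app_config *cfg) {
    std::lock_guard lock(mtx_);
    cfg_ = cfg;
    size_ = 0U;
  }

  // Assumed behavior: writers block here until the requested bytes fit.
  void expand(std::uint64_t bytes, std::uint64_t max_size) {
    std::unique_lock lock(mtx_);
    notify_.wait(lock, [&]() { return size_ + bytes <= max_size; });
    size_ += bytes;
  }

  void shrink(std::uint64_t bytes) {
    std::lock_guard lock(mtx_);
    size_ -= (bytes > size_) ? size_ : bytes;
    notify_.notify_all();
  }

private:
  app_config *cfg_{nullptr};
  std::condition_variable notify_;
  std::mutex mtx_;
  std::uint64_t size_{0U};
};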
@ -104,7 +107,7 @@ TEST_F(file_manager_test, can_start_and_stop) {
|
||||
}
|
||||
|
||||
TEST_F(file_manager_test, can_create_and_close_file) {
|
||||
cfg->set_enable_chunk_downloader_timeout(true);
|
||||
cfg->set_enable_download_timeout(true);
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
@ -213,7 +216,7 @@ TEST_F(file_manager_test, can_create_and_close_file) {
|
||||
}
|
||||
|
||||
TEST_F(file_manager_test, can_open_and_close_file) {
|
||||
cfg->set_enable_chunk_downloader_timeout(true);
|
||||
cfg->set_enable_download_timeout(true);
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
@ -428,16 +431,6 @@ TEST_F(file_manager_test,
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
#if defined(_WIN32)
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
|
||||
false, {}, handle, open_file));
|
||||
#else
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
|
||||
false, O_RDWR, handle, open_file));
|
||||
#endif
|
||||
|
||||
EXPECT_CALL(mp, read_file_bytes)
|
||||
.WillRepeatedly([&file](const std::string & /* api_path */,
|
||||
std::size_t size, std::uint64_t offset,
|
||||
@ -462,6 +455,17 @@ TEST_F(file_manager_test,
|
||||
|
||||
return api_error::download_stopped;
|
||||
});
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
#if defined(_WIN32)
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
|
||||
false, {}, handle, open_file));
|
||||
#else
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
|
||||
false, O_RDWR, handle, open_file));
|
||||
#endif
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta("/test_write_partial_download.txt", _))
|
||||
.WillOnce(
|
||||
[](const std::string &, const api_meta_map &meta2) -> api_error {
|
||||
@ -472,6 +476,10 @@ TEST_F(file_manager_test,
|
||||
});
|
||||
EXPECT_CALL(mp, upload_file).Times(0u);
|
||||
|
||||
if (not open_file->is_write_supported()) {
|
||||
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
|
||||
}
|
||||
|
||||
std::size_t bytes_written{};
|
||||
data_buffer data = {0, 1, 2};
|
||||
EXPECT_EQ(api_error::success, open_file->write(0u, data, bytes_written));
|
||||
@ -530,7 +538,7 @@ TEST_F(file_manager_test,
|
||||
}
|
||||
|
||||
TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
|
||||
cfg->set_enable_chunk_downloader_timeout(true);
|
||||
cfg->set_enable_download_timeout(true);
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
@ -557,7 +565,6 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
|
||||
EXPECT_STREQ(source_path.c_str(),
|
||||
evt2.get_source().get<std::string>().c_str());
|
||||
});
|
||||
event_capture capture({"download_end"});
|
||||
|
||||
auto now = utils::time::get_time_now();
|
||||
auto meta = create_meta_attributes(
|
||||
@ -581,16 +588,6 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
#if defined(_WIN32)
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
|
||||
{}, handle, open_file));
|
||||
#else
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
|
||||
O_RDWR, handle, open_file));
|
||||
#endif
|
||||
|
||||
EXPECT_CALL(mp, read_file_bytes)
|
||||
.WillRepeatedly([&file](const std::string & /* api_path */,
|
||||
std::size_t size, std::uint64_t offset,
|
||||
@ -603,6 +600,17 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
|
||||
EXPECT_EQ(bytes_read, data.size());
|
||||
return ret;
|
||||
});
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
#if defined(_WIN32)
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
|
||||
{}, handle, open_file));
|
||||
#else
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
|
||||
O_RDWR, handle, open_file));
|
||||
#endif
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta("/test_write_full_download.txt", _))
|
||||
.WillOnce(
|
||||
[](const std::string &, const api_meta_map &meta2) -> api_error {
|
||||
@ -611,25 +619,33 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta2.at(META_WRITTEN).empty()));
|
||||
return api_error::success;
|
||||
});
|
||||
std::size_t bytes_written{};
|
||||
data_buffer data = {0, 1, 2};
|
||||
EXPECT_EQ(api_error::success, open_file->write(0u, data, bytes_written));
|
||||
EXPECT_EQ(std::size_t(3u), bytes_written);
|
||||
open_file.reset();
|
||||
|
||||
capture.wait_for_empty();
|
||||
if (not open_file->is_write_supported()) {
|
||||
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
|
||||
}
|
||||
|
||||
EXPECT_CALL(mp, upload_file("/test_write_full_download.txt", source_path, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
|
||||
event_capture ec2({
|
||||
event_capture capture({
|
||||
"item_timeout",
|
||||
"file_upload_queued",
|
||||
"file_upload_completed",
|
||||
});
|
||||
|
||||
EXPECT_CALL(mp, upload_file("/test_write_full_download.txt", source_path, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
|
||||
std::size_t bytes_written{};
|
||||
data_buffer data = {0, 1, 2};
|
||||
EXPECT_EQ(api_error::success, open_file->write(0u, data, bytes_written));
|
||||
EXPECT_EQ(std::size_t(3u), bytes_written);
|
||||
|
||||
while (not open_file->is_complete()) {
|
||||
std::this_thread::sleep_for(10ms);
|
||||
}
|
||||
open_file.reset();
|
||||
|
||||
mgr.close(handle);
|
||||
|
||||
ec2.wait_for_empty();
|
||||
capture.wait_for_empty();
|
||||
|
||||
EXPECT_EQ(std::size_t(0U), mgr.get_open_file_count());
|
||||
EXPECT_EQ(std::size_t(0U), mgr.get_open_handle_count());
|
||||
@ -694,9 +710,14 @@ TEST_F(file_manager_test, can_evict_file) {
|
||||
.WillRepeatedly(Return(api_error::success));
|
||||
EXPECT_CALL(mp, upload_file(_, _, _)).WillOnce(Return(api_error::success));
|
||||
|
||||
if (not open_file->is_write_supported()) {
|
||||
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
|
||||
}
|
||||
|
||||
data_buffer data{{0, 1, 1}};
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_EQ(api_error::success, open_file->write(0U, data, bytes_written));
|
||||
auto res = open_file->write(0U, data, bytes_written);
|
||||
EXPECT_EQ(api_error::success, res);
|
||||
|
||||
auto opt_size = utils::file::file{source_path}.size();
|
||||
EXPECT_TRUE(opt_size.has_value());
|
||||
@ -709,15 +730,6 @@ TEST_F(file_manager_test, can_evict_file) {
|
||||
EXPECT_TRUE(utils::retry_action(
|
||||
[&mgr]() -> bool { return not mgr.is_processing("/test_evict.txt"); }));
|
||||
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_SOURCE, _))
|
||||
.WillOnce([&source_path](const std::string &api_path,
|
||||
const std::string &key,
|
||||
std::string &value) -> api_error {
|
||||
EXPECT_STREQ("/test_evict.txt", api_path.c_str());
|
||||
EXPECT_STREQ(META_SOURCE.c_str(), key.c_str());
|
||||
value = source_path;
|
||||
return api_error::success;
|
||||
});
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
|
||||
.WillOnce([](const std::string &api_path, const std::string &key,
|
||||
std::string &value) -> api_error {
|
||||
@ -736,6 +748,17 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_pinned) {
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
file_manager mgr(*cfg, mp);
|
||||
|
||||
EXPECT_CALL(mp, get_filesystem_item)
|
||||
.WillRepeatedly([](const std::string &api_path, bool directory,
|
||||
filesystem_item &fsi) -> api_error {
|
||||
fsi.api_path = api_path;
|
||||
fsi.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
fsi.directory = directory;
|
||||
fsi.size = 2U;
|
||||
fsi.source_path = "/test/test_open.src";
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
|
||||
.WillOnce([](const std::string &api_path, const std::string &key,
|
||||
std::string &value) -> api_error {
|
||||
@ -794,28 +817,17 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_open) {
|
||||
mgr.close(handle);
|
||||
}
|
||||
|
||||
TEST_F(file_manager_test,
|
||||
evict_file_fails_if_unable_to_get_source_path_from_item_meta) {
|
||||
TEST_F(file_manager_test, evict_file_fails_if_unable_to_get_filesystem_item) {
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
file_manager mgr(*cfg, mp);
|
||||
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_SOURCE, _))
|
||||
.WillOnce([](const std::string &api_path, const std::string &key,
|
||||
std::string & /*value*/) -> api_error {
|
||||
EXPECT_STREQ("/test_open.txt", api_path.c_str());
|
||||
EXPECT_STREQ(META_SOURCE.c_str(), key.c_str());
|
||||
EXPECT_CALL(mp, get_filesystem_item)
|
||||
.WillRepeatedly([](const std::string & /* api_path */,
|
||||
bool /* directory */,
|
||||
filesystem_item & /* fsi */) -> api_error {
|
||||
return api_error::error;
|
||||
});
|
||||
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
|
||||
.WillOnce([](const std::string &api_path, const std::string &key,
|
||||
std::string &value) -> api_error {
|
||||
EXPECT_STREQ("/test_open.txt", api_path.c_str());
|
||||
EXPECT_STREQ(META_PINNED.c_str(), key.c_str());
|
||||
value = "0";
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_FALSE(mgr.evict_file("/test_open.txt"));
|
||||
}
|
||||
|
||||
@ -823,20 +835,13 @@ TEST_F(file_manager_test, evict_file_fails_if_source_path_is_empty) {
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
file_manager mgr(*cfg, mp);
|
||||
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_SOURCE, _))
|
||||
.WillOnce([](const std::string &api_path, const std::string &key,
|
||||
std::string &value) -> api_error {
|
||||
EXPECT_STREQ("/test_open.txt", api_path.c_str());
|
||||
EXPECT_STREQ(META_SOURCE.c_str(), key.c_str());
|
||||
value = "";
|
||||
return api_error::success;
|
||||
});
|
||||
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
|
||||
.WillOnce([](const std::string &api_path, const std::string &key,
|
||||
std::string &value) -> api_error {
|
||||
EXPECT_STREQ("/test_open.txt", api_path.c_str());
|
||||
EXPECT_STREQ(META_PINNED.c_str(), key.c_str());
|
||||
value = "0";
|
||||
EXPECT_CALL(mp, get_filesystem_item)
|
||||
.WillRepeatedly([](const std::string &api_path, bool directory,
|
||||
filesystem_item &fsi) -> api_error {
|
||||
fsi.api_path = api_path;
|
||||
fsi.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
fsi.directory = directory;
|
||||
fsi.size = 20U;
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
@ -904,6 +909,10 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_uploading) {
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
if (not open_file->is_write_supported()) {
|
||||
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
|
||||
}
|
||||
|
||||
data_buffer data{{0, 1, 1}};
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_EQ(api_error::success, open_file->write(0U, data, bytes_written));
|
||||
@ -947,6 +956,7 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_modified) {
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
file_manager mgr(*cfg, mp);
|
||||
|
||||
EXPECT_CALL(mp, get_filesystem_item)
|
||||
.WillOnce([](const std::string &api_path, bool directory,
|
||||
filesystem_item &fsi) -> api_error {
|
||||
@ -961,11 +971,12 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_modified) {
|
||||
});
|
||||
|
||||
auto file = std::make_shared<mock_open_file>();
|
||||
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
|
||||
EXPECT_CALL(*file, add).WillOnce(Return());
|
||||
EXPECT_CALL(*file, get_api_path).WillRepeatedly(Return("/test_evict.txt"));
|
||||
EXPECT_CALL(*file, get_source_path).WillRepeatedly(Return("/test_evict.src"));
|
||||
EXPECT_CALL(*file, is_modified).Times(2).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
|
||||
EXPECT_CALL(*file, is_modified).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(*file, is_write_supported).WillRepeatedly(Return(true));
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
@ -988,20 +999,21 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_not_complete) {
|
||||
filesystem_item &fsi) -> api_error {
|
||||
EXPECT_STREQ("/test_evict.txt", api_path.c_str());
|
||||
EXPECT_FALSE(directory);
|
||||
fsi.api_path = api_path;
|
||||
fsi.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
fsi.api_path = api_path;
|
||||
fsi.directory = directory;
|
||||
fsi.size = 1U;
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
auto file = std::make_shared<mock_open_file>();
|
||||
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
|
||||
EXPECT_CALL(*file, add).WillOnce(Return());
|
||||
EXPECT_CALL(*file, get_api_path).WillRepeatedly(Return("/test_evict.txt"));
|
||||
EXPECT_CALL(*file, get_source_path).WillRepeatedly(Return("/test_evict.src"));
|
||||
EXPECT_CALL(*file, is_modified).Times(2).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(*file, is_complete).Times(2).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(*file, is_complete).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
|
||||
EXPECT_CALL(*file, is_modified).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(*file, is_write_supported).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(mp, set_item_meta("/test_evict.txt", META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
|
||||
@ -1416,8 +1428,8 @@ TEST_F(file_manager_test, can_queue_and_remove_upload) {
|
||||
}
|
||||
|
||||
TEST_F(file_manager_test, file_is_closed_after_download_timeout) {
|
||||
cfg->set_enable_chunk_downloader_timeout(true);
|
||||
cfg->set_chunk_downloader_timeout_secs(3U);
|
||||
cfg->set_enable_download_timeout(true);
|
||||
cfg->set_download_timeout_secs(3U);
|
||||
|
||||
polling::instance().start(cfg.get());
|
||||
|
||||
@ -1457,26 +1469,16 @@ TEST_F(file_manager_test, file_is_closed_after_download_timeout) {
|
||||
|
||||
event_capture capture({"item_timeout"});
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
#if defined(_WIN32)
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_download_timeout.txt", false,
|
||||
{}, handle, open_file));
|
||||
#else
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_download_timeout.txt", false,
|
||||
O_RDWR, handle, open_file));
|
||||
#endif
|
||||
|
||||
EXPECT_CALL(mp, read_file_bytes)
|
||||
.WillRepeatedly([](const std::string & /* api_path */,
|
||||
std::size_t /*size*/, std::uint64_t offset,
|
||||
data_buffer & /*data*/,
|
||||
.WillRepeatedly([](const std::string & /* api_path */, std::size_t size,
|
||||
std::uint64_t offset, data_buffer &data,
|
||||
stop_type &stop_requested) -> api_error {
|
||||
if (stop_requested) {
|
||||
return api_error::download_stopped;
|
||||
}
|
||||
|
||||
if (offset == 0U) {
|
||||
data.resize(size);
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
@ -1487,13 +1489,25 @@ TEST_F(file_manager_test, file_is_closed_after_download_timeout) {
|
||||
return api_error::download_stopped;
|
||||
});
|
||||
|
||||
std::uint64_t handle{};
|
||||
std::shared_ptr<i_open_file> open_file;
|
||||
#if defined(_WIN32)
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_download_timeout.txt", false,
|
||||
{}, handle, open_file));
|
||||
#else
|
||||
EXPECT_EQ(api_error::success, mgr.open("/test_download_timeout.txt", false,
|
||||
O_RDWR, handle, open_file));
|
||||
#endif
|
||||
|
||||
data_buffer data{};
|
||||
EXPECT_EQ(api_error::success, open_file->read(1U, 0U, data));
|
||||
|
||||
mgr.close(handle);
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta("/test_download_timeout.txt", META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
if (open_file->is_write_supported()) {
|
||||
EXPECT_CALL(mp, set_item_meta("/test_download_timeout.txt", META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
}
|
||||
|
||||
EXPECT_EQ(std::size_t(1U), mgr.get_open_file_count());
|
||||
capture.wait_for_empty();
|
||||
@ -1544,7 +1558,7 @@ TEST_F(file_manager_test, remove_file_fails_if_provider_remove_file_fails) {
|
||||
|
||||
TEST_F(file_manager_test,
|
||||
resize_greater_than_chunk_size_sets_new_chunks_to_read) {
|
||||
cfg->set_enable_chunk_downloader_timeout(true);
|
||||
cfg->set_enable_download_timeout(true);
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
|
248 repertory/repertory_test/src/json_serialize_test.cpp Normal file
@ -0,0 +1,248 @@
|
||||
/*
|
||||
Copyright <2018-2024> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#include "test_common.hpp"
|
||||
|
||||
#include "types/remote.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
|
||||
namespace repertory {
|
||||
TEST(json_serialize, can_handle_directory_item) {
|
||||
directory_item cfg{
|
||||
"api", "parent", true, 2U, {{META_DIRECTORY, "true"}},
|
||||
};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_STREQ("api", data.at(JSON_API_PATH).get<std::string>().c_str());
|
||||
EXPECT_STREQ("parent", data.at(JSON_API_PARENT).get<std::string>().c_str());
|
||||
EXPECT_TRUE(data.at(JSON_DIRECTORY).get<bool>());
|
||||
EXPECT_STREQ(
|
||||
"true", data.at(JSON_META).at(META_DIRECTORY).get<std::string>().c_str());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<directory_item>();
|
||||
EXPECT_STREQ(cfg2.api_path.c_str(), cfg.api_path.c_str());
|
||||
EXPECT_STREQ(cfg2.api_parent.c_str(), cfg.api_parent.c_str());
|
||||
EXPECT_EQ(cfg2.directory, cfg.directory);
|
||||
EXPECT_STREQ(cfg2.meta.at(META_DIRECTORY).c_str(),
|
||||
cfg.meta.at(META_DIRECTORY).c_str());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_encrypt_config) {
|
||||
encrypt_config cfg{
|
||||
"token",
|
||||
"path",
|
||||
};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_STREQ("token",
|
||||
data.at(JSON_ENCRYPTION_TOKEN).get<std::string>().c_str());
|
||||
EXPECT_STREQ("path", data.at(JSON_PATH).get<std::string>().c_str());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<encrypt_config>();
|
||||
EXPECT_STREQ(cfg2.encryption_token.c_str(), cfg.encryption_token.c_str());
|
||||
EXPECT_STREQ(cfg2.path.c_str(), cfg.path.c_str());
|
||||
}
|
||||
}
|
||||
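The round-trip assertions in this new test file rely on JSON conversions being defined for each config struct. A minimal sketch of that pattern follows (not repertory code; it assumes nlohmann::json and uses a hypothetical struct with illustrative key names):

// Minimal sketch: how nlohmann::json round-trips a struct once to_json and
// from_json are defined via ADL, mirroring the json data(cfg) /
// data.get<T>() pattern exercised above. Names here are hypothetical.
#include <cassert>
#include <cstdint>
#include <string>

#include <nlohmann/json.hpp>

namespace demo {
struct sample_config {
  std::string bucket;
  std::uint16_t api_port{};
};

// ADL hooks picked up by nlohmann::json
inline void to_json(nlohmann::json &data, const sample_config &cfg) {
  data = nlohmann::json{{"Bucket", cfg.bucket}, {"ApiPort", cfg.api_port}};
}

inline void from_json(const nlohmann::json &data, sample_config &cfg) {
  data.at("Bucket").get_to(cfg.bucket);
  data.at("ApiPort").get_to(cfg.api_port);
}
} // namespace demo

int main() {
  demo::sample_config cfg{"my-bucket", 1024U};

  nlohmann::json data(cfg);                    // uses to_json
  auto cfg2 = data.get<demo::sample_config>(); // uses from_json

  assert(cfg2.bucket == cfg.bucket);
  assert(cfg2.api_port == cfg.api_port);
  return 0;
}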
|
||||
TEST(json_serialize, can_handle_host_config) {
|
||||
host_config cfg{
|
||||
"agent", "pwd", "user", 1024U, "host", "path", "http", 11U,
|
||||
};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_STREQ("agent", data.at(JSON_AGENT_STRING).get<std::string>().c_str());
|
||||
EXPECT_STREQ("pwd", data.at(JSON_API_PASSWORD).get<std::string>().c_str());
|
||||
EXPECT_STREQ("user", data.at(JSON_API_USER).get<std::string>().c_str());
|
||||
EXPECT_EQ(1024U, data.at(JSON_API_PORT).get<std::uint16_t>());
|
||||
EXPECT_STREQ("host",
|
||||
data.at(JSON_HOST_NAME_OR_IP).get<std::string>().c_str());
|
||||
EXPECT_STREQ("path", data.at(JSON_PATH).get<std::string>().c_str());
|
||||
EXPECT_STREQ("http", data.at(JSON_PROTOCOL).get<std::string>().c_str());
|
||||
EXPECT_EQ(11U, data.at(JSON_TIMEOUT_MS).get<std::uint16_t>());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<host_config>();
|
||||
EXPECT_STREQ(cfg2.agent_string.c_str(), cfg.agent_string.c_str());
|
||||
EXPECT_STREQ(cfg2.api_password.c_str(), cfg.api_password.c_str());
|
||||
EXPECT_STREQ(cfg2.api_user.c_str(), cfg.api_user.c_str());
|
||||
EXPECT_EQ(cfg2.api_port, cfg.api_port);
|
||||
EXPECT_STREQ(cfg2.host_name_or_ip.c_str(), cfg.host_name_or_ip.c_str());
|
||||
EXPECT_STREQ(cfg2.path.c_str(), cfg.path.c_str());
|
||||
EXPECT_STREQ(cfg2.protocol.c_str(), cfg.protocol.c_str());
|
||||
EXPECT_EQ(cfg2.timeout_ms, cfg.timeout_ms);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_remote_config) {
|
||||
remote::remote_config cfg{
|
||||
1024U, "token", "host", 11U, 20U, 21U,
|
||||
};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_EQ(1024U, data.at(JSON_API_PORT).get<std::uint16_t>());
|
||||
EXPECT_STREQ("token",
|
||||
data.at(JSON_ENCRYPTION_TOKEN).get<std::string>().c_str());
|
||||
EXPECT_STREQ("host",
|
||||
data.at(JSON_HOST_NAME_OR_IP).get<std::string>().c_str());
|
||||
EXPECT_EQ(11U, data.at(JSON_MAX_CONNECTIONS).get<std::uint16_t>());
|
||||
EXPECT_EQ(20U, data.at(JSON_RECV_TIMEOUT_MS).get<std::uint32_t>());
|
||||
EXPECT_EQ(21U, data.at(JSON_SEND_TIMEOUT_MS).get<std::uint32_t>());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<remote::remote_config>();
|
||||
EXPECT_EQ(cfg2.api_port, cfg.api_port);
|
||||
EXPECT_STREQ(cfg2.encryption_token.c_str(), cfg.encryption_token.c_str());
|
||||
EXPECT_STREQ(cfg2.host_name_or_ip.c_str(), cfg.host_name_or_ip.c_str());
|
||||
EXPECT_EQ(cfg2.max_connections, cfg.max_connections);
|
||||
EXPECT_EQ(cfg2.recv_timeout_ms, cfg.recv_timeout_ms);
|
||||
EXPECT_EQ(cfg2.send_timeout_ms, cfg.send_timeout_ms);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_remote_mount) {
|
||||
remote::remote_mount cfg{1024U, 21U, true, "token"};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_EQ(1024U, data.at(JSON_API_PORT).get<std::uint16_t>());
|
||||
EXPECT_EQ(21U, data.at(JSON_CLIENT_POOL_SIZE).get<std::uint16_t>());
|
||||
EXPECT_TRUE(data.at(JSON_ENABLE_REMOTE_MOUNT).get<bool>());
|
||||
EXPECT_STREQ("token",
|
||||
data.at(JSON_ENCRYPTION_TOKEN).get<std::string>().c_str());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<remote::remote_mount>();
|
||||
EXPECT_EQ(cfg2.api_port, cfg.api_port);
|
||||
EXPECT_EQ(cfg2.client_pool_size, cfg.client_pool_size);
|
||||
EXPECT_EQ(cfg2.enable, cfg.enable);
|
||||
EXPECT_STREQ(cfg2.encryption_token.c_str(), cfg.encryption_token.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_s3_config) {
|
||||
s3_config cfg{
|
||||
"access", "bucket", "token", "region", "secret", 31U, "url", true, false,
|
||||
};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_STREQ("access", data.at(JSON_ACCESS_KEY).get<std::string>().c_str());
|
||||
EXPECT_STREQ("bucket", data.at(JSON_BUCKET).get<std::string>().c_str());
|
||||
EXPECT_STREQ("token",
|
||||
data.at(JSON_ENCRYPTION_TOKEN).get<std::string>().c_str());
|
||||
EXPECT_STREQ("region", data.at(JSON_REGION).get<std::string>().c_str());
|
||||
EXPECT_STREQ("secret", data.at(JSON_SECRET_KEY).get<std::string>().c_str());
|
||||
EXPECT_EQ(31U, data.at(JSON_TIMEOUT_MS).get<std::uint32_t>());
|
||||
EXPECT_STREQ("url", data.at(JSON_URL).get<std::string>().c_str());
|
||||
EXPECT_TRUE(data.at(JSON_USE_PATH_STYLE).get<bool>());
|
||||
EXPECT_FALSE(data.at(JSON_USE_REGION_IN_URL).get<bool>());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<s3_config>();
|
||||
EXPECT_STREQ(cfg2.access_key.c_str(), cfg.access_key.c_str());
|
||||
EXPECT_STREQ(cfg2.bucket.c_str(), cfg.bucket.c_str());
|
||||
EXPECT_STREQ(cfg2.encryption_token.c_str(), cfg.encryption_token.c_str());
|
||||
EXPECT_STREQ(cfg2.region.c_str(), cfg.region.c_str());
|
||||
EXPECT_STREQ(cfg2.secret_key.c_str(), cfg.secret_key.c_str());
|
||||
EXPECT_EQ(cfg2.timeout_ms, cfg.timeout_ms);
|
||||
EXPECT_STREQ(cfg2.url.c_str(), cfg.url.c_str());
|
||||
EXPECT_EQ(cfg2.use_path_style, cfg.use_path_style);
|
||||
EXPECT_EQ(cfg2.use_region_in_url, cfg.use_region_in_url);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_sia_config) {
|
||||
sia_config cfg{
|
||||
"bucket",
|
||||
};
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_STREQ("bucket", data.at(JSON_BUCKET).get<std::string>().c_str());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<sia_config>();
|
||||
EXPECT_STREQ(cfg2.bucket.c_str(), cfg.bucket.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_atomic) {
|
||||
atomic<sia_config> cfg({
|
||||
"bucket",
|
||||
});
|
||||
|
||||
json data(cfg);
|
||||
EXPECT_STREQ("bucket", data.at(JSON_BUCKET).get<std::string>().c_str());
|
||||
|
||||
{
|
||||
auto cfg2 = data.get<atomic<sia_config>>();
|
||||
EXPECT_STREQ(cfg2.load().bucket.c_str(), cfg.load().bucket.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_database_type) {
|
||||
json data(database_type::rocksdb);
|
||||
EXPECT_EQ(database_type::rocksdb, data.get<database_type>());
|
||||
EXPECT_STREQ("rocksdb", data.get<std::string>().c_str());
|
||||
|
||||
data = database_type::sqlite;
|
||||
EXPECT_EQ(database_type::sqlite, data.get<database_type>());
|
||||
EXPECT_STREQ("sqlite", data.get<std::string>().c_str());
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_download_type) {
|
||||
json data(download_type::direct);
|
||||
EXPECT_EQ(download_type::direct, data.get<download_type>());
|
||||
EXPECT_STREQ("direct", data.get<std::string>().c_str());
|
||||
|
||||
data = download_type::default_;
|
||||
EXPECT_EQ(download_type::default_, data.get<download_type>());
|
||||
EXPECT_STREQ("default", data.get<std::string>().c_str());
|
||||
|
||||
data = download_type::ring_buffer;
|
||||
EXPECT_EQ(download_type::ring_buffer, data.get<download_type>());
|
||||
EXPECT_STREQ("ring_buffer", data.get<std::string>().c_str());
|
||||
}
|
||||
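The database_type and download_type cases above expect enums to serialize as strings such as "sqlite" and "ring_buffer". One common way to get that behavior is nlohmann's NLOHMANN_JSON_SERIALIZE_ENUM macro; the sketch below is illustrative only and may differ from the actual repertory serializer:

// Hypothetical sketch: enum<->string mapping via NLOHMANN_JSON_SERIALIZE_ENUM.
// The mapping values mirror the strings asserted in the tests above.
#include <cassert>
#include <string>

#include <nlohmann/json.hpp>

namespace demo {
enum class download_type { default_, direct, ring_buffer };

NLOHMANN_JSON_SERIALIZE_ENUM(download_type,
                             {
                                 {download_type::default_, "default"},
                                 {download_type::direct, "direct"},
                                 {download_type::ring_buffer, "ring_buffer"},
                             })
} // namespace demo

int main() {
  nlohmann::json data(demo::download_type::direct);
  assert(data.get<std::string>() == "direct");
  assert(data.get<demo::download_type>() == demo::download_type::direct);
  return 0;
}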
|
||||
TEST(json_serialize, can_handle_atomic_database_type) {
|
||||
json data(atomic<database_type>{database_type::rocksdb});
|
||||
EXPECT_EQ(database_type::rocksdb, data.get<atomic<database_type>>());
|
||||
EXPECT_STREQ("rocksdb", data.get<std::string>().c_str());
|
||||
|
||||
data = atomic<database_type>(database_type::sqlite);
|
||||
EXPECT_EQ(database_type::sqlite, data.get<atomic<database_type>>());
|
||||
EXPECT_STREQ("sqlite", data.get<std::string>().c_str());
|
||||
}
|
||||
|
||||
TEST(json_serialize, can_handle_atomic_download_type) {
|
||||
json data(atomic<download_type>{download_type::direct});
|
||||
EXPECT_EQ(download_type::direct, data.get<atomic<download_type>>());
|
||||
EXPECT_STREQ("direct", data.get<std::string>().c_str());
|
||||
|
||||
data = atomic<download_type>{download_type::default_};
|
||||
EXPECT_EQ(download_type::default_, data.get<download_type>());
|
||||
EXPECT_STREQ("default", data.get<std::string>().c_str());
|
||||
|
||||
data = atomic<download_type>{download_type::ring_buffer};
|
||||
EXPECT_EQ(download_type::ring_buffer, data.get<atomic<download_type>>());
|
||||
EXPECT_STREQ("ring_buffer", data.get<std::string>().c_str());
|
||||
}
|
||||
} // namespace repertory
|
@ -21,6 +21,8 @@
|
||||
*/
|
||||
#include "test_common.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "file_manager/cache_size_mgr.hpp"
|
||||
#include "file_manager/open_file.hpp"
|
||||
#include "mocks/mock_provider.hpp"
|
||||
#include "mocks/mock_upload_manager.hpp"
|
||||
@ -28,191 +30,222 @@
|
||||
#include "utils/event_capture.hpp"
|
||||
#include "utils/path.hpp"
|
||||
|
||||
namespace repertory {
|
||||
static constexpr const std::size_t test_chunk_size = 1024u;
|
||||
|
||||
static void test_closeable_open_file(const open_file &o, bool directory,
|
||||
const api_error &e, std::uint64_t size,
|
||||
const std::string &source_path) {
|
||||
EXPECT_EQ(directory, o.is_directory());
|
||||
EXPECT_EQ(e, o.get_api_error());
|
||||
EXPECT_EQ(std::size_t(0u), o.get_open_file_count());
|
||||
EXPECT_EQ(std::uint64_t(size), o.get_file_size());
|
||||
EXPECT_STREQ(source_path.c_str(), o.get_source_path().c_str());
|
||||
EXPECT_TRUE(o.can_close());
|
||||
namespace {
|
||||
constexpr const std::size_t test_chunk_size{1024U};
|
||||
}
|
||||
|
||||
static void validate_write(open_file &o, std::size_t offset, data_buffer data,
|
||||
std::size_t bytes_written) {
|
||||
namespace repertory {
|
||||
class open_file_test : public ::testing::Test {
|
||||
public:
|
||||
console_consumer con_consumer;
|
||||
std::unique_ptr<app_config> cfg;
|
||||
static std::atomic<std::size_t> inst;
|
||||
mock_provider provider;
|
||||
mock_upload_manager upload_mgr;
|
||||
|
||||
protected:
|
||||
void SetUp() override {
|
||||
event_system::instance().start();
|
||||
|
||||
auto open_file_dir = repertory::utils::path::combine(
|
||||
repertory::test::get_test_output_dir(),
|
||||
{"open_file_test" + std::to_string(++inst)});
|
||||
|
||||
cfg = std::make_unique<app_config>(provider_type::sia, open_file_dir);
|
||||
|
||||
cache_size_mgr::instance().initialize(cfg.get());
|
||||
}
|
||||
|
||||
void TearDown() override { event_system::instance().stop(); }
|
||||
};
|
||||
|
||||
std::atomic<std::size_t> open_file_test::inst{0U};
|
||||
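This hunk replaces the per-test mock_provider/mock_upload_manager locals with an open_file_test fixture whose SetUp/TearDown manage the event system, the per-test config directory, and the cache size manager. A generic GoogleTest fixture sketch follows (hypothetical names, not repertory code) showing the mechanics:

// Minimal GoogleTest fixture sketch: shared members built in SetUp(), released
// in TearDown(), and reused by every TEST_F instead of per-test locals.
// Build with GoogleTest and link gtest_main.
#include <memory>
#include <string>

#include <gtest/gtest.h>

namespace {
class widget_test : public ::testing::Test {
public:
  std::unique_ptr<std::string> cfg; // stands in for app_config

protected:
  void SetUp() override {
    // per-test construction; each TEST_F gets a fresh fixture instance
    cfg = std::make_unique<std::string>("widget_test");
  }

  void TearDown() override { cfg.reset(); }
};

TEST_F(widget_test, fixture_members_are_available) {
  ASSERT_TRUE(cfg != nullptr);
  EXPECT_EQ("widget_test", *cfg);
}
} // namespace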
|
||||
static void test_closeable_open_file(const open_file &file, bool directory,
|
||||
const api_error &err, std::uint64_t size,
|
||||
const std::string &source_path) {
|
||||
EXPECT_EQ(directory, file.is_directory());
|
||||
EXPECT_EQ(err, file.get_api_error());
|
||||
EXPECT_EQ(std::size_t(0U), file.get_open_file_count());
|
||||
EXPECT_EQ(std::uint64_t(size), file.get_file_size());
|
||||
EXPECT_STREQ(source_path.c_str(), file.get_source_path().c_str());
|
||||
EXPECT_TRUE(file.can_close());
|
||||
}
|
||||
|
||||
static void validate_write(open_file &file, std::size_t offset,
|
||||
data_buffer data, std::size_t bytes_written) {
|
||||
EXPECT_EQ(data.size(), bytes_written);
|
||||
|
||||
data_buffer read_data{};
|
||||
EXPECT_EQ(api_error::success, o.read(data.size(), offset, read_data));
|
||||
EXPECT_EQ(api_error::success, file.read(data.size(), offset, read_data));
|
||||
|
||||
EXPECT_TRUE(std::equal(data.begin(), data.end(), read_data.begin()));
|
||||
}
|
||||
|
||||
TEST(open_file, properly_initializes_state_for_0_byte_file) {
|
||||
TEST_F(open_file_test, properly_initializes_state_for_0_byte_file) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = 0u;
|
||||
fsi.size = 0U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
EXPECT_EQ(std::size_t(0u), o.get_read_state().size());
|
||||
EXPECT_FALSE(o.is_modified());
|
||||
EXPECT_EQ(test_chunk_size, o.get_chunk_size());
|
||||
}
|
||||
|
||||
TEST(open_file, properly_initializes_state_based_on_chunk_size) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = 8u;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
EXPECT_CALL(um, remove_resume)
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
EXPECT_EQ(fsi.source_path, source_path2);
|
||||
});
|
||||
|
||||
open_file o(1u, 0U, fsi, mp, um);
|
||||
EXPECT_EQ(std::size_t(8u), o.get_read_state().size());
|
||||
EXPECT_TRUE(o.get_read_state().none());
|
||||
|
||||
EXPECT_FALSE(o.is_modified());
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
EXPECT_EQ(std::size_t(1u), o.get_chunk_size());
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
EXPECT_EQ(std::size_t(0U), file.get_read_state().size());
|
||||
EXPECT_FALSE(file.is_modified());
|
||||
EXPECT_EQ(test_chunk_size, file.get_chunk_size());
|
||||
}
|
||||
|
||||
TEST(open_file, will_not_change_source_path_for_0_byte_file) {
|
||||
TEST_F(open_file_test, properly_initializes_state_based_on_chunk_size) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = 0u;
|
||||
fsi.size = 8U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
open_file o(0u, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, 0u, source_path);
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
EXPECT_EQ(fsi.source_path, source_path2);
|
||||
});
|
||||
|
||||
o.close();
|
||||
EXPECT_EQ(api_error::success, o.get_api_error());
|
||||
EXPECT_STREQ(source_path.c_str(), o.get_source_path().c_str());
|
||||
EXPECT_TRUE(utils::file::file(fsi.source_path).exists());
|
||||
open_file file(1U, 0U, fsi, provider, upload_mgr);
|
||||
EXPECT_EQ(std::size_t(8U), file.get_read_state().size());
|
||||
EXPECT_TRUE(file.get_read_state().none());
|
||||
|
||||
EXPECT_FALSE(file.is_modified());
|
||||
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
EXPECT_EQ(std::size_t(1U), file.get_chunk_size());
|
||||
}
|
||||
|
||||
TEST(open_file, will_change_source_path_if_file_size_is_greater_than_0) {
|
||||
TEST_F(open_file_test, will_not_change_source_path_for_0_byte_file) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = 0U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
EXPECT_EQ(fsi.source_path, source_path2);
|
||||
});
|
||||
|
||||
open_file file(0U, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, 0U, source_path);
|
||||
|
||||
file.close();
|
||||
EXPECT_EQ(api_error::success, file.get_api_error());
|
||||
EXPECT_STREQ(source_path.c_str(), file.get_source_path().c_str());
|
||||
EXPECT_TRUE(utils::file::file(fsi.source_path).exists());
|
||||
}
|
||||
|
||||
TEST_F(open_file_test, will_change_source_path_if_file_size_is_greater_than_0) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
EXPECT_CALL(um, remove_resume)
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
EXPECT_EQ(fsi.source_path, source_path2);
|
||||
});
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
.WillOnce([&fsi](const std::string &, const std::string &,
|
||||
const std::string &source_path2) -> api_error {
|
||||
EXPECT_STRNE(fsi.source_path.c_str(), source_path2.c_str());
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, test_chunk_size,
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, test_chunk_size,
|
||||
source_path);
|
||||
|
||||
o.close();
|
||||
EXPECT_EQ(api_error::download_stopped, o.get_api_error());
|
||||
EXPECT_STRNE(source_path.c_str(), o.get_source_path().c_str());
|
||||
file.close();
|
||||
EXPECT_EQ(api_error::download_stopped, file.get_api_error());
|
||||
EXPECT_STRNE(source_path.c_str(), file.get_source_path().c_str());
|
||||
EXPECT_FALSE(utils::file::file(source_path).exists());
|
||||
}
|
||||
|
||||
TEST(open_file,
|
||||
will_not_change_source_path_if_file_size_matches_existing_source) {
|
||||
TEST_F(open_file_test,
|
||||
will_not_change_source_path_if_file_size_matches_existing_source) {
|
||||
auto &rf = test::create_random_file(test_chunk_size);
|
||||
const auto source_path = rf.get_path();
|
||||
rf.close();
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, test_chunk_size,
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
EXPECT_EQ(fsi.source_path, source_path2);
|
||||
});
|
||||
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, test_chunk_size,
|
||||
source_path);
|
||||
|
||||
o.close();
|
||||
EXPECT_EQ(api_error::success, o.get_api_error());
|
||||
EXPECT_STREQ(source_path.c_str(), o.get_source_path().c_str());
|
||||
file.close();
|
||||
EXPECT_EQ(api_error::success, file.get_api_error());
|
||||
EXPECT_STREQ(source_path.c_str(), file.get_source_path().c_str());
|
||||
EXPECT_TRUE(utils::file::file(source_path).exists());
|
||||
}
|
||||
|
||||
TEST(open_file, write_with_incomplete_download) {
|
||||
TEST_F(open_file_test, write_with_incomplete_download) {
|
||||
const auto source_path = test::generate_test_file_name("test");
|
||||
auto &nf = test::create_random_file(test_chunk_size * 2u);
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size * 2u;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, test_chunk_size * 2u,
|
||||
source_path);
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success,
|
||||
test_chunk_size * 2u, source_path);
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, _))
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, _))
|
||||
.WillOnce([](const std::string &, const api_meta_map &meta) -> api_error {
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_CHANGED).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_MODIFIED).empty()));
|
||||
@ -220,7 +253,7 @@ TEST(open_file, write_with_incomplete_download) {
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_CALL(mp, read_file_bytes)
|
||||
EXPECT_CALL(provider, read_file_bytes)
|
||||
.WillRepeatedly([&nf](const std::string & /* api_path */,
|
||||
std::size_t size, std::uint64_t offset,
|
||||
data_buffer &data,
|
||||
@ -229,7 +262,7 @@ TEST(open_file, write_with_incomplete_download) {
|
||||
return api_error::download_stopped;
|
||||
}
|
||||
|
||||
if (offset == 0u) {
|
||||
if (offset == 0U) {
|
||||
std::size_t bytes_read{};
|
||||
data.resize(size);
|
||||
auto ret = nf.read(data, offset, &bytes_read) ? api_error::success
|
||||
@ -244,11 +277,12 @@ TEST(open_file, write_with_incomplete_download) {
|
||||
return api_error::download_stopped;
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, remove_upload).WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, remove_upload)
|
||||
.WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, store_resume)
|
||||
EXPECT_CALL(upload_mgr, store_resume)
|
||||
.Times(2)
|
||||
.WillRepeatedly([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
@ -257,55 +291,53 @@ TEST(open_file, write_with_incomplete_download) {
|
||||
|
||||
data_buffer data = {10, 9, 8};
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_EQ(api_error::success, o.write(0u, data, bytes_written));
|
||||
validate_write(o, 0u, data, bytes_written);
|
||||
EXPECT_EQ(api_error::success, file.write(0U, data, bytes_written));
|
||||
validate_write(file, 0U, data, bytes_written);
|
||||
|
||||
const auto test_state = [&]() {
|
||||
EXPECT_STREQ(source_path.c_str(), o.get_source_path().c_str());
|
||||
EXPECT_STREQ(source_path.c_str(), file.get_source_path().c_str());
|
||||
|
||||
EXPECT_FALSE(o.can_close());
|
||||
EXPECT_FALSE(file.can_close());
|
||||
|
||||
EXPECT_TRUE(o.is_modified());
|
||||
EXPECT_TRUE(file.is_modified());
|
||||
|
||||
EXPECT_TRUE(o.get_read_state(0u));
|
||||
EXPECT_FALSE(o.get_read_state(1u));
|
||||
EXPECT_TRUE(file.get_read_state(0U));
|
||||
EXPECT_FALSE(file.get_read_state(1u));
|
||||
};
|
||||
test_state();
|
||||
|
||||
o.close();
|
||||
file.close();
|
||||
nf.close();
|
||||
|
||||
test_state();
|
||||
|
||||
EXPECT_EQ(api_error::download_incomplete, o.get_api_error());
|
||||
EXPECT_EQ(api_error::download_incomplete, file.get_api_error());
|
||||
|
||||
EXPECT_TRUE(utils::file::file(fsi.source_path).exists());
|
||||
}
|
||||
|
||||
TEST(open_file, write_new_file) {
|
||||
TEST_F(open_file_test, write_new_file) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = 0u;
|
||||
fsi.size = 0U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
EXPECT_CALL(um, store_resume).WillOnce([&fsi](const i_open_file &o) {
|
||||
EXPECT_EQ(fsi.api_path, o.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, o.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, store_resume)
|
||||
.WillOnce([&fsi](const i_open_file &file) {
|
||||
EXPECT_EQ(fsi.api_path, file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, file.get_source_path());
|
||||
});
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, 0u, source_path);
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, 0U, source_path);
|
||||
data_buffer data = {10, 9, 8};
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, _))
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, _))
|
||||
.WillOnce([&data](const std::string &,
|
||||
const api_meta_map &meta) -> api_error {
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_CHANGED).empty()));
|
||||
@ -322,62 +354,62 @@ TEST(open_file, write_new_file) {
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, remove_upload).WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, remove_upload)
|
||||
.WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, queue_upload).WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, queue_upload)
|
||||
.WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_EQ(api_error::success, o.write(0u, data, bytes_written));
|
||||
EXPECT_EQ(api_error::success, file.write(0U, data, bytes_written));
|
||||
|
||||
const auto test_state = [&]() {
|
||||
EXPECT_STREQ(source_path.c_str(), o.get_source_path().c_str());
|
||||
EXPECT_STREQ(source_path.c_str(), file.get_source_path().c_str());
|
||||
|
||||
EXPECT_FALSE(o.can_close());
|
||||
EXPECT_TRUE(o.is_modified());
|
||||
EXPECT_FALSE(file.can_close());
|
||||
EXPECT_TRUE(file.is_modified());
|
||||
|
||||
EXPECT_TRUE(o.get_read_state(0u));
|
||||
EXPECT_EQ(std::size_t(1u), o.get_read_state().size());
|
||||
EXPECT_EQ(data.size(), o.get_file_size());
|
||||
EXPECT_TRUE(file.get_read_state(0U));
|
||||
EXPECT_EQ(std::size_t(1u), file.get_read_state().size());
|
||||
EXPECT_EQ(data.size(), file.get_file_size());
|
||||
};
|
||||
test_state();
|
||||
|
||||
o.close();
|
||||
file.close();
|
||||
|
||||
test_state();
|
||||
|
||||
EXPECT_EQ(api_error::success, o.get_api_error());
|
||||
EXPECT_EQ(api_error::success, file.get_api_error());
|
||||
|
||||
EXPECT_TRUE(utils::file::file(fsi.source_path).exists());
|
||||
}
|
||||
|
||||
TEST(open_file, write_new_file_multiple_chunks) {
|
||||
TEST_F(open_file_test, write_new_file_multiple_chunks) {
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = 0u;
|
||||
fsi.size = 0U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
EXPECT_CALL(um, store_resume).WillOnce([&fsi](const i_open_file &o) {
|
||||
EXPECT_EQ(fsi.api_path, o.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, o.get_source_path());
|
||||
});
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, 0u, source_path);
|
||||
EXPECT_CALL(upload_mgr, store_resume)
|
||||
.WillOnce([&fsi](const i_open_file &file) {
|
||||
EXPECT_EQ(fsi.api_path, file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, file.get_source_path());
|
||||
});
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, 0U, source_path);
|
||||
data_buffer data = {10, 9, 8};
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, _))
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, _))
|
||||
.WillOnce([&data](const std::string &,
|
||||
const api_meta_map &meta) -> api_error {
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_CHANGED).empty()));
|
||||
@ -410,153 +442,159 @@ TEST(open_file, write_new_file_multiple_chunks) {
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, remove_upload).WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, remove_upload)
|
||||
.WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, queue_upload).WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, queue_upload)
|
||||
.WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
|
||||
std::size_t bytes_written{};
|
||||
EXPECT_EQ(api_error::success, o.write(0u, data, bytes_written));
|
||||
EXPECT_EQ(api_error::success, o.write(test_chunk_size, data, bytes_written));
|
||||
EXPECT_EQ(api_error::success, file.write(0U, data, bytes_written));
|
||||
EXPECT_EQ(api_error::success,
|
||||
file.write(test_chunk_size, data, bytes_written));
|
||||
|
||||
const auto test_state = [&]() {
|
||||
EXPECT_STREQ(source_path.c_str(), o.get_source_path().c_str());
|
||||
EXPECT_STREQ(source_path.c_str(), file.get_source_path().c_str());
|
||||
|
||||
EXPECT_FALSE(o.can_close());
|
||||
EXPECT_TRUE(o.is_modified());
|
||||
EXPECT_FALSE(file.can_close());
|
||||
EXPECT_TRUE(file.is_modified());
|
||||
|
||||
EXPECT_EQ(std::size_t(2u), o.get_read_state().size());
|
||||
for (std::size_t i = 0u; i < 2u; i++) {
|
||||
EXPECT_TRUE(o.get_read_state(i));
|
||||
EXPECT_EQ(std::size_t(2u), file.get_read_state().size());
|
||||
for (std::size_t i = 0U; i < 2u; i++) {
|
||||
EXPECT_TRUE(file.get_read_state(i));
|
||||
}
|
||||
|
||||
EXPECT_EQ(data.size() + test_chunk_size, o.get_file_size());
|
||||
EXPECT_EQ(data.size() + test_chunk_size, file.get_file_size());
|
||||
};
|
||||
test_state();
|
||||
|
||||
o.close();
|
||||
file.close();
|
||||
|
||||
test_state();
|
||||
|
||||
EXPECT_EQ(api_error::success, o.get_api_error());
|
||||
EXPECT_EQ(api_error::success, file.get_api_error());
|
||||
|
||||
EXPECT_TRUE(utils::file::file(fsi.source_path).exists());
|
||||
}
|
||||
|
||||
TEST(open_file, resize_file_to_0_bytes) {
|
||||
auto &rf = test::create_random_file(test_chunk_size * 4u);
|
||||
const auto source_path = rf.get_path();
|
||||
rf.close();
|
||||
TEST_F(open_file_test, resize_file_to_0_bytes) {
|
||||
auto &r_file = test::create_random_file(test_chunk_size * 4U);
|
||||
const auto source_path = r_file.get_path();
|
||||
r_file.close();
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size * 4u;
|
||||
fsi.size = test_chunk_size * 4U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, fsi.size, source_path);
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, _))
|
||||
EXPECT_EQ(api_error::success, cache_size_mgr::instance().expand(fsi.size));
|
||||
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, fsi.size,
|
||||
source_path);
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, _))
|
||||
.WillOnce([](const std::string &, const api_meta_map &meta) -> api_error {
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_CHANGED).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_MODIFIED).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_SIZE).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_WRITTEN).empty()));
|
||||
EXPECT_EQ(std::size_t(0u),
|
||||
EXPECT_EQ(std::size_t(0U),
|
||||
utils::string::to_size_t(meta.at(META_SIZE)));
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, remove_upload).WillOnce([&fsi](const std ::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, remove_upload)
|
||||
.WillOnce([&fsi](const std ::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, queue_upload).WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(um, store_resume).WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, queue_upload)
|
||||
.WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, store_resume)
|
||||
.WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
|
||||
EXPECT_EQ(api_error::success, o.resize(0u));
|
||||
EXPECT_EQ(api_error::success, file.resize(0U));
|
||||
|
||||
EXPECT_EQ(std::size_t(0u), o.get_file_size());
|
||||
EXPECT_FALSE(o.can_close());
|
||||
EXPECT_TRUE(o.is_modified());
|
||||
EXPECT_EQ(std::size_t(0U), file.get_file_size());
|
||||
EXPECT_FALSE(file.can_close());
|
||||
EXPECT_TRUE(file.is_modified());
|
||||
|
||||
EXPECT_EQ(std::size_t(0u), o.get_read_state().size());
|
||||
EXPECT_EQ(std::size_t(0U), file.get_read_state().size());
|
||||
}
|
||||
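New in this test is the cache_size_mgr::instance().expand(fsi.size) call before constructing the open_file, which pre-reserves cache space so the resize path does not trip the maximum-cache-size limit. A hedged sketch of the kind of bookkeeping such a manager might perform (illustrative only; the real class and its limits are not shown in this diff):

// Hedged sketch (not the repertory implementation): expand() reserves space
// before local writes and would block or fail once the configured maximum is
// reached; shrink() returns space when data is evicted. Limits are made up.
#include <cassert>
#include <cstdint>
#include <mutex>

namespace demo {
class cache_size_mgr final {
public:
  static cache_size_mgr &instance() {
    static cache_size_mgr mgr;
    return mgr;
  }

  bool expand(std::uint64_t size) {
    std::lock_guard lock(mtx_);
    if (used_ + size > max_size_) {
      return false; // real code might wait until space frees up
    }
    used_ += size;
    return true;
  }

  void shrink(std::uint64_t size) {
    std::lock_guard lock(mtx_);
    used_ = (size > used_) ? 0U : used_ - size;
  }

private:
  std::uint64_t max_size_{20ULL * 1024ULL * 1024ULL};
  std::uint64_t used_{0U};
  std::mutex mtx_;
};
} // namespace demo

int main() {
  assert(demo::cache_size_mgr::instance().expand(4096U));
  demo::cache_size_mgr::instance().shrink(4096U);
  return 0;
}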
|
||||
TEST(open_file, resize_file_by_full_chunk) {
|
||||
auto &rf = test::create_random_file(test_chunk_size * 4u);
|
||||
const auto source_path = rf.get_path();
|
||||
rf.close();
|
||||
TEST_F(open_file_test, resize_file_by_full_chunk) {
|
||||
auto &r_file = test::create_random_file(test_chunk_size * 4U);
|
||||
const auto source_path = r_file.get_path();
|
||||
r_file.close();
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
fsi.size = test_chunk_size * 4u;
|
||||
fsi.size = test_chunk_size * 4U;
|
||||
fsi.source_path = source_path;
|
||||
|
||||
EXPECT_CALL(um, store_resume).WillOnce([&fsi](const i_open_file &o) {
|
||||
EXPECT_EQ(fsi.api_path, o.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, o.get_source_path());
|
||||
});
|
||||
EXPECT_EQ(api_error::success, cache_size_mgr::instance().expand(fsi.size));
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
test_closeable_open_file(o, false, api_error::success, fsi.size, source_path);
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, _))
|
||||
EXPECT_CALL(upload_mgr, store_resume)
|
||||
.WillOnce([&fsi](const i_open_file &file) {
|
||||
EXPECT_EQ(fsi.api_path, file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, file.get_source_path());
|
||||
});
|
||||
|
||||
open_file file(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
test_closeable_open_file(file, false, api_error::success, fsi.size,
|
||||
source_path);
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, _))
|
||||
.WillOnce([](const std::string &, const api_meta_map &meta) -> api_error {
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_CHANGED).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_MODIFIED).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_SIZE).empty()));
|
||||
EXPECT_NO_THROW(EXPECT_FALSE(meta.at(META_WRITTEN).empty()));
|
||||
EXPECT_EQ(std::size_t(test_chunk_size * 3u),
|
||||
EXPECT_EQ(std::size_t(test_chunk_size * 3U),
|
||||
utils::string::to_size_t(meta.at(META_SIZE)));
|
||||
return api_error::success;
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, remove_upload).WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, remove_upload)
|
||||
.WillOnce([&fsi](const std::string &api_path) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, queue_upload).WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
EXPECT_CALL(upload_mgr, queue_upload)
|
||||
.WillOnce([&fsi](const i_open_file &cur_file) {
|
||||
EXPECT_EQ(fsi.api_path, cur_file.get_api_path());
|
||||
EXPECT_EQ(fsi.source_path, cur_file.get_source_path());
|
||||
});
|
||||
|
||||
EXPECT_EQ(api_error::success, o.resize(test_chunk_size * 3u));
|
||||
EXPECT_EQ(api_error::success, file.resize(test_chunk_size * 3U));
|
||||
|
||||
EXPECT_EQ(std::size_t(test_chunk_size * 3u), o.get_file_size());
|
||||
EXPECT_FALSE(o.can_close());
|
||||
EXPECT_TRUE(o.is_modified());
|
||||
EXPECT_EQ(std::size_t(3u), o.get_read_state().size());
|
||||
EXPECT_EQ(std::size_t(test_chunk_size * 3U), file.get_file_size());
|
||||
EXPECT_FALSE(file.can_close());
|
||||
EXPECT_TRUE(file.is_modified());
|
||||
EXPECT_EQ(std::size_t(3U), file.get_read_state().size());
|
||||
}
|
||||
|
||||
TEST(open_file, can_add_handle) {
|
||||
TEST_F(open_file_test, can_add_handle) {
|
||||
event_system::instance().start();
|
||||
console_consumer c;
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
@ -582,9 +620,9 @@ TEST(open_file, can_add_handle) {
|
||||
EXPECT_STREQ("1", ee.get_handle().get<std::string>().c_str());
|
||||
});
|
||||
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
EXPECT_CALL(um, remove_resume)
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
@ -594,7 +632,7 @@ TEST(open_file, can_add_handle) {
|
||||
event_capture capture(
|
||||
{"filesystem_item_opened", "filesystem_item_handle_opened"});
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
open_file o(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
#if defined(_WIN32)
|
||||
o.add(1u, {});
|
||||
EXPECT_EQ(nullptr, o.get_open_data(1u).directory_buffer);
|
||||
@ -608,17 +646,14 @@ TEST(open_file, can_add_handle) {
|
||||
event_system::instance().stop();
|
||||
}
|
||||
|
||||
TEST(open_file, can_remove_handle) {
|
||||
TEST_F(open_file_test, can_remove_handle) {
|
||||
event_system::instance().start();
|
||||
console_consumer c;
|
||||
|
||||
const auto source_path =
|
||||
test::generate_test_file_name("file_manager_open_file_test");
|
||||
|
||||
mock_provider mp;
|
||||
mock_upload_manager um;
|
||||
|
||||
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.api_path = "/test.txt";
|
||||
@ -644,13 +679,13 @@ TEST(open_file, can_remove_handle) {
|
||||
EXPECT_STREQ("1", ee.get_handle().get<std::string>().c_str());
|
||||
});
|
||||
|
||||
EXPECT_CALL(um, remove_resume)
|
||||
EXPECT_CALL(upload_mgr, remove_resume)
|
||||
.WillOnce(
|
||||
[&fsi](const std::string &api_path, const std::string &source_path2) {
|
||||
EXPECT_EQ(fsi.api_path, api_path);
|
||||
EXPECT_EQ(fsi.source_path, source_path2);
|
||||
});
|
||||
EXPECT_CALL(mp, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
EXPECT_CALL(provider, set_item_meta(fsi.api_path, META_SOURCE, _))
|
||||
.WillOnce(Return(api_error::success));
|
||||
|
||||
event_capture capture({
|
||||
@ -660,7 +695,7 @@ TEST(open_file, can_remove_handle) {
|
||||
"filesystem_item_closed",
|
||||
});
|
||||
|
||||
open_file o(test_chunk_size, 0U, fsi, mp, um);
|
||||
open_file o(test_chunk_size, 0U, fsi, provider, upload_mgr);
|
||||
#if defined(_WIN32)
|
||||
o.add(1u, {});
|
||||
#else
|
||||
@ -673,12 +708,13 @@ TEST(open_file, can_remove_handle) {
|
||||
event_system::instance().stop();
|
||||
}
|
||||
|
||||
TEST(open_file,
|
||||
can_read_locally_after_write_with_file_size_greater_than_existing_size) {}
|
||||
TEST_F(open_file_test,
|
||||
can_read_locally_after_write_with_file_size_greater_than_existing_size) {
|
||||
}
|
||||
|
||||
TEST(open_file, test_valid_download_chunks) {}
|
||||
TEST_F(open_file_test, test_valid_download_chunks) {}
|
||||
|
||||
TEST(open_file, test_full_download_with_partial_chunk) {}
|
||||
TEST_F(open_file_test, test_full_download_with_partial_chunk) {}
|
||||
|
||||
TEST(open_file, source_is_read_after_full_download) {}
|
||||
TEST_F(open_file_test, source_is_read_after_full_download) {}
|
||||
} // namespace repertory
|
||||
|
@ -31,18 +31,29 @@
|
||||
namespace {
|
||||
constexpr const std::size_t test_chunk_size{1024U};
|
||||
|
||||
std::string ring_buffer_dir = repertory::utils::path::combine(
|
||||
repertory::test::get_test_output_dir(),
|
||||
{"file_manager_ring_buffer_open_file_test"});
|
||||
const auto ring_buffer_dir{
|
||||
repertory::utils::path::combine(
|
||||
repertory::test::get_test_output_dir(),
|
||||
{"file_manager_ring_buffer_open_file_test"}),
|
||||
};
|
||||
} // namespace
|
||||
|
||||
namespace repertory {
|
||||
TEST(ring_buffer_open_file, can_forward_to_last_chunk) {
|
||||
class ring_buffer_open_file_test : public ::testing::Test {
|
||||
public:
|
||||
console_consumer con_consumer;
|
||||
mock_provider provider;
|
||||
|
||||
protected:
|
||||
void SetUp() override { event_system::instance().start(); }
|
||||
|
||||
void TearDown() override { event_system::instance().stop(); }
|
||||
};
|
||||
|
||||
TEST_F(ring_buffer_open_file_test, can_forward_to_last_chunk) {
|
||||
auto source_path = test::generate_test_file_name("ring_buffer_open_file");
|
||||
|
||||
mock_provider prov;
|
||||
|
||||
EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
|
||||
|
||||
filesystem_item fsi;
|
||||
fsi.directory = false;
|
||||
@ -51,8 +62,8 @@ TEST(ring_buffer_open_file, can_forward_to_last_chunk) {
|
||||
fsi.source_path = source_path;
|
||||
|
||||
{
|
||||
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(0U, 3U);
file.forward(4U);

@ -63,17 +74,13 @@ TEST(ring_buffer_open_file, can_forward_to_last_chunk) {
EXPECT_TRUE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file,
can_forward_to_last_chunk_if_count_is_greater_than_remaining) {
TEST_F(ring_buffer_open_file_test,
can_forward_to_last_chunk_if_count_is_greater_than_remaining) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -82,8 +89,8 @@ TEST(ring_buffer_open_file,
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(0U, 3U);
file.forward(100U);

@ -94,16 +101,12 @@ TEST(ring_buffer_open_file,
EXPECT_FALSE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, can_forward_after_last_chunk) {
TEST_F(ring_buffer_open_file_test, can_forward_after_last_chunk) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -112,8 +115,8 @@ TEST(ring_buffer_open_file, can_forward_after_last_chunk) {
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(0U, 3U);
file.forward(5U);

@ -125,16 +128,12 @@ TEST(ring_buffer_open_file, can_forward_after_last_chunk) {
EXPECT_TRUE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, can_forward_and_rollover_after_last_chunk) {
TEST_F(ring_buffer_open_file_test, can_forward_and_rollover_after_last_chunk) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -143,8 +142,8 @@ TEST(ring_buffer_open_file, can_forward_and_rollover_after_last_chunk) {
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(16U, 20U);
file.forward(8U);

@ -152,16 +151,12 @@ TEST(ring_buffer_open_file, can_forward_and_rollover_after_last_chunk) {
EXPECT_EQ(std::size_t(21U), file.get_first_chunk());
EXPECT_EQ(std::size_t(28U), file.get_last_chunk());
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, can_reverse_to_first_chunk) {
TEST_F(ring_buffer_open_file_test, can_reverse_to_first_chunk) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -170,8 +165,8 @@ TEST(ring_buffer_open_file, can_reverse_to_first_chunk) {
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(0U, 3U);
file.reverse(3U);

@ -182,17 +177,13 @@ TEST(ring_buffer_open_file, can_reverse_to_first_chunk) {
EXPECT_TRUE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file,
can_reverse_to_first_chunk_if_count_is_greater_than_remaining) {
TEST_F(ring_buffer_open_file_test,
can_reverse_to_first_chunk_if_count_is_greater_than_remaining) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -201,8 +192,8 @@ TEST(ring_buffer_open_file,
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(0U, 3U);
file.reverse(13U);

@ -213,16 +204,12 @@ TEST(ring_buffer_open_file,
EXPECT_TRUE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, can_reverse_before_first_chunk) {
TEST_F(ring_buffer_open_file_test, can_reverse_before_first_chunk) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -231,8 +218,8 @@ TEST(ring_buffer_open_file, can_reverse_before_first_chunk) {
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(1U, 3U);
file.reverse(3U);

@ -244,16 +231,13 @@ TEST(ring_buffer_open_file, can_reverse_before_first_chunk) {
EXPECT_TRUE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, can_reverse_and_rollover_before_first_chunk) {
TEST_F(ring_buffer_open_file_test,
can_reverse_and_rollover_before_first_chunk) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -262,8 +246,8 @@ TEST(ring_buffer_open_file, can_reverse_and_rollover_before_first_chunk) {
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(16U, 20U);
file.reverse(8U);

@ -279,16 +263,12 @@ TEST(ring_buffer_open_file, can_reverse_and_rollover_before_first_chunk) {
EXPECT_TRUE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, can_reverse_full_ring) {
TEST_F(ring_buffer_open_file_test, can_reverse_full_ring) {
auto source_path = test::generate_test_file_name("ring_buffer_open_file");

mock_provider prov;

EXPECT_CALL(prov, is_read_only()).WillRepeatedly(Return(false));
EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));

filesystem_item fsi;
fsi.directory = false;
@ -297,8 +277,8 @@ TEST(ring_buffer_open_file, can_reverse_full_ring) {
fsi.source_path = source_path;

{
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi, prov,
8U);
ring_buffer_open_file file(ring_buffer_dir, test_chunk_size, 30U, fsi,
provider, 8U);
file.set(8U, 15U);
file.reverse(16U);

@ -310,12 +290,10 @@ TEST(ring_buffer_open_file, can_reverse_full_ring) {
EXPECT_FALSE(file.get_read_state(chunk));
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, read_full_file) {
auto &nf = test::create_random_file(test_chunk_size * 32u);
TEST_F(ring_buffer_open_file_test, read_full_file) {
auto &nf = test::create_random_file(test_chunk_size * 33u + 11u);
auto download_source_path = nf.get_path();

auto dest_path = test::generate_test_file_name("ring_buffer_open_file");
@ -327,14 +305,17 @@ TEST(ring_buffer_open_file, read_full_file) {
filesystem_item fsi;
fsi.directory = false;
fsi.api_path = "/test.txt";
fsi.size = test_chunk_size * 32u;
fsi.size = test_chunk_size * 33u + 11u;
fsi.source_path = test::generate_test_file_name("ring_buffer_open_file");

std::mutex read_mtx;
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([&nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
.WillRepeatedly([&read_mtx, &nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
mutex_lock lock(read_mtx);

EXPECT_FALSE(stop_requested);
std::size_t bytes_read{};
data.resize(size);
@ -375,11 +356,9 @@ TEST(ring_buffer_open_file, read_full_file) {
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, read_full_file_in_reverse) {
TEST_F(ring_buffer_open_file_test, read_full_file_in_reverse) {
auto &nf = test::create_random_file(test_chunk_size * 32u);
auto download_source_path = nf.get_path();

@ -395,11 +374,14 @@ TEST(ring_buffer_open_file, read_full_file_in_reverse) {
fsi.size = test_chunk_size * 32u;
fsi.source_path = test::generate_test_file_name("ring_buffer_open_file");

std::mutex read_mtx;
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([&nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
.WillRepeatedly([&read_mtx, &nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
mutex_lock lock(read_mtx);

EXPECT_FALSE(stop_requested);
std::size_t bytes_read{};
data.resize(size);
@ -417,15 +399,15 @@ TEST(ring_buffer_open_file, read_full_file_in_reverse) {
EXPECT_TRUE(nf2);

auto to_read = fsi.size;
std::size_t chunk = rb.get_total_chunks() - 1u;
while (to_read) {
std::size_t chunk = rb.get_total_chunks() - 1U;
while (to_read > 0U) {
data_buffer data{};
EXPECT_EQ(api_error::success,
rb.read(test_chunk_size, chunk * test_chunk_size, data));

std::size_t bytes_written{};
EXPECT_TRUE(nf2.write(data, chunk * test_chunk_size, &bytes_written));
chunk--;
--chunk;
to_read -= data.size();
}
nf2.close();
@ -440,11 +422,9 @@ TEST(ring_buffer_open_file, read_full_file_in_reverse) {
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, read_full_file_in_partial_chunks) {
TEST_F(ring_buffer_open_file_test, read_full_file_in_partial_chunks) {
auto &nf = test::create_random_file(test_chunk_size * 32u);
auto download_source_path = nf.get_path();

@ -460,11 +440,14 @@ TEST(ring_buffer_open_file, read_full_file_in_partial_chunks) {
fsi.size = test_chunk_size * 32u;
fsi.source_path = test::generate_test_file_name("test");

std::mutex read_mtx;
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([&nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
.WillRepeatedly([&read_mtx, &nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
mutex_lock lock(read_mtx);

EXPECT_FALSE(stop_requested);
std::size_t bytes_read{};
data.resize(size);
@ -506,11 +489,10 @@ TEST(ring_buffer_open_file, read_full_file_in_partial_chunks) {
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}

TEST(ring_buffer_open_file, read_full_file_in_partial_chunks_in_reverse) {
TEST_F(ring_buffer_open_file_test,
read_full_file_in_partial_chunks_in_reverse) {
auto &nf = test::create_random_file(test_chunk_size * 32u);
auto download_source_path = nf.get_path();

@ -526,11 +508,14 @@ TEST(ring_buffer_open_file, read_full_file_in_partial_chunks_in_reverse) {
fsi.size = test_chunk_size * 32u;
fsi.source_path = test::generate_test_file_name("ring_buffer_open_file");

std::mutex read_mtx;
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([&nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
.WillRepeatedly([&read_mtx, &nf](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested) -> api_error {
mutex_lock lock(read_mtx);

EXPECT_FALSE(stop_requested);
std::size_t bytes_read{};
data.resize(size);
@ -577,7 +562,5 @@ TEST(ring_buffer_open_file, read_full_file_in_partial_chunks_in_reverse) {
EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
}
}

EXPECT_TRUE(utils::file::directory(ring_buffer_dir).remove_recursively());
}
} // namespace repertory
@ -25,8 +25,8 @@
#include "utils/string.hpp"

namespace repertory::utils {
auto compare_version_strings(std::string version1, std::string version2)
-> std::int32_t {
auto compare_version_strings(std::string version1,
std::string version2) -> std::int32_t {

if (utils::string::contains(version1, "-")) {
version1 = utils::string::split(version1, '-', true)[0U];