8dd46b8ad8 v2.0.2-rc (#27)
## v2.0.2-rc

### BREAKING CHANGES

* Refactored `config.json` - will need to verify configuration settings prior to mounting

### Issues

* \#12 \[Unit Test\] Complete all providers unit tests
* \#14 \[Unit Test\] SQLite mini-ORM unit tests and cleanup
* \#16 Add support for bucket name in Sia provider
* \#17 Update to common C++ build system
  * A single 64-bit Linux Jenkins server is used to build all Linux and Windows versions
  * All dependency sources are now included
  * MSVC is no longer supported
  * MSYS2 is required for building Windows binaries on Windows
  * OS X support is temporarily disabled
* \#19 \[bug\] Rename is broken for existing files
* \#23 \[bug\] Incorrect file size displayed while upload is pending
* \#24 RocksDB implementations should be transactional
* \#25 Writes should block when maximum cache size is reached
* \#26 Complete ring buffer and direct download support

### Changes from v2.0.1-rc

* Ability to choose between RocksDB and SQLite databases
* Added direct reads and implemented download fallback
* Corrected file times on S3 and Sia providers
* Corrected handling of `chown()` and `chmod()`
* Fixed erroneous download of chunks after resize

Reviewed-on: #27
2024-12-28 15:56:40 -06:00
153 changed files with 12606 additions and 5497 deletions
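
The breaking `config.json` refactor adds a selectable database backend (`database_type`, with `get_database_type()`/`set_database_type()` in the `app_config` diff below). A minimal sketch of switching backends through that API, assuming the repertory headers are on the include path and that `provider_type::s3` and `database_type::sqlite` are valid enumerators (only `database_type::rocksdb` appears verbatim in this diff):

```cpp
// Sketch only - not part of this commit. Uses names from the app_config diff
// below; provider_type::s3 and database_type::sqlite are assumed enumerators.
#include "app_config.hpp" // assumed include path (guard: REPERTORY_INCLUDE_APP_CONFIG_HPP_)
#include "types/repertory.hpp"

#include <iostream>

int main() {
  using namespace repertory;

  // Open (or create) the configuration for an S3 mount in the default data directory.
  app_config cfg(provider_type::s3);

  // Switch the metadata backend from the default (RocksDB) to SQLite.
  cfg.set_database_type(database_type::sqlite);
  cfg.save(); // the destructor also saves, but saving explicitly makes the intent clear

  std::cout << "backend: "
            << (cfg.get_database_type() == database_type::rocksdb ? "rocksdb" : "sqlite")
            << '\n';
  return 0;
}
```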

View File

@@ -96,6 +96,7 @@ expect_streq
fallocate_impl
fext
fgetattr
fgetattr_impl
filebase
flac_version
flag_nopath
@@ -160,6 +161,7 @@ openssldir
pkgconfig
plarge_integer
plex
println
project_enable_fontconfig
project_enable_gtkmm
project_enable_libdsm

View File

@@ -2,10 +2,14 @@
## v2.0.2-rc
### BREAKING CHANGES
* Refactored `config.json` - will need to verify configuration settings prior to mounting
### Issues
* \#12 \[Unit Test\] Complete all providers unit tests
* \#14 \[Unit Test \] SQLite mini-ORM unit tests and cleanup
* \#14 \[Unit Test\] SQLite mini-ORM unit tests and cleanup
* \#16 Add support for bucket name in Sia provider
* \#17 Update to common c++ build system
* A single 64-bit Linux Jenkins server is used to build all Linux and Windows versions
@@ -14,9 +18,15 @@
* MSYS2 is required for building Windows binaries on Windows
* OS X support is temporarily disabled
* \#19 \[bug\] Rename file is broken for files that are existing
* \#23 \[bug\] Incorrect file size displayed while upload is pending
* \#24 RocksDB implementations should be transactional
* \#25 Writes should block when maximum cache size is reached
* \#26 Complete ring buffer and direct download support
### Changes from v2.0.1-rc
* Ability to choose between RocksDB and SQLite databases
* Added direct reads and implemented download fallback
* Corrected file times on S3 and Sia providers
* Corrected handling of `chown()` and `chmod()`
* Fixed erroneous download of chunks after resize

View File

@@ -148,6 +148,7 @@ endif()
-DPROJECT_ENABLE_LIBSODIUM=${PROJECT_ENABLE_LIBSODIUM}
-DPROJECT_ENABLE_OPENSSL=${PROJECT_ENABLE_OPENSSL}
-DPROJECT_ENABLE_PUGIXML=${PROJECT_ENABLE_PUGIXML}
-DPROJECT_ENABLE_ROCKSDB=${PROJECT_ENABLE_ROCKSDB}
-DPROJECT_ENABLE_SPDLOG=${PROJECT_ENABLE_SPDLOG}
-DPROJECT_ENABLE_SQLITE=${PROJECT_ENABLE_SQLITE}
-DPROJECT_ENABLE_STDUUID=${PROJECT_ENABLE_STDUUID}

View File

@@ -1,19 +1,20 @@
set(BINUTILS_HASH ae9a5789e23459e59606e6714723f2d3ffc31c03174191ef0d015bdf06007450)
set(BOOST_HASH f55c340aa49763b1925ccf02b2e83f35fdcf634c9d5164a2acb87540173c741d)
set(BOOST2_HASH 7bd7ddceec1a1dfdcbdb3e609b60d01739c38390a5f956385a12f3122049f0ca)
set(BOOST_HASH be0d91732d5b0cc6fbb275c7939974457e79b54d6f07ce2e3dfdd68bef883b0b)
set(CPP_HTTPLIB_HASH c1742fc7179aaae2a67ad9bba0740b7e9ffaf4f5e62feef53101ecdef1478716)
set(CURL_HASH d714818f6ac41ae9154850158fed44b7a87650a6d52f83d3bcb9aa527be354d7)
set(EXPAT_HASH fbd032683370d761ba68dba2566d3280a154f5290634172d60a79b24d366d9dc)
set(CPP_HTTPLIB_HASH 405abd8170f2a446fc8612ac635d0db5947c0d2e156e32603403a4496255ff00)
set(CURL_HASH 5a231145114589491fc52da118f9c7ef8abee885d1cb1ced99c7290e9a352f07)
set(EXPAT_HASH 372b18f6527d162fa9658f1c74d22a37429b82d822f5a1e1fc7e00f6045a06a2)
set(GCC_HASH 7d376d445f93126dc545e2c0086d0f647c3094aae081cdb78f42ce2bc25e7293)
set(GTEST_HASH 7b42b4d6ed48810c5362c265a17faebe90dc2373c885e5216439d37927f02926)
set(ICU_HASH 925e6b4b8cf8856e0ac214f6f34e30dee63b7bb7a50460ab4603950eff48f89e)
set(JSON_HASH 0d8ef5af7f9794e3263480193c491549b2ba6cc74bb018906202ada498a79406)
set(LIBSODIUM_HASH 8e5aeca07a723a27bbecc3beef14b0068d37e7fc0e97f51b3f1c82d2a58005c1)
set(MINGW_HASH 3f66bce069ee8bed7439a1a13da7cb91a5e67ea6170f21317ac7f5794625ee10)
set(OPENSSL_HASH 777cd596284c883375a2a7a11bf5d2786fc5413255efab20c50d6ffe6d020b7e)
set(OPENSSL_HASH e15dda82fe2fe8139dc2ac21a36d4ca01d5313c75f99f46c4e8a27709b7294bf)
set(PKG_CONFIG_HASH 6fc69c01688c9458a57eb9a1664c9aba372ccda420a02bf4429fe610e7e7d591)
set(PUGIXML_HASH 2f10e276870c64b1db6809050a75e11a897a8d7456c4be5c6b2e35a11168a015)
set(SPDLOG_HASH 1586508029a7d0670dfcb2d97575dcdc242d3868a259742b69f100801ab4e16b)
set(ROCKSDB_HASH 9b810c81731835fda0d4bbdb51d3199d901fa4395733ab63752d297da84c5a47)
set(SPDLOG_HASH 9962648c9b4f1a7bbc76fd8d9172555bad1871fdb14ff4f842ef87949682caa5)
set(SQLITE_HASH 77823cb110929c2bcb0f5d48e4833b5c59a8a6e40cdea3936b99e199dbbe5784)
set(STDUUID_HASH b1176597e789531c38481acbbed2a6894ad419aab0979c10410d59eb0ebf40d3)
set(ZLIB_HASH 17e88863f3600672ab49182f217281b6fc4d3c762bde361935e436a95214d05c)

View File

@@ -17,6 +17,7 @@ include(cmake/libraries/fuse.cmake)
include(cmake/libraries/json.cmake)
include(cmake/libraries/libsodium.cmake)
include(cmake/libraries/pugixml.cmake)
include(cmake/libraries/rocksdb.cmake)
include(cmake/libraries/spdlog.cmake)
include(cmake/libraries/sqlite.cmake)
include(cmake/libraries/stduuid.cmake)

View File

@@ -0,0 +1,34 @@
if(PROJECT_ENABLE_ROCKSDB)
if(PROJECT_BUILD)
add_definitions(-DPROJECT_ENABLE_ROCKSDB)
find_library(ROCKSDB_LIBRARY NAMES librocksdb.a REQUIRED)
link_libraries(${ROCKSDB_LIBRARY})
elseif(NOT PROJECT_IS_MINGW OR CMAKE_HOST_WIN32)
ExternalProject_Add(rocksdb_project
PREFIX external
URL ${PROJECT_3RD_PARTY_DIR}/rocksdb-${ROCKSDB_VERSION}.tar.gz
URL_HASH SHA256=${ROCKSDB_HASH}
LIST_SEPARATOR |
CMAKE_ARGS ${PROJECT_EXTERNAL_CMAKE_FLAGS}
-DBUILD_SHARED_LIBS=OFF
-DBUILD_STATIC_LIBS=ON
-DFAIL_ON_WARNINGS=OFF
-DPORTABLE=1
-DROCKSDB_BUILD_SHARED=OFF
-DROCKSDB_INSTALL_ON_WINDOWS=ON
-DWITH_BENCHMARK=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_EXAMPLES=OFF
-DWITH_GFLAGS=OFF
-DWITH_IOSTATS_CONTEXT=OFF
-DWITH_PERF_CONTEXT=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_TRACE_TOOLS=OFF
-DWITH_ZLIB=ON
)
list(APPEND PROJECT_DEPENDENCIES rocksdb_project)
endif()
endif()

View File

@@ -6,6 +6,7 @@ option(PROJECT_ENABLE_JSON "Enable JSON for Modern C++ library" ON)
option(PROJECT_ENABLE_LIBSODIUM "Enable libsodium library" ON)
option(PROJECT_ENABLE_OPENSSL "Enable OpenSSL library" ON)
option(PROJECT_ENABLE_PUGIXML "Enable PugiXML library" ON)
option(PROJECT_ENABLE_ROCKSDB "Enable RocksDB library" ON)
option(PROJECT_ENABLE_SPDLOG "Enable spdlog library" ON)
option(PROJECT_ENABLE_SQLITE "Enable SQLite" ON)
option(PROJECT_ENABLE_STDUUID "Enable stduuid library" ON)

View File

@@ -3,13 +3,13 @@ set(BOOST2_MAJOR_VERSION 1)
set(BOOST2_MINOR_VERSION 76)
set(BOOST2_PATCH_VERSION 0)
set(BOOST_MAJOR_VERSION 1)
set(BOOST_MINOR_VERSION 85)
set(BOOST_MINOR_VERSION 87)
set(BOOST_PATCH_VERSION 0)
set(CPP_HTTPLIB_VERSION 0.16.3)
set(CURL2_VERSION 8_9_1)
set(CURL_VERSION 8.9.1)
set(EXPAT2_VERSION 2_6_2)
set(EXPAT_VERSION 2.6.2)
set(CPP_HTTPLIB_VERSION 0.18.1)
set(CURL2_VERSION 8_11_0)
set(CURL_VERSION 8.11.0)
set(EXPAT2_VERSION 2_6_4)
set(EXPAT_VERSION 2.6.4)
set(GCC_VERSION 14.2.0)
set(GTEST_VERSION 1.15.2)
set(ICU_VERSION 75-1)
@@ -17,10 +17,11 @@ set(JSON_VERSION 3.11.3)
set(LIBSODIUM_VERSION 1.0.20)
set(MESA_VERSION 23.3.3)
set(MINGW_VERSION 11.0.1)
set(OPENSSL_VERSION 3.3.1)
set(OPENSSL_VERSION 3.4.0)
set(PKG_CONFIG_VERSION 0.29.2)
set(PUGIXML_VERSION 1.14)
set(SPDLOG_VERSION 1.14.1)
set(ROCKSDB_VERSION 9.7.4)
set(SPDLOG_VERSION 1.15.0)
set(SQLITE2_VERSION 3.46.1)
set(SQLITE_VERSION 3460100)
set(STDUUID_VERSION 1.2.3)

View File

@@ -30,6 +30,7 @@ PROJECT_ENABLE_JSON=ON
PROJECT_ENABLE_LIBSODIUM=ON
PROJECT_ENABLE_OPENSSL=ON
PROJECT_ENABLE_PUGIXML=ON
PROJECT_ENABLE_ROCKSDB=ON
PROJECT_ENABLE_SPDLOG=ON
PROJECT_ENABLE_SQLITE=ON
PROJECT_ENABLE_STDUUID=ON

View File

@@ -604,6 +604,7 @@ RUN if [ -f "/3rd_party/SFML-${MY_SFML_VERSION}.tar.gz" ]; then \
-DBUILD_STATIC_LIBS=ON \
-DCMAKE_CXX_STANDARD=${MY_CXX_STANDARD} \
-DCMAKE_INSTALL_PREFIX=${MY_MINGW_DIR} \
-DCMAKE_SYSTEM_PROCESSOR=AMD64 \
-DCMAKE_TOOLCHAIN_FILE=${MY_TOOLCHAIN_FILE_CMAKE} \
&& make -j${MY_NUM_JOBS} \
&& make install \

View File

@@ -22,10 +22,9 @@
#ifndef REPERTORY_INCLUDE_APP_CONFIG_HPP_
#define REPERTORY_INCLUDE_APP_CONFIG_HPP_
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "events/event.hpp"
#include "types/remote.hpp"
#include "types/repertory.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
class app_config final {
@@ -40,7 +39,7 @@ public:
default_data_directory(const provider_type &prov) -> std::string;
[[nodiscard]] static auto
default_remote_port(const provider_type &prov) -> std::uint16_t;
default_remote_api_port(const provider_type &prov) -> std::uint16_t;
[[nodiscard]] static auto
default_rpc_port(const provider_type &prov) -> std::uint16_t;
@@ -54,406 +53,202 @@ public:
public:
app_config(const provider_type &prov, std::string_view data_directory = "");
app_config() = delete;
app_config(app_config &&) = delete;
app_config(const app_config &) = delete;
~app_config() { save(); }
auto operator=(const app_config &) -> app_config & = delete;
auto operator=(app_config &&) -> app_config & = delete;
private:
provider_type prov_;
std::string api_auth_;
std::uint16_t api_port_;
std::string api_user_;
bool config_changed_;
std::string data_directory_;
std::uint8_t download_timeout_secs_;
bool enable_chunk_downloader_timeout_;
bool enable_comm_duration_events_;
bool enable_drive_events_;
bool enable_max_cache_size_;
atomic<std::string> api_auth_;
std::atomic<std::uint16_t> api_port_;
atomic<std::string> api_user_;
std::atomic<bool> config_changed_;
std::atomic<database_type> db_type_{database_type::rocksdb};
std::atomic<std::uint8_t> download_timeout_secs_;
std::atomic<bool> enable_download_timeout_;
std::atomic<bool> enable_drive_events_;
#if defined(_WIN32)
bool enable_mount_manager_;
std::atomic<bool> enable_mount_manager_;
#endif // defined(_WIN32)
bool enable_remote_mount_;
encrypt_config encrypt_config_;
event_level event_level_;
std::uint32_t eviction_delay_mins_;
bool eviction_uses_accessed_time_;
std::uint8_t high_freq_interval_secs_;
bool is_remote_mount_;
std::uint32_t low_freq_interval_secs_;
std::uint64_t max_cache_size_bytes_;
std::uint8_t max_upload_count_;
std::uint8_t min_download_timeout_secs_;
std::uint16_t online_check_retry_secs_;
std::uint16_t orphaned_file_retention_days_;
std::string preferred_download_type_;
std::uint8_t read_ahead_count_;
std::uint8_t remote_client_pool_size_;
std::string remote_host_name_or_ip_;
std::uint8_t remote_max_connections_;
std::uint16_t remote_port_;
std::uint16_t remote_receive_timeout_secs_;
std::uint16_t remote_send_timeout_secs_;
std::string remote_token_;
std::uint16_t retry_read_count_;
std::uint16_t ring_buffer_file_size_;
std::atomic<event_level> event_level_;
std::atomic<std::uint32_t> eviction_delay_mins_;
std::atomic<bool> eviction_uses_accessed_time_;
std::atomic<std::uint16_t> high_freq_interval_secs_;
std::atomic<std::uint16_t> low_freq_interval_secs_;
std::atomic<std::uint64_t> max_cache_size_bytes_;
std::atomic<std::uint8_t> max_upload_count_;
std::atomic<std::uint16_t> med_freq_interval_secs_;
std::atomic<std::uint16_t> online_check_retry_secs_;
std::atomic<std::uint16_t> orphaned_file_retention_days_;
std::atomic<download_type> preferred_download_type_;
std::atomic<std::uint16_t> retry_read_count_;
std::atomic<std::uint16_t> ring_buffer_file_size_;
std::atomic<std::uint16_t> task_wait_ms_;
private:
std::string cache_directory_;
host_config hc_;
s3_config s3_config_;
sia_config sia_config_{};
std::uint64_t version_{REPERTORY_CONFIG_VERSION};
std::string data_directory_;
atomic<encrypt_config> encrypt_config_;
atomic<host_config> host_config_;
std::string log_directory_;
mutable std::recursive_mutex read_write_mutex_;
mutable std::recursive_mutex remote_mount_mutex_;
atomic<remote::remote_config> remote_config_;
atomic<remote::remote_mount> remote_mount_;
atomic<s3_config> s3_config_;
atomic<sia_config> sia_config_;
std::unordered_map<std::string, std::function<std::string()>>
value_get_lookup_;
std::unordered_map<std::string,
std::function<std::string(const std::string &)>>
value_set_lookup_;
std::uint64_t version_{REPERTORY_CONFIG_VERSION};
private:
[[nodiscard]] auto load() -> bool;
template <typename dest>
auto get_value(const json &json_document, const std::string &name, dest &dst,
bool &success_flag) -> bool {
REPERTORY_USES_FUNCTION_NAME();
auto ret{false};
try {
if (json_document.find(name) != json_document.end()) {
dst = json_document[name].get<dest>();
ret = true;
} else {
success_flag = false;
}
} catch (const json::exception &ex) {
utils::error::raise_error(function_name, ex, "exception occurred");
success_flag = false;
ret = false;
}
return ret;
}
template <typename dest, typename source>
auto set_value(dest &dst, const source &src) -> bool {
auto ret{false};
recur_mutex_lock lock(read_write_mutex_);
if (dst != src) {
dst = src;
config_changed_ = true;
save();
ret = true;
}
return ret;
}
auto set_value(dest &dst, const source &src) -> bool;
public:
[[nodiscard]] auto get_api_auth() const -> std::string { return api_auth_; }
[[nodiscard]] auto get_api_auth() const -> std::string;
[[nodiscard]] auto get_api_port() const -> std::uint16_t { return api_port_; }
[[nodiscard]] auto get_api_port() const -> std::uint16_t;
[[nodiscard]] auto get_api_user() const -> std::string { return api_user_; }
[[nodiscard]] auto get_api_user() const -> std::string;
[[nodiscard]] auto get_cache_directory() const -> std::string {
return cache_directory_;
}
[[nodiscard]] auto get_chunk_downloader_timeout_secs() const -> std::uint8_t {
return std::max(min_download_timeout_secs_, download_timeout_secs_);
}
[[nodiscard]] auto get_cache_directory() const -> std::string;
[[nodiscard]] auto get_config_file_path() const -> std::string;
[[nodiscard]] auto get_data_directory() const -> std::string {
return data_directory_;
}
[[nodiscard]] auto get_database_type() const -> database_type;
[[nodiscard]] auto get_enable_chunk_download_timeout() const -> bool {
return enable_chunk_downloader_timeout_;
}
[[nodiscard]] auto get_data_directory() const -> std::string;
[[nodiscard]] auto get_enable_comm_duration_events() const -> bool {
return enable_comm_duration_events_;
}
[[nodiscard]] auto get_download_timeout_secs() const -> std::uint8_t;
[[nodiscard]] auto get_enable_drive_events() const -> bool {
return enable_drive_events_;
}
[[nodiscard]] auto get_enable_download_timeout() const -> bool;
[[nodiscard]] auto get_encrypt_config() const -> encrypt_config {
return encrypt_config_;
}
[[nodiscard]] auto get_enable_drive_events() const -> bool;
[[nodiscard]] auto get_encrypt_config() const -> encrypt_config;
#if defined(_WIN32)
[[nodiscard]] auto get_enable_mount_manager() const -> bool {
return enable_mount_manager_;
}
#endif
[[nodiscard]] auto get_enable_mount_manager() const -> bool;
#endif // defined(_WIN32)
[[nodiscard]] auto get_enable_max_cache_size() const -> bool {
return enable_max_cache_size_;
}
[[nodiscard]] auto get_event_level() const -> event_level;
[[nodiscard]] auto get_enable_remote_mount() const -> bool {
return enable_remote_mount_;
}
[[nodiscard]] auto get_eviction_delay_mins() const -> std::uint32_t;
[[nodiscard]] auto get_event_level() const -> event_level {
return event_level_;
}
[[nodiscard]] auto get_eviction_uses_accessed_time() const -> bool;
[[nodiscard]] auto get_eviction_delay_mins() const -> std::uint32_t {
return eviction_delay_mins_;
}
[[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint16_t;
[[nodiscard]] auto get_eviction_uses_accessed_time() const -> bool {
return eviction_uses_accessed_time_;
}
[[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(1U), high_freq_interval_secs_);
}
[[nodiscard]] auto get_host_config() const -> host_config { return hc_; }
[[nodiscard]] auto get_is_remote_mount() const -> bool {
return is_remote_mount_;
}
[[nodiscard]] auto get_host_config() const -> host_config;
[[nodiscard]] auto get_json() const -> json;
[[nodiscard]] auto get_log_directory() const -> std::string {
return log_directory_;
}
[[nodiscard]] auto get_log_directory() const -> std::string;
[[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint32_t {
return std::max(1U, low_freq_interval_secs_);
}
[[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint16_t;
[[nodiscard]] auto get_max_cache_size_bytes() const -> std::uint64_t;
[[nodiscard]] auto get_max_upload_count() const -> std::uint8_t {
return std::max(std::uint8_t(1U), max_upload_count_);
}
[[nodiscard]] auto get_max_upload_count() const -> std::uint8_t;
[[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t {
return std::max(std::uint16_t(15U), online_check_retry_secs_);
}
[[nodiscard]] auto get_med_frequency_interval_secs() const -> std::uint16_t;
[[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t {
return std::min(static_cast<std::uint16_t>(31U),
std::max(static_cast<std::uint16_t>(1U),
orphaned_file_retention_days_));
}
[[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t;
[[nodiscard]] auto get_preferred_download_type() const -> download_type {
return download_type_from_string(preferred_download_type_,
download_type::fallback);
}
[[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t;
[[nodiscard]] auto get_provider_type() const -> provider_type {
return prov_;
}
[[nodiscard]] auto get_preferred_download_type() const -> download_type;
[[nodiscard]] auto get_read_ahead_count() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(1U), read_ahead_count_);
}
[[nodiscard]] auto get_provider_type() const -> provider_type;
[[nodiscard]] auto get_remote_client_pool_size() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(5U), remote_client_pool_size_);
}
[[nodiscard]] auto get_remote_config() const -> remote::remote_config;
[[nodiscard]] auto get_remote_host_name_or_ip() const -> std::string {
return remote_host_name_or_ip_;
}
[[nodiscard]] auto get_remote_mount() const -> remote::remote_mount;
[[nodiscard]] auto get_remote_max_connections() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(1U), remote_max_connections_);
}
[[nodiscard]] auto get_retry_read_count() const -> std::uint16_t;
[[nodiscard]] auto get_remote_port() const -> std::uint16_t {
return remote_port_;
}
[[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t;
[[nodiscard]] auto get_remote_receive_timeout_secs() const -> std::uint16_t {
return remote_receive_timeout_secs_;
}
[[nodiscard]] auto get_s3_config() const -> s3_config;
[[nodiscard]] auto get_remote_send_timeout_secs() const -> std::uint16_t {
return remote_send_timeout_secs_;
}
[[nodiscard]] auto get_sia_config() const -> sia_config;
[[nodiscard]] auto get_remote_token() const -> std::string {
return remote_token_;
}
[[nodiscard]] auto get_task_wait_ms() const -> std::uint16_t;
[[nodiscard]] auto get_retry_read_count() const -> std::uint16_t {
return std::max(std::uint16_t(2), retry_read_count_);
}
[[nodiscard]] auto
get_value_by_name(const std::string &name) const -> std::string;
[[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t {
return std::max(
static_cast<std::uint16_t>(64U),
std::min(static_cast<std::uint16_t>(1024U), ring_buffer_file_size_));
}
[[nodiscard]] auto get_s3_config() const -> s3_config { return s3_config_; }
[[nodiscard]] auto get_sia_config() const -> sia_config {
return sia_config_;
}
[[nodiscard]] auto get_value_by_name(const std::string &name) -> std::string;
[[nodiscard]] auto get_version() const -> std::uint64_t { return version_; }
[[nodiscard]] auto get_version() const -> std::uint64_t;
void save();
void set_api_auth(const std::string &api_auth) {
set_value(api_auth_, api_auth);
}
void set_api_auth(const std::string &value);
void set_api_port(std::uint16_t api_port) { set_value(api_port_, api_port); }
void set_api_port(std::uint16_t value);
void set_api_user(const std::string &api_user) {
set_value(api_user_, api_user);
}
void set_api_user(const std::string &value);
void set_chunk_downloader_timeout_secs(
std::uint8_t chunk_downloader_timeout_secs) {
set_value(download_timeout_secs_, chunk_downloader_timeout_secs);
}
void set_download_timeout_secs(std::uint8_t value);
void
set_enable_chunk_downloader_timeout(bool enable_chunk_downloader_timeout) {
set_value(enable_chunk_downloader_timeout_,
enable_chunk_downloader_timeout);
}
void set_database_type(const database_type &value);
void set_enable_comm_duration_events(bool enable_comm_duration_events) {
set_value(enable_comm_duration_events_, enable_comm_duration_events);
}
void set_enable_download_timeout(bool value);
void set_enable_drive_events(bool enable_drive_events) {
set_value(enable_drive_events_, enable_drive_events);
}
void set_enable_max_cache_size(bool enable_max_cache_size) {
set_value(enable_max_cache_size_, enable_max_cache_size);
}
void set_enable_drive_events(bool value);
#if defined(_WIN32)
void set_enable_mount_manager(bool enable_mount_manager) {
set_value(enable_mount_manager_, enable_mount_manager);
}
#endif
void set_enable_mount_manager(bool value);
#endif // defined(_WIN32)
void set_enable_remote_mount(bool enable_remote_mount);
void set_event_level(const event_level &value);
void set_event_level(const event_level &level) {
if (set_value(event_level_, level)) {
event_system::instance().raise<event_level_changed>(
event_level_to_string(level));
}
}
void set_encrypt_config(encrypt_config value);
void set_eviction_delay_mins(std::uint32_t eviction_delay_mins) {
set_value(eviction_delay_mins_, eviction_delay_mins);
}
void set_eviction_delay_mins(std::uint32_t value);
void set_eviction_uses_accessed_time(bool eviction_uses_accessed_time) {
set_value(eviction_uses_accessed_time_, eviction_uses_accessed_time);
}
void set_eviction_uses_accessed_time(bool value);
void
set_high_frequency_interval_secs(std::uint8_t high_frequency_interval_secs) {
set_value(high_freq_interval_secs_, high_frequency_interval_secs);
}
void set_high_frequency_interval_secs(std::uint16_t value);
#if defined(PROJECT_TESTING)
void set_host_config(host_config hc) {
config_changed_ = true;
hc_ = std::move(hc);
save();
}
void set_host_config(host_config value);
void set_s3_config(s3_config s3) {
config_changed_ = true;
s3_config_ = std::move(s3);
save();
}
void set_low_frequency_interval_secs(std::uint16_t value);
void set_sia_config(sia_config sia) {
config_changed_ = true;
sia_config_ = std::move(sia);
save();
}
#endif // defined(PROJECT_TESTING)
void set_max_cache_size_bytes(std::uint64_t value);
void set_is_remote_mount(bool is_remote_mount);
void set_max_upload_count(std::uint8_t value);
void
set_low_frequency_interval_secs(std::uint32_t low_frequency_interval_secs) {
set_value(low_freq_interval_secs_, low_frequency_interval_secs);
}
void set_med_frequency_interval_secs(std::uint16_t value);
void set_max_cache_size_bytes(std::uint64_t max_cache_size_bytes) {
set_value(max_cache_size_bytes_, max_cache_size_bytes);
}
void set_online_check_retry_secs(std::uint16_t value);
void set_max_upload_count(std::uint8_t max_upload_count) {
set_value(max_upload_count_, max_upload_count);
}
void set_orphaned_file_retention_days(std::uint16_t value);
void set_online_check_retry_secs(std::uint16_t online_check_retry_secs) {
set_value(online_check_retry_secs_, online_check_retry_secs);
}
void set_preferred_download_type(const download_type &value);
void
set_orphaned_file_retention_days(std::uint16_t orphaned_file_retention_days) {
set_value(orphaned_file_retention_days_, orphaned_file_retention_days);
}
void set_remote_config(remote::remote_config value);
void set_preferred_download_type(const download_type &dt) {
set_value(preferred_download_type_, download_type_to_string(dt));
}
void set_remote_mount(remote::remote_mount value);
void set_read_ahead_count(std::uint8_t read_ahead_count) {
set_value(read_ahead_count_, read_ahead_count);
}
void set_retry_read_count(std::uint16_t value);
void set_remote_client_pool_size(std::uint8_t remote_client_pool_size) {
set_value(remote_client_pool_size_, remote_client_pool_size);
}
void set_ring_buffer_file_size(std::uint16_t value);
void set_ring_buffer_file_size(std::uint16_t ring_buffer_file_size) {
set_value(ring_buffer_file_size_, ring_buffer_file_size);
}
void set_s3_config(s3_config value);
void set_remote_host_name_or_ip(const std::string &remote_host_name_or_ip) {
set_value(remote_host_name_or_ip_, remote_host_name_or_ip);
}
void set_sia_config(sia_config value);
void set_remote_max_connections(std::uint8_t remote_max_connections) {
set_value(remote_max_connections_, remote_max_connections);
}
void set_remote_port(std::uint16_t remote_port) {
set_value(remote_port_, remote_port);
}
void
set_remote_receive_timeout_secs(std::uint16_t remote_receive_timeout_secs) {
set_value(remote_receive_timeout_secs_, remote_receive_timeout_secs);
}
void set_remote_send_timeout_secs(std::uint16_t remote_send_timeout_secs) {
set_value(remote_send_timeout_secs_, remote_send_timeout_secs);
}
void set_remote_token(const std::string &remote_token) {
set_value(remote_token_, remote_token);
}
void set_retry_read_count(std::uint16_t retry_read_count) {
set_value(retry_read_count_, retry_read_count);
}
void set_task_wait_ms(std::uint16_t value);
[[nodiscard]] auto set_value_by_name(const std::string &name,
const std::string &value) -> std::string;
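
The `app_config` members above move from plain fields to `std::atomic`, and the inline `set_value` template is replaced with an out-of-line declaration. A self-contained sketch of the change-detect-then-save pattern that template expresses (illustrative only, not repertory code):

```cpp
// Illustration of the set_value pattern from the app_config diff above:
// update the member only when the value differs, mark the config dirty,
// and persist. This is a standalone sketch, not the project's implementation.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <mutex>

class tiny_config {
public:
  // Returns true when the stored value actually changed.
  template <typename dest, typename source>
  auto set_value(dest &dst, const source &src) -> bool {
    std::lock_guard<std::recursive_mutex> lock(mtx_);
    if (dst == src) {
      return false;
    }
    dst = src;
    config_changed_ = true;
    save();
    return true;
  }

  void set_api_port(std::uint16_t value) { set_value(api_port_, value); }
  [[nodiscard]] auto get_api_port() const -> std::uint16_t { return api_port_; }

private:
  void save() { std::cout << "persisting config...\n"; }

  mutable std::recursive_mutex mtx_;
  std::atomic<bool> config_changed_{false};
  std::atomic<std::uint16_t> api_port_{10000};
};

int main() {
  tiny_config cfg;
  cfg.set_api_port(11101);              // value changes -> save() runs
  cfg.set_api_port(cfg.get_api_port()); // no change -> nothing persisted
  return 0;
}
```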

View File

@@ -52,23 +52,23 @@ public:
~packet() = default;
private:
data_buffer buffer_;
std::size_t decode_offset_ = 0U;
data_buffer buffer_{};
std::size_t decode_offset_{0U};
public:
[[nodiscard]] static auto decode_json(packet &response,
json &json_data) -> int;
[[nodiscard]] static auto decode_json(packet &response, json &json_data)
-> int;
public:
void clear();
[[nodiscard]] auto current_pointer() -> unsigned char * {
return (decode_offset_ < buffer_.size()) ? &buffer_[decode_offset_]
return (decode_offset_ < buffer_.size()) ? &buffer_.at(decode_offset_)
: nullptr;
}
[[nodiscard]] auto current_pointer() const -> const unsigned char * {
return (decode_offset_ < buffer_.size()) ? &buffer_[decode_offset_]
return (decode_offset_ < buffer_.size()) ? &buffer_.at(decode_offset_)
: nullptr;
}
@@ -206,7 +206,7 @@ public:
return static_cast<std::uint32_t>(buffer_.size());
}
void transfer_into(data_buffer &buffer);
void to_buffer(data_buffer &buffer);
public:
auto operator=(const data_buffer &buffer) noexcept -> packet &;
@@ -226,8 +226,6 @@ public:
return buffer_.at(index);
}
};
using packet = packet;
} // namespace repertory
#endif // REPERTORY_INCLUDE_COMM_PACKET_PACKET_HPP_

View File

@@ -23,6 +23,7 @@
#define REPERTORY_INCLUDE_COMM_PACKET_PACKET_CLIENT_HPP_
#include "comm/packet/packet.hpp"
#include "types/remote.hpp"
using boost::asio::ip::tcp;
@@ -36,9 +37,7 @@ private:
};
public:
packet_client(std::string host_name_or_ip, std::uint8_t max_connections,
std::uint16_t port, std::uint16_t receive_timeout,
std::uint16_t send_timeout, std::string encryption_token);
packet_client(remote::remote_config cfg);
~packet_client();
@@ -49,12 +48,7 @@ public:
private:
boost::asio::io_context io_context_;
std::string host_name_or_ip_;
std::uint8_t max_connections_;
std::uint16_t port_;
std::uint16_t receive_timeout_;
std::uint16_t send_timeout_;
std::string encryption_token_;
remote::remote_config cfg_;
std::string unique_id_;
private:
@@ -75,21 +69,21 @@ private:
void put_client(std::shared_ptr<client> &cli);
[[nodiscard]] auto read_packet(client &cli,
packet &response) -> packet::error_type;
[[nodiscard]] auto read_packet(client &cli, packet &response)
-> packet::error_type;
void resolve();
public:
[[nodiscard]] auto send(std::string_view method,
std::uint32_t &service_flags) -> packet::error_type;
[[nodiscard]] auto send(std::string_view method, std::uint32_t &service_flags)
-> packet::error_type;
[[nodiscard]] auto send(std::string_view method, packet &request,
std::uint32_t &service_flags) -> packet::error_type;
[[nodiscard]] auto send(std::string_view method, packet &request,
packet &response,
std::uint32_t &service_flags) -> packet::error_type;
packet &response, std::uint32_t &service_flags)
-> packet::error_type;
};
} // namespace repertory

View File

@@ -52,8 +52,8 @@ public:
private:
struct connection {
connection(boost::asio::io_service &io_service, tcp::acceptor &acceptor_)
: socket(io_service), acceptor(acceptor_) {}
connection(io_context &ctx, tcp::acceptor &acceptor_)
: socket(ctx), acceptor(acceptor_) {}
tcp::socket socket;
tcp::acceptor &acceptor;
@@ -68,7 +68,7 @@ private:
std::string encryption_token_;
closed_callback closed_;
message_handler_callback message_handler_;
boost::asio::io_context io_context_;
io_context io_context_;
std::unique_ptr<std::thread> server_thread_;
std::vector<std::thread> service_threads_;
std::recursive_mutex connection_mutex_;

View File

@@ -0,0 +1,34 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_FILE_DB_HPP_
#include "db/i_file_db.hpp"
namespace repertory {
class app_config;
[[nodiscard]] auto create_file_db(const app_config &cfg)
-> std::unique_ptr<i_file_db>;
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_FILE_DB_HPP_
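
`create_file_db()` is only declared here; its definition is not part of this excerpt. A hypothetical sketch of how such a factory might dispatch on the configured backend, assuming it switches on `app_config::get_database_type()` between the `rdb_file_db` and `sqlite_file_db` implementations declared later in this commit (include paths inferred from the headers' guards):

```cpp
// Hypothetical sketch - the actual definition of create_file_db() is not shown
// in this commit excerpt. Paths are inferred from include guards; the dispatch
// on get_database_type() is an assumption.
#include "app_config.hpp"
#include "db/file_db.hpp"
#include "db/impl/rdb_file_db.hpp"
#include "db/impl/sqlite_file_db.hpp"

#include <memory>

namespace repertory {
auto create_file_db(const app_config &cfg) -> std::unique_ptr<i_file_db> {
  if (cfg.get_database_type() == database_type::rocksdb) {
    return std::make_unique<rdb_file_db>(cfg);
  }
  return std::make_unique<sqlite_file_db>(cfg);
}
} // namespace repertory
```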

View File

@@ -0,0 +1,34 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_FILE_MGR_DB_HPP_
#include "db/i_file_mgr_db.hpp"
namespace repertory {
class app_config;
[[nodiscard]] auto
create_file_mgr_db(const app_config &cfg) -> std::unique_ptr<i_file_mgr_db>;
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_FILE_MGR_DB_HPP_

View File

@@ -0,0 +1,95 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_I_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_I_FILE_DB_HPP_
#include "types/repertory.hpp"
namespace repertory {
class i_file_db {
INTERFACE_SETUP(i_file_db);
public:
struct file_info final {
std::string api_path;
bool directory;
std::string source_path;
};
struct file_data final {
std::string api_path;
std::uint64_t file_size{};
std::vector<
std::array<unsigned char, crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>
iv_list{};
std::string source_path;
};
public:
[[nodiscard]] virtual auto add_directory(const std::string &api_path,
const std::string &source_path)
-> api_error = 0;
[[nodiscard]] virtual auto add_or_update_file(const file_data &data)
-> api_error = 0;
virtual void clear() = 0;
[[nodiscard]] virtual auto count() const -> std::uint64_t = 0;
[[nodiscard]] virtual auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error = 0;
[[nodiscard]] virtual auto
get_directory_api_path(const std::string &source_path,
std::string &api_path) const -> api_error = 0;
[[nodiscard]] virtual auto
get_directory_source_path(const std::string &api_path,
std::string &source_path) const -> api_error = 0;
[[nodiscard]] virtual auto get_file_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error = 0;
[[nodiscard]] virtual auto get_file_data(const std::string &api_path,
file_data &data) const
-> api_error = 0;
[[nodiscard]] virtual auto
get_file_source_path(const std::string &api_path,
std::string &source_path) const -> api_error = 0;
[[nodiscard]] virtual auto get_item_list() const
-> std::vector<file_info> = 0;
[[nodiscard]] virtual auto get_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error = 0;
[[nodiscard]] virtual auto remove_item(const std::string &api_path)
-> api_error = 0;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_I_FILE_DB_HPP_

View File

@@ -0,0 +1,86 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_I_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_I_FILE_MGR_DB_HPP_
#include "types/repertory.hpp"
namespace repertory {
class i_file_mgr_db {
INTERFACE_SETUP(i_file_mgr_db);
public:
struct resume_entry final {
std::string api_path;
std::uint64_t chunk_size{};
boost::dynamic_bitset<> read_state;
std::string source_path;
};
struct upload_active_entry final {
std::string api_path;
std::string source_path;
};
struct upload_entry final {
std::string api_path;
std::string source_path;
};
public:
[[nodiscard]] virtual auto add_resume(const resume_entry &entry) -> bool = 0;
[[nodiscard]] virtual auto add_upload(const upload_entry &entry) -> bool = 0;
[[nodiscard]] virtual auto add_upload_active(const upload_active_entry &entry)
-> bool = 0;
virtual void clear() = 0;
[[nodiscard]] virtual auto get_next_upload() const
-> std::optional<upload_entry> = 0;
[[nodiscard]] virtual auto get_resume_list() const
-> std::vector<resume_entry> = 0;
[[nodiscard]] virtual auto get_upload(const std::string &api_path) const
-> std::optional<upload_entry> = 0;
[[nodiscard]] virtual auto get_upload_active_list() const
-> std::vector<upload_active_entry> = 0;
[[nodiscard]] virtual auto remove_resume(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto remove_upload(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto remove_upload_active(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto rename_resume(const std::string &from_api_path,
const std::string &to_api_path)
-> bool = 0;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_I_FILE_MGR_DB_HPP_
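
`resume_entry::read_state` is a `boost::dynamic_bitset<>`, which suggests one bit per file chunk to record what has already been downloaded when a transfer is resumed. A self-contained sketch of that bookkeeping (the chunk size and file size below are invented for the example):

```cpp
// Illustrative only: per-chunk download tracking with boost::dynamic_bitset,
// the same container type used by resume_entry::read_state above.
#include <boost/dynamic_bitset.hpp>
#include <cstdint>
#include <iostream>

int main() {
  constexpr std::uint64_t chunk_size{8ULL * 1024ULL * 1024ULL}; // assumed 8 MiB chunks
  constexpr std::uint64_t file_size{50ULL * 1024ULL * 1024ULL}; // example 50 MiB file

  const auto chunk_count = (file_size + chunk_size - 1ULL) / chunk_size;
  boost::dynamic_bitset<> read_state(chunk_count); // all bits unset: nothing downloaded yet

  // Mark chunks 0 and 3 as downloaded.
  read_state.set(0);
  read_state.set(3);

  std::cout << "downloaded " << read_state.count() << " of " << read_state.size()
            << " chunks; complete: " << std::boolalpha << read_state.all() << '\n';
  return 0;
}
```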

View File

@@ -0,0 +1,78 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_I_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_I_META_DB_HPP_
#include "types/repertory.hpp"
namespace repertory {
class i_meta_db {
INTERFACE_SETUP(i_meta_db);
public:
virtual void clear() = 0;
[[nodiscard]] virtual auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error = 0;
[[nodiscard]] virtual auto get_api_path_list() const
-> std::vector<std::string> = 0;
[[nodiscard]] virtual auto get_item_meta(const std::string &api_path,
api_meta_map &meta) const
-> api_error = 0;
[[nodiscard]] virtual auto get_item_meta(const std::string &api_path,
const std::string &key,
std::string &value) const
-> api_error = 0;
[[nodiscard]] virtual auto get_pinned_files() const
-> std::vector<std::string> = 0;
[[nodiscard]] virtual auto get_total_item_count() const -> std::uint64_t = 0;
[[nodiscard]] virtual auto get_total_size() const -> std::uint64_t = 0;
virtual void remove_api_path(const std::string &api_path) = 0;
[[nodiscard]] virtual auto remove_item_meta(const std::string &api_path,
const std::string &key)
-> api_error = 0;
[[nodiscard]] virtual auto rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error = 0;
[[nodiscard]] virtual auto set_item_meta(const std::string &api_path,
const std::string &key,
const std::string &value)
-> api_error = 0;
[[nodiscard]] virtual auto set_item_meta(const std::string &api_path,
const api_meta_map &meta)
-> api_error = 0;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_I_META_DB_HPP_

View File

@@ -0,0 +1,117 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_DB_HPP_
#include "db/i_file_db.hpp"
namespace repertory {
class app_config;
class rdb_file_db final : public i_file_db {
public:
rdb_file_db(const app_config &cfg);
~rdb_file_db() override;
rdb_file_db(const rdb_file_db &) = delete;
rdb_file_db(rdb_file_db &&) = delete;
auto operator=(const rdb_file_db &) -> rdb_file_db & = delete;
auto operator=(rdb_file_db &&) -> rdb_file_db & = delete;
private:
const app_config &cfg_;
private:
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
rocksdb::ColumnFamilyHandle *directory_family_{};
rocksdb::ColumnFamilyHandle *file_family_{};
rocksdb::ColumnFamilyHandle *path_family_{};
rocksdb::ColumnFamilyHandle *source_family_{};
private:
void create_or_open(bool clear);
[[nodiscard]] auto create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator>;
[[nodiscard]] static auto
perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action) -> api_error;
[[nodiscard]] auto perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error;
[[nodiscard]] auto remove_item(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn) -> rocksdb::Status;
public:
[[nodiscard]] auto
add_directory(const std::string &api_path,
const std::string &source_path) -> api_error override;
[[nodiscard]] auto
add_or_update_file(const i_file_db::file_data &data) -> api_error override;
void clear() override;
[[nodiscard]] auto count() const -> std::uint64_t override;
[[nodiscard]] auto
get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error override;
[[nodiscard]] auto
get_directory_api_path(const std::string &source_path,
std::string &api_path) const -> api_error override;
[[nodiscard]] auto get_directory_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto
get_file_api_path(const std::string &source_path,
std::string &api_path) const -> api_error override;
[[nodiscard]] auto
get_file_data(const std::string &api_path,
i_file_db::file_data &data) const -> api_error override;
[[nodiscard]] auto
get_file_source_path(const std::string &api_path,
std::string &source_path) const -> api_error override;
[[nodiscard]] auto
get_item_list() const -> std::vector<i_file_db::file_info> override;
[[nodiscard]] auto
get_source_path(const std::string &api_path,
std::string &source_path) const -> api_error override;
[[nodiscard]] auto
remove_item(const std::string &api_path) -> api_error override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_DB_HPP_
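
`rdb_file_db` holds a `rocksdb::TransactionDB` and routes mutations through `perform_action` overloads that take a `rocksdb::Transaction *`, in line with issue #24 ("RocksDB implementations should be transactional"). For reference, a minimal self-contained sketch of the underlying RocksDB transactional API, independent of the repertory wrappers:

```cpp
// Minimal sketch of RocksDB's TransactionDB API, which the rdb_* classes above
// build on; this is not repertory code, and the keys/paths are invented.
#include <rocksdb/utilities/transaction.h>
#include <rocksdb/utilities/transaction_db.h>

#include <cassert>
#include <memory>
#include <string>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::TransactionDB *raw_db{nullptr};
  auto status = rocksdb::TransactionDB::Open(
      options, rocksdb::TransactionDBOptions{}, "/tmp/example_rdb", &raw_db);
  assert(status.ok());
  std::unique_ptr<rocksdb::TransactionDB> db{raw_db};

  // Group related writes so they commit (or roll back) together.
  std::unique_ptr<rocksdb::Transaction> txn{db->BeginTransaction(rocksdb::WriteOptions())};
  txn->Put("file:/photo.jpg", R"({"size":1024})");
  txn->Put("source:/cache/photo.jpg", "/photo.jpg");
  status = txn->Commit();
  assert(status.ok());

  std::string value;
  status = db->Get(rocksdb::ReadOptions(), "file:/photo.jpg", &value);
  assert(status.ok() && !value.empty());
  return 0;
}
```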

View File

@@ -0,0 +1,109 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_MGR_DB_HPP_
#include "db/i_file_mgr_db.hpp"
namespace repertory {
class app_config;
class rdb_file_mgr_db final : public i_file_mgr_db {
public:
rdb_file_mgr_db(const app_config &cfg);
~rdb_file_mgr_db() override;
rdb_file_mgr_db(const rdb_file_mgr_db &) = delete;
rdb_file_mgr_db(rdb_file_mgr_db &&) = delete;
auto operator=(const rdb_file_mgr_db &) -> rdb_file_mgr_db & = delete;
auto operator=(rdb_file_mgr_db &&) -> rdb_file_mgr_db & = delete;
private:
const app_config &cfg_;
private:
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
std::atomic<std::uint64_t> id_{0U};
rocksdb::ColumnFamilyHandle *resume_family_{};
rocksdb::ColumnFamilyHandle *upload_active_family_{};
rocksdb::ColumnFamilyHandle *upload_family_{};
private:
void create_or_open(bool clear);
[[nodiscard]] auto create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator>;
[[nodiscard]] static auto
perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action) -> bool;
[[nodiscard]] auto perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool;
[[nodiscard]] auto remove_resume(const std::string &api_path,
rocksdb::Transaction *txn)
-> rocksdb::Status;
[[nodiscard]] auto add_resume(const resume_entry &entry,
rocksdb::Transaction *txn) -> rocksdb::Status;
public:
[[nodiscard]] auto add_resume(const resume_entry &entry) -> bool override;
[[nodiscard]] auto add_upload(const upload_entry &entry) -> bool override;
[[nodiscard]] auto add_upload_active(const upload_active_entry &entry)
-> bool override;
void clear() override;
[[nodiscard]] auto get_next_upload() const
-> std::optional<upload_entry> override;
[[nodiscard]] auto get_resume_list() const
-> std::vector<resume_entry> override;
[[nodiscard]] auto get_upload(const std::string &api_path) const
-> std::optional<upload_entry> override;
[[nodiscard]] auto get_upload_active_list() const
-> std::vector<upload_active_entry> override;
[[nodiscard]] auto remove_resume(const std::string &api_path)
-> bool override;
[[nodiscard]] auto remove_upload(const std::string &api_path)
-> bool override;
[[nodiscard]] auto remove_upload_active(const std::string &api_path)
-> bool override;
[[nodiscard]] auto rename_resume(const std::string &from_api_path,
const std::string &to_api_path)
-> bool override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_MGR_DB_HPP_

View File

@@ -0,0 +1,127 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_RDB_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_RDB_META_DB_HPP_
#include "db/i_meta_db.hpp"
#include "types/repertory.hpp"
namespace repertory {
class app_config;
class rdb_meta_db final : public i_meta_db {
public:
rdb_meta_db(const app_config &cfg);
~rdb_meta_db() override;
rdb_meta_db(const rdb_meta_db &) = delete;
rdb_meta_db(rdb_meta_db &&) = delete;
auto operator=(const rdb_meta_db &) -> rdb_meta_db & = delete;
auto operator=(rdb_meta_db &&) -> rdb_meta_db & = delete;
private:
const app_config &cfg_;
private:
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
rocksdb::ColumnFamilyHandle *meta_family_{};
rocksdb::ColumnFamilyHandle *pinned_family_{};
rocksdb::ColumnFamilyHandle *size_family_{};
rocksdb::ColumnFamilyHandle *source_family_{};
private:
[[nodiscard]] auto create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator>;
void create_or_open(bool clear);
[[nodiscard]] auto get_item_meta_json(const std::string &api_path,
json &json_data) const -> api_error;
[[nodiscard]] static auto
perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action) -> api_error;
[[nodiscard]] auto perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error;
[[nodiscard]] auto remove_api_path(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn)
-> rocksdb::Status;
[[nodiscard]] auto update_item_meta(const std::string &api_path,
json json_data,
rocksdb::Transaction *base_txn = nullptr,
rocksdb::Status *status = nullptr)
-> api_error;
public:
void clear() override;
[[nodiscard]] auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_api_path_list() const
-> std::vector<std::string> override;
[[nodiscard]] auto get_item_meta(const std::string &api_path,
api_meta_map &meta) const
-> api_error override;
[[nodiscard]] auto get_item_meta(const std::string &api_path,
const std::string &key,
std::string &value) const
-> api_error override;
[[nodiscard]] auto get_pinned_files() const
-> std::vector<std::string> override;
[[nodiscard]] auto get_total_item_count() const -> std::uint64_t override;
[[nodiscard]] auto get_total_size() const -> std::uint64_t override;
void remove_api_path(const std::string &api_path) override;
[[nodiscard]] auto remove_item_meta(const std::string &api_path,
const std::string &key)
-> api_error override;
[[nodiscard]] auto rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error override;
[[nodiscard]] auto set_item_meta(const std::string &api_path,
const std::string &key,
const std::string &value)
-> api_error override;
[[nodiscard]] auto set_item_meta(const std::string &api_path,
const api_meta_map &meta)
-> api_error override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_RDB_META_DB_HPP_

View File

@@ -0,0 +1,92 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_DB_HPP_
#include "db/i_file_db.hpp"
#include "utils/db/sqlite/db_common.hpp"
namespace repertory {
class app_config;
class sqlite_file_db final : public i_file_db {
public:
sqlite_file_db(const app_config &cfg);
~sqlite_file_db() override;
sqlite_file_db(const sqlite_file_db &) = delete;
sqlite_file_db(sqlite_file_db &&) = delete;
auto operator=(const sqlite_file_db &) -> sqlite_file_db & = delete;
auto operator=(sqlite_file_db &&) -> sqlite_file_db & = delete;
private:
utils::db::sqlite::db3_t db_;
public:
[[nodiscard]] auto add_directory(const std::string &api_path,
const std::string &source_path)
-> api_error override;
[[nodiscard]] auto add_or_update_file(const i_file_db::file_data &data)
-> api_error override;
void clear() override;
[[nodiscard]] auto count() const -> std::uint64_t override;
[[nodiscard]] auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_directory_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_directory_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto get_file_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_file_data(const std::string &api_path,
i_file_db::file_data &data) const
-> api_error override;
[[nodiscard]] auto get_file_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto get_item_list() const
-> std::vector<i_file_db::file_info> override;
[[nodiscard]] auto get_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto remove_item(const std::string &api_path)
-> api_error override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_DB_HPP_

View File

@@ -0,0 +1,82 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_MGR_DB_HPP_
#include "db/i_file_mgr_db.hpp"
#include "utils/db/sqlite/db_common.hpp"
namespace repertory {
class app_config;
class sqlite_file_mgr_db final : public i_file_mgr_db {
public:
sqlite_file_mgr_db(const app_config &cfg);
~sqlite_file_mgr_db() override;
sqlite_file_mgr_db(const sqlite_file_mgr_db &) = delete;
sqlite_file_mgr_db(sqlite_file_mgr_db &&) = delete;
auto operator=(const sqlite_file_mgr_db &) -> sqlite_file_mgr_db & = delete;
auto operator=(sqlite_file_mgr_db &&) -> sqlite_file_mgr_db & = delete;
private:
utils::db::sqlite::db3_t db_;
public:
[[nodiscard]] auto add_resume(const resume_entry &entry) -> bool override;
[[nodiscard]] auto add_upload(const upload_entry &entry) -> bool override;
[[nodiscard]] auto add_upload_active(const upload_active_entry &entry)
-> bool override;
void clear() override;
[[nodiscard]] auto get_next_upload() const
-> std::optional<upload_entry> override;
[[nodiscard]] auto get_resume_list() const
-> std::vector<resume_entry> override;
[[nodiscard]] auto get_upload(const std::string &api_path) const
-> std::optional<upload_entry> override;
[[nodiscard]] auto get_upload_active_list() const
-> std::vector<upload_active_entry> override;
[[nodiscard]] auto remove_resume(const std::string &api_path)
-> bool override;
[[nodiscard]] auto remove_upload(const std::string &api_path)
-> bool override;
[[nodiscard]] auto remove_upload_active(const std::string &api_path)
-> bool override;
[[nodiscard]] auto rename_resume(const std::string &from_api_path,
const std::string &to_api_path)
-> bool override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_MGR_DB_HPP_

View File

@ -19,24 +19,25 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_PROVIDERS_META_DB_HPP_
#define REPERTORY_INCLUDE_PROVIDERS_META_DB_HPP_
#ifndef REPERTORY_INCLUDE_DB_IMPL_SQLITE_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_SQLITE_META_DB_HPP_
#include "db/i_meta_db.hpp"
#include "types/repertory.hpp"
#include "utils/db/sqlite/db_common.hpp"
namespace repertory {
class app_config;
class meta_db final {
class sqlite_meta_db final : public i_meta_db {
public:
meta_db(const app_config &cfg);
~meta_db();
sqlite_meta_db(const app_config &cfg);
~sqlite_meta_db() override;
meta_db(const meta_db &) = delete;
meta_db(meta_db &&) = delete;
auto operator=(const meta_db &) -> meta_db & = delete;
auto operator=(meta_db &&) -> meta_db & = delete;
sqlite_meta_db(const sqlite_meta_db &) = delete;
sqlite_meta_db(sqlite_meta_db &&) = delete;
auto operator=(const sqlite_meta_db &) -> sqlite_meta_db & = delete;
auto operator=(sqlite_meta_db &&) -> sqlite_meta_db & = delete;
private:
utils::db::sqlite::db3_t db_;
@ -47,38 +48,50 @@ private:
api_meta_map meta) -> api_error;
public:
[[nodiscard]] auto get_api_path(const std::string &source_path,
std::string &api_path) -> api_error;
void clear() override;
[[nodiscard]] auto get_api_path_list() -> std::vector<std::string>;
[[nodiscard]] auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_api_path_list() const
-> std::vector<std::string> override;
[[nodiscard]] auto get_item_meta(const std::string &api_path,
api_meta_map &meta) -> api_error;
api_meta_map &meta) const
-> api_error override;
[[nodiscard]] auto get_item_meta(const std::string &api_path,
const std::string &key,
std::string &value) const -> api_error;
std::string &value) const
-> api_error override;
[[nodiscard]] auto get_pinned_files() const -> std::vector<std::string>;
[[nodiscard]] auto get_pinned_files() const
-> std::vector<std::string> override;
[[nodiscard]] auto get_total_item_count() const -> std::uint64_t;
[[nodiscard]] auto get_total_item_count() const -> std::uint64_t override;
void remove_api_path(const std::string &api_path);
[[nodiscard]] auto get_total_size() const -> std::uint64_t override;
void remove_api_path(const std::string &api_path) override;
[[nodiscard]] auto remove_item_meta(const std::string &api_path,
const std::string &key) -> api_error;
const std::string &key)
-> api_error override;
[[nodiscard]] auto
rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path) -> api_error;
[[nodiscard]] auto rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error override;
[[nodiscard]] auto set_item_meta(const std::string &api_path,
const std::string &key,
const std::string &value) -> api_error;
const std::string &value)
-> api_error override;
[[nodiscard]] auto set_item_meta(const std::string &api_path,
const api_meta_map &meta) -> api_error;
const api_meta_map &meta)
-> api_error override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_PROVIDERS_META_DB_HPP_
#endif // REPERTORY_INCLUDE_DB_IMPL_SQLITE_META_DB_HPP_

View File

@ -0,0 +1,34 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_META_DB_HPP_
#include "db/i_meta_db.hpp"
namespace repertory {
class app_config;
[[nodiscard]] auto create_meta_db(const app_config &cfg)
-> std::unique_ptr<i_meta_db>;
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_META_DB_HPP_
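
`create_meta_db()` is the factory that keeps callers agnostic of the storage backend; together with the `sqlite_meta_db` above it covers the "choose between RocksDB and SQLite databases" item from the release notes. A hypothetical sketch of such a factory; `database_type`, `get_database_type()`, and the RocksDB header path are assumed names, not the actual configuration API:

```cpp
// Hypothetical sketch: database_type and get_database_type() are assumed.
#include "db/i_meta_db.hpp"
#include "db/impl/rocksdb_meta_db.hpp" // assumed header path
#include "db/impl/sqlite_meta_db.hpp"  // inferred from the include guard above

namespace repertory {

auto create_meta_db(const app_config &cfg) -> std::unique_ptr<i_meta_db> {
  // Select the backend from configuration; both classes implement i_meta_db.
  if (cfg.get_database_type() == database_type::rocksdb) {
    return std::make_unique<rocksdb_meta_db>(cfg);
  }
  return std::make_unique<sqlite_meta_db>(cfg);
}

} // namespace repertory
```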

View File

@ -34,7 +34,7 @@ public:
private:
struct open_directory final {
std::shared_ptr<directory_iterator> iterator;
std::vector<std::uint64_t> handles{};
std::vector<std::uint64_t> handles;
std::chrono::system_clock::time_point last_update{
std::chrono::system_clock::now()};
};
@ -60,8 +60,8 @@ public:
void execute_action(const std::string &api_path,
const execute_callback &execute);
[[nodiscard]] auto
get_directory(std::uint64_t handle) -> std::shared_ptr<directory_iterator>;
[[nodiscard]] auto get_directory(std::uint64_t handle)
-> std::shared_ptr<directory_iterator>;
[[nodiscard]] auto remove_directory(const std::string &api_path)
-> std::shared_ptr<directory_iterator>;

View File

@ -31,22 +31,23 @@ class i_provider;
class eviction final : public single_thread_service_base {
public:
eviction(i_provider &provider, const app_config &config, i_file_manager &fm)
eviction(i_provider &provider, const app_config &config,
i_file_manager &file_mgr)
: single_thread_service_base("eviction"),
provider_(provider),
config_(config),
fm_(fm) {}
file_mgr_(file_mgr),
provider_(provider) {}
~eviction() override = default;
private:
i_provider &provider_;
const app_config &config_;
i_file_manager &fm_;
i_file_manager &file_mgr_;
i_provider &provider_;
private:
[[nodiscard]] auto
check_minimum_requirements(const std::string &file_path) -> bool;
[[nodiscard]] auto check_minimum_requirements(const std::string &file_path)
-> bool;
[[nodiscard]] auto get_filtered_cached_files() -> std::deque<std::string>;

View File

@ -103,7 +103,7 @@ protected:
struct fuse_file_info *file_info) -> api_error override;
[[nodiscard]] auto
fgetattr_impl(std::string api_path, struct stat *st,
fgetattr_impl(std::string api_path, struct stat *unix_st,
struct fuse_file_info *file_info) -> api_error override;
#if defined(__APPLE__)
@ -124,11 +124,11 @@ protected:
#if FUSE_USE_VERSION >= 30
[[nodiscard]] auto
getattr_impl(std::string api_path, struct stat *st,
getattr_impl(std::string api_path, struct stat *unix_st,
struct fuse_file_info *file_info) -> api_error override;
#else
[[nodiscard]] auto getattr_impl(std::string api_path,
struct stat *st) -> api_error override;
struct stat *unix_st) -> api_error override;
#endif
#if defined(__APPLE__)

View File

@ -29,6 +29,8 @@
#include "drives/fuse/remotefuse/i_remote_instance.hpp"
#include "drives/remote/remote_open_file_table.hpp"
#include "drives/winfsp/remotewinfsp/i_remote_instance.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "types/remote.hpp"
#include "types/repertory.hpp"
#include "utils/base64.hpp"
@ -52,7 +54,7 @@ public:
: config_(config),
drive_(drv),
mount_location_(std::move(mount_location)),
client_pool_(config.get_remote_client_pool_size()) {
client_pool_(config.get_remote_mount().client_pool_size) {
event_system::instance().raise<service_started>("remote_server_base");
handler_lookup_.insert(
{"::winfsp_can_delete",
@ -1357,7 +1359,8 @@ public:
}});
packet_server_ = std::make_unique<packet_server>(
config_.get_remote_port(), config_.get_remote_token(), 10,
config_.get_remote_mount().api_port,
config_.get_remote_mount().encryption_token, 10,
[this](const std::string &client_id) {
return this->closed_handler(client_id);
},

View File

@ -32,9 +32,12 @@ enum class event_level {
trace,
};
auto event_level_from_string(std::string level) -> event_level;
[[nodiscard]] auto
event_level_from_string(std::string level,
event_level default_level = event_level::info)
-> event_level;
auto event_level_to_string(event_level level) -> std::string;
[[nodiscard]] auto event_level_to_string(event_level level) -> std::string;
class event {
protected:
@ -72,4 +75,18 @@ public:
};
} // namespace repertory
NLOHMANN_JSON_NAMESPACE_BEGIN
template <> struct adl_serializer<std::atomic<repertory::event_level>> {
static void to_json(json &data,
const std::atomic<repertory::event_level> &value) {
data = repertory::event_level_to_string(value.load());
}
static void from_json(const json &data,
std::atomic<repertory::event_level> &value) {
value.store(repertory::event_level_from_string(data.get<std::string>()));
}
};
NLOHMANN_JSON_NAMESPACE_END
#endif // REPERTORY_INCLUDE_EVENTS_EVENT_HPP_
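
The `adl_serializer` specialization above lets an `std::atomic<event_level>` round-trip through `nlohmann::json` as its string form. A small sketch (assuming the enum carries the usual `debug`/`info` levels referenced by the event macros):

```cpp
// Sketch: round-trip an atomic event level through nlohmann::json.
#include "events/event.hpp"

#include <atomic>
#include <nlohmann/json.hpp>

using namespace repertory;

void round_trip_event_level() {
  std::atomic<event_level> level{event_level::debug};

  // to_json() stores the string produced by event_level_to_string().
  nlohmann::json data = level;

  // from_json() parses it back via event_level_from_string().
  std::atomic<event_level> restored{event_level::info};
  data.get_to(restored);
}
```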

View File

@ -44,6 +44,7 @@ using event_consumer = event_system::event_consumer;
#define E_FROM_STRING(t) t
#define E_FROM_UINT16(t) std::to_string(t)
#define E_FROM_UINT64(t) std::to_string(t)
#define E_FROM_DOWNLOAD_TYPE(t) download_type_to_string(t)
#define E_PROP(type, name, short_name, ts) \
private: \

View File

@ -0,0 +1,67 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
#include "types/repertory.hpp"
namespace repertory {
class app_config;
class cache_size_mgr final {
public:
cache_size_mgr(const cache_size_mgr &) = delete;
cache_size_mgr(cache_size_mgr &&) = delete;
auto operator=(const cache_size_mgr &) -> cache_size_mgr & = delete;
auto operator=(cache_size_mgr &&) -> cache_size_mgr & = delete;
protected:
cache_size_mgr() = default;
~cache_size_mgr() { stop(); }
private:
static cache_size_mgr instance_;
private:
app_config *cfg_{nullptr};
std::uint64_t cache_size_{0U};
mutable std::mutex mtx_;
std::condition_variable notify_;
stop_type stop_requested_{false};
public:
[[nodiscard]] auto expand(std::uint64_t size) -> api_error;
void initialize(app_config *cfg);
[[nodiscard]] static auto instance() -> cache_size_mgr & { return instance_; }
[[nodiscard]] auto shrink(std::uint64_t size) -> api_error;
[[nodiscard]] auto size() const -> std::uint64_t;
void stop();
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
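
`cache_size_mgr` is the process-wide accountant for local cache usage; `expand()` is the hook that lets writes block once the configured maximum cache size is reached (issue #25). A usage sketch, with the header path inferred from the include guard and the actual cache write reduced to a comment:

```cpp
// Sketch: reserve cache space before writing a chunk, release it on failure.
#include "file_manager/cache_size_mgr.hpp"

using namespace repertory;

auto reserve_and_write_chunk(app_config &cfg, std::uint64_t chunk_size)
    -> api_error {
  auto &mgr = cache_size_mgr::instance();
  mgr.initialize(&cfg); // normally done once, at mount time

  // Reserve space first; intended to wait while the configured maximum
  // cache size is exceeded (issue #25).
  auto res = mgr.expand(chunk_size);
  if (res != api_error::success) {
    return res;
  }

  bool written{false};
  // ... attempt to write `chunk_size` bytes into the local cache file,
  //     setting `written` on success ...

  if (not written) {
    // Release the reservation if nothing ended up on disk.
    return mgr.shrink(chunk_size);
  }

  return api_error::success;
}
```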

View File

@ -0,0 +1,83 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
#include "file_manager/ring_buffer_base.hpp"
#include "types/repertory.hpp"
namespace repertory {
class i_provider;
class i_upload_manager;
class direct_open_file final : public ring_buffer_base {
public:
direct_open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider);
~direct_open_file() override;
public:
direct_open_file() = delete;
direct_open_file(const direct_open_file &) noexcept = delete;
direct_open_file(direct_open_file &&) noexcept = delete;
auto operator=(direct_open_file &&) noexcept -> direct_open_file & = delete;
auto
operator=(const direct_open_file &) noexcept -> direct_open_file & = delete;
private:
std::array<data_buffer, min_ring_size> ring_data_;
protected:
[[nodiscard]] auto on_check_start() -> bool override;
[[nodiscard]] auto
on_chunk_downloaded(std::size_t /* chunk */,
const data_buffer & /* buffer */) -> api_error override {
return api_error::success;
}
[[nodiscard]] auto
on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset, data_buffer &data,
std::size_t &bytes_read) -> api_error override;
[[nodiscard]] auto use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func)
-> api_error override;
public:
[[nodiscard]] auto native_operation(native_operation_callback /* callback */)
-> api_error override {
return api_error::not_supported;
}
[[nodiscard]] auto native_operation(std::uint64_t /* new_file_size */,
native_operation_callback /* callback */)
-> api_error override {
return api_error::not_supported;
}
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_

View File

@ -32,23 +32,6 @@ E_SIMPLE2(download_begin, info, true,
std::string, dest_path, dest, E_FROM_STRING
);
E_SIMPLE5(download_chunk_begin, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING,
std::size_t, chunk, chunk, E_FROM_SIZE_T,
std::size_t, total, total, E_FROM_SIZE_T,
std::size_t, complete, complete, E_FROM_SIZE_T
);
E_SIMPLE6(download_chunk_end, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING,
std::size_t, chunk, chunk, E_FROM_SIZE_T,
std::size_t, total, total, E_FROM_SIZE_T,
std::size_t, complete, complete, E_FROM_SIZE_T,
api_error, result, result, E_FROM_API_FILE_ERROR
);
E_SIMPLE3(download_end, info, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING,
@ -78,19 +61,25 @@ E_SIMPLE3(download_resume_add_failed, error, true,
std::string, error, err, E_FROM_STRING
);
E_SIMPLE2(download_resume_added, info, true,
E_SIMPLE2(download_resume_added, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING
);
E_SIMPLE2(download_resume_removed, info, true,
E_SIMPLE2(download_resume_removed, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING
);
E_SIMPLE1(item_timeout, debug, true,
E_SIMPLE1(item_timeout, trace, true,
std::string, api_path, ap, E_FROM_STRING
);
E_SIMPLE3(download_type_selected, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, source, src, E_FROM_STRING,
download_type, download_type, type, E_FROM_DOWNLOAD_TYPE
);
// clang-format on
} // namespace repertory

View File

@ -22,15 +22,14 @@
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_FILE_MANAGER_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_FILE_MANAGER_HPP_
#include "db/i_file_mgr_db.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/i_file_manager.hpp"
#include "file_manager/i_open_file.hpp"
#include "file_manager/i_upload_manager.hpp"
#include "file_manager/upload.hpp"
#include "platform/platform.hpp"
#include "types/repertory.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/file.hpp"
namespace repertory {
@ -57,7 +56,7 @@ private:
i_provider &provider_;
private:
utils::db::sqlite::db3_t db_;
std::unique_ptr<i_file_mgr_db> mgr_db_;
std::atomic<std::uint64_t> next_handle_{0U};
mutable std::recursive_mutex open_file_mtx_;
std::unordered_map<std::string, std::shared_ptr<i_closeable_open_file>>
@ -69,7 +68,7 @@ private:
std::unique_ptr<std::thread> upload_thread_;
private:
void close_all(const std::string &api_path);
[[nodiscard]] auto close_all(const std::string &api_path) -> bool;
void close_timed_out_files();
@ -86,6 +85,9 @@ private:
void queue_upload(const std::string &api_path, const std::string &source_path,
bool no_lock);
void remove_resume(const std::string &api_path,
const std::string &source_path, bool no_lock);
void remove_upload(const std::string &api_path, bool no_lock);
void swap_renamed_items(std::string from_api_path, std::string to_api_path,
@ -106,6 +108,11 @@ public:
void remove_resume(const std::string &api_path,
const std::string &source_path) override;
static auto remove_source_and_shrink_cache(const std::string &api_path,
const std::string &source_path,
std::uint64_t file_size,
bool allocated) -> bool;
void remove_upload(const std::string &api_path) override;
void store_resume(const i_open_file &file) override;
@ -132,7 +139,8 @@ public:
[[nodiscard]] auto get_open_handle_count() const -> std::size_t;
[[nodiscard]] auto get_stored_downloads() const -> std::vector<json>;
[[nodiscard]] auto get_stored_downloads() const
-> std::vector<i_file_mgr_db::resume_entry>;
[[nodiscard]] auto has_no_open_file_handles() const -> bool override;
@ -161,8 +169,6 @@ public:
void start();
void stop();
void update_used_space(std::uint64_t &used_space) const override;
};
} // namespace repertory

View File

@ -31,21 +31,20 @@ class i_file_manager {
INTERFACE_SETUP(i_file_manager);
public:
[[nodiscard]] virtual auto
evict_file(const std::string &api_path) -> bool = 0;
[[nodiscard]] virtual auto get_directory_items(
const std::string &api_path) const -> directory_item_list = 0;
[[nodiscard]] virtual auto evict_file(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto
get_open_files() const -> std::unordered_map<std::string, std::size_t> = 0;
get_directory_items(const std::string &api_path) const
-> directory_item_list = 0;
[[nodiscard]] virtual auto get_open_files() const
-> std::unordered_map<std::string, std::size_t> = 0;
[[nodiscard]] virtual auto has_no_open_file_handles() const -> bool = 0;
[[nodiscard]] virtual auto
is_processing(const std::string &api_path) const -> bool = 0;
virtual void update_used_space(std::uint64_t &used_space) const = 0;
[[nodiscard]] virtual auto is_processing(const std::string &api_path) const
-> bool = 0;
};
} // namespace repertory

View File

@ -62,8 +62,12 @@ public:
[[nodiscard]] virtual auto get_source_path() const -> std::string = 0;
[[nodiscard]] virtual auto is_complete() const -> bool = 0;
[[nodiscard]] virtual auto is_directory() const -> bool = 0;
[[nodiscard]] virtual auto is_write_supported() const -> bool = 0;
[[nodiscard]] virtual auto has_handle(std::uint64_t handle) const -> bool = 0;
[[nodiscard]] virtual auto
@ -93,6 +97,8 @@ class i_closeable_open_file : public i_open_file {
public:
virtual void add(std::uint64_t handle, open_file_data ofd) = 0;
[[nodiscard]] virtual auto get_allocated() const -> bool = 0;
[[nodiscard]] virtual auto can_close() const -> bool = 0;
virtual auto close() -> bool = 0;
@ -100,12 +106,8 @@ public:
[[nodiscard]] virtual auto get_handles() const
-> std::vector<std::uint64_t> = 0;
[[nodiscard]] virtual auto is_complete() const -> bool = 0;
[[nodiscard]] virtual auto is_modified() const -> bool = 0;
[[nodiscard]] virtual auto is_write_supported() const -> bool = 0;
virtual void remove(std::uint64_t handle) = 0;
virtual void remove_all() = 0;

View File

@ -29,14 +29,14 @@ class i_upload_manager {
INTERFACE_SETUP(i_upload_manager);
public:
virtual void queue_upload(const i_open_file &o) = 0;
virtual void queue_upload(const i_open_file &file) = 0;
virtual void remove_resume(const std::string &api_path,
const std::string &source_path) = 0;
virtual void remove_upload(const std::string &api_path) = 0;
virtual void store_resume(const i_open_file &o) = 0;
virtual void store_resume(const i_open_file &file) = 0;
};
} // namespace repertory

View File

@ -25,6 +25,7 @@
#include "file_manager/open_file_base.hpp"
#include "types/repertory.hpp"
#include "utils/types/file/i_file.hpp"
namespace repertory {
class i_provider;
@ -67,45 +68,55 @@ private:
i_upload_manager &mgr_;
private:
bool notified_ = false;
bool allocated{false};
std::unique_ptr<utils::file::i_file> nf_;
bool notified_{false};
std::size_t read_chunk_{};
boost::dynamic_bitset<> read_state_;
std::unique_ptr<std::thread> reader_thread_;
std::unique_ptr<std::thread> download_thread_;
stop_type stop_requested_ = false;
mutable std::recursive_mutex rw_mtx_;
stop_type stop_requested_{false};
private:
[[nodiscard]] auto adjust_cache_size(std::uint64_t file_size,
bool shrink) -> api_error;
[[nodiscard]] auto check_start() -> api_error;
void download_chunk(std::size_t chunk, bool skip_active, bool should_reset);
void download_range(std::size_t start_chunk, std::size_t end_chunk,
void download_range(std::size_t begin_chunk, std::size_t end_chunk,
bool should_reset);
void set_modified();
void update_background_reader(std::size_t read_chunk);
void set_read_state(std::size_t chunk);
protected:
auto is_download_complete() const -> bool override {
return read_state_.all();
}
void set_read_state(boost::dynamic_bitset<> read_state);
void update_reader(std::size_t chunk);
public:
auto close() -> bool override;
[[nodiscard]] auto get_allocated() const -> bool override;
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
[[nodiscard]] auto is_complete() const -> bool override;
auto is_write_supported() const -> bool override { return true; }
[[nodiscard]] auto is_write_supported() const -> bool override {
return true;
}
[[nodiscard]] auto native_operation(native_operation_callback callback)
-> api_error override;
[[nodiscard]] auto
native_operation(native_operation_callback callback) -> api_error override;
[[nodiscard]] auto native_operation(std::uint64_t new_file_size,
native_operation_callback callback)
-> api_error override;
[[nodiscard]] auto
native_operation(std::uint64_t new_file_size,
native_operation_callback callback) -> api_error override;
void remove(std::uint64_t handle) override;

View File

@ -24,20 +24,18 @@
#include "file_manager/i_open_file.hpp"
#include "utils/types/file/i_file.hpp"
namespace repertory {
class i_provider;
class open_file_base : public i_closeable_open_file {
public:
open_file_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider);
filesystem_item fsi, i_provider &provider, bool disable_io);
open_file_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi,
std::map<std::uint64_t, open_file_data> open_data,
i_provider &provider);
i_provider &provider, bool disable_io);
~open_file_base() override = default;
@ -98,7 +96,7 @@ public:
[[nodiscard]] auto get_result() -> api_error;
};
protected:
private:
std::uint64_t chunk_size_;
std::uint8_t chunk_timeout_;
filesystem_item fsi_;
@ -107,21 +105,19 @@ protected:
i_provider &provider_;
private:
std::unordered_map<std::size_t, std::shared_ptr<download>> active_downloads_;
api_error error_{api_error::success};
mutable std::mutex error_mtx_;
mutable std::recursive_mutex file_mtx_;
stop_type io_stop_requested_{false};
std::unique_ptr<std::thread> io_thread_;
protected:
std::unordered_map<std::size_t, std::shared_ptr<download>> active_downloads_;
mutable std::recursive_mutex file_mtx_;
std::atomic<std::chrono::system_clock::time_point> last_access_{
std::chrono::system_clock::now()};
bool modified_{false};
std::unique_ptr<utils::file::i_file> nf_;
mutable std::mutex io_thread_mtx_;
std::condition_variable io_thread_notify_;
std::deque<std::shared_ptr<io_item>> io_thread_queue_;
std::atomic<std::chrono::system_clock::time_point> last_access_{
std::chrono::system_clock::now(),
};
bool modified_{false};
bool removed_{false};
private:
@ -130,11 +126,42 @@ private:
protected:
[[nodiscard]] auto do_io(std::function<api_error()> action) -> api_error;
virtual auto is_download_complete() const -> bool = 0;
[[nodiscard]] auto get_active_downloads()
-> std::unordered_map<std::size_t, std::shared_ptr<download>> & {
return active_downloads_;
}
[[nodiscard]] auto get_mutex() const -> std::recursive_mutex & {
return file_mtx_;
}
[[nodiscard]] auto get_last_chunk_size() const -> std::size_t;
[[nodiscard]] auto get_provider() -> i_provider & { return provider_; }
[[nodiscard]] auto get_provider() const -> const i_provider & {
return provider_;
}
[[nodiscard]] auto is_removed() const -> bool;
void notify_io();
void reset_timeout();
auto set_api_error(const api_error &e) -> api_error;
auto set_api_error(const api_error &err) -> api_error;
void set_file_size(std::uint64_t size);
void set_last_chunk_size(std::size_t size);
void set_modified(bool modified);
void set_removed(bool removed);
void set_source_path(std::string source_path);
void wait_for_io(stop_type &stop_requested);
public:
void add(std::uint64_t handle, open_file_data ofd) override;
@ -143,6 +170,8 @@ public:
auto close() -> bool override;
[[nodiscard]] auto get_allocated() const -> bool override { return false; }
[[nodiscard]] auto get_api_error() const -> api_error;
[[nodiscard]] auto get_api_path() const -> std::string override;
@ -157,27 +186,23 @@ public:
[[nodiscard]] auto get_handles() const -> std::vector<std::uint64_t> override;
[[nodiscard]] auto get_open_data()
-> std::map<std::uint64_t, open_file_data> & override;
[[nodiscard]] auto
get_open_data() -> std::map<std::uint64_t, open_file_data> & override;
[[nodiscard]] auto get_open_data() const
-> const std::map<std::uint64_t, open_file_data> & override;
[[nodiscard]] auto get_open_data(std::uint64_t handle)
-> open_file_data & override;
[[nodiscard]] auto
get_open_data(std::uint64_t handle) -> open_file_data & override;
[[nodiscard]] auto get_open_data(std::uint64_t handle) const
-> const open_file_data & override;
[[nodiscard]] auto
get_open_data(std::uint64_t handle) const -> const open_file_data & override;
[[nodiscard]] auto get_open_file_count() const -> std::size_t override;
[[nodiscard]] auto get_source_path() const -> std::string override {
return fsi_.source_path;
}
[[nodiscard]] auto get_source_path() const -> std::string override;
[[nodiscard]] auto has_handle(std::uint64_t handle) const -> bool override {
return open_data_.find(handle) != open_data_.end();
}
[[nodiscard]] auto has_handle(std::uint64_t handle) const -> bool override;
[[nodiscard]] auto is_directory() const -> bool override {
return fsi_.directory;

View File

@ -0,0 +1,150 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
#include "file_manager/open_file_base.hpp"
#include "types/repertory.hpp"
#include "utils/file.hpp"
namespace repertory {
class i_provider;
class i_upload_manager;
class ring_buffer_base : public open_file_base {
public:
ring_buffer_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider,
std::size_t ring_size, bool disable_io);
~ring_buffer_base() override = default;
public:
ring_buffer_base() = delete;
ring_buffer_base(const ring_buffer_base &) noexcept = delete;
ring_buffer_base(ring_buffer_base &&) noexcept = delete;
auto operator=(ring_buffer_base &&) noexcept -> ring_buffer_base & = delete;
auto
operator=(const ring_buffer_base &) noexcept -> ring_buffer_base & = delete;
public:
static constexpr const auto min_ring_size{5U};
private:
boost::dynamic_bitset<> read_state_;
std::size_t total_chunks_;
private:
std::condition_variable chunk_notify_;
mutable std::mutex chunk_mtx_;
std::mutex read_mtx_;
std::unique_ptr<std::thread> reader_thread_;
std::size_t ring_begin_{};
std::size_t ring_end_{};
std::size_t ring_pos_{};
stop_type stop_requested_{false};
private:
[[nodiscard]] auto check_start() -> api_error;
auto download_chunk(std::size_t chunk, bool skip_active) -> api_error;
void reader_thread();
void update_position(std::size_t count, bool is_forward);
protected:
[[nodiscard]] auto has_reader_thread() const -> bool {
return reader_thread_ != nullptr;
}
[[nodiscard]] auto get_ring_size() const -> std::size_t {
return read_state_.size();
}
[[nodiscard]] virtual auto on_check_start() -> bool = 0;
[[nodiscard]] virtual auto
on_chunk_downloaded(std::size_t chunk,
const data_buffer &buffer) -> api_error = 0;
[[nodiscard]] virtual auto
on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset, data_buffer &data,
std::size_t &bytes_read) -> api_error = 0;
[[nodiscard]] virtual auto
use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func) -> api_error = 0;
public:
auto close() -> bool override;
void forward(std::size_t count);
[[nodiscard]] auto get_current_chunk() const -> std::size_t {
return ring_pos_;
}
[[nodiscard]] auto get_first_chunk() const -> std::size_t {
return ring_begin_;
}
[[nodiscard]] auto get_last_chunk() const -> std::size_t { return ring_end_; }
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
[[nodiscard]] auto get_total_chunks() const -> std::size_t {
return total_chunks_;
}
[[nodiscard]] auto is_complete() const -> bool override { return false; }
[[nodiscard]] auto is_write_supported() const -> bool override {
return false;
}
[[nodiscard]] auto read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error override;
[[nodiscard]] auto resize(std::uint64_t /* size */) -> api_error override {
return api_error::not_supported;
}
void reverse(std::size_t count);
void set(std::size_t first_chunk, std::size_t current_chunk);
void set_api_path(const std::string &api_path) override;
[[nodiscard]] auto
write(std::uint64_t /* write_offset */, const data_buffer & /* data */,
std::size_t & /* bytes_written */) -> api_error override {
return api_error::not_supported;
}
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
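
`ring_buffer_base` keeps a sliding window of at least `min_ring_size` chunks and downloads through it on demand; subclasses only supply the chunk I/O hooks. A sketch of streaming a file sequentially through the `direct_open_file` subclass declared earlier (the chunk timeout and provider wiring are assumptions of the example):

```cpp
// Sketch: sequentially stream a remote file through a ring-buffer reader.
#include "file_manager/direct_open_file.hpp"

using namespace repertory;

auto stream_file(i_provider &provider, filesystem_item fsi,
                 std::uint64_t chunk_size) -> api_error {
  direct_open_file file(chunk_size, 30U /* chunk timeout, assumed seconds */,
                        fsi, provider);

  data_buffer data;
  for (std::uint64_t offset = 0U; offset < fsi.size; offset += chunk_size) {
    // read() pulls each chunk through the ring, downloading it on demand and
    // advancing the window past chunks that fall behind the read position.
    auto res =
        file.read(static_cast<std::size_t>(chunk_size), offset, data);
    if (res != api_error::success) {
      return res;
    }
    // ... consume `data` ...
  }
  return api_error::success;
}
```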

View File

@ -22,20 +22,17 @@
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_OPEN_FILE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_OPEN_FILE_HPP_
#include "file_manager/open_file_base.hpp"
#include "file_manager/ring_buffer_base.hpp"
#include "types/repertory.hpp"
#include "utils/file.hpp"
namespace repertory {
class i_provider;
class i_upload_manager;
class ring_buffer_open_file final : public open_file_base {
class ring_buffer_open_file final : public ring_buffer_base {
public:
ring_buffer_open_file(std::string buffer_directory, std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider);
ring_buffer_open_file(std::string buffer_directory, std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider, std::size_t ring_size);
@ -46,85 +43,49 @@ public:
ring_buffer_open_file() = delete;
ring_buffer_open_file(const ring_buffer_open_file &) noexcept = delete;
ring_buffer_open_file(ring_buffer_open_file &&) noexcept = delete;
auto operator=(ring_buffer_open_file &&) noexcept
-> ring_buffer_open_file & = delete;
auto operator=(ring_buffer_open_file &&) noexcept -> ring_buffer_open_file & =
delete;
auto operator=(const ring_buffer_open_file &) noexcept
-> ring_buffer_open_file & = delete;
private:
boost::dynamic_bitset<> ring_state_;
std::size_t total_chunks_;
std::string source_path_;
private:
std::unique_ptr<std::thread> chunk_forward_thread_;
std::unique_ptr<std::thread> chunk_reverse_thread_;
std::condition_variable chunk_notify_;
mutable std::mutex chunk_mtx_;
std::size_t current_chunk_{};
std::size_t first_chunk_{};
std::size_t last_chunk_;
private:
auto download_chunk(std::size_t chunk) -> api_error;
void forward_reader_thread(std::size_t count);
void reverse_reader_thread(std::size_t count);
std::unique_ptr<utils::file::i_file> nf_;
protected:
auto is_download_complete() const -> bool override;
[[nodiscard]] auto on_check_start() -> bool override;
public:
void forward(std::size_t count);
[[nodiscard]] auto
on_chunk_downloaded(std::size_t chunk,
const data_buffer &buffer) -> api_error override;
[[nodiscard]] auto get_current_chunk() const -> std::size_t {
return current_chunk_;
}
[[nodiscard]] auto
on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset, data_buffer &data,
std::size_t &bytes_read) -> api_error override;
[[nodiscard]] auto get_first_chunk() const -> std::size_t {
return first_chunk_;
}
[[nodiscard]] auto get_last_chunk() const -> std::size_t {
return last_chunk_;
}
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
[[nodiscard]] auto get_total_chunks() const -> std::size_t {
return total_chunks_;
}
[[nodiscard]] auto is_complete() const -> bool override { return true; }
auto is_write_supported() const -> bool override { return false; }
[[nodiscard]] auto native_operation(native_operation_callback callback)
[[nodiscard]] auto use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func)
-> api_error override;
[[nodiscard]] auto native_operation(std::uint64_t, native_operation_callback)
public:
[[nodiscard]] static auto can_handle_file(std::uint64_t file_size,
std::size_t chunk_size,
std::size_t ring_size) -> bool;
[[nodiscard]] auto
native_operation(native_operation_callback callback) -> api_error override;
[[nodiscard]] auto native_operation(std::uint64_t /* new_file_size */,
native_operation_callback /* callback */)
-> api_error override {
return api_error::not_supported;
}
[[nodiscard]] auto read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error override;
[[nodiscard]] auto resize(std::uint64_t) -> api_error override {
return api_error::not_supported;
}
void reverse(std::size_t count);
void set(std::size_t first_chunk, std::size_t current_chunk);
void set_api_path(const std::string &api_path) override;
[[nodiscard]] auto write(std::uint64_t, const data_buffer &, std::size_t &)
-> api_error override {
return api_error::not_supported;
[[nodiscard]] auto get_source_path() const -> std::string override {
return source_path_;
}
};
} // namespace repertory
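
`can_handle_file()` is the static guard that decides whether a file fits the ring-buffer strategy at all. An illustrative fallback check in the spirit of the `download_type_selected` event and the direct-read/fallback notes in the release; the selection logic shown is not the file manager's actual code:

```cpp
// Illustrative only: how a caller might fall back between download types.
#include "file_manager/ring_buffer_open_file.hpp"

using namespace repertory;

auto pick_download_type(download_type preferred, std::uint64_t file_size,
                        std::size_t chunk_size, std::size_t ring_size)
    -> download_type {
  if (preferred == download_type::ring_buffer &&
      not ring_buffer_open_file::can_handle_file(file_size, chunk_size,
                                                 ring_size)) {
    // Files the ring cannot accommodate fall back to a direct download.
    return download_type::direct;
  }
  return preferred;
}
```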

View File

@ -22,8 +22,8 @@
#ifndef REPERTORY_INCLUDE_PROVIDERS_BASE_PROVIDER_HPP_
#define REPERTORY_INCLUDE_PROVIDERS_BASE_PROVIDER_HPP_
#include "db/i_meta_db.hpp"
#include "providers/i_provider.hpp"
#include "providers/meta_db.hpp"
#include "types/repertory.hpp"
namespace repertory {
@ -32,6 +32,13 @@ class i_file_manager;
class i_http_comm;
class base_provider : public i_provider {
private:
struct removed_item final {
std::string api_path;
bool directory{};
std::string source_path;
};
public:
base_provider(app_config &config, i_http_comm &comm)
: config_(config), comm_(comm) {}
@ -42,11 +49,23 @@ private:
private:
api_item_added_callback api_item_added_;
std::unique_ptr<meta_db> db3_;
std::unique_ptr<i_meta_db> db3_;
i_file_manager *fm_{};
private:
void remove_deleted_files(bool source_only);
void add_all_items(const stop_type &stop_requested);
void process_removed_directories(std::deque<removed_item> removed_list,
const stop_type &stop_requested);
void process_removed_files(std::deque<removed_item> removed_list,
const stop_type &stop_requested);
void process_removed_items(const stop_type &stop_requested);
void remove_deleted_items(const stop_type &stop_requested);
void remove_unmatched_source_files(const stop_type &stop_requested);
protected:
[[nodiscard]] static auto create_api_file(std::string path, std::string key,
@ -77,7 +96,9 @@ protected:
return api_item_added_;
}
[[nodiscard]] auto get_comm() const -> i_http_comm & { return comm_; }
[[nodiscard]] auto get_comm() -> i_http_comm & { return comm_; }
[[nodiscard]] auto get_comm() const -> const i_http_comm & { return comm_; }
[[nodiscard]] auto get_config() -> app_config & { return config_; }
@ -85,7 +106,9 @@ protected:
return config_;
}
[[nodiscard]] auto get_db() -> meta_db & { return *db3_; }
[[nodiscard]] auto get_db() -> i_meta_db & { return *db3_; }
[[nodiscard]] auto get_db() const -> const i_meta_db & { return *db3_; }
[[nodiscard]] virtual auto
get_directory_items_impl(const std::string &api_path,
@ -97,9 +120,6 @@ protected:
return fm_;
}
[[nodiscard]] virtual auto get_used_drive_space_impl() const
-> std::uint64_t = 0;
[[nodiscard]] virtual auto remove_directory_impl(const std::string &api_path)
-> api_error = 0;

View File

@ -23,8 +23,8 @@
#define REPERTORY_INCLUDE_PROVIDERS_ENCRYPT_ENCRYPT_PROVIDER_HPP_
#include "app_config.hpp"
#include "db/i_file_db.hpp"
#include "providers/i_provider.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/encrypting_reader.hpp"
namespace repertory {
@ -45,21 +45,22 @@ public:
private:
struct reader_info final {
std::chrono::system_clock::time_point last_access_time =
std::chrono::system_clock::now();
std::unique_ptr<utils::encryption::encrypting_reader> reader{};
std::chrono::system_clock::time_point last_access_time{
std::chrono::system_clock::now(),
};
std::unique_ptr<utils::encryption::encrypting_reader> reader;
std::mutex reader_mtx;
};
private:
app_config &config_;
utils::db::sqlite::db3_t db_;
encrypt_config encrypt_config_;
private:
i_file_manager *fm_ = nullptr;
std::unordered_map<std::string, std::shared_ptr<reader_info>>
reader_lookup_{};
std::recursive_mutex reader_lookup_mtx_{};
std::unique_ptr<i_file_db> db_{nullptr};
i_file_manager *fm_{nullptr};
std::unordered_map<std::string, std::shared_ptr<reader_info>> reader_lookup_;
std::recursive_mutex reader_lookup_mtx_;
private:
static auto create_api_file(const std::string &api_path, bool directory,
@ -73,11 +74,15 @@ private:
const std::string &source_path)>
callback) const -> api_error;
[[nodiscard]] auto get_encrypt_config() const -> const encrypt_config & {
return encrypt_config_;
}
auto process_directory_entry(const utils::file::i_fs_item &dir_entry,
const encrypt_config &cfg,
std::string &api_path) const -> bool;
void remove_deleted_files();
void remove_deleted_files(const stop_type &stop_requested);
public:
[[nodiscard]] auto create_directory(const std::string &api_path,
@ -111,7 +116,8 @@ public:
[[nodiscard]] auto get_file(const std::string &api_path, api_file &file) const
-> api_error override;
[[nodiscard]] auto get_file_list(api_file_list &list) const
[[nodiscard]] auto get_file_list(api_file_list &list,
std::string &marker) const
-> api_error override;
[[nodiscard]] auto get_file_size(const std::string &api_path,

View File

@ -58,7 +58,8 @@ public:
[[nodiscard]] virtual auto get_file(const std::string &api_path,
api_file &file) const -> api_error = 0;
[[nodiscard]] virtual auto get_file_list(api_file_list &list) const
[[nodiscard]] virtual auto get_file_list(api_file_list &list,
std::string &marker) const
-> api_error = 0;
[[nodiscard]] virtual auto get_file_size(const std::string &api_path,

View File

@ -46,6 +46,9 @@ public:
auto operator=(const s3_provider &) -> s3_provider & = delete;
auto operator=(s3_provider &&) -> s3_provider & = delete;
private:
s3_config s3_config_;
private:
[[nodiscard]] auto add_if_not_found(api_file &file,
const std::string &object_name) const
@ -74,9 +77,14 @@ private:
[[nodiscard]] auto
get_object_list(std::string &response_data, long &response_code,
std::optional<std::string> delimiter = std::nullopt,
std::optional<std::string> prefix = std::nullopt) const
std::optional<std::string> prefix = std::nullopt,
std::optional<std::string> token = std::nullopt) const
-> bool;
[[nodiscard]] auto get_s3_config() const -> const s3_config & {
return s3_config_;
}
protected:
[[nodiscard]] auto create_directory_impl(const std::string &api_path,
api_meta_map &meta)
@ -86,9 +94,6 @@ protected:
directory_item_list &list) const
-> api_error override;
[[nodiscard]] auto get_used_drive_space_impl() const
-> std::uint64_t override;
[[nodiscard]] auto remove_directory_impl(const std::string &api_path)
-> api_error override;
@ -110,7 +115,8 @@ public:
[[nodiscard]] auto get_file(const std::string &api_path, api_file &file) const
-> api_error override;
[[nodiscard]] auto get_file_list(api_file_list &list) const
[[nodiscard]] auto get_file_list(api_file_list &list,
std::string &marker) const
-> api_error override;
[[nodiscard]] auto get_total_drive_space() const -> std::uint64_t override;

View File

@ -45,6 +45,9 @@ public:
auto operator=(const sia_provider &) -> sia_provider & = delete;
auto operator=(sia_provider &&) -> sia_provider & = delete;
private:
sia_config sia_config_;
private:
[[nodiscard]] auto get_object_info(const std::string &api_path,
json &object_info) const -> api_error;
@ -52,6 +55,10 @@ private:
[[nodiscard]] auto get_object_list(const std::string &api_path,
nlohmann::json &object_list) const -> bool;
[[nodiscard]] auto get_sia_config() const -> const auto & {
return sia_config_;
}
protected:
[[nodiscard]] auto create_directory_impl(const std::string &api_path,
api_meta_map &meta)
@ -61,9 +68,6 @@ protected:
directory_item_list &list) const
-> api_error override;
[[nodiscard]] auto get_used_drive_space_impl() const
-> std::uint64_t override;
[[nodiscard]] auto remove_directory_impl(const std::string &api_path)
-> api_error override;
@ -82,7 +86,8 @@ public:
[[nodiscard]] auto get_file(const std::string &api_path, api_file &file) const
-> api_error override;
[[nodiscard]] auto get_file_list(api_file_list &list) const
[[nodiscard]] auto get_file_list(api_file_list &list,
std::string &marker) const
-> api_error override;
[[nodiscard]] auto get_provider_type() const -> provider_type override {

View File

@ -22,6 +22,8 @@
#ifndef REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
#define REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
#include "types/repertory.hpp"
inline constexpr const auto PACKET_SERVICE_FUSE{1U};
inline constexpr const auto PACKET_SERVICE_WINFSP{2U};
@ -31,7 +33,67 @@ inline constexpr const auto PACKET_SERVICE_FLAGS{PACKET_SERVICE_WINFSP};
inline constexpr const auto PACKET_SERVICE_FLAGS{PACKET_SERVICE_FUSE};
#endif // defined(_WIN32)
constexpr const auto default_remote_client_pool_size{20U};
constexpr const auto default_remote_max_connections{20U};
constexpr const auto default_remote_receive_timeout_ms{120U * 1000U};
constexpr const auto default_remote_send_timeout_ms{30U * 1000U};
namespace repertory::remote {
struct remote_config final {
std::uint16_t api_port{};
std::string encryption_token;
std::string host_name_or_ip;
std::uint8_t max_connections{default_remote_max_connections};
std::uint32_t recv_timeout_ms{default_remote_receive_timeout_ms};
std::uint32_t send_timeout_ms{default_remote_send_timeout_ms};
auto operator==(const remote_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return api_port == cfg.api_port &&
encryption_token == cfg.encryption_token &&
host_name_or_ip == cfg.host_name_or_ip &&
max_connections == cfg.max_connections &&
recv_timeout_ms == cfg.recv_timeout_ms &&
send_timeout_ms == cfg.send_timeout_ms;
}
return true;
}
auto operator!=(const remote_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return not(cfg == *this);
}
return false;
}
};
struct remote_mount final {
std::uint16_t api_port{};
std::uint8_t client_pool_size{default_remote_client_pool_size};
bool enable{false};
std::string encryption_token;
auto operator==(const remote_mount &cfg) const noexcept -> bool {
if (&cfg == this) {
return true;
}
return api_port == cfg.api_port &&
client_pool_size == cfg.client_pool_size && enable == cfg.enable &&
encryption_token == cfg.encryption_token;
}
auto operator!=(const remote_mount &cfg) const noexcept -> bool {
if (&cfg == this) {
return false;
}
return not(cfg == *this);
}
};
using block_count = std::uint64_t;
using block_size = std::uint32_t;
using file_handle = std::uint64_t;
@ -160,4 +222,46 @@ create_os_open_flags(const open_flags &flags) -> std::uint32_t;
#endif // !defined(_WIN32)
} // namespace repertory::remote
NLOHMANN_JSON_NAMESPACE_BEGIN
template <> struct adl_serializer<repertory::remote::remote_config> {
static void to_json(json &data,
const repertory::remote::remote_config &value) {
data[repertory::JSON_API_PORT] = value.api_port;
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
data[repertory::JSON_HOST_NAME_OR_IP] = value.host_name_or_ip;
data[repertory::JSON_MAX_CONNECTIONS] = value.max_connections;
data[repertory::JSON_RECV_TIMEOUT_MS] = value.recv_timeout_ms;
data[repertory::JSON_SEND_TIMEOUT_MS] = value.send_timeout_ms;
}
static void from_json(const json &data,
repertory::remote::remote_config &value) {
data.at(repertory::JSON_API_PORT).get_to(value.api_port);
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
data.at(repertory::JSON_HOST_NAME_OR_IP).get_to(value.host_name_or_ip);
data.at(repertory::JSON_MAX_CONNECTIONS).get_to(value.max_connections);
data.at(repertory::JSON_RECV_TIMEOUT_MS).get_to(value.recv_timeout_ms);
data.at(repertory::JSON_SEND_TIMEOUT_MS).get_to(value.send_timeout_ms);
}
};
template <> struct adl_serializer<repertory::remote::remote_mount> {
static void to_json(json &data,
const repertory::remote::remote_mount &value) {
data[repertory::JSON_API_PORT] = value.api_port;
data[repertory::JSON_CLIENT_POOL_SIZE] = value.client_pool_size;
data[repertory::JSON_ENABLE_REMOTE_MOUNT] = value.enable;
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
}
static void from_json(const json &data,
repertory::remote::remote_mount &value) {
data.at(repertory::JSON_API_PORT).get_to(value.api_port);
data.at(repertory::JSON_CLIENT_POOL_SIZE).get_to(value.client_pool_size);
data.at(repertory::JSON_ENABLE_REMOTE_MOUNT).get_to(value.enable);
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
}
};
NLOHMANN_JSON_NAMESPACE_END
#endif // REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
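
Both remote structs serialize through the `adl_serializer` specializations above, so they can be stored directly in `config.json`. A round-trip sketch (the `JSON_*` key constants resolve to strings defined elsewhere):

```cpp
// Sketch: serialize a remote mount section and read it back.
#include "types/remote.hpp"

#include <cassert>
#include <nlohmann/json.hpp>

using namespace repertory;

void round_trip_remote_mount() {
  remote::remote_mount mount{};
  mount.api_port = 20000U;
  mount.enable = true;
  mount.encryption_token = "token";

  nlohmann::json data = mount;                      // adl_serializer::to_json
  auto restored = data.get<remote::remote_mount>(); // adl_serializer::from_json

  // operator== compares every field, so the round trip should be lossless.
  assert(restored == mount);
}
```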

View File

@ -1,292 +1,655 @@
/*
Copyright <2018-2023> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
#define REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
namespace repertory {
inline constexpr const auto max_time{std::numeric_limits<std::uint64_t>::max()};
inline constexpr const std::string META_ACCESSED{"accessed"};
inline constexpr const std::string META_ATTRIBUTES{"attributes"};
inline constexpr const std::string META_BACKUP{"backup"};
inline constexpr const std::string META_CHANGED{"changed"};
inline constexpr const std::string META_CREATION{"creation"};
inline constexpr const std::string META_DIRECTORY{"directory"};
inline constexpr const std::string META_GID{"gid"};
inline constexpr const std::string META_KEY{"key"};
inline constexpr const std::string META_MODE{"mode"};
inline constexpr const std::string META_MODIFIED{"modified"};
inline constexpr const std::string META_OSXFLAGS{"flags"};
inline constexpr const std::string META_PINNED{"pinned"};
inline constexpr const std::string META_SIZE{"size"};
inline constexpr const std::string META_SOURCE{"source"};
inline constexpr const std::string META_UID{"uid"};
inline constexpr const std::string META_WRITTEN{"written"};
inline constexpr const std::array<std::string, 16U> META_USED_NAMES = {
META_ACCESSED, META_ATTRIBUTES, META_BACKUP, META_CHANGED,
META_CREATION, META_DIRECTORY, META_GID, META_KEY,
META_MODE, META_MODIFIED, META_OSXFLAGS, META_PINNED,
META_SIZE, META_SOURCE, META_UID, META_WRITTEN,
};
using api_meta_map = std::map<std::string, std::string>;
enum class api_error {
success = 0,
access_denied,
bad_address,
buffer_overflow,
buffer_too_small,
comm_error,
decryption_error,
directory_end_of_files,
directory_exists,
directory_not_empty,
directory_not_found,
download_failed,
download_incomplete,
download_stopped,
empty_ring_buffer_chunk_size,
empty_ring_buffer_size,
error,
file_in_use,
file_size_mismatch,
incompatible_version,
invalid_handle,
invalid_operation,
invalid_ring_buffer_multiple,
invalid_ring_buffer_size,
invalid_version,
item_exists,
item_not_found,
no_disk_space,
not_implemented,
not_supported,
os_error,
out_of_memory,
permission_denied,
upload_failed,
xattr_buffer_small,
xattr_exists,
xattr_not_found,
xattr_too_big,
ERROR_COUNT
};
[[nodiscard]] auto api_error_from_string(std::string_view str) -> api_error;
[[nodiscard]] auto
api_error_to_string(const api_error &error) -> const std::string &;
enum class download_type { direct, fallback, ring_buffer };
[[nodiscard]] auto
download_type_from_string(std::string type,
const download_type &default_type) -> download_type;
[[nodiscard]] auto
download_type_to_string(const download_type &type) -> std::string;
enum class exit_code : std::int32_t {
success,
communication_error = -1,
file_creation_failed = -2,
incompatible_version = -3,
invalid_syntax = -4,
lock_failed = -5,
mount_active = -6,
mount_result = -7,
not_mounted = -8,
startup_exception = -9,
failed_to_get_mount_state = -10,
export_failed = -11,
import_failed = -12,
option_not_found = -13,
invalid_provider_type = -14,
set_option_not_found = -15,
pin_failed = -16,
unpin_failed = -17,
init_failed = -18,
};
enum http_error_codes : std::int32_t {
ok = 200,
multiple_choices = 300,
not_found = 404,
};
enum class lock_result {
success,
locked,
failure,
};
enum class provider_type : std::size_t {
sia,
remote,
s3,
encrypt,
unknown,
};
#if defined(_WIN32)
struct open_file_data final {
PVOID directory_buffer{nullptr};
};
#else
using open_file_data = int;
#endif
struct api_file final {
std::string api_path{};
std::string api_parent{};
std::uint64_t accessed_date{};
std::uint64_t changed_date{};
std::uint64_t creation_date{};
std::uint64_t file_size{};
std::string key{};
std::uint64_t modified_date{};
std::string source_path;
};
struct directory_item final {
std::string api_path{};
std::string api_parent{};
bool directory{false};
std::uint64_t size{};
api_meta_map meta{};
bool resolved{false};
[[nodiscard]] static auto from_json(const json &item) -> directory_item {
directory_item ret{};
ret.api_path = item["path"].get<std::string>();
ret.api_parent = item["parent"].get<std::string>();
ret.directory = item["directory"].get<bool>();
ret.size = item["size"].get<std::uint64_t>();
ret.meta = item["meta"].get<api_meta_map>();
return ret;
}
[[nodiscard]] auto to_json() const -> json {
return {
{"path", api_path}, {"parent", api_parent}, {"size", size},
{"directory", directory}, {"meta", meta},
};
}
};
struct encrypt_config final {
std::string encryption_token{};
std::string path{};
};
struct filesystem_item final {
std::string api_path{};
std::string api_parent{};
bool directory{false};
std::uint64_t size{};
std::string source_path{};
};
struct host_config final {
std::string agent_string{};
std::string api_password{};
std::string api_user{};
std::uint16_t api_port{};
std::string host_name_or_ip{"localhost"};
std::string path{};
std::string protocol{"http"};
std::uint32_t timeout_ms{60000U};
auto operator==(const host_config &hc) const noexcept -> bool {
if (&hc != this) {
return agent_string == hc.agent_string &&
api_password == hc.api_password && api_user == hc.api_user &&
api_port == hc.api_port && host_name_or_ip == hc.host_name_or_ip &&
path == hc.path && protocol == hc.protocol &&
timeout_ms == hc.timeout_ms;
}
return true;
}
auto operator!=(const host_config &hc) const noexcept -> bool {
if (&hc != this) {
return not(hc == *this);
}
return false;
}
};
#if defined(__GNUG__)
__attribute__((unused))
#endif
static void
to_json(json &j, const host_config &hc) {
j = json{{"AgentString", hc.agent_string},
{"ApiPassword", hc.api_password},
{"ApiPort", hc.api_port},
{"ApiUser", hc.api_user},
{"HostNameOrIp", hc.host_name_or_ip},
{"Path", hc.path},
{"Protocol", hc.protocol},
{"TimeoutMs", hc.timeout_ms}};
}
#if defined(__GNUG__)
__attribute__((unused))
#endif
static void
from_json(const json &j, host_config &hc) {
j.at("AgentString").get_to(hc.agent_string);
j.at("ApiPassword").get_to(hc.api_password);
j.at("ApiPort").get_to(hc.api_port);
j.at("AuthUser").get_to(hc.api_user);
j.at("HostNameOrIp").get_to(hc.host_name_or_ip);
j.at("Path").get_to(hc.path);
j.at("Protocol").get_to(hc.protocol);
j.at("TimeoutMs").get_to(hc.timeout_ms);
}
struct s3_config final {
std::string access_key{};
std::string bucket{};
std::string encryption_token{};
std::string region{"any"};
std::string secret_key{};
std::uint32_t timeout_ms{60000U};
std::string url{};
bool use_path_style{false};
bool use_region_in_url{false};
};
struct sia_config final {
std::string bucket{};
};
using api_file_list = std::vector<api_file>;
using api_file_provider_callback = std::function<void(api_file &)>;
using api_item_added_callback = std::function<api_error(bool, api_file &)>;
using directory_item_list = std::vector<directory_item>;
using meta_provider_callback = std::function<void(directory_item &)>;
} // namespace repertory
#endif // REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
/*
Copyright <2018-2023> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
#define REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
namespace repertory {
constexpr const auto default_api_auth_size{48U};
constexpr const auto default_download_timeout_secs{30U};
constexpr const auto default_eviction_delay_mins{1U};
constexpr const auto default_high_freq_interval_secs{30U};
constexpr const auto default_low_freq_interval_secs{60U * 60U};
constexpr const auto default_max_cache_size_bytes{
std::uint64_t(20UL * 1024UL * 1024UL * 1024UL),
};
constexpr const auto default_max_upload_count{5U};
constexpr const auto default_med_freq_interval_secs{2U * 60U};
constexpr const auto default_online_check_retry_secs{60U};
constexpr const auto default_orphaned_file_retention_days{15U};
constexpr const auto default_retry_read_count{6U};
constexpr const auto default_ring_buffer_file_size{512U};
constexpr const auto default_task_wait_ms{100U};
constexpr const auto default_timeout_ms{60000U};
constexpr const auto max_orphaned_file_retention_days{std::uint16_t(31U)};
constexpr const auto max_ring_buffer_file_size{std::uint16_t(1024U)};
constexpr const auto min_cache_size_bytes{
std::uint64_t(100UL * 1024UL * 1024UL)};
constexpr const auto min_download_timeout_secs{std::uint8_t(5U)};
constexpr const auto min_online_check_retry_secs{std::uint16_t(15U)};
constexpr const auto min_orphaned_file_retention_days{std::uint16_t(1U)};
constexpr const auto min_retry_read_count{std::uint16_t(2U)};
constexpr const auto min_ring_buffer_file_size{std::uint16_t(64U)};
constexpr const auto min_task_wait_ms{std::uint16_t(50U)};
template <typename data_t> class atomic final {
public:
atomic() : mtx_(std::make_shared<std::mutex>()) {}
atomic(const atomic &at_data)
: data_(at_data.load()), mtx_(std::make_shared<std::mutex>()) {}
atomic(data_t data)
: data_(std::move(data)), mtx_(std::make_shared<std::mutex>()) {}
atomic(atomic &&) = default;
~atomic() = default;
private:
data_t data_;
std::shared_ptr<std::mutex> mtx_;
public:
[[nodiscard]] auto load() const -> data_t {
mutex_lock lock(*mtx_);
return data_;
}
auto store(data_t data) -> data_t {
mutex_lock lock(*mtx_);
data_ = std::move(data);
return data_;
}
auto operator=(const atomic &at_data) -> atomic & {
if (&at_data == this) {
return *this;
}
store(at_data.load());
return *this;
}
auto operator=(atomic &&) -> atomic & = default;
auto operator=(data_t data) -> atomic & {
if (&data == &data_) {
return *this;
}
store(std::move(data));
return *this;
}
[[nodiscard]] auto operator==(const atomic &at_data) const -> bool {
if (&at_data == this) {
return true;
}
mutex_lock lock(*mtx_);
return at_data.load() == data_;
}
[[nodiscard]] auto operator==(const data_t &data) const -> bool {
if (&data == &data_) {
return true;
}
mutex_lock lock(*mtx_);
return data == data_;
}
[[nodiscard]] auto operator!=(const atomic &at_data) const -> bool {
if (&at_data == this) {
return false;
}
mutex_lock lock(*mtx_);
return at_data.load() != data_;
}
[[nodiscard]] auto operator!=(const data_t &data) const -> bool {
if (&data == &data_) {
return false;
}
mutex_lock lock(*mtx_);
return data != data_;
}
[[nodiscard]] operator data_t() const { return load(); }
};
inline constexpr const auto max_time{
std::numeric_limits<std::uint64_t>::max(),
};
inline constexpr const std::string META_ACCESSED{"accessed"};
inline constexpr const std::string META_ATTRIBUTES{"attributes"};
inline constexpr const std::string META_BACKUP{"backup"};
inline constexpr const std::string META_CHANGED{"changed"};
inline constexpr const std::string META_CREATION{"creation"};
inline constexpr const std::string META_DIRECTORY{"directory"};
inline constexpr const std::string META_GID{"gid"};
inline constexpr const std::string META_KEY{"key"};
inline constexpr const std::string META_MODE{"mode"};
inline constexpr const std::string META_MODIFIED{"modified"};
inline constexpr const std::string META_OSXFLAGS{"flags"};
inline constexpr const std::string META_PINNED{"pinned"};
inline constexpr const std::string META_SIZE{"size"};
inline constexpr const std::string META_SOURCE{"source"};
inline constexpr const std::string META_UID{"uid"};
inline constexpr const std::string META_WRITTEN{"written"};
inline constexpr const std::array<std::string, 16U> META_USED_NAMES = {
META_ACCESSED, META_ATTRIBUTES, META_BACKUP, META_CHANGED,
META_CREATION, META_DIRECTORY, META_GID, META_KEY,
META_MODE, META_MODIFIED, META_OSXFLAGS, META_PINNED,
META_SIZE, META_SOURCE, META_UID, META_WRITTEN,
};
using api_meta_map = std::map<std::string, std::string>;
enum class api_error {
success = 0,
access_denied,
bad_address,
buffer_overflow,
buffer_too_small,
cache_not_initialized,
comm_error,
decryption_error,
directory_end_of_files,
directory_exists,
directory_not_empty,
directory_not_found,
download_failed,
download_incomplete,
download_stopped,
empty_ring_buffer_chunk_size,
empty_ring_buffer_size,
error,
file_in_use,
file_size_mismatch,
incompatible_version,
invalid_handle,
invalid_operation,
invalid_ring_buffer_multiple,
invalid_ring_buffer_position,
invalid_ring_buffer_size,
invalid_version,
item_exists,
item_not_found,
more_data,
no_disk_space,
not_implemented,
not_supported,
os_error,
out_of_memory,
permission_denied,
upload_failed,
xattr_buffer_small,
xattr_exists,
xattr_not_found,
xattr_too_big,
ERROR_COUNT
};
[[nodiscard]] auto api_error_from_string(std::string_view str) -> api_error;
[[nodiscard]] auto
api_error_to_string(const api_error &error) -> const std::string &;
enum class database_type {
rocksdb,
sqlite,
};
[[nodiscard]] auto database_type_from_string(
std::string type,
database_type default_type = database_type::rocksdb) -> database_type;
[[nodiscard]] auto
database_type_to_string(const database_type &type) -> std::string;
enum class download_type {
default_,
direct,
ring_buffer,
};
[[nodiscard]] auto download_type_from_string(
std::string type,
download_type default_type = download_type::default_) -> download_type;
[[nodiscard]] auto
download_type_to_string(const download_type &type) -> std::string;
enum class exit_code : std::int32_t {
success = 0,
communication_error = -1,
file_creation_failed = -2,
incompatible_version = -3,
invalid_syntax = -4,
lock_failed = -5,
mount_active = -6,
mount_result = -7,
not_mounted = -8,
startup_exception = -9,
failed_to_get_mount_state = -10,
export_failed = -11,
import_failed = -12,
option_not_found = -13,
invalid_provider_type = -14,
set_option_not_found = -15,
pin_failed = -16,
unpin_failed = -17,
init_failed = -18,
};
enum http_error_codes : std::int32_t {
ok = 200,
multiple_choices = 300,
not_found = 404,
};
enum class lock_result {
success,
locked,
failure,
};
enum class provider_type : std::size_t {
sia,
remote,
s3,
encrypt,
unknown,
};
#if defined(_WIN32)
struct open_file_data final {
PVOID directory_buffer{nullptr};
};
#else
using open_file_data = int;
#endif
struct api_file final {
std::string api_path;
std::string api_parent;
std::uint64_t accessed_date{};
std::uint64_t changed_date{};
std::uint64_t creation_date{};
std::uint64_t file_size{};
std::string key;
std::uint64_t modified_date{};
std::string source_path;
};
struct directory_item final {
std::string api_path;
std::string api_parent;
bool directory{false};
std::uint64_t size{};
api_meta_map meta;
bool resolved{false};
};
struct encrypt_config final {
std::string encryption_token;
std::string path;
auto operator==(const encrypt_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return encryption_token == cfg.encryption_token && path == cfg.path;
}
return true;
}
auto operator!=(const encrypt_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return not(cfg == *this);
}
return false;
}
};
struct filesystem_item final {
std::string api_path;
std::string api_parent;
bool directory{false};
std::uint64_t size{};
std::string source_path;
};
struct host_config final {
std::string agent_string;
std::string api_password;
std::string api_user;
std::uint16_t api_port;
std::string host_name_or_ip{"localhost"};
std::string path;
std::string protocol{"http"};
std::uint32_t timeout_ms{default_timeout_ms};
auto operator==(const host_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return agent_string == cfg.agent_string &&
api_password == cfg.api_password && api_user == cfg.api_user &&
api_port == cfg.api_port &&
host_name_or_ip == cfg.host_name_or_ip && path == cfg.path &&
protocol == cfg.protocol && timeout_ms == cfg.timeout_ms;
}
return true;
}
auto operator!=(const host_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return not(cfg == *this);
}
return false;
}
};
struct s3_config final {
std::string access_key;
std::string bucket;
std::string encryption_token;
std::string region{"any"};
std::string secret_key;
std::uint32_t timeout_ms{default_timeout_ms};
std::string url;
bool use_path_style{false};
bool use_region_in_url{false};
auto operator==(const s3_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return access_key == cfg.access_key && bucket == cfg.bucket &&
encryption_token == cfg.encryption_token && region == cfg.region &&
secret_key == cfg.secret_key && timeout_ms == cfg.timeout_ms &&
url == cfg.url && use_path_style == cfg.use_path_style &&
use_region_in_url == cfg.use_region_in_url;
}
return true;
}
auto operator!=(const s3_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return not(cfg == *this);
}
return false;
}
};
struct sia_config final {
std::string bucket;
auto operator==(const sia_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return bucket == cfg.bucket;
}
return true;
}
auto operator!=(const sia_config &cfg) const noexcept -> bool {
if (&cfg != this) {
return not(cfg == *this);
}
return false;
}
};
using api_file_list = std::vector<api_file>;
using api_file_provider_callback = std::function<void(api_file &)>;
using api_item_added_callback = std::function<api_error(bool, api_file &)>;
using directory_item_list = std::vector<directory_item>;
using meta_provider_callback = std::function<void(directory_item &)>;
inline constexpr const auto JSON_ACCESS_KEY{"AccessKey"};
inline constexpr const auto JSON_AGENT_STRING{"AgentString"};
inline constexpr const auto JSON_API_AUTH{"ApiAuth"};
inline constexpr const auto JSON_API_PARENT{"ApiParent"};
inline constexpr const auto JSON_API_PASSWORD{"ApiPassword"};
inline constexpr const auto JSON_API_PATH{"ApiPath"};
inline constexpr const auto JSON_API_PORT{"ApiPort"};
inline constexpr const auto JSON_API_USER{"ApiUser"};
inline constexpr const auto JSON_BUCKET{"Bucket"};
inline constexpr const auto JSON_CLIENT_POOL_SIZE{"ClientPoolSize"};
inline constexpr const auto JSON_DATABASE_TYPE{"DatabaseType"};
inline constexpr const auto JSON_DIRECTORY{"Directory"};
inline constexpr const auto JSON_DOWNLOAD_TIMEOUT_SECS{
"DownloadTimeoutSeconds"};
inline constexpr const auto JSON_ENABLE_DRIVE_EVENTS{"EnableDriveEvents"};
inline constexpr const auto JSON_ENABLE_DOWNLOAD_TIMEOUT{
"EnableDownloadTimeout"};
inline constexpr const auto JSON_ENABLE_MOUNT_MANAGER{"EnableMountManager"};
inline constexpr const auto JSON_ENABLE_REMOTE_MOUNT{"Enable"};
inline constexpr const auto JSON_ENCRYPTION_TOKEN{"EncryptionToken"};
inline constexpr const auto JSON_ENCRYPT_CONFIG{"EncryptConfig"};
inline constexpr const auto JSON_EVENT_LEVEL{"EventLevel"};
inline constexpr const auto JSON_EVICTION_DELAY_MINS{"EvictionDelayMinutes"};
inline constexpr const auto JSON_EVICTION_USE_ACCESS_TIME{
"EvictionUseAccessedTime"};
inline constexpr const auto JSON_HIGH_FREQ_INTERVAL_SECS{
"HighFreqIntervalSeconds"};
inline constexpr const auto JSON_HOST_CONFIG{"HostConfig"};
inline constexpr const auto JSON_HOST_NAME_OR_IP{"HostNameOrIp"};
inline constexpr const auto JSON_LOW_FREQ_INTERVAL_SECS{
"LowFreqIntervalSeconds"};
inline constexpr const auto JSON_MAX_CACHE_SIZE_BYTES{"MaxCacheSizeBytes"};
inline constexpr const auto JSON_MAX_CONNECTIONS{"MaxConnections"};
inline constexpr const auto JSON_MAX_UPLOAD_COUNT{"MaxUploadCount"};
inline constexpr const auto JSON_MED_FREQ_INTERVAL_SECS{
"MedFreqIntervalSeconds"};
inline constexpr const auto JSON_META{"Meta"};
inline constexpr const auto JSON_ONLINE_CHECK_RETRY_SECS{
"OnlineCheckRetrySeconds"};
inline constexpr const auto JSON_ORPHANED_FILE_RETENTION_DAYS{
"OrphanedFileRetentionDays"};
inline constexpr const auto JSON_PATH{"Path"};
inline constexpr const auto JSON_PREFERRED_DOWNLOAD_TYPE{
"PreferredDownloadType"};
inline constexpr const auto JSON_PROTOCOL{"Protocol"};
inline constexpr const auto JSON_RECV_TIMEOUT_MS{"ReceiveTimeoutMs"};
inline constexpr const auto JSON_REGION{"Region"};
inline constexpr const auto JSON_REMOTE_CONFIG{"RemoteConfig"};
inline constexpr const auto JSON_REMOTE_MOUNT{"RemoteMount"};
inline constexpr const auto JSON_RETRY_READ_COUNT{"RetryReadCount"};
inline constexpr const auto JSON_RING_BUFFER_FILE_SIZE{"RingBufferFileSize"};
inline constexpr const auto JSON_S3_CONFIG{"S3Config"};
inline constexpr const auto JSON_SECRET_KEY{"SecretKey"};
inline constexpr const auto JSON_SEND_TIMEOUT_MS{"SendTimeoutMs"};
inline constexpr const auto JSON_SIA_CONFIG{"SiaConfig"};
inline constexpr const auto JSON_SIZE{"Size"};
inline constexpr const auto JSON_TASK_WAIT_MS{"TaskWaitMs"};
inline constexpr const auto JSON_TIMEOUT_MS{"TimeoutMs"};
inline constexpr const auto JSON_URL{"URL"};
inline constexpr const auto JSON_USE_PATH_STYLE{"UsePathStyle"};
inline constexpr const auto JSON_USE_REGION_IN_URL{"UseRegionInURL"};
inline constexpr const auto JSON_VERSION{"Version"};
} // namespace repertory
NLOHMANN_JSON_NAMESPACE_BEGIN
template <> struct adl_serializer<repertory::directory_item> {
static void to_json(json &data, const repertory::directory_item &value) {
data[repertory::JSON_API_PARENT] = value.api_parent;
data[repertory::JSON_API_PATH] = value.api_path;
data[repertory::JSON_DIRECTORY] = value.directory;
data[repertory::JSON_META] = value.meta;
data[repertory::JSON_SIZE] = value.size;
}
static void from_json(const json &data, repertory::directory_item &value) {
data.at(repertory::JSON_API_PARENT).get_to<std::string>(value.api_parent);
data.at(repertory::JSON_API_PATH).get_to<std::string>(value.api_path);
data.at(repertory::JSON_DIRECTORY).get_to<bool>(value.directory);
data.at(repertory::JSON_META).get_to<repertory::api_meta_map>(value.meta);
data.at(repertory::JSON_SIZE).get_to<std::uint64_t>(value.size);
}
};
template <> struct adl_serializer<repertory::encrypt_config> {
static void to_json(json &data, const repertory::encrypt_config &value) {
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
data[repertory::JSON_PATH] = value.path;
}
static void from_json(const json &data, repertory::encrypt_config &value) {
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
data.at(repertory::JSON_PATH).get_to(value.path);
}
};
template <> struct adl_serializer<repertory::host_config> {
static void to_json(json &data, const repertory::host_config &value) {
data[repertory::JSON_AGENT_STRING] = value.agent_string;
data[repertory::JSON_API_PASSWORD] = value.api_password;
data[repertory::JSON_API_PORT] = value.api_port;
data[repertory::JSON_API_USER] = value.api_user;
data[repertory::JSON_HOST_NAME_OR_IP] = value.host_name_or_ip;
data[repertory::JSON_PATH] = value.path;
data[repertory::JSON_PROTOCOL] = value.protocol;
data[repertory::JSON_TIMEOUT_MS] = value.timeout_ms;
}
static void from_json(const json &data, repertory::host_config &value) {
data.at(repertory::JSON_AGENT_STRING).get_to(value.agent_string);
data.at(repertory::JSON_API_PASSWORD).get_to(value.api_password);
data.at(repertory::JSON_API_PORT).get_to(value.api_port);
data.at(repertory::JSON_API_USER).get_to(value.api_user);
data.at(repertory::JSON_HOST_NAME_OR_IP).get_to(value.host_name_or_ip);
data.at(repertory::JSON_PATH).get_to(value.path);
data.at(repertory::JSON_PROTOCOL).get_to(value.protocol);
data.at(repertory::JSON_TIMEOUT_MS).get_to(value.timeout_ms);
}
};
template <> struct adl_serializer<repertory::s3_config> {
static void to_json(json &data, const repertory::s3_config &value) {
data[repertory::JSON_ACCESS_KEY] = value.access_key;
data[repertory::JSON_BUCKET] = value.bucket;
data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
data[repertory::JSON_REGION] = value.region;
data[repertory::JSON_SECRET_KEY] = value.secret_key;
data[repertory::JSON_TIMEOUT_MS] = value.timeout_ms;
data[repertory::JSON_URL] = value.url;
data[repertory::JSON_USE_PATH_STYLE] = value.use_path_style;
data[repertory::JSON_USE_REGION_IN_URL] = value.use_region_in_url;
}
static void from_json(const json &data, repertory::s3_config &value) {
data.at(repertory::JSON_ACCESS_KEY).get_to(value.access_key);
data.at(repertory::JSON_BUCKET).get_to(value.bucket);
data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
data.at(repertory::JSON_REGION).get_to(value.region);
data.at(repertory::JSON_SECRET_KEY).get_to(value.secret_key);
data.at(repertory::JSON_TIMEOUT_MS).get_to(value.timeout_ms);
data.at(repertory::JSON_URL).get_to(value.url);
data.at(repertory::JSON_USE_PATH_STYLE).get_to(value.use_path_style);
data.at(repertory::JSON_USE_REGION_IN_URL).get_to(value.use_region_in_url);
}
};
template <> struct adl_serializer<repertory::sia_config> {
static void to_json(json &data, const repertory::sia_config &value) {
data[repertory::JSON_BUCKET] = value.bucket;
}
static void from_json(const json &data, repertory::sia_config &value) {
data.at(repertory::JSON_BUCKET).get_to(value.bucket);
}
};
template <typename data_t> struct adl_serializer<repertory::atomic<data_t>> {
static void to_json(json &data, const repertory::atomic<data_t> &value) {
data = value.load();
}
static void from_json(const json &data, repertory::atomic<data_t> &value) {
value.store(data.get<data_t>());
}
};
template <typename primitive_t>
struct adl_serializer<std::atomic<primitive_t>> {
static void to_json(json &data, const std::atomic<primitive_t> &value) {
data = value.load();
}
static void from_json(const json &data, std::atomic<primitive_t> &value) {
value.store(data.get<primitive_t>());
}
};
template <> struct adl_serializer<std::atomic<repertory::database_type>> {
static void to_json(json &data,
const std::atomic<repertory::database_type> &value) {
data = repertory::database_type_to_string(value.load());
}
static void from_json(const json &data,
std::atomic<repertory::database_type> &value) {
value.store(repertory::database_type_from_string(data.get<std::string>()));
}
};
template <> struct adl_serializer<std::atomic<repertory::download_type>> {
static void to_json(json &data,
const std::atomic<repertory::download_type> &value) {
data = repertory::download_type_to_string(value.load());
}
static void from_json(const json &data,
std::atomic<repertory::download_type> &value) {
value.store(repertory::download_type_from_string(data.get<std::string>()));
}
};
template <> struct adl_serializer<repertory::database_type> {
static void to_json(json &data, const repertory::database_type &value) {
data = repertory::database_type_to_string(value);
}
static void from_json(const json &data, repertory::database_type &value) {
value = repertory::database_type_from_string(data.get<std::string>());
}
};
template <> struct adl_serializer<repertory::download_type> {
static void to_json(json &data, const repertory::download_type &value) {
data = repertory::download_type_to_string(value);
}
static void from_json(const json &data, repertory::download_type &value) {
value = repertory::download_type_from_string(data.get<std::string>());
}
};
NLOHMANN_JSON_NAMESPACE_END
#endif // REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
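
As a usage illustration only (not part of this commit), the adl_serializer specializations above let the repertory config structs round-trip through nlohmann::json; a minimal sketch, assuming the header is included as "types/repertory.hpp":

// sketch: host_config <-> json via the adl_serializer specializations above
#include "types/repertory.hpp"
#include <cassert>

void round_trip_example() {
  repertory::host_config cfg{};
  cfg.api_user = "demo";   // field names taken from the struct above
  cfg.api_port = 9980U;

  nlohmann::json data = cfg;                           // adl_serializer::to_json
  auto restored = data.get<repertory::host_config>();  // adl_serializer::from_json
  assert(restored == cfg);                             // operator== compares every field
}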

View File

@ -26,18 +26,21 @@
namespace repertory {
class app_config;
class polling final {
public:
enum struct frequency {
high,
low,
medium,
second,
size,
};
struct polling_item {
struct polling_item final {
std::string name;
frequency freq;
std::function<void()> action;
std::function<void(const stop_type &stop_requested)> action;
};
public:
@ -58,15 +61,15 @@ public:
static auto instance() -> polling & { return instance_; }
private:
app_config *config_ = nullptr;
std::unique_ptr<std::thread> high_frequency_thread_;
app_config *config_{nullptr};
std::array<std::unique_ptr<std::thread>,
static_cast<std::size_t>(frequency::size)>
frequency_threads_;
std::unordered_map<std::string, polling_item> items_;
std::unique_ptr<std::thread> low_frequency_thread_;
std::mutex mutex_;
std::condition_variable notify_;
std::unique_ptr<std::thread> second_frequency_thread_;
std::mutex start_stop_mutex_;
stop_type stop_requested_ = false;
stop_type stop_requested_{false};
private:
void frequency_thread(std::function<std::uint32_t()> get_frequency_seconds,

View File

@ -0,0 +1,116 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_UTILS_TASKS_HPP_
#define REPERTORY_INCLUDE_UTILS_TASKS_HPP_
#include "common.hpp"
namespace repertory {
class app_config;
class tasks final {
public:
struct task final {
std::function<void(const stop_type &task_stopped)> action;
};
class i_task {
INTERFACE_SETUP(i_task);
public:
virtual auto wait() const -> bool = 0;
};
using task_ptr = std::shared_ptr<i_task>;
private:
class task_wait final : public i_task {
public:
task_wait() = default;
task_wait(const task_wait &) = delete;
task_wait(task_wait &&) = delete;
~task_wait() override { set_result(false); }
auto operator=(const task_wait &) -> task_wait & = delete;
auto operator=(task_wait &&) -> task_wait & = delete;
private:
bool complete{false};
mutable std::mutex mtx;
mutable std::condition_variable notify;
bool success{false};
public:
void set_result(bool result);
auto wait() const -> bool override;
};
struct scheduled_task final {
task item;
std::shared_ptr<task_wait> wait{
std::make_shared<task_wait>(),
};
};
public:
tasks(const tasks &) = delete;
tasks(tasks &&) = delete;
auto operator=(const tasks &) -> tasks & = delete;
auto operator=(tasks &&) -> tasks & = delete;
private:
tasks() = default;
~tasks() { stop(); }
private:
static tasks instance_;
public:
static auto instance() -> tasks & { return instance_; }
private:
app_config *config_{nullptr};
std::atomic<std::uint64_t> count_{0U};
std::mutex mutex_;
std::condition_variable notify_;
std::mutex start_stop_mutex_;
stop_type stop_requested_{false};
std::vector<std::unique_ptr<std::jthread>> task_threads_;
std::deque<scheduled_task> tasks_;
private:
void task_thread();
public:
auto schedule(task item) -> task_ptr;
void start(app_config *config);
void stop();
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_UTILS_TASKS_HPP_
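
For orientation only, a minimal sketch of how the tasks API declared above might be used, assuming tasks::start() has already been called with a valid app_config and that stop_type is convertible to bool:

// sketch: schedule work and wait for completion via the returned task_ptr
auto waiter = repertory::tasks::instance().schedule({
    [](const repertory::stop_type &task_stopped) {
      if (not task_stopped) {
        // perform the scheduled work here
      }
    },
});
bool completed = waiter->wait(); // blocks until the task's result is set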

View File

@ -24,15 +24,25 @@
#include "types/repertory.hpp"
namespace repertory::utils {
namespace repertory {
class app_config;
namespace utils {
void calculate_allocation_size(bool directory, std::uint64_t file_size,
UINT64 allocation_size,
std::string &allocation_meta_size);
[[nodiscard]] auto
create_volume_label(const provider_type &prov) -> std::string;
create_rocksdb(const app_config &cfg, const std::string &name,
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
-> std::unique_ptr<rocksdb::TransactionDB>;
[[nodiscard]] auto create_volume_label(const provider_type &prov)
-> std::string;
[[nodiscard]] auto get_attributes_from_meta(const api_meta_map &meta) -> DWORD;
} // namespace repertory::utils
} // namespace utils
} // namespace repertory
#endif // REPERTORY_INCLUDE_UTILS_UTILS_HPP_

File diff suppressed because it is too large

View File

@ -30,7 +30,7 @@ namespace repertory {
void client_pool::pool::execute(
std::uint64_t thread_id, const worker_callback &worker,
const worker_complete_callback &worker_complete) {
const auto index = thread_id % pool_queues_.size();
auto index = thread_id % pool_queues_.size();
auto job = std::make_shared<work_item>(worker, worker_complete);
auto &pool_queue = pool_queues_[index];
@ -51,7 +51,7 @@ client_pool::pool::pool(std::uint8_t pool_size) {
for (std::size_t i = 0U; i < pool_queues_.size(); i++) {
pool_threads_.emplace_back([this]() {
const auto thread_index = thread_index_++;
auto thread_index = thread_index_++;
auto &pool_queue = pool_queues_[thread_index];
auto &queue = pool_queue->queue;
@ -74,7 +74,7 @@ client_pool::pool::pool(std::uint8_t pool_size) {
queue_lock.unlock();
try {
const auto result = item->work();
auto result = item->work();
item->work_complete(result);
} catch (const std::exception &e) {
item->work_complete(utils::from_api_error(api_error::error));

View File

@ -36,8 +36,8 @@ void packet::clear() {
}
auto packet::decode(std::string &data) -> packet::error_type {
const auto *str = reinterpret_cast<const char *>(&buffer_[decode_offset_]);
const auto length = strnlen(str, buffer_.size() - decode_offset_);
const auto *str = reinterpret_cast<const char *>(&buffer_.at(decode_offset_));
auto length = strnlen(str, buffer_.size() - decode_offset_);
data = std::string(str, length);
decode_offset_ += (length + 1);
@ -46,7 +46,7 @@ auto packet::decode(std::string &data) -> packet::error_type {
auto packet::decode(std::wstring &data) -> packet::error_type {
std::string utf8_string;
const auto ret = decode(utf8_string);
auto ret = decode(utf8_string);
if (ret == 0) {
data = utils::string::from_utf8(utf8_string);
}
@ -60,7 +60,7 @@ auto packet::decode(void *&ptr) -> packet::error_type {
auto packet::decode(void *buffer, std::size_t size) -> packet::error_type {
if (size != 0U) {
const auto read_size =
auto read_size =
utils::calculate_read_size(buffer_.size(), size, decode_offset_);
if (read_size == size) {
memcpy(buffer, &buffer_[decode_offset_], size);
@ -76,7 +76,7 @@ auto packet::decode(void *buffer, std::size_t size) -> packet::error_type {
}
auto packet::decode(std::int8_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -84,7 +84,7 @@ auto packet::decode(std::int8_t &val) -> packet::error_type {
}
auto packet::decode(std::uint8_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -92,7 +92,7 @@ auto packet::decode(std::uint8_t &val) -> packet::error_type {
}
auto packet::decode(std::int16_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -100,7 +100,7 @@ auto packet::decode(std::int16_t &val) -> packet::error_type {
}
auto packet::decode(std::uint16_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -108,7 +108,7 @@ auto packet::decode(std::uint16_t &val) -> packet::error_type {
}
auto packet::decode(std::int32_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -116,7 +116,7 @@ auto packet::decode(std::int32_t &val) -> packet::error_type {
}
auto packet::decode(std::uint32_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -124,7 +124,7 @@ auto packet::decode(std::uint32_t &val) -> packet::error_type {
}
auto packet::decode(std::int64_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -132,7 +132,7 @@ auto packet::decode(std::int64_t &val) -> packet::error_type {
}
auto packet::decode(std::uint64_t &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -140,7 +140,7 @@ auto packet::decode(std::uint64_t &val) -> packet::error_type {
}
auto packet::decode(remote::setattr_x &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.acctime);
boost::endian::big_to_native_inplace(val.bkuptime);
@ -159,7 +159,7 @@ auto packet::decode(remote::setattr_x &val) -> packet::error_type {
}
auto packet::decode(remote::stat &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.st_mode);
boost::endian::big_to_native_inplace(val.st_nlink);
@ -179,7 +179,7 @@ auto packet::decode(remote::stat &val) -> packet::error_type {
}
auto packet::decode(remote::statfs &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.f_bavail);
boost::endian::big_to_native_inplace(val.f_bfree);
@ -200,7 +200,7 @@ auto packet::decode(remote::statfs_x &val) -> packet::error_type {
}
auto packet::decode(remote::file_info &val) -> packet::error_type {
const auto ret = decode(&val, sizeof(val));
auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.AllocationSize);
boost::endian::big_to_native_inplace(val.ChangeTime);
@ -268,7 +268,7 @@ void packet::encode(const void *buffer, std::size_t size, bool should_reserve) {
}
void packet::encode(std::string_view str) {
const auto len = str.size();
auto len = str.size();
buffer_.reserve(len + 1 + buffer_.size());
encode(str.data(), len, false);
buffer_.emplace_back(0);
@ -401,7 +401,7 @@ void packet::encode_top(const void *buffer, std::size_t size,
}
void packet::encode_top(std::string_view str) {
const auto len = str.size();
auto len = str.size();
buffer_.reserve(len + 1U + buffer_.size());
encode_top(str.data(), len, false);
buffer_.insert(buffer_.begin() + static_cast<std::int32_t>(len), 0);
@ -531,7 +531,7 @@ void packet::encrypt(std::string_view token) {
}
}
void packet::transfer_into(data_buffer &buffer) {
void packet::to_buffer(data_buffer &buffer) {
buffer = std::move(buffer_);
buffer_ = data_buffer();
decode_offset_ = 0;

View File

@ -38,18 +38,8 @@ E_SIMPLE2(packet_client_timeout, error, true,
);
// clang-format on
packet_client::packet_client(std::string host_name_or_ip,
std::uint8_t max_connections, std::uint16_t port,
std::uint16_t receive_timeout,
std::uint16_t send_timeout,
std::string encryption_token)
: host_name_or_ip_(std::move(host_name_or_ip)),
max_connections_(max_connections == 0U ? 20U : max_connections),
port_(port),
receive_timeout_(receive_timeout),
send_timeout_(send_timeout),
encryption_token_(std::move(encryption_token)),
unique_id_(utils::create_uuid_string()) {}
packet_client::packet_client(remote::remote_config cfg)
: cfg_(std::move(cfg)), unique_id_(utils::create_uuid_string()) {}
packet_client::~packet_client() {
allow_connections_ = false;
@ -85,7 +75,7 @@ void packet_client::connect(client &cli) {
cli.socket.set_option(boost::asio::socket_base::linger(false, 0));
packet response;
const auto res = read_packet(cli, response);
auto res = read_packet(cli, response);
if (res != 0) {
throw std::runtime_error(std::to_string(res));
}
@ -95,27 +85,27 @@ void packet_client::connect(client &cli) {
}
auto packet_client::get_client() -> std::shared_ptr<packet_client::client> {
std::shared_ptr<client> ret;
unique_mutex_lock clients_lock(clients_mutex_);
if (allow_connections_) {
if (clients_.empty()) {
clients_lock.unlock();
ret = std::make_shared<client>(io_context_);
connect(*ret);
} else {
ret = clients_[0U];
utils::collection::remove_element(clients_, ret);
clients_lock.unlock();
}
if (not allow_connections_) {
return nullptr;
}
return ret;
if (clients_.empty()) {
clients_lock.unlock();
auto cli = std::make_shared<client>(io_context_);
connect(*cli);
return cli;
}
auto cli = clients_.at(0U);
utils::collection::remove_element(clients_, cli);
return cli;
}
void packet_client::put_client(std::shared_ptr<client> &cli) {
mutex_lock clientsLock(clients_mutex_);
if (clients_.size() < max_connections_) {
if (clients_.size() < cfg_.max_connections) {
clients_.emplace_back(cli);
}
}
@ -126,7 +116,7 @@ auto packet_client::read_packet(client &cli, packet &response)
const auto read_buffer = [&]() {
std::uint32_t offset{};
while (offset < buffer.size()) {
const auto bytes_read = boost::asio::read(
auto bytes_read = boost::asio::read(
cli.socket,
boost::asio::buffer(&buffer[offset], buffer.size() - offset));
if (bytes_read <= 0) {
@ -137,14 +127,14 @@ auto packet_client::read_packet(client &cli, packet &response)
};
read_buffer();
const auto size = boost::endian::big_to_native(
auto size = boost::endian::big_to_native(
*reinterpret_cast<std::uint32_t *>(buffer.data()));
buffer.resize(size);
read_buffer();
response = std::move(buffer);
auto ret = response.decrypt(encryption_token_);
auto ret = response.decrypt(cfg_.encryption_token);
if (ret == 0) {
ret = response.decode(cli.nonce);
}
@ -153,10 +143,13 @@ auto packet_client::read_packet(client &cli, packet &response)
}
void packet_client::resolve() {
if (resolve_results_.empty()) {
resolve_results_ = tcp::resolver(io_context_)
.resolve({host_name_or_ip_, std::to_string(port_)});
if (not resolve_results_.empty()) {
return;
}
resolve_results_ =
tcp::resolver(io_context_)
.resolve(cfg_.host_name_or_ip, std::to_string(cfg_.api_port));
}
auto packet_client::send(std::string_view method, std::uint32_t &service_flags)
@ -184,14 +177,14 @@ auto packet_client::send(std::string_view method, packet &request,
request.encode_top(PACKET_SERVICE_FLAGS);
request.encode_top(std::string{project_get_version()});
static const std::uint8_t max_attempts{5U};
static constexpr const std::uint8_t max_attempts{5U};
for (std::uint8_t i = 1U;
allow_connections_ && not success && (i <= max_attempts); i++) {
auto current_client = get_client();
if (current_client) {
try {
request.encode_top(current_client->nonce);
request.encrypt(encryption_token_);
request.encrypt(cfg_.encryption_token);
timeout request_timeout(
[method, current_client]() {
@ -199,11 +192,11 @@ auto packet_client::send(std::string_view method, packet &request,
"request", std::string{method});
packet_client::close(*current_client);
},
std::chrono::seconds(send_timeout_));
std::chrono::milliseconds(cfg_.send_timeout_ms));
std::uint32_t offset{};
while (offset < request.get_size()) {
const auto bytes_written = boost::asio::write(
auto bytes_written = boost::asio::write(
current_client->socket,
boost::asio::buffer(&request[offset],
request.get_size() - offset));
@ -221,7 +214,7 @@ auto packet_client::send(std::string_view method, packet &request,
"response", std::string{method});
packet_client::close(*current_client);
},
std::chrono::seconds(receive_timeout_));
std::chrono::milliseconds(cfg_.recv_timeout_ms));
ret = read_packet(*current_client, response);
response_timeout.disable();

View File

@ -68,11 +68,11 @@ void packet_server::add_client(connection &conn, const std::string &client_id) {
void packet_server::initialize(const uint16_t &port, uint8_t pool_size) {
REPERTORY_USES_FUNCTION_NAME();
pool_size = std::max(uint8_t(1U), pool_size);
pool_size = std::max(std::uint8_t(1U), pool_size);
server_thread_ = std::make_unique<std::thread>([this, port, pool_size]() {
tcp::acceptor acceptor(io_context_);
try {
const auto endpoint = tcp::endpoint(tcp::v4(), port);
auto endpoint = tcp::endpoint(tcp::v4(), port);
acceptor.open(endpoint.protocol());
acceptor.set_option(socket_base::reuse_address(true));
acceptor.bind(endpoint);
@ -148,7 +148,7 @@ void packet_server::read_packet(std::shared_ptr<connection> conn,
const auto read_buffer = [&]() {
std::uint32_t offset{};
while (offset < conn->buffer.size()) {
const auto bytes_read = boost::asio::read(
auto bytes_read = boost::asio::read(
conn->socket, boost::asio::buffer(&conn->buffer[offset],
conn->buffer.size() - offset));
if (bytes_read <= 0) {
@ -244,7 +244,7 @@ void packet_server::send_response(std::shared_ptr<connection> conn,
response.encode_top(PACKET_SERVICE_FLAGS);
response.encode_top(conn->nonce);
response.encrypt(encryption_token_);
response.transfer_into(conn->buffer);
response.to_buffer(conn->buffer);
boost::asio::async_write(
conn->socket, boost::asio::buffer(conn->buffer),

View File

@ -0,0 +1,38 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/file_db.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_file_db.hpp"
#include "db/impl/sqlite_file_db.hpp"
namespace repertory {
auto create_file_db(const app_config &cfg) -> std::unique_ptr<i_file_db> {
switch (cfg.get_database_type()) {
case database_type::sqlite:
return std::make_unique<sqlite_file_db>(cfg);
default:
return std::make_unique<rdb_file_db>(cfg);
}
}
} // namespace repertory
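
As an illustration only, callers of the factory above stay decoupled from the concrete backend; a minimal sketch, assuming cfg is an initialized app_config:

// sketch: backend selection is driven entirely by cfg.get_database_type()
std::unique_ptr<repertory::i_file_db> file_db = repertory::create_file_db(cfg);
// file_db now points at sqlite_file_db or rdb_file_db; both honor i_file_db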

View File

@ -0,0 +1,40 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/file_mgr_db.hpp"
#include "app_config.hpp"
#include "db/i_file_mgr_db.hpp"
#include "db/impl/rdb_file_mgr_db.hpp"
#include "db/impl/sqlite_file_mgr_db.hpp"
namespace repertory {
auto create_file_mgr_db(const app_config &cfg)
-> std::unique_ptr<i_file_mgr_db> {
switch (cfg.get_database_type()) {
case database_type::sqlite:
return std::make_unique<sqlite_file_mgr_db>(cfg);
default:
return std::make_unique<rdb_file_mgr_db>(cfg);
}
}
} // namespace repertory

View File

@ -0,0 +1,390 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/rdb_file_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
#include "utils/utils.hpp"
namespace repertory {
rdb_file_db::rdb_file_db(const app_config &cfg) : cfg_(cfg) {
create_or_open(false);
}
rdb_file_db::~rdb_file_db() { db_.reset(); }
void rdb_file_db::create_or_open(bool clear) {
db_.reset();
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
rocksdb::ColumnFamilyOptions());
families.emplace_back("file", rocksdb::ColumnFamilyOptions());
families.emplace_back("path", rocksdb::ColumnFamilyOptions());
families.emplace_back("source", rocksdb::ColumnFamilyOptions());
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = utils::create_rocksdb(cfg_, "file", families, handles, clear);
std::size_t idx{};
directory_family_ = handles.at(idx++);
file_family_ = handles.at(idx++);
path_family_ = handles.at(idx++);
source_family_ = handles.at(idx++);
}
auto rdb_file_db::add_directory(const std::string &api_path,
const std::string &source_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string existing_source_path;
auto result = get_directory_source_path(api_path, existing_source_path);
if (result != api_error::success &&
result != api_error::directory_not_found) {
return result;
}
return perform_action(
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
if (not existing_source_path.empty()) {
auto res = remove_item(api_path, existing_source_path, txn);
if (not res.ok() && not res.IsNotFound()) {
return res;
}
}
auto res = txn->Put(directory_family_, api_path, source_path);
if (not res.ok()) {
return res;
}
res = txn->Put(path_family_, api_path, source_path);
if (not res.ok()) {
return res;
}
return txn->Put(source_family_, source_path, api_path);
});
}
auto rdb_file_db::add_or_update_file(const i_file_db::file_data &data)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string existing_source_path;
auto result = get_file_source_path(data.api_path, existing_source_path);
if (result != api_error::success && result != api_error::item_not_found) {
return result;
}
return perform_action(
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
if (not existing_source_path.empty()) {
auto res = remove_item(data.api_path, existing_source_path, txn);
if (not res.ok() && not res.IsNotFound()) {
return res;
}
}
json json_data = {
{"file_size", data.file_size},
{"iv", data.iv_list},
{"source_path", data.source_path},
};
auto res = txn->Put(file_family_, data.api_path, json_data.dump());
if (not res.ok()) {
return res;
}
res = txn->Put(path_family_, data.api_path, data.source_path);
if (not res.ok()) {
return res;
}
return txn->Put(source_family_, data.source_path, data.api_path);
});
}
void rdb_file_db::clear() { create_or_open(true); }
auto rdb_file_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator> {
return std::shared_ptr<rocksdb::Iterator>(
db_->NewIterator(rocksdb::ReadOptions{}, family));
}
auto rdb_file_db::count() const -> std::uint64_t {
std::uint64_t ret{};
auto iter = create_iterator(source_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
++ret;
}
return ret;
}
auto rdb_file_db::get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
});
}
auto rdb_file_db::get_directory_api_path(
const std::string &source_path, std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
if (not res.ok()) {
return res;
}
std::string value;
return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
&value);
});
if (result != api_error::success) {
api_path.clear();
}
return result == api_error::item_not_found ? api_error::directory_not_found
: result;
}
auto rdb_file_db::get_directory_source_path(
const std::string &api_path, std::string &source_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
&source_path);
});
return result == api_error::item_not_found ? api_error::directory_not_found
: result;
}
auto rdb_file_db::get_file_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
if (not res.ok()) {
return res;
}
std::string value;
return db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
});
if (result != api_error::success) {
api_path.clear();
}
return result;
}
auto rdb_file_db::get_file_data(const std::string &api_path,
i_file_db::file_data &data) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
std::string value;
auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
if (not res.ok()) {
return res;
}
auto json_data = json::parse(value);
data.api_path = api_path;
data.file_size = json_data.at("file_size").get<std::uint64_t>();
data.iv_list =
json_data.at("iv")
.get<std::vector<
std::array<unsigned char,
crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
data.source_path = json_data.at("source_path").get<std::string>();
return res;
});
return result;
}
auto rdb_file_db::get_file_source_path(
const std::string &api_path, std::string &source_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
std::string value;
auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
if (not res.ok()) {
return res;
}
auto json_data = json::parse(value);
source_path = json_data.at("source_path").get<std::string>();
return res;
});
return result;
}
auto rdb_file_db::get_item_list() const -> std::vector<i_file_db::file_info> {
std::vector<i_file_db::file_info> ret{};
{
auto iter = create_iterator(directory_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.emplace_back(i_file_db::file_info{
iter->key().ToString(),
true,
iter->value().ToString(),
});
}
}
{
auto iter = create_iterator(file_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto json_data = json::parse(iter->value().ToString());
ret.emplace_back(i_file_db::file_info{
iter->key().ToString(),
false,
json_data.at("source_path").get<std::string>(),
});
}
}
return ret;
}
auto rdb_file_db::get_source_path(const std::string &api_path,
std::string &source_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, path_family_, api_path,
&source_path);
});
}
auto rdb_file_db::perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action)
-> api_error {
auto res = action();
if (res.ok()) {
return api_error::success;
}
if (not res.IsNotFound()) {
utils::error::raise_error(function_name, res.ToString());
}
return res.IsNotFound() ? api_error::item_not_found : api_error::error;
}
auto rdb_file_db::perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error {
std::unique_ptr<rocksdb::Transaction> txn{
db_->BeginTransaction(rocksdb::WriteOptions{},
rocksdb::TransactionOptions{}),
};
try {
auto res = action(txn.get());
if (res.ok()) {
auto commit_res = txn->Commit();
if (commit_res.ok()) {
return api_error::success;
}
utils::error::raise_error(function_name,
"rocksdb commit failed|" + res.ToString());
return api_error::error;
}
utils::error::raise_error(function_name,
"rocksdb action failed|" + res.ToString());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to handle rocksdb action");
}
auto rollback_res = txn->Rollback();
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
rollback_res.ToString());
return api_error::error;
}
auto rdb_file_db::remove_item(const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string source_path;
auto res = get_source_path(api_path, source_path);
if (res != api_error::success) {
return res;
}
return perform_action(function_name,
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
return remove_item(api_path, source_path, txn);
});
}
auto rdb_file_db::remove_item(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn) -> rocksdb::Status {
auto res = txn->Delete(source_family_, source_path);
if (not res.ok()) {
return res;
}
res = txn->Delete(path_family_, api_path);
if (not res.ok()) {
return res;
}
res = txn->Delete(directory_family_, api_path);
if (not res.ok()) {
return res;
}
return txn->Delete(file_family_, api_path);
}
} // namespace repertory

View File

@ -0,0 +1,327 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/rdb_file_mgr_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
#include "utils/utils.hpp"
namespace repertory {
rdb_file_mgr_db::rdb_file_mgr_db(const app_config &cfg) : cfg_(cfg) {
create_or_open(false);
}
rdb_file_mgr_db::~rdb_file_mgr_db() { db_.reset(); }
void rdb_file_mgr_db::create_or_open(bool clear) {
db_.reset();
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
rocksdb::ColumnFamilyOptions());
families.emplace_back("upload_active", rocksdb::ColumnFamilyOptions());
families.emplace_back("upload", rocksdb::ColumnFamilyOptions());
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = utils::create_rocksdb(cfg_, "file_mgr", families, handles, clear);
std::size_t idx{};
resume_family_ = handles.at(idx++);
upload_active_family_ = handles.at(idx++);
upload_family_ = handles.at(idx++);
}
auto rdb_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &entry](rocksdb::Transaction *txn) -> rocksdb::Status {
return add_resume(entry, txn);
});
}
auto rdb_file_mgr_db::add_resume(const resume_entry &entry,
rocksdb::Transaction *txn) -> rocksdb::Status {
REPERTORY_USES_FUNCTION_NAME();
auto data = json({
{"chunk_size", entry.chunk_size},
{"read_state", utils::string::from_dynamic_bitset(entry.read_state)},
{"source_path", entry.source_path},
});
return txn->Put(resume_family_, entry.api_path, data.dump());
}
auto rdb_file_mgr_db::add_upload(const upload_entry &entry) -> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &entry](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(upload_family_,
utils::string::zero_pad(std::to_string(++id_), 20U) +
'|' + entry.api_path,
entry.source_path);
});
}
auto rdb_file_mgr_db::add_upload_active(const upload_active_entry &entry)
-> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &entry](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(upload_active_family_, entry.api_path,
entry.source_path);
});
}
void rdb_file_mgr_db::clear() { create_or_open(true); }
auto rdb_file_mgr_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator> {
return std::shared_ptr<rocksdb::Iterator>(
db_->NewIterator(rocksdb::ReadOptions(), family));
}
auto rdb_file_mgr_db::get_next_upload() const -> std::optional<upload_entry> {
auto iter = create_iterator(upload_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto parts = utils::string::split(iter->key().ToString(), '|', false);
parts.erase(parts.begin());
auto api_path = utils::string::join(parts, '|');
return upload_entry{
api_path,
iter->value().ToString(),
};
}
return std::nullopt;
}
auto rdb_file_mgr_db::get_resume_list() const -> std::vector<resume_entry> {
std::vector<resume_entry> ret;
auto iter = create_iterator(resume_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto data = json::parse(iter->value().ToString());
ret.emplace_back(resume_entry{
iter->key().ToString(),
data.at("chunk_size").get<std::uint64_t>(),
utils::string::to_dynamic_bitset(
data.at("read_state").get<std::string>()),
data.at("source_path").get<std::string>(),
});
}
return ret;
}
auto rdb_file_mgr_db::get_upload(const std::string &api_path) const
-> std::optional<upload_entry> {
auto iter = create_iterator(upload_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto parts = utils::string::split(iter->key().ToString(), '|', false);
parts.erase(parts.begin());
if (api_path != utils::string::join(parts, '|')) {
continue;
}
return upload_entry{
api_path,
iter->value().ToString(),
};
}
return std::nullopt;
}
auto rdb_file_mgr_db::get_upload_active_list() const
-> std::vector<upload_active_entry> {
std::vector<upload_active_entry> ret;
auto iter = create_iterator(upload_active_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.emplace_back(upload_active_entry{
iter->key().ToString(),
iter->value().ToString(),
});
}
return ret;
}
auto rdb_file_mgr_db::perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action)
-> bool {
try {
auto res = action();
if (not res.ok()) {
utils::error::raise_error(function_name, res.ToString());
}
return res.ok();
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex);
}
return false;
}
auto rdb_file_mgr_db::perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool {
std::unique_ptr<rocksdb::Transaction> txn{
db_->BeginTransaction(rocksdb::WriteOptions{},
rocksdb::TransactionOptions{}),
};
try {
auto res = action(txn.get());
if (res.ok()) {
auto commit_res = txn->Commit();
if (commit_res.ok()) {
return true;
}
utils::error::raise_error(function_name,
"rocksdb commit failed|" + res.ToString());
return false;
}
utils::error::raise_error(function_name,
"rocksdb action failed|" + res.ToString());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to handle rocksdb action");
}
auto rollback_res = txn->Rollback();
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
rollback_res.ToString());
return false;
}
auto rdb_file_mgr_db::remove_resume(const std::string &api_path) -> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &api_path](rocksdb::Transaction *txn) -> rocksdb::Status {
return remove_resume(api_path, txn);
});
}
auto rdb_file_mgr_db::remove_resume(
const std::string &api_path, rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Delete(resume_family_, api_path);
}
auto rdb_file_mgr_db::remove_upload(const std::string &api_path) -> bool {
REPERTORY_USES_FUNCTION_NAME();
auto iter = create_iterator(upload_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto parts = utils::string::split(iter->key().ToString(), '|', false);
parts.erase(parts.begin());
if (api_path != utils::string::join(parts, '|')) {
continue;
}
return perform_action(
function_name,
[this, &iter](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Delete(upload_family_, iter->key());
});
}
return true;
}
auto rdb_file_mgr_db::remove_upload_active(const std::string &api_path)
-> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &api_path](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Delete(upload_active_family_, api_path);
});
}
auto rdb_file_mgr_db::rename_resume(const std::string &from_api_path,
const std::string &to_api_path) -> bool {
REPERTORY_USES_FUNCTION_NAME();
bool not_found{false};
std::string value;
auto res = perform_action(
function_name,
[this, &from_api_path, &not_found, &value]() -> rocksdb::Status {
auto result = db_->Get(rocksdb::ReadOptions{}, from_api_path, &value);
not_found = result.IsNotFound();
return result;
});
if (not_found) {
return true;
}
if (not res) {
return false;
}
if (value.empty()) {
return true;
}
auto data = json::parse(value);
resume_entry entry{
to_api_path,
data.at("chunk_size").get<std::uint64_t>(),
utils::string::to_dynamic_bitset(
data.at("read_state").get<std::string>()),
data.at("source_path").get<std::string>(),
};
return perform_action(function_name,
[this, &entry, &from_api_path](
rocksdb::Transaction *txn) -> rocksdb::Status {
auto txn_res = remove_resume(from_api_path, txn);
if (not txn_res.ok()) {
return txn_res;
}
return add_resume(entry, txn);
});
}
} // namespace repertory

View File

@ -0,0 +1,511 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/rdb_meta_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
#include "utils/utils.hpp"
namespace repertory {
rdb_meta_db::rdb_meta_db(const app_config &cfg) : cfg_(cfg) {
create_or_open(false);
}
rdb_meta_db::~rdb_meta_db() { db_.reset(); }
void rdb_meta_db::create_or_open(bool clear) {
db_.reset();
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
rocksdb::ColumnFamilyOptions());
families.emplace_back("pinned", rocksdb::ColumnFamilyOptions());
families.emplace_back("size", rocksdb::ColumnFamilyOptions());
families.emplace_back("source", rocksdb::ColumnFamilyOptions());
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = utils::create_rocksdb(cfg_, "provider_meta", families, handles, clear);
std::size_t idx{};
meta_family_ = handles.at(idx++);
pinned_family_ = handles.at(idx++);
size_family_ = handles.at(idx++);
source_family_ = handles.at(idx++);
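// source_family_ is a reverse index mapping source_path -> api_path (see get_api_path)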
}
void rdb_meta_db::clear() { create_or_open(true); }
auto rdb_meta_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator> {
return std::shared_ptr<rocksdb::Iterator>(
db_->NewIterator(rocksdb::ReadOptions{}, family));
}
auto rdb_meta_db::get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (source_path.empty()) {
return api_error::item_not_found;
}
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
});
}
auto rdb_meta_db::get_api_path_list() const -> std::vector<std::string> {
std::vector<std::string> ret;
auto iter = create_iterator(meta_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.push_back(iter->key().ToString());
}
return ret;
}
auto rdb_meta_db::get_item_meta_json(const std::string &api_path,
json &json_data) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
try {
json_data.clear();
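// assemble the meta JSON from three column families: the base JSON blob, the pinned flag, and the size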
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, meta_family_, api_path, &value);
});
if (res != api_error::success) {
return res;
}
if (not value.empty()) {
json_data = json::parse(value);
}
}
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, pinned_family_, api_path,
&value);
});
if (res != api_error::success) {
return res;
}
if (not value.empty()) {
json_data[META_PINNED] = value;
}
}
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, size_family_, api_path, &value);
});
if (res != api_error::success) {
return res;
}
if (not value.empty()) {
json_data[META_SIZE] = value;
}
}
return json_data.empty() ? api_error::item_not_found : api_error::success;
} catch (const std::exception &e) {
utils::error::raise_api_path_error(function_name, api_path, e,
"failed to get item meta");
}
return api_error::error;
}
auto rdb_meta_db::get_item_meta(const std::string &api_path,
api_meta_map &meta) const -> api_error {
json json_data;
auto ret = get_item_meta_json(api_path, json_data);
if (ret != api_error::success) {
return ret;
}
for (auto it = json_data.begin(); it != json_data.end(); ++it) {
meta[it.key()] = it.value().get<std::string>();
}
return api_error::success;
}
auto rdb_meta_db::get_item_meta(const std::string &api_path,
const std::string &key,
std::string &value) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (key == META_PINNED) {
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, pinned_family_, api_path, &value);
});
}
if (key == META_SIZE) {
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, size_family_, api_path, &value);
});
}
json json_data;
auto ret = get_item_meta_json(api_path, json_data);
if (ret != api_error::success) {
return ret;
}
if (json_data.find(key) != json_data.end()) {
value = json_data[key].get<std::string>();
}
return api_error::success;
}
auto rdb_meta_db::get_pinned_files() const -> std::vector<std::string> {
std::vector<std::string> ret;
auto iter = create_iterator(pinned_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
if (not utils::string::to_bool(iter->value().ToString())) {
continue;
}
ret.push_back(iter->key().ToString());
}
return ret;
}
auto rdb_meta_db::get_total_item_count() const -> std::uint64_t {
std::uint64_t ret{};
auto iter = create_iterator(meta_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
++ret;
}
return ret;
}
auto rdb_meta_db::get_total_size() const -> std::uint64_t {
std::uint64_t ret{};
auto iter = create_iterator(size_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret += utils::string::to_uint64(iter->value().ToString());
}
return ret;
}
auto rdb_meta_db::perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action)
-> api_error {
auto res = action();
if (res.ok()) {
return api_error::success;
}
if (not res.IsNotFound()) {
utils::error::raise_error(function_name, res.ToString());
}
return res.IsNotFound() ? api_error::item_not_found : api_error::error;
}
auto rdb_meta_db::perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error {
std::unique_ptr<rocksdb::Transaction> txn{
db_->BeginTransaction(rocksdb::WriteOptions{},
rocksdb::TransactionOptions{}),
};
try {
auto res = action(txn.get());
if (res.ok()) {
auto commit_res = txn->Commit();
if (commit_res.ok()) {
return api_error::success;
}
utils::error::raise_error(function_name,
"rocksdb commit failed|" + res.ToString());
return api_error::error;
}
utils::error::raise_error(function_name,
"rocksdb action failed|" + res.ToString());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to handle rocksdb action");
}
auto rollback_res = txn->Rollback();
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
rollback_res.ToString());
return api_error::error;
}
void rdb_meta_db::remove_api_path(const std::string &api_path) {
REPERTORY_USES_FUNCTION_NAME();
std::string source_path;
auto res = get_item_meta(api_path, META_SOURCE, source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, res,
"failed to get source path");
}
res = perform_action(function_name,
[this, &api_path, &source_path](
rocksdb::Transaction *txn) -> rocksdb::Status {
return remove_api_path(api_path, source_path, txn);
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, res,
"failed to remove api path");
}
}
auto rdb_meta_db::remove_api_path(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn)
-> rocksdb::Status {
auto txn_res = txn->Delete(pinned_family_, api_path);
if (not txn_res.ok()) {
return txn_res;
}
txn_res = txn->Delete(size_family_, api_path);
if (not txn_res.ok()) {
return txn_res;
}
if (not source_path.empty()) {
txn_res = txn->Delete(source_family_, source_path);
if (not txn_res.ok()) {
return txn_res;
}
}
return txn->Delete(meta_family_, api_path);
}
auto rdb_meta_db::remove_item_meta(const std::string &api_path,
const std::string &key) -> api_error {
if (key == META_DIRECTORY || key == META_PINNED || key == META_SIZE ||
key == META_SOURCE) {
// TODO log warning for unsupported attributes
return api_error::success;
}
json json_data;
auto res = get_item_meta_json(api_path, json_data);
if (res != api_error::success) {
return res;
}
json_data.erase(key);
return update_item_meta(api_path, json_data);
}
auto rdb_meta_db::rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
json json_data;
auto res = get_item_meta_json(from_api_path, json_data);
if (res != api_error::success) {
return res;
}
return perform_action(
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
auto txn_res = remove_api_path(
from_api_path, json_data[META_SOURCE].get<std::string>(), txn);
if (not txn_res.ok()) {
return txn_res;
}
rocksdb::Status status;
[[maybe_unused]] auto api_res =
update_item_meta(to_api_path, json_data, txn, &status);
return status;
});
}
auto rdb_meta_db::set_item_meta(const std::string &api_path,
const std::string &key,
const std::string &value) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (key == META_PINNED) {
return perform_action(function_name,
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(pinned_family_, api_path, value);
});
}
if (key == META_SIZE) {
return perform_action(function_name,
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(size_family_, api_path, value);
});
}
json json_data;
auto res = get_item_meta_json(api_path, json_data);
if (res != api_error::success && res != api_error::item_not_found) {
return res;
}
json_data[key] = value;
return update_item_meta(api_path, json_data);
}
auto rdb_meta_db::set_item_meta(const std::string &api_path,
const api_meta_map &meta) -> api_error {
json json_data;
auto res = get_item_meta_json(api_path, json_data);
if (res != api_error::success && res != api_error::item_not_found) {
return res;
}
for (const auto &data : meta) {
json_data[data.first] = data.second;
}
return update_item_meta(api_path, json_data);
}
auto rdb_meta_db::update_item_meta(const std::string &api_path, json json_data,
rocksdb::Transaction *base_txn,
rocksdb::Status *status) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
try {
if (not json_data.contains(META_PINNED)) {
json_data[META_PINNED] = utils::string::from_bool(false);
}
if (not json_data.contains(META_SIZE)) {
json_data[META_SIZE] = "0";
}
if (not json_data.contains(META_SOURCE)) {
json_data[META_SOURCE] = "";
}
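// directories never carry pinned, size, or source values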
auto directory =
utils::string::to_bool(json_data.at(META_DIRECTORY).get<std::string>());
auto pinned = directory ? false
: utils::string::to_bool(
json_data.at(META_PINNED).get<std::string>());
auto size = directory ? std::uint64_t(0U)
: utils::string::to_uint64(
json_data.at(META_SIZE).get<std::string>());
auto source_path = directory ? std::string("")
: json_data.at(META_SOURCE).get<std::string>();
json_data[META_PINNED] = utils::string::from_bool(pinned);
json_data[META_SIZE] = std::to_string(size);
json_data[META_SOURCE] = source_path;
auto should_del_source{false};
std::string orig_source_path;
if (not directory) {
auto res = get_item_meta(api_path, META_SOURCE, orig_source_path);
if (res != api_error::success && res != api_error::item_not_found) {
return res;
}
should_del_source =
not orig_source_path.empty() && orig_source_path != source_path;
}
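// pinned and size live in their own families, so drop them from the JSON blob before writing the meta family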
json_data.erase(META_PINNED);
json_data.erase(META_SIZE);
const auto set_status = [&status](rocksdb::Status res) -> rocksdb::Status {
if (status != nullptr) {
*status = res;
}
return res;
};
const auto do_transaction =
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
if (should_del_source) {
auto res = set_status(txn->Delete(source_family_, orig_source_path));
if (not res.ok()) {
return res;
}
}
auto res = set_status(
txn->Put(pinned_family_, api_path, utils::string::from_bool(pinned)));
if (not res.ok()) {
return res;
}
res = set_status(txn->Put(size_family_, api_path, std::to_string(size)));
if (not res.ok()) {
return res;
}
if (not source_path.empty()) {
res = set_status(txn->Put(source_family_, source_path, api_path));
if (not res.ok()) {
return res;
}
}
return set_status(txn->Put(meta_family_, api_path, json_data.dump()));
};
if (base_txn == nullptr) {
return perform_action(function_name, do_transaction);
}
auto res = set_status(do_transaction(base_txn));
if (res.ok()) {
return api_error::success;
}
} catch (const std::exception &e) {
utils::error::raise_api_path_error(function_name, api_path, e,
"failed to update item meta");
}
return api_error::error;
}
} // namespace repertory

View File

@ -0,0 +1,334 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/sqlite_file_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
namespace {
const std::string file_table = "file";
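// a single "file" table stores both files and directories, discriminated by the "directory" column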
const std::map<std::string, std::string> sql_create_tables = {
{
{file_table},
{"CREATE TABLE IF NOT EXISTS " + file_table +
"("
"source_path TEXT PRIMARY KEY ASC, "
"api_path TEXT UNIQUE NOT NULL, "
"iv TEXT DEFAULT '' NOT NULL, "
"directory INTEGER NOT NULL, "
"size INTEGER DEFAULT 0 NOT NULL"
");"},
},
};
} // namespace
namespace repertory {
sqlite_file_db::sqlite_file_db(const app_config &cfg) {
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|", db_dir));
}
db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"file.db"}),
sql_create_tables);
}
sqlite_file_db::~sqlite_file_db() { db_.reset(); }
auto sqlite_file_db::add_directory(const std::string &api_path,
const std::string &source_path)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_insert{*db_, file_table}
.column_value("api_path", api_path)
.column_value("directory", 1)
.column_value("source_path", source_path)
.go();
if (result.ok()) {
return api_error::success;
}
utils::error::raise_api_path_error(
function_name, api_path, api_error::error,
fmt::format("failed to add directory|{}", result.get_error_str()));
return api_error::error;
}
auto sqlite_file_db::add_or_update_file(const i_file_db::file_data &data)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result =
utils::db::sqlite::db_insert{*db_, file_table}
.or_replace()
.column_value("api_path", data.api_path)
.column_value("directory", 0)
.column_value("iv", json(data.iv_list).dump())
.column_value("size", static_cast<std::int64_t>(data.file_size))
.column_value("source_path", data.source_path)
.go();
if (result.ok()) {
return api_error::success;
}
utils::error::raise_api_path_error(
function_name, data.api_path, api_error::error,
fmt::format("failed to add file|{}", result.get_error_str()));
return api_error::error;
}
void sqlite_file_db::clear() {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_delete{*db_, file_table}.go();
if (not result.ok()) {
utils::error::raise_error(function_name,
fmt::format("failed to clear file table|{}",
std::to_string(result.get_error())));
}
}
auto sqlite_file_db::count() const -> std::uint64_t {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.count("api_path", "count")
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
return static_cast<std::uint64_t>(
row->get_column("count").get_value<std::int64_t>());
}
return 0U;
}
auto sqlite_file_db::get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("api_path")
.where("source_path")
.equals(source_path)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
api_path = row->get_column("api_path").get_value<std::string>();
return api_error::success;
}
return api_error::item_not_found;
}
auto sqlite_file_db::get_directory_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("api_path")
.where("source_path")
.equals(source_path)
.and_()
.where("directory")
.equals(1)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
api_path = row->get_column("api_path").get_value<std::string>();
return api_error::success;
}
return api_error::directory_not_found;
}
auto sqlite_file_db::get_directory_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("source_path")
.where("api_path")
.equals(api_path)
.and_()
.where("directory")
.equals(1)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
source_path = row->get_column("source_path").get_value<std::string>();
return api_error::success;
}
return api_error::directory_not_found;
}
auto sqlite_file_db::get_file_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("api_path")
.where("source_path")
.equals(source_path)
.and_()
.where("directory")
.equals(0)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
api_path = row->get_column("api_path").get_value<std::string>();
return api_error::success;
}
return api_error::item_not_found;
}
auto sqlite_file_db::get_file_data(const std::string &api_path,
i_file_db::file_data &data) const
-> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("iv")
.column("size")
.column("source_path")
.where("api_path")
.equals(api_path)
.and_()
.where("directory")
.equals(0)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
data.api_path = api_path;
data.file_size = static_cast<std::uint64_t>(
row->get_column("size").get_value<std::int64_t>());
data.source_path = row->get_column("source_path").get_value<std::string>();
auto str_data = row->get_column("iv").get_value<std::string>();
if (not str_data.empty()) {
data.iv_list =
json::parse(str_data)
.get<std::vector<
std::array<unsigned char,
crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
}
return api_error::success;
}
return api_error::item_not_found;
}
auto sqlite_file_db::get_file_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("source_path")
.where("api_path")
.equals(api_path)
.and_()
.where("directory")
.equals(0)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
source_path = row->get_column("source_path").get_value<std::string>();
return api_error::success;
}
return api_error::item_not_found;
}
auto sqlite_file_db::get_item_list() const
-> std::vector<i_file_db::file_info> {
std::vector<i_file_db::file_info> ret;
auto result = utils::db::sqlite::db_select{*db_, file_table}.go();
while (result.has_row()) {
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
ret.emplace_back(i_file_db::file_info{
row->get_column("api_path").get_value<std::string>(),
row->get_column("directory").get_value<std::int64_t>() == 1,
row->get_column("source_path").get_value<std::string>(),
});
}
result.next_row();
}
return ret;
}
auto sqlite_file_db::get_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error {
auto result = utils::db::sqlite::db_select{*db_, file_table}
.column("source_path")
.where("api_path")
.equals(api_path)
.op()
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
source_path = row->get_column("source_path").get_value<std::string>();
return api_error::success;
}
return api_error::item_not_found;
}
auto sqlite_file_db::remove_item(const std::string &api_path) -> api_error {
auto result = utils::db::sqlite::db_delete{*db_, file_table}
.where("api_path")
.equals(api_path)
.go();
return result.ok() ? api_error::success : api_error::error;
}
} // namespace repertory

View File

@ -0,0 +1,275 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/sqlite_file_mgr_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
namespace {
const std::string resume_table = "resume";
const std::string upload_table = "upload";
const std::string upload_active_table = "upload_active";
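// the "upload" table uses an AUTOINCREMENT id so pending uploads can be processed in FIFO order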
const std::map<std::string, std::string> sql_create_tables{
{
{resume_table},
{
"CREATE TABLE IF NOT EXISTS " + resume_table +
"("
"api_path TEXT PRIMARY KEY ASC, "
"chunk_size INTEGER, "
"read_state TEXT, "
"source_path TEXT"
");",
},
},
{
{upload_table},
{
"CREATE TABLE IF NOT EXISTS " + upload_table +
"("
"id INTEGER PRIMARY KEY AUTOINCREMENT, "
"api_path TEXT UNIQUE, "
"source_path TEXT"
");",
},
},
{
{upload_active_table},
{
"CREATE TABLE IF NOT EXISTS " + upload_active_table +
"("
"api_path TEXT PRIMARY KEY ASC, "
"source_path TEXT"
");",
},
},
};
} // namespace
namespace repertory {
sqlite_file_mgr_db::sqlite_file_mgr_db(const app_config &cfg) {
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|", db_dir));
}
db_ = utils::db::sqlite::create_db(
utils::path::combine(db_dir, {"file_mgr.db"}), sql_create_tables);
}
sqlite_file_mgr_db::~sqlite_file_mgr_db() { db_.reset(); }
auto sqlite_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
return utils::db::sqlite::db_insert{*db_, resume_table}
.or_replace()
.column_value("api_path", entry.api_path)
.column_value("chunk_size", static_cast<std::int64_t>(entry.chunk_size))
.column_value("read_state",
utils::string::from_dynamic_bitset(entry.read_state))
.column_value("source_path", entry.source_path)
.go()
.ok();
}
auto sqlite_file_mgr_db::add_upload(const upload_entry &entry) -> bool {
return utils::db::sqlite::db_insert{*db_, upload_table}
.or_replace()
.column_value("api_path", entry.api_path)
.column_value("source_path", entry.source_path)
.go()
.ok();
}
auto sqlite_file_mgr_db::add_upload_active(const upload_active_entry &entry)
-> bool {
return utils::db::sqlite::db_insert{*db_, upload_active_table}
.or_replace()
.column_value("api_path", entry.api_path)
.column_value("source_path", entry.source_path)
.go()
.ok();
}
void sqlite_file_mgr_db::clear() {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_delete{*db_, resume_table}.go();
if (not result.ok()) {
utils::error::raise_error(function_name,
"failed to clear resume table|" +
std::to_string(result.get_error()));
}
result = utils::db::sqlite::db_delete{*db_, upload_active_table}.go();
if (not result.ok()) {
utils::error::raise_error(function_name,
"failed to clear upload active table|" +
std::to_string(result.get_error()));
}
result = utils::db::sqlite::db_delete{*db_, upload_table}.go();
if (not result.ok()) {
utils::error::raise_error(function_name,
"failed to clear upload table|" +
std::to_string(result.get_error()));
}
}
auto sqlite_file_mgr_db::get_next_upload() const
-> std::optional<upload_entry> {
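// order by id so the oldest pending upload is returned first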
auto result = utils::db::sqlite::db_select{*db_, upload_table}
.order_by("id", true)
.limit(1)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (not result.get_row(row) || not row.has_value()) {
return std::nullopt;
}
return upload_entry{
row->get_column("api_path").get_value<std::string>(),
row->get_column("source_path").get_value<std::string>(),
};
}
auto sqlite_file_mgr_db::get_resume_list() const -> std::vector<resume_entry> {
REPERTORY_USES_FUNCTION_NAME();
std::vector<resume_entry> ret;
auto result = utils::db::sqlite::db_select{*db_, resume_table}.go();
while (result.has_row()) {
try {
std::optional<utils::db::sqlite::db_result::row> row;
if (not result.get_row(row)) {
continue;
}
if (not row.has_value()) {
continue;
}
ret.push_back(resume_entry{
row->get_column("api_path").get_value<std::string>(),
static_cast<std::uint64_t>(
row->get_column("chunk_size").get_value<std::int64_t>()),
utils::string::to_dynamic_bitset(
row->get_column("read_state").get_value<std::string>()),
row->get_column("source_path").get_value<std::string>(),
});
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex, "query error");
}
}
return ret;
}
auto sqlite_file_mgr_db::get_upload(const std::string &api_path) const
-> std::optional<upload_entry> {
auto result = utils::db::sqlite::db_select{*db_, upload_table}
.where("api_path")
.equals(api_path)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (not result.get_row(row) || not row.has_value()) {
return std::nullopt;
}
return upload_entry{
row->get_column("api_path").get_value<std::string>(),
row->get_column("source_path").get_value<std::string>(),
};
}
auto sqlite_file_mgr_db::get_upload_active_list() const
-> std::vector<upload_active_entry> {
REPERTORY_USES_FUNCTION_NAME();
std::vector<upload_active_entry> ret;
auto result = utils::db::sqlite::db_select{*db_, upload_active_table}.go();
while (result.has_row()) {
try {
std::optional<utils::db::sqlite::db_result::row> row;
if (not result.get_row(row)) {
continue;
}
if (not row.has_value()) {
continue;
}
ret.push_back(upload_active_entry{
row->get_column("api_path").get_value<std::string>(),
row->get_column("source_path").get_value<std::string>(),
});
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex, "query error");
}
}
return ret;
}
auto sqlite_file_mgr_db::remove_resume(const std::string &api_path) -> bool {
return utils::db::sqlite::db_delete{*db_, resume_table}
.where("api_path")
.equals(api_path)
.go()
.ok();
}
auto sqlite_file_mgr_db::remove_upload(const std::string &api_path) -> bool {
return utils::db::sqlite::db_delete{*db_, upload_table}
.where("api_path")
.equals(api_path)
.go()
.ok();
}
auto sqlite_file_mgr_db::remove_upload_active(const std::string &api_path)
-> bool {
return utils::db::sqlite::db_delete{*db_, upload_active_table}
.where("api_path")
.equals(api_path)
.go()
.ok();
}
auto sqlite_file_mgr_db::rename_resume(const std::string &from_api_path,
const std::string &to_api_path) -> bool {
return utils::db::sqlite::db_update{*db_, resume_table}
.column_value("api_path", to_api_path)
.where("api_path")
.equals(from_api_path)
.go()
.ok();
}
} // namespace repertory

View File

@ -19,21 +19,21 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "providers/meta_db.hpp"
#include "db/impl/sqlite_meta_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
namespace repertory {
meta_db::meta_db(const app_config &cfg) {
REPERTORY_USES_FUNCTION_NAME();
sqlite_meta_db::sqlite_meta_db(const app_config &cfg) {
const std::map<std::string, std::string> sql_create_tables{
{
{"meta"},
@ -44,20 +44,39 @@ meta_db::meta_db(const app_config &cfg) {
"data TEXT, "
"directory INTEGER, "
"pinned INTEGER, "
"size INTEGER, "
"source_path TEXT"
");"},
},
};
db_ = utils::db::sqlite::create_db(
utils::path::combine(cfg.get_data_directory(), {"meta.db"}),
sql_create_tables);
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|", db_dir));
}
db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"meta.db"}),
sql_create_tables);
}
meta_db::~meta_db() { db_.reset(); }
sqlite_meta_db::~sqlite_meta_db() { db_.reset(); }
auto meta_db::get_api_path(const std::string &source_path,
std::string &api_path) -> api_error {
void sqlite_meta_db::clear() {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_delete{*db_, table_name}.go();
if (result.ok()) {
return;
}
utils::error::raise_error(function_name,
"failed to clear meta db|" +
std::to_string(result.get_error()));
}
auto sqlite_meta_db::get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
auto result = utils::db::sqlite::db_select{*db_, table_name}
.column("api_path")
.where("source_path")
@ -75,7 +94,7 @@ auto meta_db::get_api_path(const std::string &source_path,
return api_error::item_not_found;
}
auto meta_db::get_api_path_list() -> std::vector<std::string> {
auto sqlite_meta_db::get_api_path_list() const -> std::vector<std::string> {
std::vector<std::string> ret{};
auto result =
@ -90,8 +109,8 @@ auto meta_db::get_api_path_list() -> std::vector<std::string> {
return ret;
}
auto meta_db::get_item_meta(const std::string &api_path,
api_meta_map &meta) -> api_error {
auto sqlite_meta_db::get_item_meta(const std::string &api_path,
api_meta_map &meta) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_select{*db_, table_name}
@ -114,9 +133,10 @@ auto meta_db::get_item_meta(const std::string &api_path,
row->get_column("directory").get_value<std::int64_t>() == 1);
meta[META_PINNED] = utils::string::from_bool(
row->get_column("pinned").get_value<std::int64_t>() == 1);
meta[META_SIZE] = std::to_string(static_cast<std::uint64_t>(
row->get_column("size").get_value<std::int64_t>()));
meta[META_SOURCE] =
row->get_column("source_path").get_value<std::string>();
return api_error::success;
}
@ -129,8 +149,9 @@ auto meta_db::get_item_meta(const std::string &api_path,
return api_error::error;
}
auto meta_db::get_item_meta(const std::string &api_path, const std::string &key,
std::string &value) const -> api_error {
auto sqlite_meta_db::get_item_meta(const std::string &api_path,
const std::string &key,
std::string &value) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_select{*db_, table_name}
@ -156,6 +177,9 @@ auto meta_db::get_item_meta(const std::string &api_path, const std::string &key,
: key == META_DIRECTORY
? utils::string::from_bool(
row->get_column("directory").get_value<std::int64_t>() == 1)
: key == META_SIZE
? std::to_string(static_cast<std::uint64_t>(
row->get_column("size").get_value<std::int64_t>()))
: json::parse(
row->get_column("data").get_value<std::string>())[key]
.get<std::string>();
@ -171,7 +195,7 @@ auto meta_db::get_item_meta(const std::string &api_path, const std::string &key,
return api_error::error;
}
auto meta_db::get_pinned_files() const -> std::vector<std::string> {
auto sqlite_meta_db::get_pinned_files() const -> std::vector<std::string> {
REPERTORY_USES_FUNCTION_NAME();
std::vector<std::string> ret{};
@ -195,7 +219,7 @@ auto meta_db::get_pinned_files() const -> std::vector<std::string> {
return ret;
}
auto meta_db::get_total_item_count() const -> std::uint64_t {
auto sqlite_meta_db::get_total_item_count() const -> std::uint64_t {
REPERTORY_USES_FUNCTION_NAME();
std::uint64_t ret{};
@ -218,7 +242,29 @@ auto meta_db::get_total_item_count() const -> std::uint64_t {
return ret;
}
void meta_db::remove_api_path(const std::string &api_path) {
auto sqlite_meta_db::get_total_size() const -> std::uint64_t {
REPERTORY_USES_FUNCTION_NAME();
try {
auto result = utils::db::sqlite::db_select{*db_, table_name}
.column("SUM(size) as total_size")
.where("directory")
.equals(0)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
return static_cast<std::uint64_t>(
row->get_column("total_size").get_value<std::int64_t>());
}
} catch (const std::exception &e) {
utils::error::raise_error(function_name, e, "failed to get total size");
}
return 0U;
}
void sqlite_meta_db::remove_api_path(const std::string &api_path) {
REPERTORY_USES_FUNCTION_NAME();
auto result = utils::db::sqlite::db_delete{*db_, table_name}
@ -231,8 +277,14 @@ void meta_db::remove_api_path(const std::string &api_path) {
}
}
auto meta_db::remove_item_meta(const std::string &api_path,
const std::string &key) -> api_error {
auto sqlite_meta_db::remove_item_meta(const std::string &api_path,
const std::string &key) -> api_error {
if (key == META_DIRECTORY || key == META_PINNED || key == META_SIZE ||
key == META_SOURCE) {
// TODO log warning for unsupported attributes
return api_error::success;
}
api_meta_map meta{};
auto res = get_item_meta(api_path, meta);
if (res != api_error::success) {
@ -243,8 +295,9 @@ auto meta_db::remove_item_meta(const std::string &api_path,
return update_item_meta(api_path, meta);
}
auto meta_db::rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path) -> api_error {
auto sqlite_meta_db::rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error {
api_meta_map meta{};
auto res = get_item_meta(from_api_path, meta);
if (res != api_error::success) {
@ -255,13 +308,14 @@ auto meta_db::rename_item_meta(const std::string &from_api_path,
return update_item_meta(to_api_path, meta);
}
auto meta_db::set_item_meta(const std::string &api_path, const std::string &key,
const std::string &value) -> api_error {
auto sqlite_meta_db::set_item_meta(const std::string &api_path,
const std::string &key,
const std::string &value) -> api_error {
return set_item_meta(api_path, {{key, value}});
}
auto meta_db::set_item_meta(const std::string &api_path,
const api_meta_map &meta) -> api_error {
auto sqlite_meta_db::set_item_meta(const std::string &api_path,
const api_meta_map &meta) -> api_error {
api_meta_map existing_meta{};
if (get_item_meta(api_path, existing_meta) != api_error::success) {
// TODO handle error
@ -274,33 +328,55 @@ auto meta_db::set_item_meta(const std::string &api_path,
return update_item_meta(api_path, existing_meta);
}
auto meta_db::update_item_meta(const std::string &api_path,
api_meta_map meta) -> api_error {
auto sqlite_meta_db::update_item_meta(const std::string &api_path,
api_meta_map meta) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto directory = utils::string::to_bool(meta[META_DIRECTORY]);
auto pinned = utils::string::to_bool(meta[META_PINNED]);
auto source_path = meta[META_SOURCE];
try {
if (meta[META_PINNED].empty()) {
meta[META_PINNED] = utils::string::from_bool(false);
}
if (meta[META_SIZE].empty()) {
meta[META_SIZE] = "0";
}
if (meta[META_SOURCE].empty()) {
meta[META_SOURCE] = "";
}
meta.erase(META_DIRECTORY);
meta.erase(META_PINNED);
meta.erase(META_SOURCE);
auto directory = utils::string::to_bool(meta.at(META_DIRECTORY));
auto pinned =
directory ? false : utils::string::to_bool(meta.at(META_PINNED));
auto size = directory ? std::uint64_t(0U)
: utils::string::to_uint64(meta.at(META_SIZE));
auto source_path = directory ? std::string("") : meta.at(META_SOURCE);
auto result = utils::db::sqlite::db_insert{*db_, table_name}
.or_replace()
.column_value("api_path", api_path)
.column_value("data", nlohmann::json(meta).dump())
.column_value("directory", directory ? 1 : 0)
.column_value("pinned", pinned ? 1 : 0)
.column_value("source_path", source_path)
.go();
if (not result.ok()) {
utils::error::raise_api_path_error(function_name, api_path,
result.get_error(),
meta.erase(META_DIRECTORY);
meta.erase(META_PINNED);
meta.erase(META_SIZE);
meta.erase(META_SOURCE);
auto result = utils::db::sqlite::db_insert{*db_, table_name}
.or_replace()
.column_value("api_path", api_path)
.column_value("data", nlohmann::json(meta).dump())
.column_value("directory", directory ? 1 : 0)
.column_value("pinned", pinned ? 1 : 0)
.column_value("size", static_cast<std::int64_t>(size))
.column_value("source_path", source_path)
.go();
if (not result.ok()) {
utils::error::raise_api_path_error(function_name, api_path,
result.get_error(),
"failed to update item meta");
return api_error::error;
}
return api_error::success;
} catch (const std::exception &e) {
utils::error::raise_api_path_error(function_name, api_path, e,
"failed to update item meta");
return api_error::error;
}
return api_error::success;
return api_error::error;
}
} // namespace repertory

View File

@ -0,0 +1,38 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/meta_db.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_meta_db.hpp"
#include "db/impl/sqlite_meta_db.hpp"
namespace repertory {
auto create_meta_db(const app_config &cfg) -> std::unique_ptr<i_meta_db> {
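// pick the meta DB backend from configuration: SQLite when selected, RocksDB otherwise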
switch (cfg.get_database_type()) {
case database_type::sqlite:
return std::make_unique<sqlite_meta_db>(cfg);
default:
return std::make_unique<rdb_meta_db>(cfg);
}
}
} // namespace repertory

View File

@ -115,7 +115,7 @@ auto directory_iterator::get_directory_item(const std::string &api_path,
auto directory_iterator::get_json(std::size_t offset, json &item) -> int {
if (offset < items_.size()) {
item = items_[offset].to_json();
item = json(items_.at(offset));
return 0;
}

View File

@ -30,30 +30,17 @@
#include "utils/file_utils.hpp"
#include "utils/time.hpp"
#include "utils/utils.hpp"
#include <spdlog/fmt/bundled/base.h>
namespace repertory {
auto eviction::check_minimum_requirements(const std::string &file_path)
-> bool {
REPERTORY_USES_FUNCTION_NAME();
auto check_file = utils::file::file{file_path};
auto opt_size = check_file.size();
if (not opt_size.has_value()) {
utils::error::raise_error(function_name, utils::get_last_error_code(),
file_path, "failed to get file size");
return false;
}
auto file_size{opt_size.value()};
if (file_size == 0U) {
return false;
}
auto reference_time =
check_file.get_time(config_.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
auto file = utils::file::file{file_path};
auto reference_time = file.get_time(config_.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
if (not reference_time.has_value()) {
utils::error::raise_error(function_name, utils::get_last_error_code(),
@ -61,18 +48,17 @@ auto eviction::check_minimum_requirements(const std::string &file_path)
return false;
}
auto delay = (config_.get_eviction_delay_mins() * 60UL) *
utils::time::NANOS_PER_SECOND;
return ((reference_time.value() + static_cast<std::uint64_t>(delay)) <=
utils::time::get_time_now());
auto delay =
static_cast<std::uint64_t>(config_.get_eviction_delay_mins() * 60U) *
utils::time::NANOS_PER_SECOND;
return (reference_time.value() + delay) <= utils::time::get_time_now();
}
auto eviction::get_filtered_cached_files() -> std::deque<std::string> {
auto list =
utils::file::get_directory_files(config_.get_cache_directory(), true);
list.erase(std::remove_if(list.begin(), list.end(),
[this](const std::string &path) -> bool {
[this](auto &&path) -> bool {
return not this->check_minimum_requirements(path);
}),
list.end());
@ -82,65 +68,38 @@ auto eviction::get_filtered_cached_files() -> std::deque<std::string> {
void eviction::service_function() {
REPERTORY_USES_FUNCTION_NAME();
auto should_evict = true;
auto cached_files_list = get_filtered_cached_files();
auto was_file_evicted{false};
while (not get_stop_requested() && not cached_files_list.empty()) {
auto file_path = cached_files_list.front();
cached_files_list.pop_front();
// Handle maximum cache size eviction
auto used_bytes =
utils::file::directory{config_.get_cache_directory()}.size();
if (config_.get_enable_max_cache_size()) {
should_evict = (used_bytes > config_.get_max_cache_size_bytes());
}
if (should_evict) {
// Remove cached source files that don't meet minimum requirements
auto cached_files_list = get_filtered_cached_files();
while (not get_stop_requested() && should_evict &&
not cached_files_list.empty()) {
try {
std::string api_path;
if (provider_.get_api_path_from_source(
cached_files_list.front(), api_path) == api_error::success) {
api_file file{};
filesystem_item fsi{};
if (provider_.get_filesystem_item_and_file(api_path, file, fsi) ==
api_error::success) {
// Only evict files that match expected size
auto opt_size = utils::file::file{cached_files_list.front()}.size();
if (opt_size.has_value()) {
auto file_size{opt_size.value()};
if (file_size == fsi.size) {
// Try to evict file
if (fm_.evict_file(fsi.api_path) &&
config_.get_enable_max_cache_size()) {
// Restrict number of items evicted if maximum cache size is
// enabled
used_bytes -= file_size;
should_evict =
(used_bytes > config_.get_max_cache_size_bytes());
}
}
} else {
utils::error::raise_api_path_error(
function_name, file.api_path, file.source_path,
utils::get_last_error_code(), "failed to get file size");
}
}
}
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to process cached file|sp|" +
cached_files_list.front());
try {
std::string api_path;
auto res = provider_.get_api_path_from_source(file_path, api_path);
if (res != api_error::success) {
continue;
}
cached_files_list.pop_front();
if (file_mgr_.evict_file(api_path)) {
was_file_evicted = true;
}
} catch (const std::exception &ex) {
utils::error::raise_error(
function_name, ex,
fmt::format("failed to process cached file|sp|{}", file_path));
}
}
if (not get_stop_requested()) {
unique_mutex_lock lock(get_mutex());
if (not get_stop_requested()) {
get_notify().wait_for(lock, 30s);
}
if (get_stop_requested() || was_file_evicted) {
return;
}
unique_mutex_lock lock(get_mutex());
if (get_stop_requested()) {
return;
}
get_notify().wait_for(lock, 30s);
}
} // namespace repertory

View File

@ -30,6 +30,7 @@
#include "initialize.hpp"
#include "platform/platform.hpp"
#include "utils/collection.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
@ -40,47 +41,37 @@ auto fuse_base::instance() -> fuse_base & {
}
fuse_base::fuse_base(app_config &config) : config_(config) {
fuse_ops_.getattr = fuse_base::getattr_;
fuse_ops_.mkdir = fuse_base::mkdir_;
fuse_ops_.unlink = fuse_base::unlink_;
fuse_ops_.rmdir = fuse_base::rmdir_;
fuse_ops_.rename = fuse_base::rename_;
fuse_ops_.access = fuse_base::access_;
fuse_ops_.chmod = fuse_base::chmod_;
fuse_ops_.chown = fuse_base::chown_;
fuse_ops_.truncate = fuse_base::truncate_;
fuse_ops_.create = fuse_base::create_;
fuse_ops_.destroy = fuse_base::destroy_;
fuse_ops_.fallocate = fuse_base::fallocate_;
fuse_ops_.fsync = fuse_base::fsync_;
fuse_ops_.getattr = fuse_base::getattr_;
fuse_ops_.init = fuse_base::init_;
fuse_ops_.mkdir = fuse_base::mkdir_;
fuse_ops_.open = fuse_base::open_;
fuse_ops_.opendir = fuse_base::opendir_;
fuse_ops_.read = fuse_base::read_;
fuse_ops_.write = fuse_base::write_;
fuse_ops_.readdir = fuse_base::readdir_;
fuse_ops_.release = fuse_base::release_;
fuse_ops_.releasedir = fuse_base::releasedir_;
fuse_ops_.rename = fuse_base::rename_;
fuse_ops_.rmdir = fuse_base::rmdir_;
fuse_ops_.truncate = fuse_base::truncate_;
#if !defined(__APPLE__)
fuse_ops_.statfs = fuse_base::statfs_;
#endif // __APPLE__
fuse_ops_.release = fuse_base::release_;
fuse_ops_.fsync = fuse_base::fsync_;
fuse_ops_.unlink = fuse_base::unlink_;
fuse_ops_.utimens = fuse_base::utimens_;
fuse_ops_.write = fuse_base::write_;
#if defined(HAS_SETXATTR)
fuse_ops_.setxattr = fuse_base::setxattr_;
fuse_ops_.getxattr = fuse_base::getxattr_;
fuse_ops_.listxattr = fuse_base::listxattr_;
fuse_ops_.removexattr = fuse_base::removexattr_;
#endif // HAS_SETXATTR
fuse_ops_.opendir = fuse_base::opendir_;
fuse_ops_.readdir = fuse_base::readdir_;
fuse_ops_.releasedir = fuse_base::releasedir_;
fuse_ops_.init = fuse_base::init_;
fuse_ops_.destroy = fuse_base::destroy_;
fuse_ops_.access = fuse_base::access_;
fuse_ops_.create = fuse_base::create_;
#if FUSE_USE_VERSION < 30
fuse_ops_.ftruncate = fuse_base::ftruncate_;
fuse_ops_.fgetattr = fuse_base::fgetattr_;
#endif
fuse_ops_.utimens = fuse_base::utimens_;
#if FUSE_USE_VERSION < 30
fuse_ops_.flag_nullpath_ok = 0;
fuse_ops_.flag_nopath = 0;
fuse_ops_.flag_utime_omit_ok = 1;
fuse_ops_.flag_reserved = 0;
#endif
fuse_ops_.fallocate = fuse_base::fallocate_;
fuse_ops_.setxattr = fuse_base::setxattr_;
#endif // defined(HAS_SETXATTR)
#if defined(__APPLE__)
fuse_ops_.chflags = fuse_base::chflags_;
fuse_ops_.fsetattr_x = fuse_base::fsetattr_x_;
@ -91,7 +82,13 @@ fuse_base::fuse_base(app_config &config) : config_(config) {
fuse_ops_.setcrtime = fuse_base::setcrtime_;
fuse_ops_.setvolname = fuse_base::setvolname_;
fuse_ops_.statfs_x = fuse_base::statfs_x_;
#endif // __APPLE__
#endif // defined(__APPLE__)
#if FUSE_USE_VERSION < 30
fuse_ops_.flag_nullpath_ok = 0;
fuse_ops_.flag_nopath = 0;
fuse_ops_.flag_utime_omit_ok = 1;
fuse_ops_.flag_reserved = 0;
#endif // FUSE_USE_VERSION < 30
E_SUBSCRIBE_EXACT(unmount_requested, [this](const unmount_requested &) {
std::thread([this]() { this->shutdown(); }).detach();
@ -121,8 +118,8 @@ auto fuse_base::chflags_(const char *path, uint32_t flags) -> int {
#endif // __APPLE__
#if FUSE_USE_VERSION >= 30
auto fuse_base::chmod_(const char *path, mode_t mode, struct fuse_file_info *fi)
-> int {
auto fuse_base::chmod_(const char *path, mode_t mode,
struct fuse_file_info *fi) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(
@ -189,7 +186,7 @@ void fuse_base::display_options(
#if FUSE_USE_VERSION >= 30
fuse_cmdline_help();
#else
struct fuse_operations fuse_ops{};
struct fuse_operations fuse_ops {};
fuse_main(args.size(),
reinterpret_cast<char **>(const_cast<char **>(args.data())),
&fuse_ops, nullptr);
@ -199,7 +196,7 @@ void fuse_base::display_options(
}
void fuse_base::display_version_information(std::vector<const char *> args) {
struct fuse_operations fuse_ops{};
struct fuse_operations fuse_ops {};
fuse_main(static_cast<int>(args.size()),
reinterpret_cast<char **>(const_cast<char **>(args.data())),
&fuse_ops, nullptr);
@ -337,8 +334,8 @@ auto fuse_base::getxtimes_(const char *path, struct timespec *bkuptime,
#endif // __APPLE__
#if FUSE_USE_VERSION >= 30
auto fuse_base::init_(struct fuse_conn_info *conn, struct fuse_config *cfg)
-> void * {
auto fuse_base::init_(struct fuse_conn_info *conn,
struct fuse_config *cfg) -> void * {
REPERTORY_USES_FUNCTION_NAME();
return execute_void_pointer_callback(function_name, [&]() -> void * {
@ -411,7 +408,7 @@ auto fuse_base::mount(std::vector<std::string> args) -> int {
char *mount_location{nullptr};
#if FUSE_USE_VERSION >= 30
struct fuse_cmdline_opts opts{};
struct fuse_cmdline_opts opts {};
fuse_parse_cmdline(&fa, &opts);
mount_location = opts.mountpoint;
#else
@ -480,8 +477,8 @@ auto fuse_base::read_(const char *path, char *buffer, size_t read_size,
#if FUSE_USE_VERSION >= 30
auto fuse_base::readdir_(const char *path, void *buf,
fuse_fill_dir_t fuse_fill_dir, off_t offset,
struct fuse_file_info *fi, fuse_readdir_flags flags)
-> int {
struct fuse_file_info *fi,
fuse_readdir_flags flags) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(
@ -513,8 +510,8 @@ auto fuse_base::release_(const char *path, struct fuse_file_info *fi) -> int {
});
}
auto fuse_base::releasedir_(const char *path, struct fuse_file_info *fi)
-> int {
auto fuse_base::releasedir_(const char *path,
struct fuse_file_info *fi) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(
@ -524,8 +521,8 @@ auto fuse_base::releasedir_(const char *path, struct fuse_file_info *fi)
}
#if FUSE_USE_VERSION >= 30
auto fuse_base::rename_(const char *from, const char *to, unsigned int flags)
-> int {
auto fuse_base::rename_(const char *from, const char *to,
unsigned int flags) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(
@ -768,8 +765,8 @@ auto fuse_base::setattr_x_(const char *path, struct setattr_x *attr) -> int {
});
}
auto fuse_base::setbkuptime_(const char *path, const struct timespec *bkuptime)
-> int {
auto fuse_base::setbkuptime_(const char *path,
const struct timespec *bkuptime) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(
@ -778,8 +775,8 @@ auto fuse_base::setbkuptime_(const char *path, const struct timespec *bkuptime)
});
}
auto fuse_base::setchgtime_(const char *path, const struct timespec *chgtime)
-> int {
auto fuse_base::setchgtime_(const char *path,
const struct timespec *chgtime) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(
@ -788,8 +785,8 @@ auto fuse_base::setchgtime_(const char *path, const struct timespec *chgtime)
});
}
auto fuse_base::setcrtime_(const char *path, const struct timespec *crtime)
-> int {
auto fuse_base::setcrtime_(const char *path,
const struct timespec *crtime) -> int {
REPERTORY_USES_FUNCTION_NAME();
return instance().execute_callback(


@ -88,7 +88,7 @@ auto fuse_drive::chown_impl(std::string api_path, uid_t uid,
api_path, X_OK, [&](api_meta_map &meta) -> api_error {
meta.clear();
if (uid != static_cast<uid_t>(-1)) {
if (get_effective_uid() != 0) {
if (get_effective_uid() != 0 && get_effective_uid() != uid) {
return api_error::permission_denied;
}
@ -116,19 +116,16 @@ auto fuse_drive::create_impl(std::string api_path, mode_t mode,
struct fuse_file_info *file_info) -> api_error {
file_info->fh = 0U;
auto is_append_op = ((file_info->flags & O_APPEND) == O_APPEND);
auto is_create_op = ((file_info->flags & O_CREAT) == O_CREAT);
auto is_directory_op = ((file_info->flags & O_DIRECTORY) == O_DIRECTORY);
auto is_truncate_op = ((file_info->flags & O_TRUNC) == O_TRUNC);
auto is_exclusive = ((file_info->flags & O_EXCL) == O_EXCL);
auto is_read_write_op = ((file_info->flags & O_RDWR) == O_RDWR);
auto is_truncate_op = ((file_info->flags & O_TRUNC) == O_TRUNC);
auto is_write_only_op = ((file_info->flags & O_WRONLY) == O_WRONLY);
if (((file_info->flags & O_WRONLY) != 0) ||
((file_info->flags & O_RDWR) != 0)) {
auto res = provider_.is_file_writeable(api_path)
? api_error::success
: api_error::permission_denied;
if (res != api_error::success) {
return res;
}
if (is_create_op && is_append_op && is_truncate_op) {
return api_error::invalid_operation;
}
auto res = check_parent_access(api_path, X_OK);
@ -137,18 +134,26 @@ auto fuse_drive::create_impl(std::string api_path, mode_t mode,
}
if (is_create_op) {
if ((res = check_access(api_path, W_OK)) == api_error::item_not_found) {
res = check_access(api_path, W_OK);
if (res == api_error::item_not_found) {
res = check_parent_access(api_path, W_OK);
}
} else if ((res = check_access(api_path, R_OK)) ==
api_error::item_not_found) {
res = check_parent_access(api_path, R_OK);
} else {
res = check_access(api_path, R_OK);
if (res == api_error::item_not_found) {
res = check_parent_access(api_path, R_OK);
}
}
if (res != api_error::success) {
return res;
}
if ((is_write_only_op || is_read_write_op) &&
not provider_.is_file_writeable(api_path)) {
return api_error::permission_denied;
}
if (is_create_op && is_directory_op) {
return api_error::invalid_operation;
}
@ -159,20 +164,31 @@ auto fuse_drive::create_impl(std::string api_path, mode_t mode,
return res;
}
if (is_create_op) {
if (is_exclusive && file_exists) {
return api_error::item_exists;
}
} else {
bool dir_exists{};
bool dir_exists{};
if (not file_exists) {
res = provider_.is_directory(api_path, dir_exists);
if (res != api_error::success) {
return res;
}
}
if (is_create_op) {
if (dir_exists) {
return api_error::directory_exists;
}
if (is_exclusive && file_exists) {
return api_error::item_exists;
}
} else {
if (is_directory_op ? file_exists : dir_exists) {
return is_directory_op ? api_error::item_exists
: api_error::directory_exists;
}
if (not(is_directory_op ? dir_exists : file_exists)) {
return (is_directory_op ? api_error::directory_not_found
: api_error::item_not_found);
return is_directory_op ? api_error::directory_not_found
: api_error::item_not_found;
}
if ((is_exclusive || is_truncate_op) && not file_exists) {
@ -202,19 +218,24 @@ auto fuse_drive::create_impl(std::string api_path, mode_t mode,
if ((res != api_error::item_exists) && (res != api_error::success)) {
return res;
}
} else if (((res = fm_->open(api_path, is_directory_op, file_info->flags,
handle, open_file)) != api_error::success)) {
return res;
} else {
res = fm_->open(api_path, is_directory_op, file_info->flags, handle,
open_file);
if (res != api_error::success) {
return res;
}
}
}
file_info->fh = handle;
if (is_truncate_op) {
#if FUSE_USE_VERSION >= 30
if ((res = truncate_impl(api_path, 0, file_info)) != api_error::success) {
#else
if ((res = ftruncate_impl(api_path, 0, file_info)) != api_error::success) {
#endif
res = truncate_impl(api_path, 0, file_info);
if (res != api_error::success) {
#else // FUSE_USE_VERSION < 30
res = ftruncate_impl(api_path, 0, file_info);
if (res != api_error::success) {
#endif // FUSE_USE_VERSION >= 30
fm_->close(handle);
file_info->fh = 0U;
errno = std::abs(utils::from_api_error(res));
@ -283,9 +304,9 @@ auto fuse_drive::fallocate_impl(std::string /*api_path*/, int mode,
return res;
}
if ((res = check_open_flags(
open_file->get_open_data(file_info->fh), O_WRONLY | O_APPEND,
api_error::invalid_handle)) != api_error::success) {
res = check_open_flags(open_file->get_open_data(file_info->fh),
O_WRONLY | O_APPEND, api_error::invalid_handle);
if (res != api_error::success) {
return res;
}
@ -329,7 +350,7 @@ auto fuse_drive::fallocate_impl(std::string /*api_path*/, int mode,
static_cast<std::uint64_t>(offset + length), allocator);
}
auto fuse_drive::fgetattr_impl(std::string api_path, struct stat *st,
auto fuse_drive::fgetattr_impl(std::string api_path, struct stat *unix_st,
struct fuse_file_info *file_info) -> api_error {
std::shared_ptr<i_open_file> open_file;
if (not fm_->get_open_file(file_info->fh, false, open_file)) {
@ -348,7 +369,7 @@ auto fuse_drive::fgetattr_impl(std::string api_path, struct stat *st,
return res;
}
fuse_drive_base::populate_stat(api_path, open_file->get_file_size(), meta,
directory, provider_, st);
directory, provider_, unix_st);
return api_error::success;
}
@ -456,12 +477,12 @@ auto fuse_drive::get_item_meta(const std::string &api_path,
}
#if FUSE_USE_VERSION >= 30
auto fuse_drive::getattr_impl(std::string api_path, struct stat *st,
auto fuse_drive::getattr_impl(std::string api_path, struct stat *unix_st,
struct fuse_file_info * /*file_info*/)
-> api_error {
#else
auto fuse_drive::getattr_impl(std::string api_path,
struct stat *st) -> api_error {
struct stat *unix_st) -> api_error {
#endif
auto parent = utils::path::get_parent_api_path(api_path);
@ -473,16 +494,17 @@ auto fuse_drive::getattr_impl(std::string api_path,
auto found = false;
directory_cache_->execute_action(parent, [&](directory_iterator &iter) {
directory_item dir_item{};
if ((found = (iter.get_directory_item(api_path, dir_item) ==
api_error::success))) {
found = (iter.get_directory_item(api_path, dir_item) == api_error::success);
if (found) {
fuse_drive_base::populate_stat(api_path, dir_item.size, dir_item.meta,
dir_item.directory, provider_, st);
dir_item.directory, provider_, unix_st);
}
});
if (not found) {
api_meta_map meta{};
if ((res = provider_.get_item_meta(api_path, meta)) != api_error::success) {
res = provider_.get_item_meta(api_path, meta);
if (res != api_error::success) {
return res;
}
@ -493,7 +515,7 @@ auto fuse_drive::getattr_impl(std::string api_path,
}
fuse_drive_base::populate_stat(api_path,
utils::string::to_uint64(meta[META_SIZE]),
meta, directory, provider_, st);
meta, directory, provider_, unix_st);
}
return api_error::success;
@ -566,8 +588,6 @@ void *fuse_drive::init_impl(struct fuse_conn_info *conn) {
event_system::instance().start();
was_mounted_ = true;
polling::instance().start(&config_);
fm_ = std::make_unique<file_manager>(config_, provider_);
server_ = std::make_unique<full_server>(config_, provider_, *fm_);
if (not provider_.is_read_only()) {
@ -592,7 +612,7 @@ void *fuse_drive::init_impl(struct fuse_conn_info *conn) {
eviction_->start();
}
if (config_.get_enable_remote_mount()) {
if (config_.get_remote_mount().enable) {
remote_server_ = std::make_unique<remote_fuse::remote_server>(
config_, *this, get_mount_location());
}
@ -600,6 +620,9 @@ void *fuse_drive::init_impl(struct fuse_conn_info *conn) {
if (not lock_data_.set_mount_state(true, get_mount_location(), getpid())) {
utils::error::raise_error(function_name, "failed to set mount state");
}
polling::instance().start(&config_);
event_system::instance().raise<drive_mounted>(get_mount_location());
} catch (const std::exception &e) {
utils::error::raise_error(function_name, e, "exception during fuse init");
@ -657,6 +680,7 @@ void fuse_drive::notify_fuse_main_exit(int &ret) {
auto fuse_drive::open_impl(std::string api_path,
struct fuse_file_info *file_info) -> api_error {
file_info->flags &= (~O_CREAT);
return create_impl(api_path, 0, file_info);
}
@ -669,7 +693,8 @@ auto fuse_drive::opendir_impl(std::string api_path,
return res;
}
if ((res = check_parent_access(api_path, mask)) != api_error::success) {
res = check_parent_access(api_path, mask);
if (res != api_error::success) {
return res;
}
@ -682,9 +707,14 @@ auto fuse_drive::opendir_impl(std::string api_path,
return api_error::directory_not_found;
}
if ((file_info->flags & O_APPEND) == O_APPEND ||
(file_info->flags & O_EXCL) == O_EXCL) {
return api_error::directory_exists;
}
directory_item_list list{};
if ((res = provider_.get_directory_items(api_path, list)) !=
api_error::success) {
res = provider_.get_directory_items(api_path, list);
if (res != api_error::success) {
return res;
}
@ -712,7 +742,8 @@ auto fuse_drive::read_impl(std::string api_path, char *buffer, size_t read_size,
data_buffer data;
res =
open_file->read(read_size, static_cast<std::uint64_t>(read_offset), data);
if ((bytes_read = data.size()) != 0U) {
bytes_read = data.size();
if (bytes_read != 0U) {
std::memcpy(buffer, data.data(), data.size());
data.clear();
update_accessed_time(api_path);
@ -742,16 +773,17 @@ auto fuse_drive::readdir_impl(std::string api_path, void *buf,
}
while (res == api_error::success) {
res = (iter->fill_buffer(
static_cast<remote::file_offset>(offset++), fuse_fill_dir, buf,
[this](const std::string &cur_api_path,
std::uint64_t cur_file_size, const api_meta_map &meta,
bool directory, struct stat *st) {
fuse_drive_base::populate_stat(cur_api_path, cur_file_size,
meta, directory, provider_, st);
}) == 0)
? api_error::success
: api_error::os_error;
res =
(iter->fill_buffer(
static_cast<remote::file_offset>(offset++), fuse_fill_dir, buf,
[this](const std::string &cur_api_path,
std::uint64_t cur_file_size, const api_meta_map &meta,
bool directory, struct stat *unix_st) {
fuse_drive_base::populate_stat(cur_api_path, cur_file_size, meta,
directory, provider_, unix_st);
}) == 0)
? api_error::success
: api_error::os_error;
}
if ((res == api_error::os_error) && ((errno == 120) || (errno == ENOMEM))) {
@ -806,8 +838,8 @@ auto fuse_drive::rename_impl(std::string from_api_path,
return res;
}
if ((res = check_parent_access(from_api_path, W_OK | X_OK)) !=
api_error::success) {
res = check_parent_access(from_api_path, W_OK | X_OK);
if (res != api_error::success) {
return res;
}
@ -834,7 +866,8 @@ auto fuse_drive::rmdir_impl(std::string api_path) -> api_error {
return res;
}
if ((res = provider_.remove_directory(api_path)) != api_error::success) {
res = provider_.remove_directory(api_path);
if (res != api_error::success) {
return res;
}
@ -859,42 +892,48 @@ auto fuse_drive::getxattr_common(std::string api_path, const char *name,
return res;
}
if ((res = check_parent_access(api_path, X_OK)) != api_error::success) {
res = check_parent_access(api_path, X_OK);
if (res != api_error::success) {
return res;
}
api_meta_map meta;
auto found = false;
auto found{false};
directory_cache_->execute_action(
utils::path::get_parent_api_path(api_path),
[&](directory_iterator &iterator) {
directory_item dir_item{};
if ((found = (iterator.get_directory_item(api_path, dir_item) ==
api_error::success))) {
found = (iterator.get_directory_item(api_path, dir_item) ==
api_error::success);
if (found) {
meta = dir_item.meta;
}
});
if (found ||
((res = provider_.get_item_meta(api_path, meta)) == api_error::success)) {
res = api_error::xattr_not_found;
if (meta.find(attribute_name) != meta.end()) {
auto data = macaron::Base64::Decode(meta[attribute_name]);
if ((position == nullptr) || (*position < data.size())) {
res = api_error::success;
attribute_size = static_cast<int>(data.size());
if (size != 0U) {
res = api_error::xattr_buffer_small;
if (size >= data.size()) {
memcpy(value, data.data(), data.size());
return api_error::success;
}
}
}
}
res = found ? api_error::success : provider_.get_item_meta(api_path, meta);
if (res != api_error::success) {
return res;
}
return res;
if (meta.find(attribute_name) == meta.end()) {
return api_error::xattr_not_found;
}
auto data = macaron::Base64::Decode(meta.at(attribute_name));
if ((position == nullptr) || (*position < data.size())) {
attribute_size = static_cast<int>(data.size());
if (size == 0U) {
return api_error::success;
}
if (size < data.size()) {
return api_error::xattr_buffer_small;
}
std::memcpy(value, data.data(), data.size());
}
return api_error::success;
}
#if defined(__APPLE__)
@ -923,7 +962,8 @@ auto fuse_drive::listxattr_impl(std::string api_path, char *buffer, size_t size,
}
api_meta_map meta;
if ((res = provider_.get_item_meta(api_path, meta)) == api_error::success) {
res = provider_.get_item_meta(api_path, meta);
if (res == api_error::success) {
for (auto &&meta_item : meta) {
if (utils::collection::excludes(META_USED_NAMES, meta_item.first)) {
auto attribute_name = meta_item.first;
@ -1016,15 +1056,18 @@ auto fuse_drive::setxattr_impl(std::string api_path, const char *name,
}
api_meta_map meta;
if ((res = provider_.get_item_meta(api_path, meta)) != api_error::success) {
res = provider_.get_item_meta(api_path, meta);
if (res != api_error::success) {
return res;
}
if ((res = check_parent_access(api_path, X_OK)) != api_error::success) {
res = check_parent_access(api_path, X_OK);
if (res != api_error::success) {
return res;
}
if ((res = check_owner(meta)) != api_error::success) {
res = check_owner(meta);
if (res != api_error::success) {
return res;
}
@ -1254,12 +1297,12 @@ auto fuse_drive::truncate_impl(std::string api_path, off_t size) -> api_error {
if (res != api_error::success) {
return res;
}
if ((res = check_parent_access(api_path, X_OK)) != api_error::success) {
res = check_parent_access(api_path, X_OK);
if (res != api_error::success) {
return res;
}
if ((res = check_access(api_path, W_OK)) != api_error::success) {
res = check_access(api_path, W_OK);
if (res != api_error::success) {
return res;
}
@ -1267,11 +1310,15 @@ auto fuse_drive::truncate_impl(std::string api_path, off_t size) -> api_error {
{
open_file_data ofd{O_RDWR};
std::shared_ptr<i_open_file> open_file;
if ((res = fm_->open(api_path, false, ofd, handle, open_file)) !=
api_error::success) {
res = fm_->open(api_path, false, ofd, handle, open_file);
if (res != api_error::success) {
return res;
}
if (not fm_->get_open_file(handle, true, open_file)) {
return api_error::invalid_handle;
}
res = open_file->resize(static_cast<std::uint64_t>(size));
}
@ -1311,7 +1358,8 @@ auto fuse_drive::utimens_impl(std::string api_path,
return res;
}
if ((res = check_owner(meta)) != api_error::success) {
res = check_owner(meta);
if (res != api_error::success) {
return res;
}


@ -176,8 +176,8 @@ auto fuse_drive_base::check_parent_access(const std::string &api_path,
for (auto parent = utils::path::get_parent_path(api_path);
(ret == api_error::success) && not parent.empty();
parent = utils::path::get_parent_path(parent)) {
if (((ret = check_access(parent, X_OK)) == api_error::success) &&
(parent == "/")) {
ret = check_access(parent, X_OK);
if ((ret == api_error::success) && (parent == "/")) {
break;
}
}


@ -27,12 +27,7 @@
namespace repertory::remote_fuse {
remote_client::remote_client(const app_config &config)
: config_(config),
packet_client_(
config.get_remote_host_name_or_ip(),
config.get_remote_max_connections(), config.get_remote_port(),
config.get_remote_receive_timeout_secs(),
config.get_remote_send_timeout_secs(), config.get_remote_token()) {}
: config_(config), packet_client_(config.get_remote_config()) {}
auto remote_client::fuse_access(const char *path, const std::int32_t &mask)
-> packet::error_type {


@ -40,8 +40,8 @@
#include "utils/utils.hpp"
namespace repertory::remote_fuse {
auto remote_fuse_drive::access_impl(std::string api_path, int mask)
-> api_error {
auto remote_fuse_drive::access_impl(std::string api_path,
int mask) -> api_error {
return utils::to_api_error(
remote_instance_->fuse_access(api_path.c_str(), mask));
}
@ -59,8 +59,8 @@ auto remote_fuse_drive::chmod_impl(std::string api_path, mode_t mode,
struct fuse_file_info * /*f_info*/)
-> api_error {
#else
auto remote_fuse_drive::chmod_impl(std::string api_path, mode_t mode)
-> api_error {
auto remote_fuse_drive::chmod_impl(std::string api_path,
mode_t mode) -> api_error {
#endif
return utils::to_api_error(remote_instance_->fuse_chmod(
api_path.c_str(), static_cast<remote::file_mode>(mode)));
@ -71,8 +71,8 @@ auto remote_fuse_drive::chown_impl(std::string api_path, uid_t uid, gid_t gid,
struct fuse_file_info * /*f_info*/)
-> api_error {
#else
auto remote_fuse_drive::chown_impl(std::string api_path, uid_t uid, gid_t gid)
-> api_error {
auto remote_fuse_drive::chown_impl(std::string api_path, uid_t uid,
gid_t gid) -> api_error {
#endif
return utils::to_api_error(
remote_instance_->fuse_chown(api_path.c_str(), uid, gid));
@ -116,10 +116,9 @@ void remote_fuse_drive::destroy_impl(void *ptr) {
fuse_base::destroy_impl(ptr);
}
auto remote_fuse_drive::fgetattr_impl(std::string api_path,
struct stat *unix_st,
struct fuse_file_info *f_info)
-> api_error {
auto remote_fuse_drive::fgetattr_impl(
std::string api_path, struct stat *unix_st,
struct fuse_file_info *f_info) -> api_error {
remote::stat r_stat{};
auto directory = false;
@ -180,8 +179,8 @@ auto remote_fuse_drive::getattr_impl(std::string api_path, struct stat *unix_st,
struct fuse_file_info * /*f_info*/)
-> api_error {
#else
auto remote_fuse_drive::getattr_impl(std::string api_path, struct stat *unix_st)
-> api_error {
auto remote_fuse_drive::getattr_impl(std::string api_path,
struct stat *unix_st) -> api_error {
#endif
bool directory = false;
remote::stat r_stat{};
@ -264,8 +263,8 @@ auto remote_fuse_drive::init_impl(struct fuse_conn_info *conn) -> void * {
return ret;
}
auto remote_fuse_drive::mkdir_impl(std::string api_path, mode_t mode)
-> api_error {
auto remote_fuse_drive::mkdir_impl(std::string api_path,
mode_t mode) -> api_error {
return utils::to_api_error(remote_instance_->fuse_mkdir(
api_path.c_str(), static_cast<remote::file_mode>(mode)));
}
@ -287,9 +286,12 @@ auto remote_fuse_drive::open_impl(std::string api_path,
f_info->fh));
}
auto remote_fuse_drive::opendir_impl(std::string api_path,
struct fuse_file_info *f_info)
-> api_error {
auto remote_fuse_drive::opendir_impl(
std::string api_path, struct fuse_file_info *f_info) -> api_error {
if ((f_info->flags & O_APPEND) == O_APPEND ||
(f_info->flags & O_EXCL) == O_EXCL) {
return api_error::directory_exists;
}
return utils::to_api_error(
remote_instance_->fuse_opendir(api_path.c_str(), f_info->fh));
@ -378,18 +380,14 @@ auto remote_fuse_drive::read_impl(std::string api_path, char *buffer,
}
#if FUSE_USE_VERSION >= 30
auto remote_fuse_drive::readdir_impl(std::string api_path, void *buf,
fuse_fill_dir_t fuse_fill_dir,
off_t offset,
struct fuse_file_info *f_info,
fuse_readdir_flags /*flags*/)
-> api_error {
auto remote_fuse_drive::readdir_impl(
std::string api_path, void *buf, fuse_fill_dir_t fuse_fill_dir,
off_t offset, struct fuse_file_info *f_info,
fuse_readdir_flags /*flags*/) -> api_error {
#else
auto remote_fuse_drive::readdir_impl(std::string api_path, void *buf,
fuse_fill_dir_t fuse_fill_dir,
off_t offset,
struct fuse_file_info *f_info)
-> api_error {
auto remote_fuse_drive::readdir_impl(
std::string api_path, void *buf, fuse_fill_dir_t fuse_fill_dir,
off_t offset, struct fuse_file_info *f_info) -> api_error {
#endif
std::string item_path;
int res = 0;
@ -417,16 +415,14 @@ auto remote_fuse_drive::readdir_impl(std::string api_path, void *buf,
return utils::to_api_error(res);
}
auto remote_fuse_drive::release_impl(std::string api_path,
struct fuse_file_info *f_info)
-> api_error {
auto remote_fuse_drive::release_impl(
std::string api_path, struct fuse_file_info *f_info) -> api_error {
return utils::to_api_error(
remote_instance_->fuse_release(api_path.c_str(), f_info->fh));
}
auto remote_fuse_drive::releasedir_impl(std::string api_path,
struct fuse_file_info *f_info)
-> api_error {
auto remote_fuse_drive::releasedir_impl(
std::string api_path, struct fuse_file_info *f_info) -> api_error {
return utils::to_api_error(
remote_instance_->fuse_releasedir(api_path.c_str(), f_info->fh));
}
@ -523,8 +519,8 @@ api_error remote_fuse_drive::statfs_x_impl(std::string api_path,
return utils::to_api_error(res);
}
#else // __APPLE__
auto remote_fuse_drive::statfs_impl(std::string api_path, struct statvfs *stbuf)
-> api_error {
auto remote_fuse_drive::statfs_impl(std::string api_path,
struct statvfs *stbuf) -> api_error {
auto res = statvfs(config_.get_data_directory().c_str(), stbuf);
if (res == 0) {
remote::statfs r_stat{};
@ -551,8 +547,8 @@ auto remote_fuse_drive::truncate_impl(std::string api_path, off_t size,
struct fuse_file_info * /*f_info*/)
-> api_error {
#else
auto remote_fuse_drive::truncate_impl(std::string api_path, off_t size)
-> api_error {
auto remote_fuse_drive::truncate_impl(std::string api_path,
off_t size) -> api_error {
#endif
return utils::to_api_error(remote_instance_->fuse_truncate(
api_path.c_str(), static_cast<remote::file_offset>(size)));
@ -563,10 +559,9 @@ auto remote_fuse_drive::unlink_impl(std::string api_path) -> api_error {
}
#if FUSE_USE_VERSION >= 30
auto remote_fuse_drive::utimens_impl(std::string api_path,
const struct timespec tv[2],
struct fuse_file_info * /*f_info*/)
-> api_error {
auto remote_fuse_drive::utimens_impl(
std::string api_path, const struct timespec tv[2],
struct fuse_file_info * /*f_info*/) -> api_error {
#else
auto remote_fuse_drive::utimens_impl(std::string api_path,
const struct timespec tv[2]) -> api_error {


@ -47,12 +47,7 @@ E_SIMPLE3(remote_winfsp_client_event, debug, true,
// clang-format on
remote_client::remote_client(const app_config &config)
: config_(config),
packet_client_(
config.get_remote_host_name_or_ip(),
config.get_remote_max_connections(), config.get_remote_port(),
config.get_remote_receive_timeout_secs(),
config.get_remote_send_timeout_secs(), config.get_remote_token()) {}
: config_(config), packet_client_(config.get_remote_config()) {}
auto remote_client::winfsp_can_delete(PVOID file_desc, PWSTR file_name)
-> packet::error_type {


@ -302,27 +302,27 @@ auto remote_winfsp_drive::Overwrite(PVOID /*file_node*/, PVOID file_desc,
BOOLEAN replace_attributes,
UINT64 allocation_size, FileInfo *file_info)
-> NTSTATUS {
remote::file_info fi{};
remote::file_info info{};
auto ret = remote_instance_->winfsp_overwrite(
file_desc, attributes, replace_attributes, allocation_size, &fi);
set_file_info(*file_info, fi);
file_desc, attributes, replace_attributes, allocation_size, &info);
set_file_info(*file_info, info);
return ret;
}
void remote_winfsp_drive::populate_file_info(const json &item,
FSP_FSCTL_FILE_INFO &file_info) {
auto di = directory_item::from_json(item);
file_info.FileSize = di.directory ? 0 : di.size;
auto dir_item = item.get<directory_item>();
file_info.FileSize = dir_item.directory ? 0 : dir_item.size;
file_info.AllocationSize =
utils::divide_with_ceiling(file_info.FileSize, WINFSP_ALLOCATION_UNIT) *
WINFSP_ALLOCATION_UNIT;
file_info.ChangeTime = utils::get_changed_time_from_meta(di.meta);
file_info.CreationTime = utils::get_creation_time_from_meta(di.meta);
file_info.FileAttributes = utils::get_attributes_from_meta(di.meta);
file_info.ChangeTime = utils::get_changed_time_from_meta(dir_item.meta);
file_info.CreationTime = utils::get_creation_time_from_meta(dir_item.meta);
file_info.FileAttributes = utils::get_attributes_from_meta(dir_item.meta);
file_info.HardLinks = 0;
file_info.IndexNumber = 0;
file_info.LastAccessTime = utils::get_accessed_time_from_meta(di.meta);
file_info.LastWriteTime = utils::get_written_time_from_meta(di.meta);
file_info.LastAccessTime = utils::get_accessed_time_from_meta(dir_item.meta);
file_info.LastWriteTime = utils::get_written_time_from_meta(dir_item.meta);
file_info.ReparseTag = 0;
file_info.EaSize = 0;
}


@ -619,7 +619,6 @@ auto winfsp_drive::Mounted(PVOID host) -> NTSTATUS {
auto *file_system_host{
reinterpret_cast<FileSystemHost *>(host),
};
polling::instance().start(&config_);
fm_ = std::make_unique<file_manager>(config_, provider_);
server_ = std::make_unique<full_server>(config_, provider_, *fm_);
if (not provider_.is_read_only()) {
@ -642,7 +641,7 @@ auto winfsp_drive::Mounted(PVOID host) -> NTSTATUS {
}
auto mount_location = parse_mount_location(file_system_host->MountPoint());
if (config_.get_enable_remote_mount()) {
if (config_.get_remote_mount().enable) {
remote_server_ = std::make_unique<remote_winfsp::remote_server>(
config_, *this, mount_location);
}
@ -652,6 +651,8 @@ auto winfsp_drive::Mounted(PVOID host) -> NTSTATUS {
utils::error::raise_error(function_name, "failed to set mount state");
}
polling::instance().start(&config_);
event_system::instance().raise<drive_mounted>(mount_location);
} catch (const std::exception &e) {
utils::error::raise_error(function_name, e, "exception occurred");


@ -24,7 +24,8 @@
#include "utils/string.hpp"
namespace repertory {
auto event_level_from_string(std::string level) -> event_level {
auto event_level_from_string(std::string level, event_level default_level)
-> event_level {
level = utils::string::to_lower(level);
if (level == "critical" || level == "event_level::critical") {
return event_level::critical;
@ -50,7 +51,7 @@ auto event_level_from_string(std::string level) -> event_level {
return event_level::trace;
}
return event_level::info;
return default_level;
}
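The parser now returns a caller-supplied fallback instead of silently mapping unknown strings to `info`. A minimal usage sketch (the "not-a-level" string is made up; `event_level::trace` is one of the levels handled above):

```cpp
// Unrecognized strings resolve to whatever default the caller supplies.
auto level = event_level_from_string("not-a-level", event_level::trace);
// level == event_level::trace
```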
auto event_level_to_string(event_level level) -> std::string {


@ -0,0 +1,128 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/cache_size_mgr.hpp"
#include "app_config.hpp"
#include "events/event.hpp"
#include "events/event_system.hpp"
#include "types/startup_exception.hpp"
#include "utils/file_utils.hpp"
namespace repertory {
// clang-format off
E_SIMPLE2(invalid_cache_size, warn, true,
std::uint64_t, cache_size, sz, E_FROM_UINT64,
std::uint64_t, by, by, E_FROM_UINT64
);
E_SIMPLE2(max_cache_size_reached, warn, true,
std::uint64_t, cache_size, sz, E_FROM_UINT64,
std::uint64_t, max_cache_size, max, E_FROM_UINT64
);
// clang-format on
cache_size_mgr cache_size_mgr::instance_{};
// TODO add timeout
auto cache_size_mgr::expand(std::uint64_t size) -> api_error {
if (size == 0U) {
return api_error::success;
}
unique_mutex_lock lock(mtx_);
if (cfg_ == nullptr) {
return api_error::cache_not_initialized;
}
cache_size_ += size;
auto max_cache_size = cfg_->get_max_cache_size_bytes();
auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
while (not stop_requested_ && cache_size_ > max_cache_size &&
cache_dir.count() > 1U) {
event_system::instance().raise<max_cache_size_reached>(cache_size_,
max_cache_size);
notify_.wait(lock);
}
notify_.notify_all();
return api_error::success;
}
void cache_size_mgr::initialize(app_config *cfg) {
if (cfg == nullptr) {
throw startup_exception("app_config must not be null");
}
mutex_lock lock(mtx_);
cfg_ = cfg;
stop_requested_ = false;
auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
if (not cache_dir.create_directory()) {
throw startup_exception(fmt::format("failed to create cache directory|{}",
cache_dir.get_path()));
}
cache_size_ = cache_dir.size(false);
notify_.notify_all();
}
auto cache_size_mgr::shrink(std::uint64_t size) -> api_error {
mutex_lock lock(mtx_);
if (size == 0U) {
notify_.notify_all();
return api_error::success;
}
if (cache_size_ >= size) {
cache_size_ -= size;
} else {
event_system::instance().raise<invalid_cache_size>(cache_size_, size);
cache_size_ = 0U;
}
notify_.notify_all();
return api_error::success;
}
auto cache_size_mgr::size() const -> std::uint64_t {
mutex_lock lock(mtx_);
return cache_size_;
}
void cache_size_mgr::stop() {
if (stop_requested_) {
return;
}
stop_requested_ = true;
mutex_lock lock(mtx_);
notify_.notify_all();
}
} // namespace repertory
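The new `cache_size_mgr` keeps a running total of the bytes held in the cache directory: `expand()` grows the total and blocks while the configured maximum is exceeded (issue #25), and `shrink()` releases space and wakes any blocked writers. A minimal, self-contained sketch of the same wait/notify pattern, not the repertory API itself:

```cpp
#include <condition_variable>
#include <cstdint>
#include <mutex>

// Simplified stand-in for cache_size_mgr: expand() blocks while the accounted
// size exceeds the limit; shrink() releases space and wakes the waiters.
class blocking_cache_accounting {
public:
  explicit blocking_cache_accounting(std::uint64_t max_size)
      : max_size_(max_size) {}

  void expand(std::uint64_t size) {
    std::unique_lock<std::mutex> lock(mtx_);
    current_ += size;
    // Writers wait here until another thread shrinks the accounted size.
    notify_.wait(lock, [this]() { return current_ <= max_size_; });
  }

  void shrink(std::uint64_t size) {
    std::lock_guard<std::mutex> lock(mtx_);
    current_ = (current_ >= size) ? current_ - size : 0U;
    notify_.notify_all();
  }

private:
  std::uint64_t current_{0U};
  std::uint64_t max_size_;
  std::mutex mtx_;
  std::condition_variable notify_;
};
```

The actual implementation also re-checks a stop flag and the number of files in the cache directory inside the wait loop, so shutdown and a cache that only contains the file being written do not block forever.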


@ -0,0 +1,63 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/direct_open_file.hpp"
#include "file_manager/open_file_base.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
namespace repertory {
direct_open_file::direct_open_file(std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider)
: ring_buffer_base(chunk_size, chunk_timeout, fsi, provider,
min_ring_size, true) {}
direct_open_file::~direct_open_file() {
REPERTORY_USES_FUNCTION_NAME();
close();
}
auto direct_open_file::on_check_start() -> bool {
return (get_file_size() == 0U || has_reader_thread());
}
auto direct_open_file::on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset,
data_buffer &data,
std::size_t &bytes_read) -> api_error {
auto &buffer = ring_data_.at(chunk % get_ring_size());
auto begin =
std::next(buffer.begin(), static_cast<std::int64_t>(read_offset));
auto end = std::next(begin, static_cast<std::int64_t>(read_size));
data.insert(data.end(), begin, end);
bytes_read = read_size;
return api_error::success;
}
auto direct_open_file::use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func)
-> api_error {
return func(ring_data_.at(chunk % get_ring_size()));
}
} // namespace repertory
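`direct_open_file` serves reads straight from the provider through a small fixed ring of in-memory chunk buffers instead of a cache file; each chunk index is mapped onto a ring slot with `chunk % get_ring_size()`, and `on_read_chunk()` copies the requested sub-range out of that slot. A rough standalone sketch of that mapping (illustrative names, not the repertory types):

```cpp
#include <cstddef>
#include <iterator>
#include <vector>

using data_buffer = std::vector<unsigned char>;

// Map a chunk index onto a fixed ring of buffers and append the requested
// sub-range of the selected slot to the output, as on_read_chunk() does above.
void read_from_ring(const std::vector<data_buffer> &ring, std::size_t chunk,
                    std::size_t read_offset, std::size_t read_size,
                    data_buffer &out) {
  const auto &slot = ring.at(chunk % ring.size());
  auto begin =
      std::next(slot.begin(), static_cast<std::ptrdiff_t>(read_offset));
  auto end = std::next(begin, static_cast<std::ptrdiff_t>(read_size));
  out.insert(out.end(), begin, end);
}
```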


@ -22,105 +22,42 @@
#include "file_manager/file_manager.hpp"
#include "app_config.hpp"
#include "db/file_mgr_db.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/direct_open_file.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file.hpp"
#include "file_manager/open_file_base.hpp"
#include "file_manager/ring_buffer_open_file.hpp"
#include "file_manager/upload.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/encrypting_reader.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/polling.hpp"
#include "utils/time.hpp"
namespace {
[[nodiscard]] auto create_resume_entry(const repertory::i_open_file &file)
-> json {
return {
{"chunk_size", file.get_chunk_size()},
{"path", file.get_api_path()},
{"read_state",
repertory::utils::string::from_dynamic_bitset(file.get_read_state())},
{"source", file.get_source_path()},
};
}
void restore_resume_entry(const json &resume_entry, std::string &api_path,
std::size_t &chunk_size,
boost::dynamic_bitset<> &read_state,
std::string &source_path) {
api_path = resume_entry["path"].get<std::string>();
chunk_size = resume_entry["chunk_size"].get<std::size_t>();
read_state = repertory::utils::string::to_dynamic_bitset(
resume_entry["read_state"].get<std::string>());
source_path = resume_entry["source"].get<std::string>();
}
const std::string resume_table = "resume";
const std::string upload_table = "upload";
const std::string upload_active_table = "upload_active";
const std::map<std::string, std::string> sql_create_tables{
{
{resume_table},
{
"CREATE TABLE IF NOT EXISTS " + resume_table +
"("
"api_path TEXT PRIMARY KEY ASC, "
"data TEXT"
");",
},
},
{
{upload_table},
{
"CREATE TABLE IF NOT EXISTS " + upload_table +
"("
"api_path TEXT PRIMARY KEY ASC, "
"date_time INTEGER, "
"source_path TEXT"
");",
},
},
{
{upload_active_table},
{
"CREATE TABLE IF NOT EXISTS " + upload_active_table +
"("
"api_path TEXT PRIMARY KEY ASC, "
"source_path TEXT"
");",
},
},
};
} // namespace
namespace repertory {
file_manager::file_manager(app_config &config, i_provider &provider)
: config_(config), provider_(provider) {
db_ = utils::db::sqlite::create_db(
utils::path::combine(config_.get_data_directory(), {"file_manager.db"}),
sql_create_tables);
mgr_db_ = create_file_mgr_db(config);
if (not provider_.is_read_only()) {
E_SUBSCRIBE_EXACT(file_upload_completed,
[this](const file_upload_completed &completed) {
this->upload_completed(completed);
});
if (provider_.is_read_only()) {
return;
}
E_SUBSCRIBE_EXACT(file_upload_completed,
[this](const file_upload_completed &completed) {
this->upload_completed(completed);
});
}
file_manager::~file_manager() {
stop();
db_.reset();
mgr_db_.reset();
E_CONSUMER_RELEASE();
}
@ -136,13 +73,13 @@ void file_manager::close(std::uint64_t handle) {
closeable_file->remove(handle);
}
void file_manager::close_all(const std::string &api_path) {
auto file_manager::close_all(const std::string &api_path) -> bool {
REPERTORY_USES_FUNCTION_NAME();
unique_recur_mutex_lock file_lock(open_file_mtx_);
auto file_iter = open_file_lookup_.find(api_path);
if (file_iter == open_file_lookup_.end()) {
return;
return false;
}
auto closeable_file = file_iter->second;
@ -151,6 +88,8 @@ void file_manager::close_all(const std::string &api_path) {
closeable_file->remove_all();
closeable_file->close();
return closeable_file->get_allocated();
}
void file_manager::close_timed_out_files() {
@ -165,12 +104,12 @@ void file_manager::close_timed_out_files() {
}
return items;
});
for (auto &&closeable_file : closeable_list) {
for (const auto &closeable_file : closeable_list) {
open_file_lookup_.erase(closeable_file->get_api_path());
}
file_lock.unlock();
for (auto &&closeable_file : closeable_list) {
for (auto &closeable_file : closeable_list) {
closeable_file->close();
event_system::instance().raise<item_timeout>(
closeable_file->get_api_path());
@ -203,7 +142,7 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
return false;
}
recur_mutex_lock open_lock(open_file_mtx_);
unique_recur_mutex_lock open_lock(open_file_mtx_);
if (is_processing(api_path)) {
return false;
}
@ -212,8 +151,18 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
return false;
}
filesystem_item fsi{};
auto res = provider_.get_filesystem_item(api_path, false, fsi);
if (res != api_error::success) {
return false;
}
if (fsi.source_path.empty()) {
return false;
}
std::string pinned;
auto res = provider_.get_item_meta(api_path, META_PINNED, pinned);
res = provider_.get_item_meta(api_path, META_PINNED, pinned);
if (res != api_error::success && res != api_error::item_not_found) {
utils::error::raise_api_path_error(std::string{function_name}, api_path,
res, "failed to get pinned status");
@ -224,23 +173,22 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
return false;
}
std::string source_path{};
res = provider_.get_item_meta(api_path, META_SOURCE, source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(std::string{function_name}, api_path,
res, "failed to get source path");
return false;
}
if (source_path.empty()) {
return false;
std::shared_ptr<i_closeable_open_file> closeable_file;
if (open_file_lookup_.contains(api_path)) {
closeable_file = open_file_lookup_.at(api_path);
}
open_file_lookup_.erase(api_path);
open_lock.unlock();
auto removed = utils::file::file{source_path}.remove();
auto allocated = closeable_file ? closeable_file->get_allocated() : true;
closeable_file.reset();
auto removed = remove_source_and_shrink_cache(api_path, fsi.source_path,
fsi.size, allocated);
if (removed) {
event_system::instance().raise<filesystem_item_evicted>(api_path,
source_path);
fsi.source_path);
}
return removed;
@ -271,7 +219,7 @@ auto file_manager::get_open_file_by_handle(std::uint64_t handle) const
-> std::shared_ptr<i_closeable_open_file> {
auto file_iter =
std::find_if(open_file_lookup_.begin(), open_file_lookup_.end(),
[&handle](const auto &item) -> bool {
[&handle](auto &&item) -> bool {
return item.second->has_handle(handle);
});
return (file_iter == open_file_lookup_.end()) ? nullptr : file_iter->second;
@ -287,7 +235,7 @@ auto file_manager::get_open_file_count(const std::string &api_path) const
auto file_manager::get_open_file(std::uint64_t handle, bool write_supported,
std::shared_ptr<i_open_file> &file) -> bool {
recur_mutex_lock open_lock(open_file_mtx_);
unique_recur_mutex_lock open_lock(open_file_mtx_);
auto file_ptr = get_open_file_by_handle(handle);
if (not file_ptr) {
return false;
@ -296,8 +244,8 @@ auto file_manager::get_open_file(std::uint64_t handle, bool write_supported,
if (write_supported && not file_ptr->is_write_supported()) {
auto writeable_file = std::make_shared<open_file>(
utils::encryption::encrypting_reader::get_data_chunk_size(),
config_.get_enable_chunk_download_timeout()
? config_.get_chunk_downloader_timeout_secs()
config_.get_enable_download_timeout()
? config_.get_download_timeout_secs()
: 0U,
file_ptr->get_filesystem_item(), file_ptr->get_open_data(), provider_,
*this);
@ -320,7 +268,7 @@ auto file_manager::get_open_files() const
std::unordered_map<std::string, std::size_t> ret;
recur_mutex_lock open_lock(open_file_mtx_);
for (auto &&item : open_file_lookup_) {
for (const auto &item : open_file_lookup_) {
ret[item.first] = item.second->get_open_file_count();
}
@ -336,32 +284,15 @@ auto file_manager::get_open_handle_count() const -> std::size_t {
});
}
auto file_manager::get_stored_downloads() const -> std::vector<json> {
auto file_manager::get_stored_downloads() const
-> std::vector<i_file_mgr_db::resume_entry> {
REPERTORY_USES_FUNCTION_NAME();
if (provider_.is_read_only()) {
return {};
}
std::vector<json> ret;
auto result = utils::db::sqlite::db_select{*db_, resume_table}.go();
while (result.has_row()) {
try {
std::optional<utils::db::sqlite::db_result::row> row;
if (not result.get_row(row)) {
continue;
}
if (not row.has_value()) {
continue;
}
ret.push_back(row.value().get_column("data").get_value_as_json());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex, "query error");
}
}
return ret;
return mgr_db_->get_resume_list();
}
auto file_manager::handle_file_rename(const std::string &from_api_path,
@ -379,15 +310,10 @@ auto file_manager::handle_file_rename(const std::string &from_api_path,
source_path = upload_lookup_.at(from_api_path)->get_source_path();
}
} else {
auto result = utils::db::sqlite::db_select{*db_, upload_table}
.column("source_path")
.where("api_path")
.equals(from_api_path)
.go();
std::optional<utils::db::sqlite::db_result::row> row;
should_upload = result.get_row(row) && row.has_value();
auto upload = mgr_db_->get_upload(from_api_path);
should_upload = upload.has_value();
if (should_upload && source_path.empty()) {
source_path = row->get_column("source_path").get_value<std::string>();
source_path = upload->source_path;
}
}
@ -427,17 +353,24 @@ auto file_manager::is_processing(const std::string &api_path) const -> bool {
}
upload_lock.unlock();
utils::db::sqlite::db_select query{*db_, upload_table};
if (query.where("api_path").equals(api_path).go().has_row()) {
auto upload = mgr_db_->get_upload(api_path);
if (upload.has_value()) {
return true;
};
recur_mutex_lock open_lock(open_file_mtx_);
unique_recur_mutex_lock open_lock(open_file_mtx_);
auto file_iter = open_file_lookup_.find(api_path);
return (file_iter == open_file_lookup_.end())
? false
: file_iter->second->is_modified() ||
not file_iter->second->is_complete();
if (file_iter == open_file_lookup_.end()) {
return false;
}
auto closeable_file = file_iter->second;
open_lock.unlock();
return closeable_file->is_write_supported()
? closeable_file->is_modified() ||
not closeable_file->is_complete()
: false;
}
auto file_manager::open(const std::string &api_path, bool directory,
@ -447,11 +380,12 @@ auto file_manager::open(const std::string &api_path, bool directory,
return open(api_path, directory, ofd, handle, file, nullptr);
}
auto file_manager::open(const std::string &api_path, bool directory,
const open_file_data &ofd, std::uint64_t &handle,
std::shared_ptr<i_open_file> &file,
std::shared_ptr<i_closeable_open_file> closeable_file)
-> api_error {
auto file_manager::open(
const std::string &api_path, bool directory, const open_file_data &ofd,
std::uint64_t &handle, std::shared_ptr<i_open_file> &file,
std::shared_ptr<i_closeable_open_file> closeable_file) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto create_and_add_handle =
[&](std::shared_ptr<i_closeable_open_file> cur_file) {
handle = get_next_handle();
@ -481,21 +415,99 @@ auto file_manager::open(const std::string &api_path, bool directory,
}
if (not closeable_file) {
closeable_file = std::make_shared<open_file>(
auto buffer_directory{
utils::path::combine(config_.get_data_directory(), {"buffer"}),
};
auto chunk_size{
utils::encryption::encrypting_reader::get_data_chunk_size(),
config_.get_enable_chunk_download_timeout()
? config_.get_chunk_downloader_timeout_secs()
: 0U,
fsi, provider_, *this);
};
auto chunk_timeout = config_.get_enable_download_timeout()
? config_.get_download_timeout_secs()
: 0U;
auto ring_buffer_file_size{
static_cast<std::uint64_t>(config_.get_ring_buffer_file_size()) *
1024UL * 1024UL,
};
auto ring_size{ring_buffer_file_size / chunk_size};
const auto get_download_type = [&](download_type type) -> download_type {
if (directory || fsi.size == 0U || is_processing(api_path)) {
return download_type::default_;
}
if (type == download_type::direct) {
return type;
}
if (type == download_type::default_) {
auto free_space =
utils::file::get_free_drive_space(config_.get_cache_directory());
if (fsi.size < free_space) {
return download_type::default_;
}
}
if (not ring_buffer_open_file::can_handle_file(fsi.size, chunk_size,
ring_size)) {
return download_type::direct;
}
if (not utils::file::directory{buffer_directory}.create_directory()) {
utils::error::raise_error(
function_name, utils::get_last_error_code(),
fmt::format("failed to create buffer directory|sp|{}",
buffer_directory));
return download_type::direct;
}
auto free_space = utils::file::get_free_drive_space(buffer_directory);
if (ring_buffer_file_size < free_space) {
return download_type::ring_buffer;
}
return download_type::direct;
};
auto preferred_type = config_.get_preferred_download_type();
auto type = get_download_type(directory ? download_type::default_
: preferred_type == download_type::default_
? download_type::ring_buffer
: preferred_type);
if (not directory) {
event_system::instance().raise<download_type_selected>(
fsi.api_path, fsi.source_path, type);
}
switch (type) {
case repertory::download_type::direct: {
closeable_file = std::make_shared<direct_open_file>(
chunk_size, chunk_timeout, fsi, provider_);
} break;
case repertory::download_type::ring_buffer: {
closeable_file = std::make_shared<ring_buffer_open_file>(
buffer_directory, chunk_size, chunk_timeout, fsi, provider_,
ring_size);
} break;
default: {
closeable_file = std::make_shared<open_file>(chunk_size, chunk_timeout,
fsi, provider_, *this);
} break;
}
}
open_file_lookup_[api_path] = closeable_file;
create_and_add_handle(closeable_file);
return api_error::success;
}
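`file_manager::open()` now chooses one of three strategies per file: the cache-backed `open_file` (`default_`), a bounded on-disk `ring_buffer_open_file`, or the new in-memory `direct_open_file`, based on the preferred type, free space in the cache and buffer directories, and whether the ring buffer can address the file at all. A simplified, self-contained sketch of that decision order; the parameter names are stand-ins, and the actual code also consults `is_processing()` and creates the buffer directory on demand:

```cpp
#include <cstdint>

enum class download_type { default_, direct, ring_buffer };

// Rough decision order mirroring file_manager::open() above:
// 1. directories and empty files stay on the cache-backed default path;
// 2. an explicit "direct" preference wins immediately;
// 3. a "default_" preference is honored when the file fits in free cache space;
// 4. otherwise use the ring buffer when it can address the file and its
//    backing file fits on disk, falling back to direct streaming.
inline auto choose_download_type(download_type preferred, bool directory,
                                 std::uint64_t file_size,
                                 std::uint64_t free_cache_space,
                                 std::uint64_t free_buffer_space,
                                 std::uint64_t ring_buffer_file_size,
                                 bool ring_can_handle_file) -> download_type {
  if (directory || file_size == 0U) {
    return download_type::default_;
  }
  if (preferred == download_type::direct) {
    return download_type::direct;
  }
  if (preferred == download_type::default_ && file_size < free_cache_space) {
    return download_type::default_;
  }
  if (ring_can_handle_file && ring_buffer_file_size < free_buffer_space) {
    return download_type::ring_buffer;
  }
  return download_type::direct;
}
```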
void file_manager::queue_upload(const i_open_file &file) {
return queue_upload(file.get_api_path(), file.get_source_path(), false);
queue_upload(file.get_api_path(), file.get_source_path(), false);
}
void file_manager::queue_upload(const std::string &api_path,
@ -504,28 +516,22 @@ void file_manager::queue_upload(const std::string &api_path,
return;
}
std::unique_ptr<mutex_lock> lock;
std::unique_ptr<mutex_lock> upload_lock;
if (not no_lock) {
lock = std::make_unique<mutex_lock>(upload_mtx_);
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
}
remove_upload(api_path, true);
auto result =
utils::db::sqlite::db_insert{*db_, upload_table}
.or_replace()
.column_value("api_path", api_path)
.column_value("date_time",
static_cast<std::int64_t>(utils::time::get_time_now()))
.column_value("source_path", source_path)
.go();
if (result.ok()) {
remove_resume(api_path, source_path);
if (mgr_db_->add_upload(i_file_mgr_db::upload_entry{
api_path,
source_path,
})) {
remove_resume(api_path, source_path, true);
event_system::instance().raise<file_upload_queued>(api_path, source_path);
} else {
event_system::instance().raise<file_upload_failed>(
api_path, source_path,
std::to_string(result.get_error()) + '|' + result.get_error_str());
api_path, source_path, "failed to queue upload");
}
if (not no_lock) {
@ -536,40 +542,90 @@ void file_manager::queue_upload(const std::string &api_path,
auto file_manager::remove_file(const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
recur_mutex_lock open_lock(open_file_mtx_);
filesystem_item fsi{};
auto res = provider_.get_filesystem_item(api_path, false, fsi);
if (res != api_error::success) {
return res;
}
close_all(api_path);
auto allocated = close_all(api_path);
unique_mutex_lock upload_lock(upload_mtx_);
remove_upload(api_path, true);
remove_resume(api_path, fsi.source_path, true);
upload_notify_.notify_all();
upload_lock.unlock();
recur_mutex_lock open_lock(open_file_mtx_);
res = provider_.remove_file(api_path);
if (res != api_error::success) {
return res;
}
if (not utils::file::file{fsi.source_path}.remove()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
utils::get_last_error_code(), "failed to delete source");
}
remove_source_and_shrink_cache(api_path, fsi.source_path, fsi.size,
allocated);
return api_error::success;
}
void file_manager::remove_resume(const std::string &api_path,
const std::string &source_path) {
auto result = utils::db::sqlite::db_delete{*db_, resume_table}
.where("api_path")
.equals(api_path)
.go();
if (result.ok()) {
remove_resume(api_path, source_path, false);
}
void file_manager::remove_resume(const std::string &api_path,
const std::string &source_path, bool no_lock) {
if (provider_.is_read_only()) {
return;
}
std::unique_ptr<mutex_lock> upload_lock;
if (not no_lock) {
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
}
if (mgr_db_->remove_resume(api_path)) {
event_system::instance().raise<download_resume_removed>(api_path,
source_path);
}
if (not no_lock) {
upload_notify_.notify_all();
}
}
auto file_manager::remove_source_and_shrink_cache(
const std::string &api_path, const std::string &source_path,
std::uint64_t file_size, bool allocated) -> bool {
REPERTORY_USES_FUNCTION_NAME();
auto file = utils::file::file{source_path};
auto source_size = file.exists() ? file.size().value_or(0U) : 0U;
if (not file.remove()) {
utils::error::raise_api_path_error(function_name, api_path, source_path,
utils::get_last_error_code(),
"failed to delete source");
return false;
}
if (not allocated || source_size == 0U) {
auto res = cache_size_mgr::instance().shrink(0U);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, source_path,
res, "failed to shrink cache");
}
return true;
}
auto res = cache_size_mgr::instance().shrink(file_size);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, source_path,
res, "failed to shrink cache");
}
return true;
}
void file_manager::remove_upload(const std::string &api_path) {
@ -583,29 +639,21 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock) {
return;
}
std::unique_ptr<mutex_lock> lock;
std::unique_ptr<mutex_lock> upload_lock;
if (not no_lock) {
lock = std::make_unique<mutex_lock>(upload_mtx_);
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
}
auto result = utils::db::sqlite::db_delete{*db_, upload_table}
.where("api_path")
.equals(api_path)
.go();
if (not result.ok()) {
if (not mgr_db_->remove_upload(api_path)) {
utils::error::raise_api_path_error(
function_name, api_path, api_error::error, "failed to remove upload");
}
auto removed = mgr_db_->remove_upload_active(api_path);
if (not removed) {
utils::error::raise_api_path_error(function_name, api_path,
api_error::error,
"failed to remove from upload table");
}
result = utils::db::sqlite::db_delete{*db_, upload_active_table}
.where("api_path")
.equals(api_path)
.go();
if (not result.ok()) {
utils::error::raise_api_path_error(
function_name, api_path, api_error::error,
"failed to remove from upload_active table");
"failed to remove active upload");
}
if (upload_lookup_.find(api_path) != upload_lookup_.end()) {
@ -613,7 +661,7 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock) {
upload_lookup_.erase(api_path);
}
if (result.ok()) {
if (removed) {
event_system::instance().raise<file_upload_removed>(api_path);
}
@ -699,8 +747,8 @@ auto file_manager::rename_directory(const std::string &from_api_path,
}
auto file_manager::rename_file(const std::string &from_api_path,
const std::string &to_api_path, bool overwrite)
-> api_error {
const std::string &to_api_path,
bool overwrite) -> api_error {
if (not provider_.is_rename_supported()) {
return api_error::not_implemented;
}
@ -780,76 +828,36 @@ void file_manager::start() {
stop_requested_ = false;
polling::instance().set_callback(
{"timed_out_close", polling::frequency::second,
[this]() { this->close_timed_out_files(); }});
polling::instance().set_callback({
"timed_out_close",
polling::frequency::second,
[this](auto && /* stop_requested */) { this->close_timed_out_files(); },
});
if (provider_.is_read_only()) {
stop_requested_ = false;
return;
}
struct active_item final {
std::string api_path;
std::string source_path;
};
for (const auto &entry : mgr_db_->get_upload_active_list()) {
queue_upload(entry.api_path, entry.source_path, false);
}
std::vector<active_item> active_items{};
auto result = utils::db::sqlite::db_select{*db_, upload_active_table}.go();
while (result.has_row()) {
for (const auto &entry : mgr_db_->get_resume_list()) {
try {
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
active_items.emplace_back(active_item{
row->get_column("api_path").get_value<std::string>(),
row->get_column("source_path").get_value<std::string>(),
});
}
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex, "query error");
}
}
for (auto &&active_item : active_items) {
queue_upload(active_item.api_path, active_item.source_path, false);
}
active_items.clear();
result = utils::db::sqlite::db_select{*db_, resume_table}.go();
if (not result.ok()) {
return;
}
while (result.has_row()) {
try {
std::optional<utils::db::sqlite::db_result::row> row;
if (not(result.get_row(row) && row.has_value())) {
return;
}
auto resume_entry = row.value().get_column("data").get_value_as_json();
std::string api_path;
std::string source_path;
std::size_t chunk_size{};
boost::dynamic_bitset<> read_state;
restore_resume_entry(resume_entry, api_path, chunk_size, read_state,
source_path);
filesystem_item fsi{};
auto res = provider_.get_filesystem_item(api_path, false, fsi);
auto res = provider_.get_filesystem_item(entry.api_path, false, fsi);
if (res != api_error::success) {
event_system::instance().raise<download_restore_failed>(
api_path, source_path,
entry.api_path, entry.source_path,
"failed to get filesystem item|" + api_error_to_string(res));
continue;
}
if (source_path != fsi.source_path) {
if (entry.source_path != fsi.source_path) {
event_system::instance().raise<download_restore_failed>(
fsi.api_path, fsi.source_path,
"source path mismatch|expected|" + source_path + "|actual|" +
"source path mismatch|expected|" + entry.source_path + "|actual|" +
fsi.source_path);
continue;
}
@ -872,13 +880,13 @@ void file_manager::start() {
continue;
}
auto closeable_file = std::make_shared<open_file>(
chunk_size,
config_.get_enable_chunk_download_timeout()
? config_.get_chunk_downloader_timeout_secs()
: 0U,
fsi, provider_, read_state, *this);
open_file_lookup_[api_path] = closeable_file;
auto closeable_file =
std::make_shared<open_file>(entry.chunk_size,
config_.get_enable_download_timeout()
? config_.get_download_timeout_secs()
: 0U,
fsi, provider_, entry.read_state, *this);
open_file_lookup_[entry.api_path] = closeable_file;
event_system::instance().raise<download_restored>(fsi.api_path,
fsi.source_path);
} catch (const std::exception &ex) {
@ -896,9 +904,11 @@ void file_manager::stop() {
}
event_system::instance().raise<service_shutdown_begin>("file_manager");
polling::instance().remove_callback("timed_out_close");
stop_requested_ = true;
polling::instance().remove_callback("timed_out_close");
unique_mutex_lock upload_lock(upload_mtx_);
upload_notify_.notify_all();
upload_lock.unlock();
@ -910,7 +920,7 @@ void file_manager::stop() {
open_file_lookup_.clear();
upload_lock.lock();
for (auto &&item : upload_lookup_) {
for (auto &item : upload_lookup_) {
item.second->stop();
}
upload_notify_.notify_all();
@ -935,21 +945,19 @@ void file_manager::store_resume(const i_open_file &file) {
return;
}
auto result = utils::db::sqlite::db_insert{*db_, resume_table}
.or_replace()
.column_value("api_path", file.get_api_path())
.column_value("data", create_resume_entry(file).dump())
.go();
if (result.ok()) {
if (mgr_db_->add_resume(i_file_mgr_db::resume_entry{
file.get_api_path(),
file.get_chunk_size(),
file.get_read_state(),
file.get_source_path(),
})) {
event_system::instance().raise<download_resume_added>(
file.get_api_path(), file.get_source_path());
return;
}
event_system::instance().raise<download_resume_add_failed>(
file.get_api_path(), file.get_source_path(),
"failed to insert|" + std::to_string(result.get_error()) + '|' +
result.get_error_str());
file.get_api_path(), file.get_source_path(), "failed to store resume");
}
void file_manager::swap_renamed_items(std::string from_api_path,
@ -958,26 +966,23 @@ void file_manager::swap_renamed_items(std::string from_api_path,
auto file_iter = open_file_lookup_.find(from_api_path);
if (file_iter != open_file_lookup_.end()) {
auto ptr = std::move(open_file_lookup_[from_api_path]);
auto closeable_file = std::move(open_file_lookup_[from_api_path]);
open_file_lookup_.erase(from_api_path);
ptr->set_api_path(to_api_path);
open_file_lookup_[to_api_path] = std::move(ptr);
closeable_file->set_api_path(to_api_path);
open_file_lookup_[to_api_path] = std::move(closeable_file);
}
if (directory) {
return;
}
auto result = utils::db::sqlite::db_update{*db_, resume_table}
.column_value("api_path", to_api_path)
.where("api_path")
.equals(from_api_path)
.go();
if (not result.ok()) {
utils::error::raise_api_path_error(function_name, to_api_path,
api_error::error,
"failed to update resume table");
if (mgr_db_->rename_resume(from_api_path, to_api_path)) {
return;
}
utils::error::raise_api_path_error(function_name, to_api_path,
api_error::error,
"failed to update resume table");
}
void file_manager::upload_completed(const file_upload_completed &evt) {
@ -988,11 +993,8 @@ void file_manager::upload_completed(const file_upload_completed &evt) {
if (not utils::string::to_bool(evt.get_cancelled().get<std::string>())) {
auto err = api_error_from_string(evt.get_result().get<std::string>());
if (err == api_error::success) {
auto result = utils::db::sqlite::db_delete{*db_, upload_active_table}
.where("api_path")
.equals(evt.get_api_path().get<std::string>())
.go();
if (not result.ok()) {
if (not mgr_db_->remove_upload_active(
evt.get_api_path().get<std::string>())) {
utils::error::raise_api_path_error(
function_name, evt.get_api_path().get<std::string>(),
evt.get_source().get<std::string>(),
@ -1033,25 +1035,17 @@ void file_manager::upload_handler() {
}
if (upload_lookup_.size() < config_.get_max_upload_count()) {
auto result = utils::db::sqlite::db_select{*db_, upload_table}
.order_by("api_path", true)
.limit(1)
.go();
try {
std::optional<utils::db::sqlite::db_result::row> row;
if (result.get_row(row) && row.has_value()) {
auto api_path = row->get_column("api_path").get_value<std::string>();
auto source_path =
row->get_column("source_path").get_value<std::string>();
auto entry = mgr_db_->get_next_upload();
if (entry.has_value()) {
filesystem_item fsi{};
auto res = provider_.get_filesystem_item(api_path, false, fsi);
auto res = provider_.get_filesystem_item(entry->api_path, false, fsi);
switch (res) {
case api_error::item_not_found: {
should_wait = false;
event_system::instance().raise<file_upload_not_found>(api_path,
source_path);
remove_upload(api_path, true);
event_system::instance().raise<file_upload_not_found>(
entry->api_path, entry->source_path);
remove_upload(entry->api_path, true);
} break;
case api_error::success: {
@ -1059,28 +1053,23 @@ void file_manager::upload_handler() {
upload_lookup_[fsi.api_path] =
std::make_unique<upload>(fsi, provider_);
auto del_res = utils::db::sqlite::db_delete{*db_, upload_table}
.where("api_path")
.equals(api_path)
.go();
if (del_res.ok()) {
auto ins_res =
utils::db::sqlite::db_insert{*db_, upload_active_table}
.column_value("api_path", api_path)
.column_value("source_path", source_path)
.go();
if (not ins_res.ok()) {
if (mgr_db_->remove_upload(entry->api_path)) {
if (not mgr_db_->add_upload_active(
i_file_mgr_db::upload_active_entry{
entry->api_path,
entry->source_path,
})) {
utils::error::raise_api_path_error(
function_name, api_path, source_path,
function_name, entry->api_path, entry->source_path,
"failed to add to upload_active table");
}
}
} break;
default: {
event_system::instance().raise<file_upload_retry>(api_path,
source_path, res);
queue_upload(api_path, source_path, true);
event_system::instance().raise<file_upload_retry>(
entry->api_path, entry->source_path, res);
queue_upload(entry->api_path, entry->source_path, true);
} break;
}
}
@ -1096,18 +1085,4 @@ void file_manager::upload_handler() {
upload_notify_.notify_all();
}
}
void file_manager::update_used_space(std::uint64_t &used_space) const {
recur_mutex_lock open_lock(open_file_mtx_);
for (auto &&item : open_file_lookup_) {
std::uint64_t file_size{};
auto res = provider_.get_file_size(item.second->get_api_path(), file_size);
if ((res == api_error::success) &&
(file_size != item.second->get_file_size()) &&
(used_space >= file_size)) {
used_space -= file_size;
used_space += item.second->get_file_size();
}
}
}
} // namespace repertory


@ -21,18 +21,17 @@
*/
#include "file_manager/open_file.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/events.hpp"
#include "file_manager/file_manager.hpp"
#include "file_manager/i_upload_manager.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/time.hpp"
#include "utils/utils.hpp"
namespace repertory {
open_file::open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
@ -61,75 +60,246 @@ open_file::open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
i_provider &provider,
std::optional<boost::dynamic_bitset<>> read_state,
i_upload_manager &mgr)
: open_file_base(chunk_size, chunk_timeout, fsi, open_data, provider),
: open_file_base(chunk_size, chunk_timeout, fsi, open_data, provider,
false),
mgr_(mgr) {
if (fsi_.directory && read_state.has_value()) {
throw startup_exception("cannot resume a directory|" + fsi.api_path);
REPERTORY_USES_FUNCTION_NAME();
if (fsi.directory) {
if (read_state.has_value()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
fmt::format("cannot resume a directory|sp|", fsi.api_path));
}
return;
}
if (not fsi.directory) {
nf_ = utils::file::file::open_or_create_file(fsi.source_path,
provider_.is_read_only());
set_api_error(*nf_ ? api_error::success : api_error::os_error);
if (get_api_error() == api_error::success) {
if (read_state.has_value()) {
read_state_ = read_state.value();
set_modified();
} else if (fsi_.size > 0U) {
read_state_.resize(static_cast<std::size_t>(utils::divide_with_ceiling(
fsi_.size, chunk_size)),
false);
nf_ = utils::file::file::open_or_create_file(fsi.source_path,
get_provider().is_read_only());
set_api_error(*nf_ ? api_error::success : api_error::os_error);
if (get_api_error() != api_error::success) {
return;
}
auto file_size = nf_->size();
if (provider_.is_read_only() || file_size == fsi.size) {
read_state_.set(0U, read_state_.size(), true);
} else if (not nf_->truncate(fsi.size)) {
set_api_error(api_error::os_error);
}
}
if (read_state.has_value()) {
read_state_ = read_state.value();
set_modified();
allocated = true;
return;
}
if (get_api_error() != api_error::success && *nf_) {
nf_->close();
}
}
if (fsi.size == 0U) {
return;
}
read_state_.resize(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size)),
false);
auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
utils::get_last_error_code(), "failed to get file size");
set_api_error(api_error::os_error);
return;
}
if (get_provider().is_read_only() || file_size.value() == fsi.size) {
read_state_.set(0U, read_state_.size(), true);
allocated = true;
}
if (get_api_error() != api_error::success && *nf_) {
nf_->close();
}
}
open_file::~open_file() { close(); }
auto open_file::adjust_cache_size(std::uint64_t file_size,
bool shrink) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
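// Reconciles tracked cache usage with a change in file size; when 'shrink' is
// true the adjustment direction is reversed (used by check_start when the
// on-disk cache file does not match the expected size).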
if (file_size == get_file_size()) {
return api_error::success;
}
if (file_size > get_file_size()) {
auto size = file_size - get_file_size();
auto res = shrink ? cache_size_mgr::instance().shrink(size)
: cache_size_mgr::instance().expand(size);
if (res == api_error::success) {
return res;
}
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(), res,
fmt::format("failed to {} cache|size|{}",
(shrink ? "shrink" : "expand"), size));
return set_api_error(res);
}
auto size = get_file_size() - file_size;
auto res = shrink ? cache_size_mgr::instance().expand(size)
: cache_size_mgr::instance().shrink(size);
if (res == api_error::success) {
return res;
}
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(), res,
fmt::format("failed to {} cache|size|{}", (shrink ? "expand" : "shrink"),
size));
return set_api_error(res);
}
auto open_file::check_start() -> api_error {
REPERTORY_USES_FUNCTION_NAME();
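// Ensures the local cache file matches the expected size before the first
// read or write: reconciles tracked cache usage, truncates the file if
// needed, and marks the file as allocated.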
unique_recur_mutex_lock file_lock(get_mutex());
if (allocated) {
return api_error::success;
}
auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(),
utils::get_last_error_code(), "failed to get file size");
return set_api_error(api_error::os_error);
}
if (file_size.value() == get_file_size()) {
allocated = true;
return api_error::success;
}
file_lock.unlock();
auto res = adjust_cache_size(file_size.value(), true);
if (res != api_error::success) {
return res;
}
file_lock.lock();
if (not nf_->truncate(get_file_size())) {
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(),
utils::get_last_error_code(),
fmt::format("failed to truncate file|size|{}", get_file_size()));
return set_api_error(res);
}
allocated = true;
return api_error::success;
}
auto open_file::close() -> bool {
REPERTORY_USES_FUNCTION_NAME();
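// Stops the background reader and finalizes the file: a modified, fully
// downloaded file is queued for upload; a modified, incomplete download
// stores resume data; on failure the partial cache file is removed and a
// fresh source path is assigned.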
if (is_directory() || stop_requested_) {
return false;
}
stop_requested_ = true;
notify_io();
if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}
if (not open_file_base::close()) {
return false;
}
auto read_state = get_read_state();
auto err = get_api_error();
if (err == api_error::success || err == api_error::download_incomplete ||
err == api_error::download_stopped) {
if (is_modified() && not read_state.all()) {
set_api_error(api_error::download_incomplete);
} else if (not is_modified() && (get_file_size() > 0U) &&
not read_state.all()) {
set_api_error(api_error::download_stopped);
}
err = get_api_error();
}
nf_->close();
if (is_modified()) {
if (err == api_error::success) {
mgr_.queue_upload(*this);
return true;
}
if (err == api_error::download_incomplete) {
mgr_.store_resume(*this);
return true;
}
}
if (err != api_error::success || read_state.all()) {
mgr_.remove_resume(get_api_path(), get_source_path());
}
if (err == api_error::success) {
return true;
}
file_manager::remove_source_and_shrink_cache(
get_api_path(), get_source_path(), get_file_size(), allocated);
auto parent = utils::path::get_parent_path(get_source_path());
set_source_path(utils::path::combine(parent, {utils::create_uuid_string()}));
auto res = get_provider().set_item_meta(get_api_path(), META_SOURCE,
get_source_path());
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
get_source_path(), res,
"failed to set new source path");
}
return true;
}
void open_file::download_chunk(std::size_t chunk, bool skip_active,
bool should_reset) {
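// Downloads a single chunk into the local cache file if it is not already
// present; if the chunk is already being downloaded, waits for that download
// to finish unless skip_active is set.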
if (should_reset) {
reset_timeout();
}
unique_recur_mutex_lock download_lock(file_mtx_);
if ((get_api_error() == api_error::success) && (chunk < read_state_.size()) &&
not read_state_[chunk]) {
if (active_downloads_.find(chunk) != active_downloads_.end()) {
if (not skip_active) {
auto active_download = active_downloads_.at(chunk);
download_lock.unlock();
active_download->wait();
unique_recur_mutex_lock rw_lock(rw_mtx_);
auto read_state = get_read_state();
if ((get_api_error() == api_error::success) && (chunk < read_state.size()) &&
not read_state[chunk]) {
if (get_active_downloads().find(chunk) != get_active_downloads().end()) {
if (skip_active) {
return;
}
auto active_download = get_active_downloads().at(chunk);
rw_lock.unlock();
active_download->wait();
return;
}
auto data_offset = chunk * chunk_size_;
auto data_size =
(chunk == read_state_.size() - 1U) ? last_chunk_size_ : chunk_size_;
if (active_downloads_.empty() && (read_state_.count() == 0U)) {
event_system::instance().raise<download_begin>(fsi_.api_path,
fsi_.source_path);
auto data_offset = chunk * get_chunk_size();
auto data_size = (chunk == read_state.size() - 1U) ? get_last_chunk_size()
: get_chunk_size();
if (get_active_downloads().empty() && (read_state.count() == 0U)) {
event_system::instance().raise<download_begin>(get_api_path(),
get_source_path());
}
event_system::instance().raise<download_chunk_begin>(
fsi_.api_path, fsi_.source_path, chunk, read_state_.size(),
read_state_.count());
active_downloads_[chunk] = std::make_shared<download>();
download_lock.unlock();
get_active_downloads()[chunk] = std::make_shared<download>();
rw_lock.unlock();
if (should_reset) {
reset_timeout();
@ -138,28 +308,28 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
std::async(std::launch::async, [this, chunk, data_size, data_offset,
should_reset]() {
const auto notify_complete = [this, chunk, should_reset]() {
unique_recur_mutex_lock file_lock(file_mtx_);
auto active_download = active_downloads_.at(chunk);
active_downloads_.erase(chunk);
event_system::instance().raise<download_chunk_end>(
fsi_.api_path, fsi_.source_path, chunk, read_state_.size(),
read_state_.count(), get_api_error());
auto state = get_read_state();
unique_recur_mutex_lock lock(rw_mtx_);
auto active_download = get_active_downloads().at(chunk);
get_active_downloads().erase(chunk);
if (get_api_error() == api_error::success) {
auto progress = (static_cast<double>(read_state_.count()) /
static_cast<double>(read_state_.size()) * 100.0);
auto progress = (static_cast<double>(state.count()) /
static_cast<double>(state.size())) *
100.0;
event_system::instance().raise<download_progress>(
fsi_.api_path, fsi_.source_path, progress);
if (read_state_.all() && not notified_) {
get_api_path(), get_source_path(), progress);
if (state.all() && not notified_) {
notified_ = true;
event_system::instance().raise<download_end>(
fsi_.api_path, fsi_.source_path, get_api_error());
get_api_path(), get_source_path(), get_api_error());
}
} else if (not notified_) {
notified_ = true;
event_system::instance().raise<download_end>(
fsi_.api_path, fsi_.source_path, get_api_error());
get_api_path(), get_source_path(), get_api_error());
}
file_lock.unlock();
lock.unlock();
active_download->notify(get_api_error());
@ -168,9 +338,9 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
}
};
data_buffer data;
auto res = provider_.read_file_bytes(get_api_path(), data_size,
data_offset, data, stop_requested_);
data_buffer buffer;
auto res = get_provider().read_file_bytes(
get_api_path(), data_size, data_offset, buffer, stop_requested_);
if (res != api_error::success) {
set_api_error(res);
notify_complete();
@ -183,7 +353,7 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
res = do_io([&]() -> api_error {
std::size_t bytes_written{};
if (not nf_->write(data, data_offset, &bytes_written)) {
if (not nf_->write(buffer, data_offset, &bytes_written)) {
return api_error::os_error;
}
@ -198,48 +368,50 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
return;
}
unique_recur_mutex_lock file_lock(file_mtx_);
read_state_.set(chunk);
file_lock.unlock();
set_read_state(chunk);
notify_complete();
}).wait();
}
}
void open_file::download_range(std::size_t start_chunk, std::size_t end_chunk,
void open_file::download_range(std::size_t begin_chunk, std::size_t end_chunk,
bool should_reset) {
for (std::size_t chunk = start_chunk; chunk <= end_chunk; ++chunk) {
for (std::size_t chunk = begin_chunk;
(get_api_error() == api_error::success) && (chunk <= end_chunk);
++chunk) {
download_chunk(chunk, false, should_reset);
if (get_api_error() != api_error::success) {
return;
}
}
}
auto open_file::get_allocated() const -> bool {
recur_mutex_lock file_lock(get_mutex());
return allocated;
}
auto open_file::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(file_mtx_);
recur_mutex_lock file_lock(get_mutex());
return read_state_;
}
auto open_file::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return read_state_[chunk];
return get_read_state()[chunk];
}
auto open_file::is_complete() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return read_state_.all();
}
auto open_file::is_complete() const -> bool { return get_read_state().all(); }
auto open_file::native_operation(
i_open_file::native_operation_callback callback) -> api_error {
unique_recur_mutex_lock file_lock(file_mtx_);
if (stop_requested_) {
return api_error::download_stopped;
return set_api_error(api_error::download_stopped);
}
file_lock.unlock();
auto res = check_start();
if (res != api_error::success) {
return res;
}
unique_recur_mutex_lock rw_lock(rw_mtx_);
return do_io([&]() -> api_error { return callback(nf_->get_handle()); });
}
@ -248,38 +420,48 @@ auto open_file::native_operation(
i_open_file::native_operation_callback callback) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (fsi_.directory) {
return api_error::invalid_operation;
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}
unique_recur_mutex_lock file_lock(file_mtx_);
if (stop_requested_) {
return api_error::download_stopped;
return set_api_error(api_error::download_stopped);
}
auto res = check_start();
if (res != api_error::success) {
return res;
}
res = adjust_cache_size(new_file_size, false);
if (res != api_error::success) {
return res;
}
file_lock.unlock();
auto is_empty_file = new_file_size == 0U;
auto last_chunk = is_empty_file
? std::size_t(0U)
: static_cast<std::size_t>(utils::divide_with_ceiling(
new_file_size, chunk_size_)) -
new_file_size, get_chunk_size())) -
1U;
file_lock.lock();
if (not is_empty_file && (last_chunk < read_state_.size())) {
file_lock.unlock();
update_background_reader(0U);
unique_recur_mutex_lock rw_lock(rw_mtx_);
auto read_state = get_read_state();
if (not is_empty_file && (last_chunk < read_state.size())) {
rw_lock.unlock();
update_reader(0U);
download_chunk(last_chunk, false, true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
file_lock.lock();
rw_lock.lock();
}
read_state = get_read_state();
auto original_file_size = get_file_size();
auto res = do_io([&]() -> api_error { return callback(nf_->get_handle()); });
res = do_io([&]() -> api_error { return callback(nf_->get_handle()); });
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
utils::get_last_error_code(),
@ -288,59 +470,73 @@ auto open_file::native_operation(
}
{
auto file_size = nf_->size().value_or(0U);
if (file_size != new_file_size) {
auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), api_error::file_size_mismatch,
"allocated file size mismatch|expected|" +
std::to_string(new_file_size) + "|actual|" +
std::to_string(file_size));
fmt::format("failed to get file size|error|{}",
utils::get_last_error_code()));
return set_api_error(api_error::error);
}
if (file_size.value() != new_file_size) {
utils::error::raise_api_path_error(
function_name, get_api_path(), api_error::file_size_mismatch,
fmt::format("file size mismatch|expected|{}|actual|{}", new_file_size,
file_size.value()));
return set_api_error(api_error::error);
}
}
if (is_empty_file || (read_state_.size() != (last_chunk + 1U))) {
auto old_size = read_state_.size();
read_state_.resize(is_empty_file ? 0U : last_chunk + 1U);
if (is_empty_file || (read_state.size() != (last_chunk + 1U))) {
auto old_size = read_state.size();
read_state.resize(is_empty_file ? 0U : last_chunk + 1U);
if (not is_empty_file) {
for (std::size_t chunk = old_size; chunk <= last_chunk; ++chunk) {
read_state_.set(chunk);
read_state.set(chunk);
}
}
set_read_state(read_state);
last_chunk_size_ = static_cast<std::size_t>(
new_file_size <= chunk_size_ ? new_file_size
: (new_file_size % chunk_size_) == 0U ? chunk_size_
: new_file_size % chunk_size_);
set_last_chunk_size(static_cast<std::size_t>(
new_file_size <= get_chunk_size() ? new_file_size
: (new_file_size % get_chunk_size()) == 0U
? get_chunk_size()
: new_file_size % get_chunk_size()));
}
if (original_file_size != new_file_size) {
set_modified();
if (original_file_size == new_file_size) {
return res;
}
set_modified();
fsi_.size = new_file_size;
auto now = std::to_string(utils::time::get_time_now());
res = provider_.set_item_meta(
fsi_.api_path, {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_SIZE, std::to_string(new_file_size)},
{META_WRITTEN, now},
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
return set_api_error(res);
}
set_file_size(new_file_size);
auto now = std::to_string(utils::time::get_time_now());
res = get_provider().set_item_meta(
get_api_path(), {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_SIZE, std::to_string(new_file_size)},
{META_WRITTEN, now},
});
if (res == api_error::success) {
return res;
}
return res;
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
return set_api_error(res);
}
auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error {
if (fsi_.directory) {
return api_error::invalid_operation;
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}
if (stop_requested_) {
return set_api_error(api_error::download_stopped);
}
read_size =
@ -349,12 +545,17 @@ auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
return api_error::success;
}
auto res = check_start();
if (res != api_error::success) {
return res;
}
const auto read_from_source = [this, &data, &read_offset,
&read_size]() -> api_error {
return do_io([this, &data, &read_offset, &read_size]() -> api_error {
if (provider_.is_read_only()) {
return provider_.read_file_bytes(fsi_.api_path, read_size, read_offset,
data, stop_requested_);
if (get_provider().is_read_only()) {
return get_provider().read_file_bytes(
get_api_path(), read_size, read_offset, data, stop_requested_);
}
data.resize(read_size);
@ -365,49 +566,48 @@ auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
});
};
unique_recur_mutex_lock file_lock(file_mtx_);
if (read_state_.all()) {
if (get_read_state().all()) {
reset_timeout();
return read_from_source();
}
file_lock.unlock();
auto start_chunk = static_cast<std::size_t>(read_offset / chunk_size_);
auto begin_chunk = static_cast<std::size_t>(read_offset / get_chunk_size());
auto end_chunk =
static_cast<std::size_t>((read_size + read_offset) / chunk_size_);
static_cast<std::size_t>((read_size + read_offset) / get_chunk_size());
update_background_reader(start_chunk);
update_reader(begin_chunk);
download_range(start_chunk, end_chunk, true);
download_range(begin_chunk, end_chunk, true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
file_lock.lock();
unique_recur_mutex_lock rw_lock(rw_mtx_);
return get_api_error() == api_error::success ? read_from_source()
: get_api_error();
}
void open_file::remove(std::uint64_t handle) {
recur_mutex_lock file_lock(file_mtx_);
open_file_base::remove(handle);
if (modified_ && read_state_.all() &&
recur_mutex_lock rw_lock(rw_mtx_);
if (is_modified() && get_read_state().all() &&
(get_api_error() == api_error::success)) {
mgr_.queue_upload(*this);
modified_ = false;
open_file_base::set_modified(false);
}
if (removed_ && (get_open_file_count() == 0U)) {
removed_ = false;
if (is_removed() && (get_open_file_count() == 0U)) {
open_file_base::set_removed(false);
}
}
void open_file::remove_all() {
recur_mutex_lock file_lock(file_mtx_);
open_file_base::remove_all();
modified_ = false;
removed_ = true;
recur_mutex_lock rw_lock(rw_mtx_);
open_file_base::set_modified(false);
open_file_base::set_removed(true);
mgr_.remove_upload(get_api_path());
@ -415,8 +615,12 @@ void open_file::remove_all() {
}
auto open_file::resize(std::uint64_t new_file_size) -> api_error {
if (fsi_.directory) {
return api_error::invalid_operation;
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}
if (new_file_size == get_file_size()) {
return api_error::success;
}
return native_operation(
@ -426,123 +630,62 @@ auto open_file::resize(std::uint64_t new_file_size) -> api_error {
});
}
auto open_file::close() -> bool {
REPERTORY_USES_FUNCTION_NAME();
if (fsi_.directory || stop_requested_) {
return false;
}
stop_requested_ = true;
unique_mutex_lock reader_lock(io_thread_mtx_);
io_thread_notify_.notify_all();
reader_lock.unlock();
if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}
if (not open_file_base::close()) {
return false;
}
auto err = get_api_error();
if (err == api_error::success || err == api_error::download_incomplete ||
err == api_error::download_stopped) {
if (modified_ && not read_state_.all()) {
set_api_error(api_error::download_incomplete);
} else if (not modified_ && (fsi_.size > 0U) && not read_state_.all()) {
set_api_error(api_error::download_stopped);
}
err = get_api_error();
}
nf_->close();
if (modified_) {
if (err == api_error::success) {
mgr_.queue_upload(*this);
return true;
}
if (err == api_error::download_incomplete) {
mgr_.store_resume(*this);
return true;
}
}
if (err == api_error::success) {
return true;
}
mgr_.remove_resume(get_api_path(), get_source_path());
if (not utils::file::file(fsi_.source_path).remove()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), fsi_.source_path,
utils::get_last_error_code(), "failed to delete file");
}
auto parent = utils::path::get_parent_path(fsi_.source_path);
fsi_.source_path =
utils::path::combine(parent, {utils::create_uuid_string()});
auto res =
provider_.set_item_meta(fsi_.api_path, META_SOURCE, fsi_.source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
fsi_.source_path, res,
"failed to set file meta");
}
return true;
}
void open_file::set_modified() {
if (not modified_) {
modified_ = true;
if (not is_modified()) {
open_file_base::set_modified(true);
mgr_.store_resume(*this);
}
if (not removed_) {
removed_ = true;
if (not is_removed()) {
open_file_base::set_removed(true);
mgr_.remove_upload(get_api_path());
}
}
void open_file::update_background_reader(std::size_t read_chunk) {
recur_mutex_lock reader_lock(file_mtx_);
read_chunk_ = read_chunk;
void open_file::set_read_state(std::size_t chunk) {
recur_mutex_lock file_lock(get_mutex());
read_state_.set(chunk);
}
if (not reader_thread_ && not stop_requested_) {
reader_thread_ = std::make_unique<std::thread>([this]() {
std::size_t next_chunk{};
while (not stop_requested_) {
unique_recur_mutex_lock file_lock(file_mtx_);
if ((fsi_.size == 0U) || read_state_.all()) {
file_lock.unlock();
void open_file::set_read_state(boost::dynamic_bitset<> read_state) {
recur_mutex_lock file_lock(get_mutex());
read_state_ = std::move(read_state);
}
unique_mutex_lock io_lock(io_thread_mtx_);
if (not stop_requested_ && io_thread_queue_.empty()) {
io_thread_notify_.wait(io_lock);
}
io_thread_notify_.notify_all();
io_lock.unlock();
} else {
do {
next_chunk = read_chunk_ =
((read_chunk_ + 1U) >= read_state_.size()) ? 0U
: read_chunk_ + 1U;
} while ((next_chunk != 0U) && (active_downloads_.find(next_chunk) !=
active_downloads_.end()));
void open_file::update_reader(std::size_t chunk) {
recur_mutex_lock rw_lock(rw_mtx_);
read_chunk_ = chunk;
file_lock.unlock();
download_chunk(next_chunk, true, false);
}
}
});
if (reader_thread_ || stop_requested_) {
return;
}
reader_thread_ = std::make_unique<std::thread>([this]() {
unique_recur_mutex_lock lock(rw_mtx_);
auto next_chunk{read_chunk_};
auto read_chunk{read_chunk_};
lock.unlock();
while (not stop_requested_) {
lock.lock();
auto read_state = get_read_state();
if ((get_file_size() == 0U) || read_state.all()) {
lock.unlock();
wait_for_io(stop_requested_);
continue;
}
if (read_chunk != read_chunk_) {
next_chunk = read_chunk = read_chunk_;
}
next_chunk = next_chunk + 1U >= read_state.size() ? 0U : next_chunk + 1U;
lock.unlock();
download_chunk(next_chunk, true, false);
}
});
}
auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
@ -551,41 +694,44 @@ auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
bytes_written = 0U;
if (fsi_.directory || provider_.is_read_only()) {
return api_error::invalid_operation;
if (is_directory() || get_provider().is_read_only()) {
return set_api_error(api_error::invalid_operation);
}
if (data.empty()) {
return api_error::success;
}
unique_recur_mutex_lock write_lock(file_mtx_);
if (stop_requested_) {
return api_error::download_stopped;
return set_api_error(api_error::download_stopped);
}
write_lock.unlock();
auto start_chunk = static_cast<std::size_t>(write_offset / chunk_size_);
auto res = check_start();
if (res != api_error::success) {
return res;
}
auto begin_chunk = static_cast<std::size_t>(write_offset / get_chunk_size());
auto end_chunk =
static_cast<std::size_t>((write_offset + data.size()) / chunk_size_);
static_cast<std::size_t>((write_offset + data.size()) / get_chunk_size());
update_background_reader(start_chunk);
update_reader(begin_chunk);
download_range(start_chunk, std::min(read_state_.size() - 1U, end_chunk),
download_range(begin_chunk, std::min(get_read_state().size() - 1U, end_chunk),
true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
write_lock.lock();
if ((write_offset + data.size()) > fsi_.size) {
auto res = resize(write_offset + data.size());
unique_recur_mutex_lock rw_lock(rw_mtx_);
if ((write_offset + data.size()) > get_file_size()) {
res = resize(write_offset + data.size());
if (res != api_error::success) {
return res;
}
}
auto res = do_io([&]() -> api_error {
res = do_io([&]() -> api_error {
if (not nf_->write(data, write_offset, &bytes_written)) {
return api_error::os_error;
}
@ -598,11 +744,11 @@ auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
}
auto now = std::to_string(utils::time::get_time_now());
res = provider_.set_item_meta(fsi_.api_path, {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_WRITTEN, now},
});
res = get_provider().set_item_meta(get_api_path(), {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_WRITTEN, now},
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");


@ -35,14 +35,16 @@ void open_file_base::download::notify(const api_error &err) {
}
auto open_file_base::download::wait() -> api_error {
if (not complete_) {
unique_mutex_lock lock(mtx_);
if (not complete_) {
notify_.wait(lock);
}
notify_.notify_all();
if (complete_) {
return error_;
}
unique_mutex_lock lock(mtx_);
if (not complete_) {
notify_.wait(lock);
}
notify_.notify_all();
return error_;
}
@ -65,12 +67,14 @@ auto open_file_base::io_item::get_result() -> api_error {
open_file_base::open_file_base(std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider)
: open_file_base(chunk_size, chunk_timeout, fsi, {}, provider) {}
i_provider &provider, bool disable_io)
: open_file_base(chunk_size, chunk_timeout, fsi, {}, provider, disable_io) {
}
open_file_base::open_file_base(
std::uint64_t chunk_size, std::uint8_t chunk_timeout, filesystem_item fsi,
std::map<std::uint64_t, open_file_data> open_data, i_provider &provider)
std::map<std::uint64_t, open_file_data> open_data, i_provider &provider,
bool disable_io)
: chunk_size_(chunk_size),
chunk_timeout_(chunk_timeout),
fsi_(std::move(fsi)),
@ -80,7 +84,7 @@ open_file_base::open_file_base(
: fsi.size % chunk_size)),
open_data_(std::move(open_data)),
provider_(provider) {
if (not fsi.directory) {
if (not fsi.directory && not disable_io) {
io_thread_ = std::make_unique<std::thread>([this] { file_io_thread(); });
}
}
@ -115,7 +119,7 @@ auto open_file_base::can_close() const -> bool {
return true;
}
if (is_download_complete()) {
if (is_complete()) {
return true;
}
@ -123,12 +127,30 @@ auto open_file_base::can_close() const -> bool {
return true;
}
const std::chrono::system_clock::time_point last_access = last_access_;
const auto duration = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::time_point last_access{last_access_};
auto duration = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - last_access);
return (duration.count() >= chunk_timeout_);
}
auto open_file_base::close() -> bool {
unique_mutex_lock io_lock(io_thread_mtx_);
if (io_stop_requested_ || not io_thread_) {
io_thread_notify_.notify_all();
io_lock.unlock();
return false;
}
io_stop_requested_ = true;
io_thread_notify_.notify_all();
io_lock.unlock();
io_thread_->join();
io_thread_.reset();
return true;
}
auto open_file_base::do_io(std::function<api_error()> action) -> api_error {
unique_mutex_lock io_lock(io_thread_mtx_);
auto item = std::make_shared<io_item>(action);
@ -187,6 +209,36 @@ auto open_file_base::get_file_size() const -> std::uint64_t {
return fsi_.size;
}
[[nodiscard]] auto open_file_base::get_last_chunk_size() const -> std::size_t {
recur_mutex_lock file_lock(file_mtx_);
return last_chunk_size_;
}
void open_file_base::set_file_size(std::uint64_t size) {
recur_mutex_lock file_lock(file_mtx_);
fsi_.size = size;
}
void open_file_base::set_last_chunk_size(std::size_t size) {
recur_mutex_lock file_lock(file_mtx_);
last_chunk_size_ = size;
}
void open_file_base::set_modified(bool modified) {
recur_mutex_lock file_lock(file_mtx_);
modified_ = modified;
}
void open_file_base::set_removed(bool removed) {
recur_mutex_lock file_lock(file_mtx_);
removed_ = removed;
}
void open_file_base::set_source_path(std::string source_path) {
recur_mutex_lock file_lock(file_mtx_);
fsi_.source_path = std::move(source_path);
}
auto open_file_base::get_filesystem_item() const -> filesystem_item {
recur_mutex_lock file_lock(file_mtx_);
return fsi_;
@ -194,8 +246,9 @@ auto open_file_base::get_filesystem_item() const -> filesystem_item {
auto open_file_base::get_handles() const -> std::vector<std::uint64_t> {
recur_mutex_lock file_lock(file_mtx_);
std::vector<std::uint64_t> ret;
for (auto &&item : open_data_) {
for (const auto &item : open_data_) {
ret.emplace_back(item.first);
}
@ -230,11 +283,31 @@ auto open_file_base::get_open_file_count() const -> std::size_t {
return open_data_.size();
}
auto open_file_base::get_source_path() const -> std::string {
recur_mutex_lock file_lock(file_mtx_);
return fsi_.source_path;
}
auto open_file_base::has_handle(std::uint64_t handle) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return open_data_.find(handle) != open_data_.end();
}
auto open_file_base::is_modified() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return modified_;
}
auto open_file_base::is_removed() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return removed_;
}
void open_file_base::notify_io() {
mutex_lock io_lock(io_thread_mtx_);
io_thread_notify_.notify_all();
}
void open_file_base::remove(std::uint64_t handle) {
recur_mutex_lock file_lock(file_mtx_);
if (open_data_.find(handle) == open_data_.end()) {
@ -261,7 +334,7 @@ void open_file_base::remove_all() {
auto open_data = open_data_;
open_data_.clear();
for (auto &&data : open_data) {
for (const auto &data : open_data) {
event_system::instance().raise<filesystem_item_handle_closed>(
fsi_.api_path, data.first, fsi_.source_path, fsi_.directory, modified_);
}
@ -276,15 +349,15 @@ void open_file_base::reset_timeout() {
auto open_file_base::set_api_error(const api_error &err) -> api_error {
mutex_lock error_lock(error_mtx_);
if (error_ != err) {
return ((error_ = (error_ == api_error::success ||
error_ == api_error::download_incomplete ||
error_ == api_error::download_stopped
? err
: error_)));
if (error_ == err) {
return error_;
}
return error_;
return ((error_ = (error_ == api_error::success ||
error_ == api_error::download_incomplete ||
error_ == api_error::download_stopped
? err
: error_)));
}
void open_file_base::set_api_path(const std::string &api_path) {
@ -293,24 +366,12 @@ void open_file_base::set_api_path(const std::string &api_path) {
fsi_.api_parent = utils::path::get_parent_api_path(api_path);
}
auto open_file_base::close() -> bool {
void open_file_base::wait_for_io(stop_type &stop_requested) {
unique_mutex_lock io_lock(io_thread_mtx_);
if (not fsi_.directory && not io_stop_requested_) {
io_stop_requested_ = true;
io_thread_notify_.notify_all();
io_lock.unlock();
if (io_thread_) {
io_thread_->join();
io_thread_.reset();
return true;
}
return false;
if (not stop_requested && io_thread_queue_.empty()) {
io_thread_notify_.wait(io_lock);
}
io_thread_notify_.notify_all();
io_lock.unlock();
return false;
}
} // namespace repertory


@ -0,0 +1,367 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/ring_buffer_base.hpp"
#include "events/event_system.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file_base.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
ring_buffer_base::ring_buffer_base(std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider,
std::size_t ring_size, bool disable_io)
: open_file_base(chunk_size, chunk_timeout, fsi, provider, disable_io),
read_state_(ring_size),
total_chunks_(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size))) {
if (disable_io) {
if (fsi.size > 0U) {
read_state_.resize(std::min(total_chunks_, read_state_.size()));
ring_end_ =
std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
read_state_.set(0U, read_state_.size(), false);
}
} else {
if (ring_size < min_ring_size) {
throw std::runtime_error("ring size must be greater than or equal to 5");
}
ring_end_ = std::min(total_chunks_ - 1U, ring_begin_ + ring_size - 1U);
read_state_.set(0U, ring_size, false);
}
}
auto ring_buffer_base::check_start() -> api_error {
REPERTORY_USES_FUNCTION_NAME();
try {
if (on_check_start()) {
return api_error::success;
}
event_system::instance().raise<download_begin>(get_api_path(),
get_source_path());
reader_thread_ =
std::make_unique<std::thread>([this]() { reader_thread(); });
return api_error::success;
} catch (const std::exception &ex) {
utils::error::raise_api_path_error(function_name, get_api_path(),
get_source_path(), ex,
"failed to start");
return api_error::error;
}
}
auto ring_buffer_base::close() -> bool {
stop_requested_ = true;
unique_mutex_lock chunk_lock(chunk_mtx_);
chunk_notify_.notify_all();
chunk_lock.unlock();
auto res = open_file_base::close();
if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}
return res;
}
auto ring_buffer_base::download_chunk(std::size_t chunk,
bool skip_active) -> api_error {
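// Downloads one chunk within the current ring window; returns
// invalid_ring_buffer_position if the chunk has slid outside the window, and
// waits on an in-flight download of the same chunk unless skip_active is set.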
unique_mutex_lock chunk_lock(chunk_mtx_);
const auto unlock_and_notify = [this, &chunk_lock]() {
chunk_notify_.notify_all();
chunk_lock.unlock();
};
const auto unlock_and_return =
[&unlock_and_notify](api_error res) -> api_error {
unlock_and_notify();
return res;
};
if (chunk < ring_begin_ || chunk > ring_end_) {
return unlock_and_return(api_error::invalid_ring_buffer_position);
}
if (get_active_downloads().find(chunk) != get_active_downloads().end()) {
if (skip_active) {
return unlock_and_return(api_error::success);
}
auto active_download = get_active_downloads().at(chunk);
unlock_and_notify();
return active_download->wait();
}
if (read_state_[chunk % read_state_.size()]) {
return unlock_and_return(api_error::success);
}
auto active_download{std::make_shared<download>()};
get_active_downloads()[chunk] = active_download;
return use_buffer(chunk, [&](data_buffer &buffer) -> api_error {
auto data_offset{chunk * get_chunk_size()};
auto data_size{
chunk == (total_chunks_ - 1U) ? get_last_chunk_size()
: get_chunk_size(),
};
unlock_and_notify();
auto result{
get_provider().read_file_bytes(get_api_path(), data_size, data_offset,
buffer, stop_requested_),
};
chunk_lock.lock();
if (chunk < ring_begin_ || chunk > ring_end_) {
result = api_error::invalid_ring_buffer_position;
}
if (result == api_error::success) {
result = on_chunk_downloaded(chunk, buffer);
if (result == api_error::success) {
read_state_[chunk % read_state_.size()] = true;
auto progress = (static_cast<double>(chunk + 1U) /
static_cast<double>(total_chunks_)) *
100.0;
event_system::instance().raise<download_progress>(
get_api_path(), get_source_path(), progress);
}
}
get_active_downloads().erase(chunk);
unlock_and_notify();
active_download->notify(result);
return result;
});
}
void ring_buffer_base::forward(std::size_t count) {
update_position(count, true);
}
auto ring_buffer_base::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(get_mutex());
return read_state_;
}
auto ring_buffer_base::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(get_mutex());
return read_state_[chunk % read_state_.size()];
}
auto ring_buffer_base::read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error {
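// Serves the read by walking chunks in order, sliding the ring window toward
// each chunk (forward/reverse) and copying the requested byte range out via
// on_read_chunk.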
if (is_directory()) {
return api_error::invalid_operation;
}
reset_timeout();
read_size =
utils::calculate_read_size(get_file_size(), read_size, read_offset);
if (read_size == 0U) {
return api_error::success;
}
auto begin_chunk{static_cast<std::size_t>(read_offset / get_chunk_size())};
read_offset = read_offset - (begin_chunk * get_chunk_size());
unique_mutex_lock read_lock(read_mtx_);
auto res = check_start();
if (res != api_error::success) {
return res;
}
for (std::size_t chunk = begin_chunk;
not stop_requested_ && (res == api_error::success) && (read_size > 0U);
++chunk) {
reset_timeout();
if (chunk > ring_pos_) {
forward(chunk - ring_pos_);
} else if (chunk < ring_pos_) {
reverse(ring_pos_ - chunk);
}
res = download_chunk(chunk, false);
if (res != api_error::success) {
if (res == api_error::invalid_ring_buffer_position) {
read_lock.unlock();
// TODO limit retry
return read(read_size, read_offset, data);
}
return res;
}
reset_timeout();
std::size_t bytes_read{};
res = on_read_chunk(
chunk,
std::min(static_cast<std::size_t>(get_chunk_size() - read_offset),
read_size),
read_offset, data, bytes_read);
if (res != api_error::success) {
return res;
}
reset_timeout();
read_size -= bytes_read;
read_offset = 0U;
}
return stop_requested_ ? api_error::download_stopped : res;
}
void ring_buffer_base::reader_thread() {
unique_mutex_lock chunk_lock(chunk_mtx_);
auto next_chunk{ring_pos_};
chunk_notify_.notify_all();
chunk_lock.unlock();
while (not stop_requested_) {
chunk_lock.lock();
next_chunk = next_chunk + 1U > ring_end_ ? ring_begin_ : next_chunk + 1U;
const auto check_and_wait = [this, &chunk_lock, &next_chunk]() {
if (stop_requested_) {
chunk_notify_.notify_all();
chunk_lock.unlock();
return;
}
if (get_read_state().all()) {
chunk_notify_.wait(chunk_lock);
next_chunk = ring_pos_;
}
chunk_notify_.notify_all();
chunk_lock.unlock();
};
if (read_state_[next_chunk % read_state_.size()]) {
check_and_wait();
continue;
}
chunk_notify_.notify_all();
chunk_lock.unlock();
download_chunk(next_chunk, true);
}
event_system::instance().raise<download_end>(
get_api_path(), get_source_path(), api_error::download_stopped);
}
void ring_buffer_base::reverse(std::size_t count) {
update_position(count, false);
}
void ring_buffer_base::set(std::size_t first_chunk, std::size_t current_chunk) {
mutex_lock chunk_lock(chunk_mtx_);
if (first_chunk >= total_chunks_) {
chunk_notify_.notify_all();
throw std::runtime_error("first chunk must be less than total chunks");
}
ring_begin_ = first_chunk;
ring_end_ =
std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
if (current_chunk > ring_end_) {
chunk_notify_.notify_all();
throw std::runtime_error(
"current chunk must be less than or equal to last chunk");
}
ring_pos_ = current_chunk;
read_state_.set(0U, read_state_.size(), true);
chunk_notify_.notify_all();
}
void ring_buffer_base::set_api_path(const std::string &api_path) {
mutex_lock chunk_lock(chunk_mtx_);
open_file_base::set_api_path(api_path);
chunk_notify_.notify_all();
}
void ring_buffer_base::update_position(std::size_t count, bool is_forward) {
mutex_lock chunk_lock(chunk_mtx_);
if (is_forward) {
if ((ring_pos_ + count) > (total_chunks_ - 1U)) {
count = (total_chunks_ - 1U) - ring_pos_;
}
} else {
count = std::min(ring_pos_, count);
}
if (is_forward ? (ring_pos_ + count) <= ring_end_
: (ring_pos_ - count) >= ring_begin_) {
ring_pos_ += is_forward ? count : -count;
} else {
auto delta = is_forward ? count - (ring_end_ - ring_pos_)
: count - (ring_pos_ - ring_begin_);
if (delta >= read_state_.size()) {
read_state_.set(0U, read_state_.size(), false);
ring_pos_ += is_forward ? count : -count;
ring_begin_ += is_forward ? delta : -delta;
} else {
for (std::size_t idx = 0U; idx < delta; ++idx) {
if (is_forward) {
read_state_[(ring_begin_ + idx) % read_state_.size()] = false;
} else {
read_state_[(ring_end_ - idx) % read_state_.size()] = false;
}
}
ring_begin_ += is_forward ? delta : -delta;
ring_pos_ += is_forward ? count : -count;
}
ring_end_ =
std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
}
chunk_notify_.notify_all();
}
} // namespace repertory


@ -21,73 +21,30 @@
*/
#include "file_manager/ring_buffer_open_file.hpp"
#include "app_config.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file_base.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/encrypting_reader.hpp"
#include "utils/file_utils.hpp"
#include "utils/error_utils.hpp"
#include "utils/path.hpp"
#include "utils/utils.hpp"
namespace repertory {
ring_buffer_open_file::ring_buffer_open_file(std::string buffer_directory,
std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi,
i_provider &provider)
: ring_buffer_open_file(std::move(buffer_directory), chunk_size,
chunk_timeout, std::move(fsi), provider,
(1024ULL * 1024ULL * 1024ULL) / chunk_size) {}
ring_buffer_open_file::ring_buffer_open_file(std::string buffer_directory,
std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi,
i_provider &provider,
std::size_t ring_size)
: open_file_base(chunk_size, chunk_timeout, fsi, provider),
ring_state_(ring_size),
total_chunks_(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size_))) {
if ((ring_size % 2U) != 0U) {
throw std::runtime_error("ring size must be a multiple of 2");
}
if (ring_size < 4U) {
throw std::runtime_error("ring size must be greater than or equal to 4");
}
if (fsi.size < (ring_state_.size() * chunk_size)) {
: ring_buffer_base(chunk_size, chunk_timeout, fsi, provider, ring_size,
false),
source_path_(utils::path::combine(buffer_directory,
{
utils::create_uuid_string(),
})) {
if (not can_handle_file(fsi.size, chunk_size, ring_size)) {
throw std::runtime_error("file size is less than ring buffer size");
}
last_chunk_ = ring_state_.size() - 1U;
ring_state_.set(0U, ring_state_.size(), true);
buffer_directory = utils::path::absolute(buffer_directory);
if (not utils::file::directory(buffer_directory).create_directory()) {
throw std::runtime_error("failed to create buffer directory|path|" +
buffer_directory + "|err|" +
std::to_string(utils::get_last_error_code()));
}
fsi_.source_path =
utils::path::combine(buffer_directory, {utils::create_uuid_string()});
nf_ = utils::file::file::open_or_create_file(fsi_.source_path);
if (not *nf_) {
throw std::runtime_error("failed to create buffer file|err|" +
std::to_string(utils::get_last_error_code()));
}
if (not nf_->truncate(ring_state_.size() * chunk_size)) {
nf_->close();
throw std::runtime_error("failed to resize buffer file|err|" +
std::to_string(utils::get_last_error_code()));
}
}
ring_buffer_open_file::~ring_buffer_open_file() {
@ -95,107 +52,24 @@ ring_buffer_open_file::~ring_buffer_open_file() {
close();
if (not nf_) {
return;
}
nf_->close();
if (not utils::file::file(fsi_.source_path).remove()) {
nf_.reset();
if (not utils::file::file(source_path_).remove()) {
utils::error::raise_api_path_error(
function_name, fsi_.api_path, fsi_.source_path,
function_name, get_api_path(), source_path_,
utils::get_last_error_code(), "failed to delete file");
}
}
auto ring_buffer_open_file::download_chunk(std::size_t chunk) -> api_error {
unique_mutex_lock chunk_lock(chunk_mtx_);
if (active_downloads_.find(chunk) != active_downloads_.end()) {
auto active_download = active_downloads_.at(chunk);
chunk_notify_.notify_all();
chunk_lock.unlock();
return active_download->wait();
}
if (ring_state_[chunk % ring_state_.size()]) {
auto active_download = std::make_shared<download>();
active_downloads_[chunk] = active_download;
ring_state_[chunk % ring_state_.size()] = false;
chunk_notify_.notify_all();
chunk_lock.unlock();
data_buffer buffer((chunk == (total_chunks_ - 1U)) ? last_chunk_size_
: chunk_size_);
stop_type stop_requested = !!ring_state_[chunk % ring_state_.size()];
auto res =
provider_.read_file_bytes(fsi_.api_path, buffer.size(),
chunk * chunk_size_, buffer, stop_requested);
if (res == api_error::success) {
res = do_io([&]() -> api_error {
std::size_t bytes_written{};
if (not nf_->write(buffer, (chunk % ring_state_.size()) * chunk_size_,
&bytes_written)) {
return api_error::os_error;
}
return api_error::success;
});
}
active_download->notify(res);
chunk_lock.lock();
active_downloads_.erase(chunk);
chunk_notify_.notify_all();
return res;
}
chunk_notify_.notify_all();
chunk_lock.unlock();
return api_error::success;
}
void ring_buffer_open_file::forward(std::size_t count) {
mutex_lock chunk_lock(chunk_mtx_);
if ((current_chunk_ + count) > (total_chunks_ - 1U)) {
count = (total_chunks_ - 1U) - current_chunk_;
}
if ((current_chunk_ + count) <= last_chunk_) {
current_chunk_ += count;
} else {
const auto added = count - (last_chunk_ - current_chunk_);
if (added >= ring_state_.size()) {
ring_state_.set(0U, ring_state_.size(), true);
current_chunk_ += count;
first_chunk_ += added;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
} else {
for (std::size_t idx = 0U; idx < added; ++idx) {
ring_state_[(first_chunk_ + idx) % ring_state_.size()] = true;
}
first_chunk_ += added;
current_chunk_ += count;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
}
}
chunk_notify_.notify_all();
}
auto ring_buffer_open_file::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(file_mtx_);
auto read_state = ring_state_;
return read_state.flip();
}
auto ring_buffer_open_file::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return not ring_state_[chunk % ring_state_.size()];
}
auto ring_buffer_open_file::is_download_complete() const -> bool {
return false;
auto ring_buffer_open_file::can_handle_file(std::uint64_t file_size,
std::size_t chunk_size,
std::size_t ring_size) -> bool {
return file_size >= (static_cast<std::uint64_t>(ring_size) * chunk_size);
}
auto ring_buffer_open_file::native_operation(
@ -203,121 +77,75 @@ auto ring_buffer_open_file::native_operation(
return do_io([&]() -> api_error { return callback(nf_->get_handle()); });
}
void ring_buffer_open_file::reverse(std::size_t count) {
mutex_lock chunk_lock(chunk_mtx_);
if (current_chunk_ < count) {
count = current_chunk_;
auto ring_buffer_open_file::on_check_start() -> bool {
REPERTORY_USES_FUNCTION_NAME();
if (nf_) {
return true;
}
if ((current_chunk_ - count) >= first_chunk_) {
current_chunk_ -= count;
} else {
const auto removed = count - (current_chunk_ - first_chunk_);
if (removed >= ring_state_.size()) {
ring_state_.set(0U, ring_state_.size(), true);
current_chunk_ -= count;
first_chunk_ = current_chunk_;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
} else {
for (std::size_t idx = 0U; idx < removed; ++idx) {
ring_state_[(last_chunk_ - idx) % ring_state_.size()] = true;
}
first_chunk_ -= removed;
current_chunk_ -= count;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
}
}
chunk_notify_.notify_all();
}
auto ring_buffer_open_file::read(std::size_t read_size,
std::uint64_t read_offset, data_buffer &data)
-> api_error {
if (fsi_.directory) {
return api_error::invalid_operation;
}
reset_timeout();
read_size = utils::calculate_read_size(fsi_.size, read_size, read_offset);
if (read_size == 0U) {
return api_error::success;
}
const auto start_chunk_index =
static_cast<std::size_t>(read_offset / chunk_size_);
read_offset = read_offset - (start_chunk_index * chunk_size_);
data_buffer buffer(chunk_size_);
auto res = api_error::success;
for (std::size_t chunk = start_chunk_index;
(res == api_error::success) && (read_size > 0U); ++chunk) {
if (chunk > current_chunk_) {
forward(chunk - current_chunk_);
} else if (chunk < current_chunk_) {
reverse(current_chunk_ - chunk);
}
reset_timeout();
res = download_chunk(chunk);
if (res == api_error::success) {
const auto to_read = std::min(
static_cast<std::size_t>(chunk_size_ - read_offset), read_size);
res = do_io([this, &buffer, &chunk, &data, read_offset,
&to_read]() -> api_error {
std::size_t bytes_read{};
auto ret =
nf_->read(buffer, ((chunk % ring_state_.size()) * chunk_size_),
&bytes_read)
? api_error::success
: api_error::os_error;
if (ret == api_error::success) {
data.insert(data.end(),
buffer.begin() + static_cast<std::int64_t>(read_offset),
buffer.begin() +
static_cast<std::int64_t>(read_offset + to_read));
reset_timeout();
}
return ret;
});
read_offset = 0U;
read_size -= to_read;
}
}
return res;
}
void ring_buffer_open_file::set(std::size_t first_chunk,
std::size_t current_chunk) {
mutex_lock chunk_lock(chunk_mtx_);
if (first_chunk >= total_chunks_) {
chunk_notify_.notify_all();
throw std::runtime_error("first chunk must be less than total chunks");
}
first_chunk_ = first_chunk;
last_chunk_ = first_chunk_ + ring_state_.size() - 1U;
if (current_chunk > last_chunk_) {
chunk_notify_.notify_all();
auto buffer_directory{utils::path::get_parent_path(source_path_)};
if (not utils::file::directory(buffer_directory).create_directory()) {
throw std::runtime_error(
"current chunk must be less than or equal to last chunk");
fmt::format("failed to create buffer directory|path|{}|err|{}",
buffer_directory, utils::get_last_error_code()));
}
current_chunk_ = current_chunk;
ring_state_.set(0U, ring_state_.size(), false);
nf_ = utils::file::file::open_or_create_file(source_path_);
if (not nf_ || not *nf_) {
throw std::runtime_error(fmt::format("failed to create buffer file|err|{}",
utils::get_last_error_code()));
}
chunk_notify_.notify_all();
if (not nf_->truncate(get_ring_size() * get_chunk_size())) {
nf_->close();
nf_.reset();
throw std::runtime_error(fmt::format("failed to resize buffer file|err|{}",
utils::get_last_error_code()));
}
return false;
}
void ring_buffer_open_file::set_api_path(const std::string &api_path) {
mutex_lock chunk_lock(chunk_mtx_);
open_file_base::set_api_path(api_path);
chunk_notify_.notify_all();
auto ring_buffer_open_file::on_chunk_downloaded(
std::size_t chunk, const data_buffer &buffer) -> api_error {
return do_io([&]() -> api_error {
std::size_t bytes_written{};
if (nf_->write(buffer, (chunk % get_ring_size()) * get_chunk_size(),
&bytes_written)) {
return api_error::success;
}
return api_error::os_error;
});
}
auto ring_buffer_open_file::on_read_chunk(
std::size_t chunk, std::size_t read_size, std::uint64_t read_offset,
data_buffer &data, std::size_t &bytes_read) -> api_error {
data_buffer buffer(read_size);
auto res = do_io([&]() -> api_error {
return nf_->read(
buffer,
(((chunk % get_ring_size()) * get_chunk_size()) + read_offset),
&bytes_read)
? api_error::success
: api_error::os_error;
});
if (res != api_error::success) {
return res;
}
data.insert(data.end(), buffer.begin(), buffer.end());
return api_error::success;
}
auto ring_buffer_open_file::use_buffer(
std::size_t /* chunk */,
std::function<api_error(data_buffer &)> func) -> api_error {
data_buffer buffer;
return func(buffer);
}
} // namespace repertory


@ -53,7 +53,8 @@ void upload::upload_thread() {
error_ =
provider_.upload_file(fsi_.api_path, fsi_.source_path, stop_requested_);
if (not utils::file::reset_modified_time(fsi_.source_path)) {
if (error_ == api_error::success &&
not utils::file::reset_modified_time(fsi_.source_path)) {
utils::error::raise_api_path_error(
function_name, fsi_.api_path, fsi_.source_path,
utils::get_last_error_code(), "failed to reset modified time");


@ -28,9 +28,8 @@
#endif // defined(PROJECT_ENABLE_OPENSSL)
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
#include <filesystem>
#include <cstdlib>
#include <pthread.h>
#include <stdlib.h>
#endif // defined(PROJECT_REQUIRE_ALPINE) && !defined (PROJECT_IS_MINGW)
#if defined(PROJECT_ENABLE_LIBSODIUM)
@ -44,6 +43,7 @@
#include "spdlog/spdlog.h"
#include "initialize.hpp"
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
#include "utils/path.hpp"
#endif // defined(PROJECT_REQUIRE_ALPINE) && !defined (PROJECT_IS_MINGW)


@ -24,6 +24,8 @@
#include "platform/unix_platform.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "providers/i_provider.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"

View File

@ -23,12 +23,14 @@
#include "platform/win32_platform.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "providers/i_provider.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
auto lock_data::get_mount_state(const provider_type & /*pt*/, json &mount_state)
-> bool {
auto lock_data::get_mount_state(const provider_type & /*pt*/,
json &mount_state) -> bool {
const auto ret = get_mount_state(mount_state);
if (ret) {
const auto mount_id =

View File

@ -22,16 +22,34 @@
#include "providers/base_provider.hpp"
#include "app_config.hpp"
#include "db/meta_db.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/i_file_manager.hpp"
#include "platform/platform.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/polling.hpp"
#include "utils/tasks.hpp"
#include "utils/time.hpp"
namespace repertory {
void base_provider::add_all_items(const stop_type &stop_requested) {
REPERTORY_USES_FUNCTION_NAME();
api_file_list list{};
std::string marker;
auto res{api_error::more_data};
while (not stop_requested && res == api_error::more_data) {
res = get_file_list(list, marker);
if (res != api_error::success && res != api_error::more_data) {
utils::error::raise_error(function_name, res, "failed to get file list");
}
}
}
auto base_provider::create_api_file(std::string path, std::string key,
std::uint64_t size, std::uint64_t file_time)
-> api_file {
@ -403,18 +421,7 @@ auto base_provider::get_total_item_count() const -> std::uint64_t {
}
auto base_provider::get_used_drive_space() const -> std::uint64_t {
REPERTORY_USES_FUNCTION_NAME();
try {
auto used_space = get_used_drive_space_impl();
get_file_mgr()->update_used_space(used_space);
return used_space;
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to get used drive space");
}
return 0U;
return db3_->get_total_size();
}
auto base_provider::is_file_writeable(const std::string &api_path) const
@ -428,99 +435,34 @@ auto base_provider::is_file_writeable(const std::string &api_path) const
return not exists;
}
void base_provider::remove_deleted_files(bool source_only) {
void base_provider::process_removed_directories(
std::deque<removed_item> removed_list, const stop_type &stop_requested) {
for (const auto &item : removed_list) {
if (stop_requested) {
return;
}
if (not item.directory) {
continue;
}
db3_->remove_api_path(item.api_path);
event_system::instance().raise<directory_removed_externally>(
item.api_path, item.source_path);
}
}
void base_provider::process_removed_files(std::deque<removed_item> removed_list,
const stop_type &stop_requested) {
REPERTORY_USES_FUNCTION_NAME();
if (not is_read_only()) {
auto source_list =
utils::file::directory{config_.get_cache_directory()}.get_files();
for (auto &&source_file : source_list) {
filesystem_item fsi{};
if (get_filesystem_item_from_source_path(source_file->get_path(), fsi) !=
api_error::item_not_found) {
continue;
}
auto reference_time =
source_file->get_time(config_.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
if (not reference_time.has_value()) {
continue;
}
auto delay = (config_.get_eviction_delay_mins() * 60UL) *
utils::time::NANOS_PER_SECOND;
if ((reference_time.value() + static_cast<std::uint64_t>(delay)) >=
utils::time::get_time_now()) {
continue;
}
event_system::instance().raise<orphaned_source_file_detected>(
source_file->get_path());
if (not source_file->remove()) {
continue;
}
event_system::instance().raise<orphaned_source_file_removed>(
source_file->get_path());
}
}
if (source_only) {
return;
}
struct removed_item {
std::string api_path{};
bool directory{};
std::string source_path{};
};
api_file_list list{};
auto res = get_file_list(list);
if (res != api_error::success) {
utils::error::raise_error(function_name, res, "failed to get file list");
return;
}
std::vector<removed_item> removed_list{};
for (auto &&api_path : db3_->get_api_path_list()) {
api_meta_map meta{};
if (get_item_meta(api_path, meta) != api_error::success) {
continue;
auto orphaned_directory =
utils::path::combine(get_config().get_data_directory(), {"orphaned"});
for (const auto &item : removed_list) {
if (stop_requested) {
return;
}
if (utils::string::to_bool(meta[META_DIRECTORY])) {
bool exists{};
if (is_directory(api_path, exists) != api_error::success) {
continue;
}
if (not exists) {
removed_list.emplace_back(removed_item{api_path, true, ""});
}
continue;
}
bool exists{};
if (is_file(api_path, exists) != api_error::success) {
continue;
}
if (exists) {
continue;
}
removed_list.emplace_back(removed_item{api_path, false, meta[META_SOURCE]});
}
const auto orphaned_directory =
utils::path::combine(config_.get_data_directory(), {"orphaned"});
for (auto &&item : removed_list) {
if (item.directory) {
continue;
}
@ -536,8 +478,8 @@ void base_provider::remove_deleted_files(bool source_only) {
continue;
}
const auto parts = utils::string::split(item.api_path, '/', false);
const auto orphaned_file = utils::path::combine(
auto parts = utils::string::split(item.api_path, '/', false);
auto orphaned_file = utils::path::combine(
orphaned_directory, {utils::path::strip_to_file_name(item.source_path) +
'_' + parts[parts.size() - 1U]});
@ -568,16 +510,77 @@ void base_provider::remove_deleted_files(bool source_only) {
event_system::instance().raise<file_removed_externally>(item.api_path,
item.source_path);
}
}
for (auto &&item : removed_list) {
if (not item.directory) {
continue;
}
void base_provider::process_removed_items(const stop_type &stop_requested) {
auto list = db3_->get_api_path_list();
[[maybe_unused]] auto res =
std::all_of(list.begin(), list.end(), [&](auto &&api_path) -> bool {
if (stop_requested) {
return false;
}
db3_->remove_api_path(item.api_path);
event_system::instance().raise<directory_removed_externally>(
item.api_path, item.source_path);
tasks::instance().schedule({
[this, api_path](auto &&task_stopped) {
api_meta_map meta{};
if (get_item_meta(api_path, meta) != api_error::success) {
return;
}
if (utils::string::to_bool(meta[META_DIRECTORY])) {
return;
}
// bool exists{};
// if (is_directory(api_path, exists) != api_error::success) {
// return;
// }
//
// if (exists) {
// return;
// }
//
// // process_removed_directories(
// // {
// // removed_item{api_path, true, ""},
// // },
// // stop_requested2);
//
// return;
// }
bool exists{};
if (is_file(api_path, exists) != api_error::success) {
return;
}
if (exists) {
return;
}
process_removed_files(
{
removed_item{api_path, false, meta[META_SOURCE]},
},
task_stopped);
},
});
return not stop_requested;
});
}
void base_provider::remove_deleted_items(const stop_type &stop_requested) {
add_all_items(stop_requested);
if (stop_requested) {
return;
}
remove_unmatched_source_files(stop_requested);
if (stop_requested) {
return;
}
process_removed_items(stop_requested);
}
auto base_provider::remove_file(const std::string &api_path) -> api_error {
@ -662,6 +665,53 @@ auto base_provider::remove_item_meta(const std::string &api_path,
return db3_->remove_item_meta(api_path, key);
}
void base_provider::remove_unmatched_source_files(
const stop_type &stop_requested) {
if (is_read_only()) {
return;
}
const auto &cfg = get_config();
auto source_list =
utils::file::directory{cfg.get_cache_directory()}.get_files();
for (const auto &source_file : source_list) {
if (stop_requested) {
return;
}
filesystem_item fsi{};
if (get_filesystem_item_from_source_path(source_file->get_path(), fsi) !=
api_error::item_not_found) {
continue;
}
auto reference_time =
source_file->get_time(cfg.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
if (not reference_time.has_value()) {
continue;
}
auto delay =
(cfg.get_eviction_delay_mins() * 60UL) * utils::time::NANOS_PER_SECOND;
if ((reference_time.value() + static_cast<std::uint64_t>(delay)) >=
utils::time::get_time_now()) {
continue;
}
event_system::instance().raise<orphaned_source_file_detected>(
source_file->get_path());
if (not source_file->remove()) {
continue;
}
event_system::instance().raise<orphaned_source_file_removed>(
source_file->get_path());
}
}
auto base_provider::set_item_meta(const std::string &api_path,
const std::string &key,
const std::string &value) -> api_error {
@ -678,7 +728,7 @@ auto base_provider::start(api_item_added_callback api_item_added,
api_item_added_ = api_item_added;
fm_ = mgr;
db3_ = std::make_unique<meta_db>(config_);
db3_ = create_meta_db(config_);
api_meta_map meta{};
if (get_item_meta("/", meta) == api_error::item_not_found) {
@ -689,17 +739,19 @@ auto base_provider::start(api_item_added_callback api_item_added,
auto online{false};
auto unmount_requested{false};
{
const auto &cfg = get_config();
repertory::event_consumer consumer(
"unmount_requested",
[&unmount_requested](const event &) { unmount_requested = true; });
for (std::uint16_t idx = 0U; not online && not unmount_requested &&
(idx < config_.get_online_check_retry_secs());
(idx < cfg.get_online_check_retry_secs());
++idx) {
online = is_online();
if (not online) {
event_system::instance().raise<provider_offline>(
config_.get_host_config().host_name_or_ip,
config_.get_host_config().api_port);
cfg.get_host_config().host_name_or_ip,
cfg.get_host_config().api_port);
std::this_thread::sleep_for(1s);
}
}
@ -709,14 +761,19 @@ auto base_provider::start(api_item_added_callback api_item_added,
return false;
}
remove_deleted_files(true);
cache_size_mgr::instance().initialize(&config_);
polling::instance().set_callback({
"check_deleted",
polling::frequency::low,
[this](auto &&stop_requested) { remove_deleted_items(stop_requested); },
});
polling::instance().set_callback({"check_deleted", polling::frequency::low,
[this]() { remove_deleted_files(false); }});
return true;
}
void base_provider::stop() {
cache_size_mgr::instance().stop();
polling::instance().remove_callback("check_deleted");
db3_.reset();
}
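An illustrative aside, not part of the commit: `remove_unmatched_source_files` above only treats a cache file as orphaned once its reference time plus the configured eviction delay has passed. A small standalone sketch of that arithmetic, with assumed values:

```cpp
#include <cstdint>
#include <iostream>

constexpr std::uint64_t NANOS_PER_SECOND = 1000000000ULL;  // as in utils::time

// Mirrors the check above: the file is skipped while
// (reference_time + delay) >= now, so eviction applies only once that window
// has elapsed.
auto should_evict(std::uint64_t reference_time, std::uint64_t eviction_delay_mins,
                  std::uint64_t now) -> bool {
  auto delay = (eviction_delay_mins * 60ULL) * NANOS_PER_SECOND;
  return (reference_time + delay) < now;
}

int main() {
  // Assumed values: file touched at t=0, "now" is 10 minutes later.
  std::uint64_t now = 600ULL * NANOS_PER_SECOND;
  std::cout << std::boolalpha;
  std::cout << should_evict(0U, 5U, now) << '\n';   // true  (5 min delay elapsed)
  std::cout << should_evict(0U, 15U, now) << '\n';  // false (15 min delay not yet)
  return 0;
}
```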

View File

@ -22,12 +22,14 @@
#include "providers/s3/s3_provider.hpp"
#include "app_config.hpp"
#include "comm/curl/curl_comm.hpp"
#include "comm/i_http_comm.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/i_file_manager.hpp"
#include "types/repertory.hpp"
#include "types/s3.hpp"
#include "utils/collection.hpp"
#include "utils/config.hpp"
#include "utils/encrypting_reader.hpp"
#include "utils/encryption.hpp"
#include "utils/error_utils.hpp"
@ -36,17 +38,13 @@
#include "utils/polling.hpp"
#include "utils/string.hpp"
#include "utils/time.hpp"
#include <utils/config.hpp>
namespace repertory {
s3_provider::s3_provider(app_config &config, i_http_comm &comm)
: base_provider(config, comm) {
get_comm().enable_s3_path_style(config.get_s3_config().use_path_style);
}
: base_provider(config, comm) {}
auto s3_provider::add_if_not_found(api_file &file,
const std::string &object_name) const
-> api_error {
auto s3_provider::add_if_not_found(
api_file &file, const std::string &object_name) const -> api_error {
api_meta_map meta{};
if (get_item_meta(file.api_path, meta) == api_error::item_not_found) {
auto err = create_path_directories(
@ -72,7 +70,7 @@ auto s3_provider::convert_api_date(std::string_view date) -> std::uint64_t {
utils::string::split(date_parts.at(1U), 'Z', true).at(0U)) *
1000000UL;
struct tm tm1{};
struct tm tm1 {};
#if defined(_WIN32)
utils::time::strptime(date_time.c_str(), "%Y-%m-%dT%T", &tm1);
return nanos + utils::time::windows_time_t_to_unix_time(_mkgmtime(&tm1));
@ -87,8 +85,8 @@ auto s3_provider::create_directory_impl(const std::string &api_path,
api_meta_map &meta) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
stop_type stop_requested{false};
if (is_encrypted) {
@ -111,7 +109,7 @@ auto s3_provider::create_directory_impl(const std::string &api_path,
{utils::collection::to_hex_string(result)}));
}
const auto object_name =
auto object_name =
utils::path::create_api_path(is_encrypted ? meta[META_KEY] : api_path);
curl::requests::http_put_file put_file{};
@ -140,7 +138,8 @@ auto s3_provider::create_file_extra(const std::string &api_path,
api_meta_map &meta) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (not get_config().get_s3_config().encryption_token.empty()) {
const auto &cfg = get_s3_config();
if (not cfg.encryption_token.empty()) {
std::string encrypted_file_path;
auto res = get_item_meta(utils::path::get_parent_api_path(api_path),
META_KEY, encrypted_file_path);
@ -152,7 +151,7 @@ auto s3_provider::create_file_extra(const std::string &api_path,
data_buffer result;
utils::encryption::encrypt_data(
get_config().get_s3_config().encryption_token,
cfg.encryption_token,
*(utils::string::split(api_path, '/', false).end() - 1U), result);
meta[META_KEY] = utils::path::create_api_path(
@ -163,18 +162,20 @@ auto s3_provider::create_file_extra(const std::string &api_path,
return api_error::success;
}
auto s3_provider::create_path_directories(const std::string &api_path,
const std::string &key) const
-> api_error {
auto s3_provider::create_path_directories(
const std::string &api_path, const std::string &key) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (api_path == "/") {
return api_error::success;
}
const auto encryption_token = get_config().get_s3_config().encryption_token;
const auto is_encrypted = not encryption_token.empty();
const auto &cfg = get_s3_config();
auto encryption_token = cfg.encryption_token;
auto is_encrypted = not encryption_token.empty();
const auto path_parts = utils::string::split(api_path, '/', false);
const auto key_parts = utils::string::split(key, '/', false);
auto path_parts = utils::string::split(api_path, '/', false);
auto key_parts = utils::string::split(key, '/', false);
if (is_encrypted && key_parts.size() != path_parts.size()) {
return api_error::error;
@ -190,12 +191,43 @@ auto s3_provider::create_path_directories(const std::string &api_path,
cur_path = utils::path::create_api_path(
utils::path::combine(cur_path, {path_parts.at(idx)}));
auto exists{false};
auto res = is_directory(cur_path, exists);
if (res != api_error::success) {
return res;
}
if (not exists) {
curl::requests::http_put_file put_file{};
put_file.allow_timeout = true;
put_file.aws_service = "aws:amz:" + cfg.region + ":s3";
put_file.path = (is_encrypted ? cur_key : cur_path) + '/';
stop_type stop_requested{false};
long response_code{};
if (not get_comm().make_request(put_file, response_code,
stop_requested)) {
utils::error::raise_api_path_error(function_name, cur_path,
api_error::comm_error,
"failed to create directory object");
return api_error::comm_error;
}
if (response_code != http_error_codes::ok) {
utils::error::raise_api_path_error(function_name, cur_path,
response_code,
"failed to create directory object");
return api_error::comm_error;
}
}
api_meta_map meta{};
auto res = get_item_meta(cur_path, meta);
res = get_item_meta(cur_path, meta);
if (res == api_error::item_not_found) {
auto dir = create_api_file(cur_path, cur_key, 0U,
get_last_modified(true, cur_path));
get_api_item_added()(true, dir);
continue;
}
@ -210,9 +242,9 @@ auto s3_provider::create_path_directories(const std::string &api_path,
auto s3_provider::decrypt_object_name(std::string &object_name) const
-> api_error {
auto parts = utils::string::split(object_name, '/', false);
for (auto &&part : parts) {
for (auto &part : parts) {
if (not utils::encryption::decrypt_file_name(
get_config().get_s3_config().encryption_token, part)) {
get_s3_config().encryption_token, part)) {
return api_error::decryption_error;
}
}
@ -226,8 +258,8 @@ auto s3_provider::get_directory_item_count(const std::string &api_path) const
REPERTORY_USES_FUNCTION_NAME();
try {
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
auto res = get_item_meta(api_path, META_KEY, key);
@ -236,7 +268,7 @@ auto s3_provider::get_directory_item_count(const std::string &api_path) const
}
}
const auto object_name =
auto object_name =
api_path == "/"
? ""
: utils::path::create_api_path(is_encrypted ? key : api_path);
@ -245,35 +277,52 @@ auto s3_provider::get_directory_item_count(const std::string &api_path) const
long response_code{};
auto prefix = object_name.empty() ? object_name : object_name + "/";
if (not get_object_list(response_data, response_code, "/", prefix)) {
return 0U;
auto grab_more{true};
std::string token{};
std::uint64_t total_count{};
while (grab_more) {
if (not get_object_list(response_data, response_code, "/", prefix,
token)) {
return total_count;
}
if (response_code == http_error_codes::not_found) {
return total_count;
}
if (response_code != http_error_codes::ok) {
return total_count;
}
pugi::xml_document doc;
auto res = doc.load_string(response_data.c_str());
if (res.status != pugi::xml_parse_status::status_ok) {
return total_count;
}
grab_more = doc.select_node("/ListBucketResult/IsTruncated")
.node()
.text()
.as_bool();
if (grab_more) {
token = doc.select_node("/ListBucketResult/NextContinuationToken")
.node()
.text()
.as_string();
}
auto node_list =
doc.select_nodes("/ListBucketResult/CommonPrefixes/Prefix");
total_count += node_list.size();
node_list = doc.select_nodes("/ListBucketResult/Contents");
total_count += node_list.size();
if (not prefix.empty()) {
--total_count;
}
}
if (response_code == http_error_codes::not_found) {
return 0U;
}
if (response_code != http_error_codes::ok) {
return 0U;
}
pugi::xml_document doc;
auto res = doc.load_string(response_data.c_str());
if (res.status != pugi::xml_parse_status::status_ok) {
return 0U;
}
auto node_list =
doc.select_nodes("/ListBucketResult/CommonPrefixes/Prefix");
std::uint64_t ret = node_list.size();
node_list = doc.select_nodes("/ListBucketResult/Contents");
ret += node_list.size();
if (not prefix.empty()) {
--ret;
}
return ret;
return total_count;
} catch (const std::exception &e) {
utils::error::raise_error(function_name, e, "exception occurred");
}
@ -281,13 +330,12 @@ auto s3_provider::get_directory_item_count(const std::string &api_path) const
return 0U;
}
auto s3_provider::get_directory_items_impl(const std::string &api_path,
directory_item_list &list) const
-> api_error {
auto s3_provider::get_directory_items_impl(
const std::string &api_path, directory_item_list &list) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
auto ret = api_error::success;
std::string key;
@ -298,7 +346,7 @@ auto s3_provider::get_directory_items_impl(const std::string &api_path,
}
}
const auto object_name =
auto object_name =
api_path == "/"
? ""
: utils::path::create_api_path(is_encrypted ? key : api_path);
@ -307,103 +355,129 @@ auto s3_provider::get_directory_items_impl(const std::string &api_path,
long response_code{};
auto prefix = object_name.empty() ? object_name : object_name + "/";
if (not get_object_list(response_data, response_code, "/", prefix)) {
return api_error::comm_error;
}
if (response_code == http_error_codes::not_found) {
return api_error::directory_not_found;
}
if (response_code != http_error_codes::ok) {
utils::error::raise_api_path_error(function_name, api_path, response_code,
"failed to get directory items");
return api_error::comm_error;
}
pugi::xml_document doc;
auto parse_res = doc.load_string(response_data.c_str());
if (parse_res.status != pugi::xml_parse_status::status_ok) {
return api_error::error;
}
const auto add_directory_item =
[&](bool directory, const std::string &name, std::uint64_t last_modified,
std::function<std::uint64_t(const directory_item &)> get_size)
-> api_error {
auto child_api_path =
utils::path::create_api_path(utils::path::combine("/", {name}));
std::string child_object_name;
if (is_encrypted) {
child_object_name = child_api_path;
if (not utils::encryption::decrypt_file_path(cfg.encryption_token,
child_api_path)) {
return api_error::decryption_error;
}
auto grab_more{true};
std::string token{};
while (grab_more) {
if (not get_object_list(response_data, response_code, "/", prefix, token)) {
return api_error::comm_error;
}
directory_item dir_item{};
dir_item.api_path = child_api_path;
dir_item.api_parent = utils::path::get_parent_api_path(dir_item.api_path);
dir_item.directory = directory;
dir_item.size = get_size(dir_item);
ret = get_item_meta(child_api_path, dir_item.meta);
if (ret == api_error::item_not_found) {
if (directory) {
ret = create_path_directories(child_api_path, child_object_name);
if (ret != api_error::success) {
return ret;
}
} else {
auto file = create_api_file(child_api_path, child_object_name,
dir_item.size, last_modified);
ret = add_if_not_found(file, child_object_name);
if (ret != api_error::success) {
return ret;
if (response_code == http_error_codes::not_found) {
return api_error::directory_not_found;
}
if (response_code != http_error_codes::ok) {
utils::error::raise_api_path_error(function_name, api_path, response_code,
"failed to get directory items");
return api_error::comm_error;
}
pugi::xml_document doc;
auto parse_res = doc.load_string(response_data.c_str());
if (parse_res.status != pugi::xml_parse_status::status_ok) {
return api_error::error;
}
grab_more = doc.select_node("/ListBucketResult/IsTruncated")
.node()
.text()
.as_bool();
if (grab_more) {
token = doc.select_node("/ListBucketResult/NextContinuationToken")
.node()
.text()
.as_string();
}
const auto add_directory_item =
[&](bool directory, const std::string &name,
std::uint64_t last_modified,
std::function<std::uint64_t(const directory_item &)> get_size)
-> api_error {
auto child_api_path =
utils::path::create_api_path(utils::path::combine("/", {name}));
std::string child_object_name;
if (is_encrypted) {
child_object_name = child_api_path;
if (not utils::encryption::decrypt_file_path(cfg.encryption_token,
child_api_path)) {
return api_error::decryption_error;
}
}
directory_item dir_item{};
dir_item.api_path = child_api_path;
dir_item.api_parent = utils::path::get_parent_api_path(dir_item.api_path);
dir_item.directory = directory;
dir_item.size = get_size(dir_item);
ret = get_item_meta(child_api_path, dir_item.meta);
if (ret == api_error::item_not_found) {
if (directory) {
ret = create_path_directories(child_api_path, child_object_name);
if (ret != api_error::success) {
return ret;
}
} else {
auto file = create_api_file(child_api_path, child_object_name,
dir_item.size, last_modified);
ret = add_if_not_found(file, child_object_name);
if (ret != api_error::success) {
return ret;
}
}
ret = get_item_meta(child_api_path, dir_item.meta);
}
if (ret != api_error::success) {
return ret;
}
list.push_back(std::move(dir_item));
return api_error::success;
};
auto node_list =
doc.select_nodes("/ListBucketResult/CommonPrefixes/Prefix");
for (const auto &node : node_list) {
add_directory_item(
true, node.node().text().as_string(), 0U,
[](const directory_item &) -> std::uint64_t { return 0U; });
}
if (ret != api_error::success) {
return ret;
}
node_list = doc.select_nodes("/ListBucketResult/Contents");
for (const auto &node : node_list) {
auto child_object_name = utils::path::create_api_path(
node.node().select_node("Key").node().text().as_string());
if (child_object_name == utils::path::create_api_path(prefix)) {
continue;
}
list.push_back(std::move(dir_item));
return api_error::success;
};
auto node_list = doc.select_nodes("/ListBucketResult/CommonPrefixes/Prefix");
for (auto &&node : node_list) {
add_directory_item(
true, node.node().text().as_string(), 0U,
[](const directory_item &) -> std::uint64_t { return 0U; });
}
node_list = doc.select_nodes("/ListBucketResult/Contents");
for (auto &&node : node_list) {
auto child_object_name = utils::path::create_api_path(
node.node().select_node("Key").node().text().as_string());
if (child_object_name != utils::path::create_api_path(prefix)) {
auto size = node.node().select_node("Size").node().text().as_ullong();
auto last_modified = convert_api_date(
node.node().select_node("LastModified").node().text().as_string());
add_directory_item(
false, child_object_name, last_modified,
[&is_encrypted, &size](const directory_item &) -> std::uint64_t {
return is_encrypted ? utils::encryption::encrypting_reader::
calculate_decrypted_size(size)
: size;
});
add_directory_item(false, child_object_name, last_modified,
[this, &is_encrypted, &size](
const directory_item &dir_item) -> std::uint64_t {
std::string size_str;
if (get_item_meta(dir_item.api_path, META_SIZE,
size_str) == api_error::success) {
return utils::string::to_uint64(size_str);
}
return is_encrypted
? utils::encryption::encrypting_reader::
calculate_decrypted_size(size)
: size;
});
}
}
return ret;
}
auto s3_provider::get_file(const std::string &api_path, api_file &file) const
-> api_error {
auto s3_provider::get_file(const std::string &api_path,
api_file &file) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
try {
@ -420,12 +494,20 @@ auto s3_provider::get_file(const std::string &api_path, api_file &file) const
file.api_parent = utils::path::get_parent_api_path(api_path);
file.accessed_date = file.changed_date = file.creation_date =
file.modified_date = result.last_modified;
file.file_size =
is_encrypted
? utils::encryption::encrypting_reader::calculate_decrypted_size(
result.content_length)
: result.content_length;
file.key = is_encrypted ? utils::path::create_api_path(api_path) : "";
file.key = is_encrypted ? utils::path::create_api_path(object_name) : "";
std::string size_str;
if (get_item_meta(file.api_path, META_SIZE, size_str) ==
api_error::success) {
file.file_size = utils::string::to_uint64(size_str);
} else {
file.file_size =
is_encrypted
? utils::encryption::encrypting_reader::calculate_decrypted_size(
result.content_length)
: result.content_length;
}
return add_if_not_found(file, object_name);
} catch (const std::exception &e) {
utils::error::raise_error(function_name, e, "exception occurred");
@ -434,12 +516,14 @@ auto s3_provider::get_file(const std::string &api_path, api_file &file) const
return api_error::error;
}
auto s3_provider::get_file_list(api_file_list &list) const -> api_error {
auto s3_provider::get_file_list(api_file_list &list,
std::string &marker) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string response_data;
long response_code{};
if (not get_object_list(response_data, response_code)) {
if (not get_object_list(response_data, response_code, std::nullopt,
std::nullopt, marker)) {
return api_error::comm_error;
}
@ -457,52 +541,59 @@ auto s3_provider::get_file_list(api_file_list &list) const -> api_error {
return api_error::comm_error;
}
auto grab_more =
doc.select_node("/ListBucketResult/IsTruncated").node().text().as_bool();
if (grab_more) {
marker = doc.select_node("/ListBucketResult/NextContinuationToken")
.node()
.text()
.as_string();
}
auto node_list = doc.select_nodes("/ListBucketResult/Contents");
for (auto &&node : node_list) {
auto api_path =
for (const auto &node : node_list) {
auto object_name =
std::string{node.node().select_node("Key").node().text().as_string()};
if (not utils::string::ends_with(api_path, "/")) {
auto is_encrypted =
not get_config().get_s3_config().encryption_token.empty();
if (is_encrypted) {
auto err = decrypt_object_name(api_path);
if (err != api_error::success) {
return err;
}
}
auto api_path{object_name};
if (utils::string::ends_with(api_path, "/")) {
continue;
}
auto size = node.node().select_node("Size").node().text().as_ullong();
api_file file{};
file.api_path = utils::path::create_api_path(api_path);
file.api_parent = utils::path::get_parent_api_path(file.api_path);
file.accessed_date = file.changed_date = file.creation_date =
file.modified_date = convert_api_date(node.node()
.select_node("LastModified")
.node()
.text()
.as_string());
file.file_size =
is_encrypted
? utils::encryption::encrypting_reader::calculate_decrypted_size(
size)
: size;
file.key = is_encrypted ? utils::path::create_api_path(api_path) : "";
auto err = add_if_not_found(file, api_path);
auto is_encrypted = not get_s3_config().encryption_token.empty();
if (is_encrypted) {
auto err = decrypt_object_name(api_path);
if (err != api_error::success) {
return err;
}
list.push_back(std::move(file));
}
auto size = node.node().select_node("Size").node().text().as_ullong();
api_file file{};
file.api_path = utils::path::create_api_path(api_path);
file.api_parent = utils::path::get_parent_api_path(file.api_path);
file.accessed_date = file.changed_date = file.creation_date =
file.modified_date = convert_api_date(
node.node().select_node("LastModified").node().text().as_string());
file.file_size =
is_encrypted
? utils::encryption::encrypting_reader::calculate_decrypted_size(
size)
: size;
file.key = is_encrypted ? utils::path::create_api_path(object_name) : "";
auto err = add_if_not_found(file, file.key);
if (err != api_error::success) {
return err;
}
list.push_back(std::move(file));
}
return api_error::success;
return grab_more ? api_error::more_data : api_error::success;
}
auto s3_provider::get_last_modified(bool directory,
const std::string &api_path) const
-> std::uint64_t {
auto s3_provider::get_last_modified(
bool directory, const std::string &api_path) const -> std::uint64_t {
bool is_encrypted{};
std::string object_name;
head_object_result result{};
@ -512,14 +603,13 @@ auto s3_provider::get_last_modified(bool directory,
: utils::time::get_time_now();
}
auto s3_provider::get_object_info(bool directory, const std::string &api_path,
bool &is_encrypted, std::string &object_name,
head_object_result &result) const
-> api_error {
auto s3_provider::get_object_info(
bool directory, const std::string &api_path, bool &is_encrypted,
std::string &object_name, head_object_result &result) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
try {
const auto cfg = get_config().get_s3_config();
const auto &cfg = get_s3_config();
is_encrypted = not cfg.encryption_token.empty();
std::string key;
@ -564,14 +654,13 @@ auto s3_provider::get_object_info(bool directory, const std::string &api_path,
return api_error::error;
}
auto s3_provider::get_object_list(std::string &response_data,
long &response_code,
std::optional<std::string> delimiter,
std::optional<std::string> prefix) const
-> bool {
auto s3_provider::get_object_list(
std::string &response_data, long &response_code,
std::optional<std::string> delimiter, std::optional<std::string> prefix,
std::optional<std::string> token) const -> bool {
curl::requests::http_get get{};
get.allow_timeout = true;
get.aws_service = "aws:amz:" + get_config().get_s3_config().region + ":s3";
get.aws_service = "aws:amz:" + get_s3_config().region + ":s3";
get.path = '/';
get.query["list-type"] = "2";
if (delimiter.has_value() && not delimiter.value().empty()) {
@ -581,6 +670,9 @@ auto s3_provider::get_object_list(std::string &response_data,
get.query["prefix"] = prefix.value();
utils::string::left_trim(get.query["prefix"], '/');
}
if (token.has_value() && not token.value().empty()) {
get.query["continuation-token"] = token.value();
}
get.response_handler = [&response_data](const data_buffer &data,
long /*response_code*/) {
response_data = std::string(data.begin(), data.end());
@ -594,39 +686,8 @@ auto s3_provider::get_total_drive_space() const -> std::uint64_t {
return std::numeric_limits<std::int64_t>::max() / std::int64_t(2);
}
auto s3_provider::get_used_drive_space_impl() const -> std::uint64_t {
std::string response_data;
long response_code{};
if (not get_object_list(response_data, response_code)) {
return 0U;
}
if (response_code != http_error_codes::ok) {
return 0U;
}
pugi::xml_document doc;
auto res = doc.load_string(response_data.c_str());
if (res.status != pugi::xml_parse_status::status_ok) {
return 0U;
}
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
auto node_list = doc.select_nodes("/ListBucketResult/Contents");
return std::accumulate(
node_list.begin(), node_list.end(), std::uint64_t(0U),
[&is_encrypted](std::uint64_t total, auto node) -> std::uint64_t {
auto size = node.node().select_node("Size").node().text().as_ullong();
return total + (is_encrypted ? utils::encryption::encrypting_reader::
calculate_decrypted_size(size)
: size);
});
}
auto s3_provider::is_directory(const std::string &api_path, bool &exists) const
-> api_error {
auto s3_provider::is_directory(const std::string &api_path,
bool &exists) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
exists = false;
@ -653,8 +714,8 @@ auto s3_provider::is_directory(const std::string &api_path, bool &exists) const
return api_error::error;
}
auto s3_provider::is_file(const std::string &api_path, bool &exists) const
-> api_error {
auto s3_provider::is_file(const std::string &api_path,
bool &exists) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
exists = false;
@ -691,8 +752,8 @@ auto s3_provider::read_file_bytes(const std::string &api_path, std::size_t size,
REPERTORY_USES_FUNCTION_NAME();
try {
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
auto res = get_item_meta(api_path, META_KEY, key);
@ -701,7 +762,7 @@ auto s3_provider::read_file_bytes(const std::string &api_path, std::size_t size,
}
}
const auto object_name =
auto object_name =
utils::path::create_api_path(is_encrypted ? key : api_path);
const auto read_bytes =
@ -761,30 +822,31 @@ auto s3_provider::read_file_bytes(const std::string &api_path, std::size_t size,
return res;
};
if (is_encrypted) {
std::string temp;
auto res = get_item_meta(api_path, META_SIZE, temp);
if (res != api_error::success) {
return res;
}
const auto total_size = utils::string::to_uint64(temp);
return utils::encryption::read_encrypted_range(
{offset, offset + size - 1U},
utils::encryption::generate_key<utils::encryption::hash_256_t>(
cfg.encryption_token),
[&](data_buffer &ct_buffer, std::uint64_t start_offset,
std::uint64_t end_offset) -> bool {
return read_bytes((end_offset - start_offset + 1U),
start_offset,
ct_buffer) == api_error::success;
},
total_size, data)
? api_error::success
: api_error::decryption_error;
if (not is_encrypted) {
return read_bytes(size, offset, data);
}
return read_bytes(size, offset, data);
std::string temp;
auto res = get_item_meta(api_path, META_SIZE, temp);
if (res != api_error::success) {
return res;
}
auto total_size = utils::string::to_uint64(temp);
return utils::encryption::read_encrypted_range(
{offset, offset + size - 1U},
utils::encryption::generate_key<utils::encryption::hash_256_t>(
cfg.encryption_token),
[&](data_buffer &ct_buffer, std::uint64_t start_offset,
std::uint64_t end_offset) -> bool {
return read_bytes((end_offset - start_offset + 1U),
start_offset,
ct_buffer) == api_error::success;
},
total_size, data)
? api_error::success
: api_error::decryption_error;
} catch (const std::exception &e) {
utils::error::raise_error(function_name, e, "exception occurred");
}
@ -796,8 +858,8 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
@ -807,7 +869,7 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
}
}
const auto object_name =
auto object_name =
utils::path::create_api_path(is_encrypted ? key : api_path);
curl::requests::http_delete del{};
@ -838,8 +900,8 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
auto s3_provider::remove_file_impl(const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
@ -849,7 +911,7 @@ auto s3_provider::remove_file_impl(const std::string &api_path) -> api_error {
}
}
const auto object_name =
auto object_name =
utils::path::create_api_path(is_encrypted ? key : api_path);
curl::requests::http_delete del{};
@ -886,6 +948,8 @@ auto s3_provider::rename_file(const std::string & /* from_api_path */,
auto s3_provider::start(api_item_added_callback api_item_added,
i_file_manager *mgr) -> bool {
event_system::instance().raise<service_started>("s3_provider");
s3_config_ = get_config().get_s3_config();
get_comm().enable_s3_path_style(s3_config_.use_path_style);
return base_provider::start(api_item_added, mgr);
}
@ -909,8 +973,8 @@ auto s3_provider::upload_file_impl(const std::string &api_path,
file_size = opt_size.value();
}
const auto cfg = get_config().get_s3_config();
const auto is_encrypted = not cfg.encryption_token.empty();
const auto &cfg = get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
@ -920,7 +984,7 @@ auto s3_provider::upload_file_impl(const std::string &api_path,
}
}
const auto object_name =
auto object_name =
utils::path::create_api_path(is_encrypted ? key : api_path);
curl::requests::http_put_file put_file{};
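Not part of the diff: the S3 changes above page through listings by resending the `list-type=2` request with a `continuation-token` while `IsTruncated` is true, taking the next token from `NextContinuationToken`. A compact sketch of that loop; `fetch_page` is a hypothetical stand-in for one `get_object_list` round trip plus XML parsing:

```cpp
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical stand-in for one ListObjectsV2 request: returns the page's keys,
// the IsTruncated flag, and the NextContinuationToken.
auto fetch_page(const std::string &token)
    -> std::tuple<std::vector<std::string>, bool, std::string> {
  if (token.empty()) {
    // first page: two keys, more data to come
    return std::make_tuple(std::vector<std::string>{"a", "b"}, true,
                           std::string{"page-2"});
  }
  // final page
  return std::make_tuple(std::vector<std::string>{"c"}, false, std::string{});
}

int main() {
  std::vector<std::string> all;
  std::string token;
  bool grab_more{true};
  while (grab_more) {
    auto [items, truncated, next] = fetch_page(token);
    all.insert(all.end(), items.begin(), items.end());
    grab_more = truncated;  // IsTruncated
    token = next;           // NextContinuationToken
  }
  std::cout << all.size() << " objects\n";  // 3
  return 0;
}
```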

View File

@ -37,8 +37,7 @@
#include "utils/utils.hpp"
namespace {
[[nodiscard]] auto get_bucket(repertory::sia_config cfg) -> std::string {
repertory::utils::string::trim(cfg.bucket);
[[nodiscard]] auto get_bucket(const repertory::sia_config &cfg) -> std::string {
if (cfg.bucket.empty()) {
return "default";
}
@ -68,7 +67,7 @@ auto sia_provider::create_directory_impl(const std::string &api_path,
curl::requests::http_put_file put_file{};
put_file.allow_timeout = true;
put_file.path = "/api/worker/objects" + api_path + "/";
put_file.query["bucket"] = get_bucket(get_config().get_sia_config());
put_file.query["bucket"] = get_bucket(get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -100,7 +99,7 @@ auto sia_provider::get_directory_item_count(const std::string &api_path) const
std::uint64_t item_count{};
if (object_list.contains("entries")) {
for (auto &&entry : object_list.at("entries")) {
for (const auto &entry : object_list.at("entries")) {
try {
auto name = entry.at("name").get<std::string>();
auto entry_api_path = utils::path::create_api_path(name);
@ -137,7 +136,7 @@ auto sia_provider::get_directory_items_impl(const std::string &api_path,
}
if (object_list.contains("entries")) {
for (auto &&entry : object_list.at("entries")) {
for (const auto &entry : object_list.at("entries")) {
try {
auto name = entry.at("name").get<std::string>();
auto entry_api_path = utils::path::create_api_path(name);
@ -214,7 +213,9 @@ auto sia_provider::get_file(const std::string &api_path, api_file &file) const
return api_error::success;
}
auto sia_provider::get_file_list(api_file_list &list) const -> api_error {
auto sia_provider::get_file_list(api_file_list &list,
std::string & /* marker */) const
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
using dir_func = std::function<api_error(std::string api_path)>;
@ -226,7 +227,7 @@ auto sia_provider::get_file_list(api_file_list &list) const -> api_error {
}
if (object_list.contains("entries")) {
for (auto &&entry : object_list.at("entries")) {
for (const auto &entry : object_list.at("entries")) {
auto name = entry.at("name").get<std::string>();
auto entry_api_path = utils::path::create_api_path(name);
@ -287,7 +288,7 @@ auto sia_provider::get_object_info(const std::string &api_path,
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/objects" + api_path;
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.query["bucket"] = get_bucket(get_sia_config());
get.response_handler = [&object_info](const data_buffer &data,
long response_code) {
@ -328,7 +329,7 @@ auto sia_provider::get_object_list(const std::string &api_path,
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/objects" + api_path + "/";
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.query["bucket"] = get_bucket(get_sia_config());
get.response_handler = [&object_list](const data_buffer &data,
long response_code) {
@ -362,7 +363,7 @@ auto sia_provider::get_total_drive_space() const -> std::uint64_t {
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/autopilot/config";
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.query["bucket"] = get_bucket(get_sia_config());
json config_data{};
get.response_handler = [&config_data](const data_buffer &data,
@ -393,37 +394,6 @@ auto sia_provider::get_total_drive_space() const -> std::uint64_t {
return 0U;
}
auto sia_provider::get_used_drive_space_impl() const -> std::uint64_t {
REPERTORY_USES_FUNCTION_NAME();
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/stats/objects";
get.query["bucket"] = get_bucket(get_config().get_sia_config());
json object_data{};
get.response_handler = [&object_data](const data_buffer &data,
long response_code) {
if (response_code == http_error_codes::ok) {
object_data = nlohmann::json::parse(data.begin(), data.end());
}
};
long response_code{};
stop_type stop_requested{};
if (not get_comm().make_request(get, response_code, stop_requested)) {
return 0U;
}
if (response_code != http_error_codes::ok) {
utils::error::raise_error(function_name, response_code,
"failed to get used drive space");
return 0U;
}
return object_data["totalObjectsSize"].get<std::uint64_t>();
}
auto sia_provider::is_directory(const std::string &api_path, bool &exists) const
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
@ -494,7 +464,7 @@ auto sia_provider::is_online() const -> bool {
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/consensus/state";
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.query["bucket"] = get_bucket(get_sia_config());
json state_data{};
get.response_handler = [&state_data](const data_buffer &data,
@ -535,7 +505,7 @@ auto sia_provider::read_file_bytes(const std::string &api_path,
curl::requests::http_get get{};
get.path = "/api/worker/objects" + api_path;
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.query["bucket"] = get_bucket(get_sia_config());
get.range = {{
offset,
offset + size - 1U,
@ -590,7 +560,7 @@ auto sia_provider::remove_directory_impl(const std::string &api_path)
curl::requests::http_delete del{};
del.allow_timeout = true;
del.path = "/api/bus/objects" + api_path + "/";
del.query["bucket"] = get_bucket(get_config().get_sia_config());
del.query["bucket"] = get_bucket(get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -616,7 +586,7 @@ auto sia_provider::remove_file_impl(const std::string &api_path) -> api_error {
curl::requests::http_delete del{};
del.allow_timeout = true;
del.path = "/api/bus/objects" + api_path;
del.query["bucket"] = get_bucket(get_config().get_sia_config());
del.query["bucket"] = get_bucket(get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -648,7 +618,7 @@ auto sia_provider::rename_file(const std::string &from_api_path,
{"mode", "single"},
});
post.path = "/api/bus/objects/rename";
post.query["bucket"] = get_bucket(get_config().get_sia_config());
post.query["bucket"] = get_bucket(get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -673,6 +643,7 @@ auto sia_provider::rename_file(const std::string &from_api_path,
auto sia_provider::start(api_item_added_callback api_item_added,
i_file_manager *mgr) -> bool {
event_system::instance().raise<service_started>("sia_provider");
sia_config_ = get_config().get_sia_config();
return base_provider::start(api_item_added, mgr);
}
@ -689,7 +660,7 @@ auto sia_provider::upload_file_impl(const std::string &api_path,
curl::requests::http_put_file put_file{};
put_file.path = "/api/worker/objects" + api_path;
put_file.query["bucket"] = get_bucket(get_config().get_sia_config());
put_file.query["bucket"] = get_bucket(get_sia_config());
put_file.source_path = source_path;
long response_code{};

View File

@ -22,10 +22,14 @@
#include "rpc/server/full_server.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/i_file_manager.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "types/rpc.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
@ -36,25 +40,20 @@ full_server::full_server(app_config &config, i_provider &provider,
void full_server::handle_get_directory_items(const httplib::Request &req,
httplib::Response &res) {
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
const auto list = fm_.get_directory_items(api_path);
json items = {{"items", std::vector<json>()}};
for (const auto &item : list) {
items["items"].emplace_back(item.to_json());
}
res.set_content(items.dump(), "application/json");
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
res.set_content(json({
{"items", fm_.get_directory_items(api_path)},
})
.dump(),
"application/json");
res.status = 200;
}
void full_server::handle_get_drive_information(const httplib::Request & /*req*/,
httplib::Response &res) {
auto dir_size =
utils::file::directory(get_config().get_cache_directory()).size();
res.set_content(
json({
{"cache_space_used", dir_size},
{"cache_space_used", cache_size_mgr::instance().size()},
{"drive_space_total", provider_.get_total_drive_space()},
{"drive_space_used", provider_.get_used_drive_space()},
{"item_count", provider_.get_total_item_count()},
@ -66,9 +65,9 @@ void full_server::handle_get_drive_information(const httplib::Request & /*req*/,
void full_server::handle_get_open_files(const httplib::Request & /*req*/,
httplib::Response &res) {
const auto list = fm_.get_open_files();
auto list = fm_.get_open_files();
json open_files = {{"items", std::vector<json>()}};
json open_files;
for (const auto &kv : list) {
open_files["items"].emplace_back(json({
{"path", kv.first},
@ -81,7 +80,10 @@ void full_server::handle_get_open_files(const httplib::Request & /*req*/,
void full_server::handle_get_pinned_files(const httplib::Request & /*req*/,
httplib::Response &res) {
res.set_content(json({{"items", provider_.get_pinned_files()}}).dump(),
res.set_content(json({
{"items", provider_.get_pinned_files()},
})
.dump(),
"application/json");
res.status = 200;
}
@ -90,11 +92,10 @@ void full_server::handle_get_pinned_status(const httplib::Request &req,
httplib::Response &res) {
REPERTORY_USES_FUNCTION_NAME();
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
std::string pinned;
const auto result = provider_.get_item_meta(api_path, META_PINNED, pinned);
auto result = provider_.get_item_meta(api_path, META_PINNED, pinned);
if (result != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, result,
"failed to get pinned status");
@ -103,8 +104,10 @@ void full_server::handle_get_pinned_status(const httplib::Request &req,
}
res.set_content(
json(
{{"pinned", pinned.empty() ? false : utils::string::to_bool(pinned)}})
json({
{"pinned",
pinned.empty() ? false : utils::string::to_bool(pinned)},
})
.dump(),
"application/json");
res.status = 200;
@ -114,8 +117,7 @@ void full_server::handle_pin_file(const httplib::Request &req,
httplib::Response &res) {
REPERTORY_USES_FUNCTION_NAME();
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
bool exists{};
auto result = provider_.is_file(api_path, exists);
@ -143,8 +145,7 @@ void full_server::handle_unpin_file(const httplib::Request &req,
httplib::Response &res) {
REPERTORY_USES_FUNCTION_NAME();
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
bool exists{};
auto result = provider_.is_file(api_path, exists);
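Aside, not part of the commit: the handlers above now build each response by dumping an `nlohmann::json` object directly into `set_content`. A standalone sketch of that construction, without httplib, assuming the single-header nlohmann/json library and made-up values in place of the provider and file-manager queries:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
  // Hypothetical figures standing in for cache_size_mgr/provider results.
  json info({
      {"cache_space_used", 1024U},
      {"drive_space_total", 4096U},
      {"item_count", 2U},
  });
  std::cout << info.dump() << '\n';  // what set_content(..., "application/json") sends
  return 0;
}
```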

View File

@ -22,6 +22,8 @@
#include "rpc/server/server.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "utils/base64.hpp"
#include "utils/error_utils.hpp"

View File

@ -25,15 +25,40 @@
#include "utils/string.hpp"
namespace repertory {
auto download_type_from_string(
std::string type, const download_type &default_type) -> download_type {
auto database_type_from_string(std::string type,
database_type default_type) -> database_type {
type = utils::string::to_lower(utils::string::trim(type));
if (type == "direct") {
return download_type::direct;
if (type == "rocksdb") {
return database_type::rocksdb;
}
if (type == "fallback") {
return download_type::fallback;
if (type == "sqlite") {
return database_type::sqlite;
}
return default_type;
}
auto database_type_to_string(const database_type &type) -> std::string {
switch (type) {
case database_type::rocksdb:
return "rocksdb";
case database_type::sqlite:
return "sqlite";
default:
return "rocksdb";
}
}
auto download_type_from_string(std::string type,
download_type default_type) -> download_type {
type = utils::string::to_lower(utils::string::trim(type));
if (type == "default") {
return download_type::default_;
}
if (type == "direct") {
return download_type::direct;
}
if (type == "ring_buffer") {
@ -45,14 +70,14 @@ auto download_type_from_string(
auto download_type_to_string(const download_type &type) -> std::string {
switch (type) {
case download_type::default_:
return "default";
case download_type::direct:
return "direct";
case download_type::fallback:
return "fallback";
case download_type::ring_buffer:
return "ring_buffer";
default:
return "fallback";
return "default";
}
}
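As an aside (not part of the commit), the `database_type`/`download_type` helpers above normalize input with trim plus lower-casing, so configuration values round-trip case-insensitively and fall back to a default on unknown input. A small standalone sketch re-declaring a minimal enum instead of pulling in the repertory headers:

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

enum class database_type { rocksdb, sqlite };  // minimal stand-in for the real enum

auto database_type_from_string(std::string type, database_type default_type)
    -> database_type {
  // Same normalization order as the code above: trim, then lower-case.
  type.erase(0, type.find_first_not_of(" \t"));
  type.erase(type.find_last_not_of(" \t") + 1);
  std::transform(type.begin(), type.end(), type.begin(),
                 [](unsigned char chr) { return std::tolower(chr); });
  if (type == "rocksdb") {
    return database_type::rocksdb;
  }
  if (type == "sqlite") {
    return database_type::sqlite;
  }
  return default_type;
}

int main() {
  auto type = database_type_from_string("  RocksDB ", database_type::sqlite);
  std::cout << (type == database_type::rocksdb) << '\n';  // 1
  return 0;
}
```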
@ -62,6 +87,7 @@ static const std::unordered_map<api_error, std::string> LOOKUP = {
{api_error::bad_address, "bad_address"},
{api_error::buffer_overflow, "buffer_overflow"},
{api_error::buffer_too_small, "buffer_too_small"},
{api_error::cache_not_initialized, "cache_not_initialized"},
{api_error::comm_error, "comm_error"},
{api_error::decryption_error, "decryption_error"},
{api_error::directory_end_of_files, "directory_end_of_files"},
@ -80,10 +106,12 @@ static const std::unordered_map<api_error, std::string> LOOKUP = {
{api_error::invalid_handle, "invalid_handle"},
{api_error::invalid_operation, "invalid_operation"},
{api_error::invalid_ring_buffer_multiple, "invalid_ring_buffer_multiple"},
{api_error::invalid_ring_buffer_position, "invalid_ring_buffer_position"},
{api_error::invalid_ring_buffer_size, "invalid_ring_buffer_size"},
{api_error::invalid_version, "invalid_version"},
{api_error::item_exists, "item_exists"},
{api_error::item_not_found, "item_not_found"},
{api_error::more_data, "more_data"},
{api_error::no_disk_space, "no_disk_space"},
{api_error::not_implemented, "not_implemented"},
{api_error::not_supported, "not_supported"},

View File

@ -45,15 +45,15 @@ void get_api_authentication_data(std::string &user, std::string &password,
if (success) {
if (user.empty() && password.empty()) {
password = data["ApiAuth"].get<std::string>();
user = data["ApiUser"].get<std::string>();
password = data[JSON_API_AUTH].get<std::string>();
user = data[JSON_API_USER].get<std::string>();
}
port = data["ApiPort"].get<std::uint16_t>();
port = data[JSON_API_PORT].get<std::uint16_t>();
}
}
[[nodiscard]] auto
get_provider_type_from_args(std::vector<const char *> args) -> provider_type {
[[nodiscard]] auto get_provider_type_from_args(std::vector<const char *> args)
-> provider_type {
if (has_option(args, options::s3_option)) {
return provider_type::s3;
}
@ -67,8 +67,8 @@ get_provider_type_from_args(std::vector<const char *> args) -> provider_type {
return provider_type::sia;
}
auto has_option(std::vector<const char *> args,
const std::string &option_name) -> bool {
auto has_option(std::vector<const char *> args, const std::string &option_name)
-> bool {
return std::find_if(args.begin(), args.end(),
[&option_name](const auto &value) -> bool {
return option_name == value;
@ -80,8 +80,8 @@ auto has_option(std::vector<const char *> args, const option &opt) -> bool {
}
auto parse_option(std::vector<const char *> args,
const std::string &option_name,
std::uint8_t count) -> std::vector<std::string> {
const std::string &option_name, std::uint8_t count)
-> std::vector<std::string> {
std::vector<std::string> ret;
auto found{false};
for (std::size_t i = 0U; not found && (i < args.size()); i++) {
@ -119,9 +119,10 @@ auto parse_string_option(std::vector<const char *> args, const option &opt,
return ret;
}
auto parse_drive_options(
std::vector<const char *> args, [[maybe_unused]] provider_type &prov,
[[maybe_unused]] std::string &data_directory) -> std::vector<std::string> {
auto parse_drive_options(std::vector<const char *> args,
[[maybe_unused]] provider_type &prov,
[[maybe_unused]] std::string &data_directory)
-> std::vector<std::string> {
// Strip out options from command line
const auto &option_list = options::option_list;
std::vector<std::string> drive_args;

View File

@ -22,6 +22,9 @@
#include "utils/polling.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "utils/tasks.hpp"
namespace repertory {
polling polling::instance_;
@ -29,93 +32,125 @@ polling polling::instance_;
void polling::frequency_thread(
std::function<std::uint32_t()> get_frequency_seconds, frequency freq) {
while (not stop_requested_) {
std::deque<std::future<void>> futures;
unique_mutex_lock l(mutex_);
if (not stop_requested_ &&
notify_.wait_for(l, std::chrono::seconds(get_frequency_seconds())) ==
std::cv_status::timeout) {
for (const auto &kv : items_) {
if (kv.second.freq == freq) {
futures.emplace_back(
std::async(std::launch::async, [this, &freq, kv]() -> void {
if (config_->get_event_level() == event_level::trace ||
freq != frequency::second) {
event_system::instance().raise<polling_item_begin>(kv.first);
}
kv.second.action();
if (config_->get_event_level() == event_level::trace ||
freq != frequency::second) {
event_system::instance().raise<polling_item_end>(kv.first);
}
}));
}
}
l.unlock();
unique_mutex_lock lock(mutex_);
auto futures = std::accumulate(
items_.begin(), items_.end(), std::deque<tasks::task_ptr>{},
[this, &freq](auto &&list, auto &&item) {
if (item.second.freq != freq) {
return list;
}
while (not futures.empty()) {
futures.front().wait();
futures.pop_front();
}
auto future = tasks::instance().schedule({
[this, &freq, item](auto &&task_stopped) {
if (config_->get_event_level() == event_level::trace ||
freq != frequency::second) {
event_system::instance().raise<polling_item_begin>(
item.first);
}
item.second.action(task_stopped);
if (config_->get_event_level() == event_level::trace ||
freq != frequency::second) {
event_system::instance().raise<polling_item_end>(item.first);
}
},
});
list.emplace_back(future);
return list;
});
lock.unlock();
while (not futures.empty()) {
futures.front()->wait();
futures.pop_front();
}
if (stop_requested_) {
return;
}
lock.lock();
notify_.wait_for(lock, std::chrono::seconds(get_frequency_seconds()));
}
}
void polling::remove_callback(const std::string &name) {
mutex_lock l(mutex_);
mutex_lock lock(mutex_);
items_.erase(name);
}
void polling::set_callback(const polling_item &pi) {
mutex_lock l(mutex_);
items_[pi.name] = pi;
void polling::set_callback(const polling_item &item) {
mutex_lock lock(mutex_);
items_[item.name] = item;
}
void polling::start(app_config *config) {
mutex_lock l(start_stop_mutex_);
if (not high_frequency_thread_) {
event_system::instance().raise<service_started>("polling");
config_ = config;
stop_requested_ = false;
high_frequency_thread_ = std::make_unique<std::thread>([this]() -> void {
this->frequency_thread(
[this]() -> std::uint32_t {
return config_->get_high_frequency_interval_secs();
},
frequency::high);
});
low_frequency_thread_ = std::make_unique<std::thread>([this]() -> void {
this->frequency_thread(
[this]() -> std::uint32_t {
return config_->get_low_frequency_interval_secs();
},
frequency::low);
});
second_frequency_thread_ = std::make_unique<std::thread>([this]() -> void {
this->frequency_thread([]() -> std::uint32_t { return 1U; },
frequency::second);
});
mutex_lock lock(start_stop_mutex_);
if (frequency_threads_.at(0U)) {
return;
}
event_system::instance().raise<service_started>("polling");
config_ = config;
stop_requested_ = false;
tasks::instance().start(config);
auto idx{0U};
frequency_threads_.at(idx++) =
std::make_unique<std::thread>([this]() -> void {
this->frequency_thread(
[this]() -> std::uint32_t {
return config_->get_high_frequency_interval_secs();
},
frequency::high);
});
frequency_threads_.at(idx++) =
std::make_unique<std::thread>([this]() -> void {
this->frequency_thread(
[this]() -> std::uint32_t {
return config_->get_low_frequency_interval_secs();
},
frequency::low);
});
frequency_threads_.at(idx++) =
std::make_unique<std::thread>([this]() -> void {
this->frequency_thread(
[this]() -> std::uint32_t {
return config_->get_med_frequency_interval_secs();
},
frequency::medium);
});
frequency_threads_.at(idx++) =
std::make_unique<std::thread>([this]() -> void {
this->frequency_thread([]() -> std::uint32_t { return 1U; },
frequency::second);
});
}
void polling::stop() {
if (high_frequency_thread_) {
event_system::instance().raise<service_shutdown_begin>("polling");
mutex_lock l(start_stop_mutex_);
if (high_frequency_thread_) {
{
stop_requested_ = true;
mutex_lock l2(mutex_);
notify_.notify_all();
}
high_frequency_thread_->join();
low_frequency_thread_->join();
second_frequency_thread_->join();
high_frequency_thread_.reset();
low_frequency_thread_.reset();
second_frequency_thread_.reset();
}
event_system::instance().raise<service_shutdown_end>("polling");
mutex_lock lock(start_stop_mutex_);
if (not frequency_threads_.at(0U)) {
return;
}
event_system::instance().raise<service_shutdown_begin>("polling");
stop_requested_ = true;
tasks::instance().stop();
unique_mutex_lock thread_lock(mutex_);
notify_.notify_all();
thread_lock.unlock();
for (auto &&thread : frequency_threads_) {
thread->join();
thread.reset();
}
event_system::instance().raise<service_shutdown_end>("polling");
}
} // namespace repertory
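
The reworked `polling` implementation above routes every callback through the shared `tasks` scheduler instead of spawning ad-hoc `std::async` futures, and it adds a dedicated `medium` frequency thread alongside `high`, `low`, and `second`. Below is a minimal, hedged sketch of registering a callback with this subsystem; the `polling_item` field names (`name`, `freq`, `action`) and the stop-flag parameter come from the diff above, while the `polling::instance()` accessor, the nested `polling::frequency` qualification, and the initializer order are assumptions for illustration only.

```cpp
// Hypothetical usage sketch (not part of this commit): registering a
// low-frequency polling callback. Field names are taken from the diff;
// instance() and the frequency enum's qualification are assumed.
#include "utils/polling.hpp"

namespace repertory {
void register_cache_cleanup() {
  polling::instance().set_callback({
      .name = "cache_cleanup",         // used for polling_item_begin/end events
      .freq = polling::frequency::low, // runs on the low-frequency thread
      .action =
          [](auto &&task_stopped) {    // invoked via tasks::instance().schedule()
            if (task_stopped) {
              return;                  // bail out promptly on shutdown
            }
            // ... perform the periodic work here ...
          },
  });
}
} // namespace repertory
```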

View File

@ -0,0 +1,157 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "utils/tasks.hpp"
#include "app_config.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
tasks tasks::instance_;
void tasks::task_wait::set_result(bool result) {
unique_mutex_lock lock(mtx);
if (complete) {
notify.notify_all();
return;
}
complete = true;
success = result;
notify.notify_all();
}
auto tasks::task_wait::wait() const -> bool {
unique_mutex_lock lock(mtx);
while (not complete) {
notify.wait(lock);
}
return success;
}
auto tasks::schedule(task item) -> task_ptr {
++count_;
while (not stop_requested_ && (count_ >= task_threads_.size())) {
std::this_thread::sleep_for(
std::chrono::milliseconds(config_->get_task_wait_ms()));
}
scheduled_task runnable{item};
unique_mutex_lock lock(mutex_);
if (stop_requested_) {
runnable.wait->set_result(false);
notify_.notify_all();
return runnable.wait;
}
tasks_.push_back(runnable);
notify_.notify_all();
return runnable.wait;
}
void tasks::start(app_config *config) {
mutex_lock start_stop_lock(start_stop_mutex_);
if (not task_threads_.empty()) {
return;
}
config_ = config;
count_ = 0U;
stop_requested_ = false;
tasks_.clear();
for (std::uint32_t idx = 0U; idx < std::thread::hardware_concurrency();
++idx) {
task_threads_.emplace_back(
std::make_unique<std::jthread>([this]() { task_thread(); }));
}
}
void tasks::stop() {
mutex_lock start_stop_lock(start_stop_mutex_);
if (task_threads_.empty()) {
return;
}
stop_requested_ = true;
unique_mutex_lock lock(mutex_);
notify_.notify_all();
lock.unlock();
task_threads_.clear();
lock.lock();
tasks_.clear();
notify_.notify_all();
lock.unlock();
}
void tasks::task_thread() {
REPERTORY_USES_FUNCTION_NAME();
unique_mutex_lock lock(mutex_);
const auto release = [&]() {
notify_.notify_all();
lock.unlock();
};
release();
while (not stop_requested_) {
lock.lock();
while (not stop_requested_ && tasks_.empty()) {
notify_.wait(lock);
}
if (stop_requested_) {
release();
return;
}
if (tasks_.empty()) {
release();
continue;
}
auto runnable = tasks_.front();
tasks_.pop_front();
release();
try {
runnable.item.action(stop_requested_);
runnable.wait->set_result(true);
--count_;
} catch (const std::exception &e) {
runnable.wait->set_result(false);
utils::error::raise_error(function_name, e, "failed to execute task");
}
lock.lock();
release();
}
}
} // namespace repertory
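
The new `tasks` helper shown above is a fixed pool of `std::jthread` workers (one per hardware thread) that drains a queued deque of actions; `schedule()` applies back-pressure by sleeping for `get_task_wait_ms()` while the in-flight count is at capacity, and returns a `task_ptr` whose `wait()` blocks until the worker reports success or failure. The following hedged sketch schedules one unit of work and waits on it; the brace-initialized `{ lambda }` shape mirrors the call sites in the polling diff, while the `compact_database` name and the loop body are illustrative assumptions.

```cpp
// Hypothetical usage sketch (not part of this commit): scheduling work on
// the shared task pool and blocking until it finishes. The action receives
// the pool's stop flag so long-running work can exit early; wait() returns
// false if the task threw or the pool was already stopping.
#include "utils/tasks.hpp"

namespace repertory {
auto compact_database() -> bool {
  auto waiter = tasks::instance().schedule({
      [](auto &&task_stopped) {
        while (not task_stopped) {
          // ... do one bounded slice of work, then re-check the flag ...
          break; // placeholder so the sketch terminates
        }
      },
  });
  return waiter->wait(); // true on success, false on failure or shutdown
}
} // namespace repertory
```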

View File

@ -52,6 +52,8 @@ auto from_api_error(const api_error &err) -> int {
return -EEXIST;
case api_error::file_in_use:
return -EBUSY;
case api_error::invalid_handle:
return -EBADF;
case api_error::invalid_operation:
return -EINVAL;
case api_error::item_not_found:
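
This hunk extends the FUSE error translation so `api_error::invalid_handle` surfaces as `-EBADF` instead of falling through to a generic code. As a self-contained illustration of the pattern being extended, the sketch below uses a stand-in enum and function (`demo_error`, `demo_from_error`) rather than the project's `api_error`; only the errno values mirror the diff.

```cpp
// Hypothetical, self-contained sketch (not part of this commit) of the
// errno-translation pattern this hunk extends: domain errors map to
// negated POSIX errno values, and an invalid handle now yields -EBADF.
#include <cerrno>

enum class demo_error { exists, busy, bad_handle, invalid, not_found };

auto demo_from_error(demo_error err) -> int {
  switch (err) {
  case demo_error::exists:
    return -EEXIST;
  case demo_error::busy:
    return -EBUSY;
  case demo_error::bad_handle:
    return -EBADF; // the mapping added by this change for invalid handles
  case demo_error::invalid:
    return -EINVAL;
  case demo_error::not_found:
    return -ENOENT;
  }
  return -EIO;
}
```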

Some files were not shown because too many files have changed in this diff.