2.0.0-rc (#9)
Some checks failed
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_windows/pipeline/head This commit looks good
BlockStorage/repertory/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
Some checks failed
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_windows/pipeline/head This commit looks good
BlockStorage/repertory/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
### Issues * \#1 \[bug\] Unable to mount S3 due to 'item_not_found' exception * \#2 Require bucket name for S3 mounts * \#3 \[bug\] File size is not being updated in S3 mount * \#4 Upgrade to libfuse-3.x.x * \#5 Switch to renterd for Sia support * \#6 Switch to cpp-httplib to further reduce dependencies * \#7 Remove global_data and calculate used disk space per provider * \#8 Switch to libcurl for S3 mount support ### Changes from v1.x.x * Added read-only encrypt provider * Pass-through mount point that transparently encrypts source data using `XChaCha20-Poly1305` * Added S3 encryption support via `XChaCha20-Poly1305` * Added replay protection to remote mounts * Added support base64 writes in remote FUSE * Created static linked Linux binaries for `amd64` and `aarch64` using `musl-libc` * Removed legacy Sia renter support * Removed Skynet support * Fixed multiple remote mount WinFSP API issues on \*NIX servers * Implemented chunked read and write * Writes for non-cached files are performed in chunks of 8Mib * Removed `repertory-ui` support * Removed `FreeBSD` support * Switched to `libsodium` over `CryptoPP` * Switched to `XChaCha20-Poly1305` for remote mounts * Updated `GoogleTest` to v1.14.0 * Updated `JSON for Modern C++` to v3.11.2 * Updated `OpenSSL` to v1.1.1w * Updated `RocksDB` to v8.5.3 * Updated `WinFSP` to 2023 * Updated `boost` to v1.78.0 * Updated `cURL` to v8.3.0 * Updated `zlib` to v1.3 * Use `upload_manager` for all providers * Adds a delay to uploads to prevent excessive API calls * Supports re-upload after mount restart for incomplete uploads * NOTE: Uploads for all providers are full file (no resume support) * Multipart upload support is planned for S3 Reviewed-on: #9
This commit is contained in:
@@ -1,105 +1,196 @@
|
||||
/*
|
||||
Copyright <2018-2022> <scott.e.graves@protonmail.com>
|
||||
Copyright <2018-2023> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
|
||||
associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or
|
||||
substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
|
||||
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
|
||||
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#include "providers/base_provider.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "file_manager/i_file_manager.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/error_utils.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/global_data.hpp"
|
||||
#include "utils/native_file.hpp"
|
||||
#include "utils/path_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
base_provider::base_provider(app_config &config) : config_(config), meta_db_(config) {}
|
||||
base_provider::base_provider(app_config &config) : config_(config) {}
|
||||
|
||||
void base_provider::notify_directory_added(const std::string &api_path,
|
||||
const std::string &api_parent) {
|
||||
recur_mutex_lock l(notify_added_mutex_);
|
||||
|
||||
const auto now = utils::get_file_time_now();
|
||||
api_item_added_(api_path, api_parent, "", true, now, now, now, now);
|
||||
}
|
||||
|
||||
void base_provider::update_filesystem_item(const bool &directory, const api_error &error,
|
||||
const std::string &api_path,
|
||||
filesystem_item &fsi) const {
|
||||
if (error == api_error::success) {
|
||||
fsi.directory = directory;
|
||||
fsi.lock = fsi.lock ? fsi.lock : std::make_shared<std::recursive_mutex>();
|
||||
fsi.api_path = api_path;
|
||||
fsi.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
} else {
|
||||
event_system::instance().raise<filesystem_item_get_failed>(
|
||||
api_path, std::to_string(static_cast<int>(error)));
|
||||
void base_provider::calculate_used_drive_space(bool add_missing) {
|
||||
api_file_list list{};
|
||||
if (get_file_list(list) != api_error::success) {
|
||||
return;
|
||||
}
|
||||
|
||||
used_space_ = std::accumulate(
|
||||
list.begin(), list.end(), std::uint64_t(0U),
|
||||
[this, &add_missing](std::uint64_t total_size, const auto &file) {
|
||||
if (add_missing && not meta_db_->get_item_meta_exists(file.api_path)) {
|
||||
[[maybe_unused]] auto res = this->notify_file_added(
|
||||
file.api_path, utils::path::get_parent_api_path(file.api_path),
|
||||
0);
|
||||
}
|
||||
|
||||
return total_size + file.file_size;
|
||||
});
|
||||
}
|
||||
|
||||
api_error base_provider::create_directory_clone_source_meta(const std::string &source_api_path,
|
||||
const std::string &api_path) {
|
||||
void base_provider::cleanup() {
|
||||
remove_deleted_files();
|
||||
remove_unknown_source_files();
|
||||
remove_expired_orphaned_files();
|
||||
}
|
||||
|
||||
auto base_provider::create_directory_clone_source_meta(
|
||||
const std::string &source_api_path, const std::string &api_path)
|
||||
-> api_error {
|
||||
api_meta_map meta{};
|
||||
auto ret = get_item_meta(source_api_path, meta);
|
||||
if (ret == api_error::success) {
|
||||
ret = create_directory(api_path, meta);
|
||||
}
|
||||
return ret;
|
||||
return ret == api_error::item_not_found ? api_error::directory_not_found
|
||||
: ret;
|
||||
}
|
||||
|
||||
api_error base_provider::create_file(const std::string &api_path, api_meta_map &meta) {
|
||||
const auto isDir = is_directory(api_path);
|
||||
const auto isFile = is_file(api_path);
|
||||
auto ret = isDir ? api_error::directory_exists
|
||||
: isFile ? api_error::file_exists
|
||||
: api_error::success;
|
||||
if (ret == api_error::success) {
|
||||
const auto source =
|
||||
utils::path::combine(get_config().get_cache_directory(), {utils::create_uuid_string()});
|
||||
auto base_provider::create_file(const std::string &api_path, api_meta_map &meta)
|
||||
-> api_error {
|
||||
bool exists{};
|
||||
auto res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
if (exists) {
|
||||
return api_error::directory_exists;
|
||||
}
|
||||
|
||||
res = is_file(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
if (exists) {
|
||||
return api_error::item_exists;
|
||||
}
|
||||
|
||||
if ((res = meta_db_->set_item_meta(api_path, meta)) != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
{
|
||||
native_file_ptr nf;
|
||||
if ((ret = native_file::create_or_open(source, nf)) == api_error::success) {
|
||||
nf->close();
|
||||
res = native_file::create_or_open(meta[META_SOURCE], nf);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
nf->close();
|
||||
}
|
||||
|
||||
stop_type stop_requested = false;
|
||||
return upload_file(api_path, meta[META_SOURCE], meta[META_ENCRYPTION_TOKEN],
|
||||
stop_requested);
|
||||
}
|
||||
|
||||
auto base_provider::get_api_path_from_source(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error {
|
||||
return meta_db_->get_api_path_from_source(source_path, api_path);
|
||||
}
|
||||
|
||||
auto base_provider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const
|
||||
-> api_error {
|
||||
auto res = populate_directory_items(api_path, list);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
std::sort(list.begin(), list.end(), [](const auto &a, const auto &b) -> bool {
|
||||
return (a.directory && not b.directory) ||
|
||||
(not(b.directory && not a.directory) &&
|
||||
(a.api_path.compare(b.api_path) < 0));
|
||||
});
|
||||
|
||||
list.insert(list.begin(), directory_item{
|
||||
"..",
|
||||
"",
|
||||
true,
|
||||
});
|
||||
list.insert(list.begin(), directory_item{
|
||||
".",
|
||||
"",
|
||||
true,
|
||||
});
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto base_provider::get_file(const std::string &api_path, api_file &file) const
|
||||
-> api_error {
|
||||
auto ret = api_error::success;
|
||||
try {
|
||||
if ((ret = populate_file(api_path, file)) != api_error::success) {
|
||||
event_system::instance().raise<file_get_failed>(api_path,
|
||||
api_error_to_string(ret));
|
||||
}
|
||||
|
||||
if (ret == api_error::success) {
|
||||
if (((ret = set_item_meta(api_path, meta)) != api_error::success) ||
|
||||
((ret = set_source_path(api_path, source)) != api_error::success) ||
|
||||
((ret = upload_file(api_path, source, meta[META_ENCRYPTION_TOKEN])) !=
|
||||
api_error::success)) {
|
||||
meta_db_.remove_item_meta(format_api_path(api_path));
|
||||
utils::file::delete_file(source);
|
||||
}
|
||||
std::string sz;
|
||||
if ((ret = get_item_meta(api_path, META_SIZE, sz)) != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
file.file_size = utils::string::to_uint64(sz);
|
||||
return ret;
|
||||
} catch (const std::exception &e) {
|
||||
event_system::instance().raise<file_get_failed>(
|
||||
api_path, e.what() ? e.what() : "failed to get file");
|
||||
}
|
||||
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto base_provider::get_file_size(const std::string &api_path,
|
||||
std::uint64_t &file_size) const -> api_error {
|
||||
api_file file{};
|
||||
const auto ret = get_file(api_path, file);
|
||||
if (ret == api_error::success) {
|
||||
file_size = file.file_size;
|
||||
} else {
|
||||
event_system::instance().raise<file_get_size_failed>(
|
||||
api_path, api_error_to_string(ret));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error base_provider::get_api_path_from_source(const std::string &source_path,
|
||||
std::string &api_path) const {
|
||||
const auto ret = meta_db_.get_api_path_from_source(source_path, api_path);
|
||||
restore_api_path(api_path);
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error base_provider::get_filesystem_item(const std::string &api_path, const bool &directory,
|
||||
filesystem_item &fsi) const {
|
||||
auto base_provider::get_filesystem_item(const std::string &api_path,
|
||||
bool directory,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
auto ret = api_error::error;
|
||||
if (directory) {
|
||||
ret = is_directory(api_path) ? api_error::success : api_error::item_not_found;
|
||||
bool exists{};
|
||||
ret = is_directory(api_path, exists);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
ret = exists ? api_error::success : api_error::item_not_found;
|
||||
update_filesystem_item(true, ret, api_path, fsi);
|
||||
} else {
|
||||
api_file file{};
|
||||
@@ -109,16 +200,25 @@ api_error base_provider::get_filesystem_item(const std::string &api_path, const
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error base_provider::get_filesystem_item_and_file(const std::string &api_path, api_file &file,
|
||||
filesystem_item &fsi) const {
|
||||
auto base_provider::get_filesystem_item_and_file(const std::string &api_path,
|
||||
api_file &file,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
auto ret = get_item_meta(api_path, META_SOURCE, fsi.source_path);
|
||||
if (ret == api_error::success) {
|
||||
ret = get_file(api_path, file);
|
||||
if (ret == api_error::success) {
|
||||
fsi.encryption_token = file.encryption_token;
|
||||
fsi.size = file.file_size;
|
||||
} else if (not is_file(api_path)) {
|
||||
ret = api_error::item_not_found;
|
||||
} else {
|
||||
bool exists{};
|
||||
ret = is_file(api_path, exists);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
if (not exists) {
|
||||
ret = api_error::item_not_found;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,12 +226,13 @@ api_error base_provider::get_filesystem_item_and_file(const std::string &api_pat
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error base_provider::get_filesystem_item_from_source_path(const std::string &source_path,
|
||||
filesystem_item &fsi) const {
|
||||
auto base_provider::get_filesystem_item_from_source_path(
|
||||
const std::string &source_path, filesystem_item &fsi) const -> api_error {
|
||||
auto ret = api_error::item_not_found;
|
||||
if (not source_path.empty()) {
|
||||
std::string api_path;
|
||||
if ((ret = get_api_path_from_source(source_path, api_path)) == api_error::success) {
|
||||
if ((ret = get_api_path_from_source(source_path, api_path)) ==
|
||||
api_error::success) {
|
||||
ret = get_filesystem_item(api_path, false, fsi);
|
||||
}
|
||||
}
|
||||
@@ -139,71 +240,308 @@ api_error base_provider::get_filesystem_item_from_source_path(const std::string
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error base_provider::get_item_meta(const std::string &api_path, api_meta_map &meta) const {
|
||||
auto ret = meta_db_.get_item_meta(format_api_path(api_path), meta);
|
||||
auto base_provider::get_item_meta(const std::string &api_path,
|
||||
api_meta_map &meta) const -> api_error {
|
||||
auto ret = meta_db_->get_item_meta(api_path, meta);
|
||||
if (ret == api_error::item_not_found) {
|
||||
auto get_meta = false;
|
||||
if (is_directory(api_path)) {
|
||||
notify_directory_added(api_path, utils::path::get_parent_api_path(api_path));
|
||||
get_meta = true;
|
||||
} else if (is_file(api_path)) {
|
||||
std::uint64_t file_size = 0u;
|
||||
if ((ret = get_file_size(api_path, file_size)) == api_error::success) {
|
||||
get_meta = ((ret = notify_file_added(api_path, utils::path::get_parent_api_path(api_path),
|
||||
file_size)) == api_error::success);
|
||||
bool exists{};
|
||||
ret = is_directory(api_path, exists);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (exists) {
|
||||
ret = notify_directory_added(api_path,
|
||||
utils::path::get_parent_api_path(api_path));
|
||||
if (ret == api_error::success) {
|
||||
get_meta = true;
|
||||
}
|
||||
} else {
|
||||
ret = is_file(api_path, exists);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
if (exists) {
|
||||
std::uint64_t file_size{};
|
||||
if ((ret = get_file_size(api_path, file_size)) == api_error::success) {
|
||||
get_meta = ((ret = notify_file_added(
|
||||
api_path, utils::path::get_parent_api_path(api_path),
|
||||
file_size)) == api_error::success);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (get_meta) {
|
||||
ret = meta_db_.get_item_meta(format_api_path(api_path), meta);
|
||||
}
|
||||
|
||||
ret = get_meta ? meta_db_->get_item_meta(api_path, meta)
|
||||
: api_error::item_not_found;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error base_provider::get_item_meta(const std::string &api_path, const std::string &key,
|
||||
std::string &value) const {
|
||||
auto ret = meta_db_.get_item_meta(format_api_path(api_path), key, value);
|
||||
auto base_provider::get_item_meta(const std::string &api_path,
|
||||
const std::string &key,
|
||||
std::string &value) const -> api_error {
|
||||
auto ret = meta_db_->get_item_meta(api_path, key, value);
|
||||
if (ret == api_error::item_not_found) {
|
||||
auto get_meta = false;
|
||||
if (is_directory(api_path)) {
|
||||
notify_directory_added(api_path, utils::path::get_parent_api_path(api_path));
|
||||
get_meta = true;
|
||||
} else if (is_file(api_path)) {
|
||||
std::uint64_t file_size = 0u;
|
||||
if ((ret = get_file_size(api_path, file_size)) == api_error::success) {
|
||||
get_meta = ((ret = notify_file_added(api_path, utils::path::get_parent_api_path(api_path),
|
||||
file_size)) == api_error::success);
|
||||
bool exists{};
|
||||
ret = is_directory(api_path, exists);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
if (exists) {
|
||||
ret = notify_directory_added(api_path,
|
||||
utils::path::get_parent_api_path(api_path));
|
||||
if (ret == api_error::success) {
|
||||
get_meta = true;
|
||||
}
|
||||
} else {
|
||||
ret = is_file(api_path, exists);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
if (exists) {
|
||||
std::uint64_t file_size{};
|
||||
if ((ret = get_file_size(api_path, file_size)) == api_error::success) {
|
||||
get_meta = ((ret = notify_file_added(
|
||||
api_path, utils::path::get_parent_api_path(api_path),
|
||||
file_size)) == api_error::success);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (get_meta) {
|
||||
ret = meta_db_.get_item_meta(format_api_path(api_path), key, value);
|
||||
}
|
||||
|
||||
ret = get_meta ? meta_db_->get_item_meta(api_path, key, value)
|
||||
: api_error::item_not_found;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::uint64_t base_provider::get_used_drive_space() const {
|
||||
return global_data::instance().get_used_drive_space();
|
||||
auto base_provider::get_used_drive_space() const -> std::uint64_t {
|
||||
std::uint64_t used_space = used_space_;
|
||||
fm_->update_used_space(used_space);
|
||||
return used_space;
|
||||
}
|
||||
|
||||
bool base_provider::start(api_item_added_callback api_item_added, i_open_file_table *oft) {
|
||||
auto base_provider::notify_directory_added(const std::string &api_path,
|
||||
const std::string &api_parent)
|
||||
-> api_error {
|
||||
recur_mutex_lock l(notify_added_mutex_);
|
||||
|
||||
const auto now = utils::get_file_time_now();
|
||||
api_file file{};
|
||||
file.api_path = api_path;
|
||||
file.api_parent = api_parent;
|
||||
file.accessed_date = now;
|
||||
file.changed_date = now;
|
||||
file.creation_date = now;
|
||||
file.file_size = 0U;
|
||||
file.modified_date = now;
|
||||
return api_item_added_(true, file);
|
||||
}
|
||||
|
||||
auto base_provider::processed_orphaned_file(const std::string &source_path,
|
||||
const std::string &api_path) const
|
||||
-> bool {
|
||||
const auto orphaned_directory =
|
||||
utils::path::combine(get_config().get_data_directory(), {"orphaned"});
|
||||
if (utils::file::create_full_directory_path(orphaned_directory)) {
|
||||
event_system::instance().raise<orphaned_file_detected>(source_path);
|
||||
const auto parts = utils::string::split(api_path, '/', false);
|
||||
const auto orphaned_file = utils::path::combine(
|
||||
orphaned_directory, {utils::path::strip_to_file_name(source_path) +
|
||||
'_' + parts[parts.size() - 1U]});
|
||||
|
||||
if (utils::file::reset_modified_time(source_path) &&
|
||||
utils::file::move_file(source_path, orphaned_file)) {
|
||||
event_system::instance().raise<orphaned_file_processed>(source_path,
|
||||
orphaned_file);
|
||||
return true;
|
||||
}
|
||||
|
||||
event_system::instance().raise<orphaned_file_processing_failed>(
|
||||
source_path, orphaned_file,
|
||||
std::to_string(utils::get_last_error_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
utils::error::raise_error(
|
||||
__FUNCTION__, std::to_string(utils::get_last_error_code()),
|
||||
"failed to create orphaned director|sp|" + orphaned_directory);
|
||||
return false;
|
||||
}
|
||||
|
||||
void base_provider::remove_deleted_files() {
|
||||
std::vector<std::string> removed_files{};
|
||||
|
||||
api_file_list list{};
|
||||
if (get_file_list(list) == api_error::success) {
|
||||
if (not list.empty()) {
|
||||
auto iterator = meta_db_->create_iterator(false);
|
||||
for (iterator->SeekToFirst(); not stop_requested_ && iterator->Valid();
|
||||
iterator->Next()) {
|
||||
const auto meta_api_path = iterator->key().ToString();
|
||||
if (meta_api_path.empty()) {
|
||||
const auto res = meta_db_->remove_item_meta(meta_api_path);
|
||||
if (res != api_error::success) {
|
||||
utils::error::raise_api_path_error(__FUNCTION__, meta_api_path, res,
|
||||
"failed to remove item meta");
|
||||
}
|
||||
} else {
|
||||
auto api_path = meta_api_path;
|
||||
const auto it = std::find_if(list.begin(), list.end(),
|
||||
[&api_path](const auto &file) -> bool {
|
||||
return file.api_path == api_path;
|
||||
});
|
||||
if (it == list.end()) {
|
||||
removed_files.emplace_back(api_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while (not stop_requested_ && not removed_files.empty()) {
|
||||
const auto api_path = removed_files.back();
|
||||
removed_files.pop_back();
|
||||
|
||||
bool exists{};
|
||||
auto res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string source_path;
|
||||
if (not exists &&
|
||||
(check_file_exists(api_path) == api_error::item_not_found) &&
|
||||
(meta_db_->get_item_meta(api_path, META_SOURCE, source_path) ==
|
||||
api_error::success)) {
|
||||
if (not source_path.empty()) {
|
||||
fm_->perform_locked_operation(
|
||||
[this, &api_path, &source_path](i_provider &) -> bool {
|
||||
if (fm_->has_no_open_file_handles()) {
|
||||
const auto res = meta_db_->remove_item_meta(api_path);
|
||||
if (res == api_error::success) {
|
||||
event_system::instance().raise<file_removed_externally>(
|
||||
api_path, source_path);
|
||||
processed_orphaned_file(source_path, api_path);
|
||||
} else {
|
||||
utils::error::raise_api_path_error(
|
||||
__FUNCTION__, api_path, source_path, res,
|
||||
"failed to remove item meta for externally removed file");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void base_provider::remove_expired_orphaned_files() {
|
||||
const auto orphaned_directory =
|
||||
utils::path::combine(get_config().get_data_directory(), {"orphaned"});
|
||||
const auto files = utils::file::get_directory_files(orphaned_directory, true);
|
||||
for (const auto &file : files) {
|
||||
if (utils::file::is_modified_date_older_than(
|
||||
file, std::chrono::hours(
|
||||
get_config().get_orphaned_file_retention_days() * 24))) {
|
||||
if (utils::file::retry_delete_file(file)) {
|
||||
event_system::instance().raise<orphaned_file_deleted>(file);
|
||||
}
|
||||
}
|
||||
if (stop_requested_) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void base_provider::remove_unknown_source_files() {
|
||||
auto files = utils::file::get_directory_files(
|
||||
get_config().get_cache_directory(), true);
|
||||
while (not stop_requested_ && not files.empty()) {
|
||||
const auto file = files.front();
|
||||
files.pop_front();
|
||||
|
||||
std::string api_path;
|
||||
if (not meta_db_->get_source_path_exists(file)) {
|
||||
processed_orphaned_file(file);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto base_provider::rename_file(const std::string &from_api_path,
|
||||
const std::string &to_api_path) -> api_error {
|
||||
std::string source_path;
|
||||
auto ret = get_item_meta(from_api_path, META_SOURCE, source_path);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::string encryption_token;
|
||||
ret = get_item_meta(from_api_path, META_ENCRYPTION_TOKEN, encryption_token);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = handle_rename_file(from_api_path, to_api_path, source_path);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto base_provider::start(api_item_added_callback api_item_added,
|
||||
i_file_manager *fm) -> bool {
|
||||
meta_db_ = std::make_unique<meta_db>(config_);
|
||||
|
||||
api_item_added_ = api_item_added;
|
||||
oft_ = oft;
|
||||
fm_ = fm;
|
||||
|
||||
auto unmount_requested = false;
|
||||
{
|
||||
repertory::event_consumer ec("unmount_requested",
|
||||
[&unmount_requested](const event &) { unmount_requested = true; });
|
||||
for (std::uint16_t i = 0u; not unmount_requested && not is_online() &&
|
||||
repertory::event_consumer ec(
|
||||
"unmount_requested",
|
||||
[&unmount_requested](const event &) { unmount_requested = true; });
|
||||
for (std::uint16_t i = 0U; not unmount_requested && not is_online() &&
|
||||
(i < get_config().get_online_check_retry_secs());
|
||||
i++) {
|
||||
event_system::instance().raise<provider_offline>(
|
||||
get_config().get_host_config().host_name_or_ip, get_config().get_host_config().api_port);
|
||||
get_config().get_host_config().host_name_or_ip,
|
||||
get_config().get_host_config().api_port);
|
||||
std::this_thread::sleep_for(1s);
|
||||
}
|
||||
}
|
||||
return unmount_requested;
|
||||
|
||||
auto ret = not unmount_requested && is_online();
|
||||
if (ret) {
|
||||
// Force root creation
|
||||
api_meta_map meta{};
|
||||
auto res = get_item_meta("/", meta);
|
||||
if (res != api_error::success) {
|
||||
throw startup_exception("failed to create root|err|" +
|
||||
api_error_to_string(res));
|
||||
}
|
||||
|
||||
calculate_used_drive_space(false);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void base_provider::stop() { meta_db_.reset(); }
|
||||
|
||||
void base_provider::update_filesystem_item(bool directory,
|
||||
const api_error &error,
|
||||
const std::string &api_path,
|
||||
filesystem_item &fsi) const {
|
||||
if (error == api_error::success) {
|
||||
fsi.directory = directory;
|
||||
fsi.api_path = api_path;
|
||||
fsi.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
} else {
|
||||
event_system::instance().raise<filesystem_item_get_failed>(
|
||||
api_path, std::to_string(static_cast<int>(error)));
|
||||
}
|
||||
}
|
||||
} // namespace repertory
|
||||
|
813
src/providers/encrypt/encrypt_provider.cpp
Normal file
813
src/providers/encrypt/encrypt_provider.cpp
Normal file
@@ -0,0 +1,813 @@
|
||||
/*
|
||||
Copyright <2018-2023> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#include "providers/encrypt/encrypt_provider.hpp"
|
||||
|
||||
#include "events/event_system.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "platform/win32_platform.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "utils/encrypting_reader.hpp"
|
||||
#include "utils/path_utils.hpp"
|
||||
#include "utils/polling.hpp"
|
||||
#include "utils/rocksdb_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
encrypt_provider::encrypt_provider(app_config &config) : config_(config) {}
|
||||
|
||||
auto encrypt_provider::create_api_file(const std::string api_path,
|
||||
bool directory,
|
||||
const std::string &source_path)
|
||||
-> api_file {
|
||||
#ifdef _WIN32
|
||||
struct _stat64 buf {};
|
||||
_stat64(source_path.c_str(), &buf);
|
||||
#else
|
||||
struct stat buf {};
|
||||
stat(source_path.c_str(), &buf);
|
||||
#endif
|
||||
|
||||
api_file file{};
|
||||
file.api_path = api_path;
|
||||
file.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
file.file_size =
|
||||
directory
|
||||
? 0U
|
||||
: utils::encryption::encrypting_reader::calculate_encrypted_size(
|
||||
source_path);
|
||||
file.source_path = source_path;
|
||||
#ifdef __APPLE__
|
||||
file.changed_date =
|
||||
buf.st_ctimespec.tv_nsec + (buf.st_ctimespec.tv_sec * NANOS_PER_SECOND);
|
||||
file.accessed_date =
|
||||
buf.st_atimespec.tv_nsec + (buf.st_atimespec.tv_sec * NANOS_PER_SECOND);
|
||||
file.creation_date = buf.st_birthtimespec.tv_nsec +
|
||||
(buf.st_birthtimespec.tv_sec * NANOS_PER_SECOND);
|
||||
file.modified_date =
|
||||
buf.st_mtimespec.tv_nsec + (buf.st_mtimespec.tv_sec * NANOS_PER_SECOND);
|
||||
#elif _WIN32
|
||||
FILETIME ft{};
|
||||
utils::unix_time_to_filetime(utils::time64_to_unix_time(buf.st_atime), ft);
|
||||
file.accessed_date =
|
||||
(static_cast<std::uint64_t>(ft.dwHighDateTime) << 32U) | ft.dwLowDateTime;
|
||||
|
||||
utils::unix_time_to_filetime(utils::time64_to_unix_time(buf.st_mtime), ft);
|
||||
file.changed_date =
|
||||
(static_cast<std::uint64_t>(ft.dwHighDateTime) << 32U) | ft.dwLowDateTime;
|
||||
|
||||
utils::unix_time_to_filetime(utils::time64_to_unix_time(buf.st_ctime), ft);
|
||||
file.creation_date =
|
||||
(static_cast<std::uint64_t>(ft.dwHighDateTime) << 32U) | ft.dwLowDateTime;
|
||||
|
||||
utils::unix_time_to_filetime(utils::time64_to_unix_time(buf.st_mtime), ft);
|
||||
file.modified_date =
|
||||
(static_cast<std::uint64_t>(ft.dwHighDateTime) << 32U) | ft.dwLowDateTime;
|
||||
#else
|
||||
file.changed_date =
|
||||
buf.st_mtim.tv_nsec + (buf.st_mtim.tv_sec * NANOS_PER_SECOND);
|
||||
file.accessed_date =
|
||||
buf.st_atim.tv_nsec + (buf.st_atim.tv_sec * NANOS_PER_SECOND);
|
||||
file.creation_date =
|
||||
buf.st_ctim.tv_nsec + (buf.st_ctim.tv_sec * NANOS_PER_SECOND);
|
||||
file.modified_date =
|
||||
buf.st_mtim.tv_nsec + (buf.st_mtim.tv_sec * NANOS_PER_SECOND);
|
||||
#endif
|
||||
|
||||
return file;
|
||||
}
|
||||
|
||||
// Populate `meta` for `file`. Ownership/mode fields come from a fresh
// stat() of the source file; timestamps and size come from `file` itself.
// NOTE(review): the stat()/_stat64() result is intentionally ignored here
// (zeroed struct on failure) — matches prior behavior.
void encrypt_provider::create_item_meta(api_meta_map &meta, bool directory,
                                        const api_file &file) {
#ifdef _WIN32
  struct _stat64 st {};
  _stat64(file.source_path.c_str(), &st);
#else
  struct stat st {};
  stat(file.source_path.c_str(), &st);
#endif

  meta[META_ACCESSED] = std::to_string(file.accessed_date);
#ifdef _WIN32
  meta[META_ATTRIBUTES] =
      std::to_string(::GetFileAttributesA(file.source_path.c_str()));
#endif
#ifdef __APPLE__
  meta[META_BACKUP]; // created empty on purpose
#endif
  meta[META_CHANGED] = std::to_string(file.changed_date);
  meta[META_CREATION] = std::to_string(file.creation_date);
  meta[META_DIRECTORY] = utils::string::from_bool(directory);
  meta[META_GID] = std::to_string(st.st_gid);
  meta[META_MODE] = std::to_string(st.st_mode);
  meta[META_MODIFIED] = std::to_string(file.modified_date);
#ifdef __APPLE__
  meta[META_OSXFLAGS]; // created empty on purpose
#endif
  meta[META_SIZE] = std::to_string(file.file_size);
  meta[META_SOURCE] = file.source_path;
  meta[META_UID] = std::to_string(st.st_uid);
  meta[META_WRITTEN] = std::to_string(file.modified_date);
}
|
||||
|
||||
auto encrypt_provider::create_directory(const std::string &api_path,
|
||||
api_meta_map & /*meta*/) -> api_error {
|
||||
if (api_path == "/") {
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
return api_error::not_implemented;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_api_path_from_source(const std::string &source_path,
|
||||
std::string &api_path) const
|
||||
-> api_error {
|
||||
try {
|
||||
std::string api_path_data{};
|
||||
db_->Get(rocksdb::ReadOptions(), file_family_, source_path, &api_path_data);
|
||||
if (not api_path_data.empty()) {
|
||||
api_path = json::parse(api_path_data).at("api_path").get<std::string>();
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
std::string dir_api_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_, source_path, &dir_api_path);
|
||||
if (dir_api_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
api_path = dir_api_path;
|
||||
return api_error::success;
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(__FUNCTION__, ex, source_path,
|
||||
"failed to get api path from source path");
|
||||
}
|
||||
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_directory_item_count(
|
||||
const std::string &api_path) const -> std::uint64_t {
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return 0U;
|
||||
}
|
||||
|
||||
std::string dir_api_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_, source_path, &dir_api_path);
|
||||
if (dir_api_path.empty()) {
|
||||
return 0U;
|
||||
}
|
||||
|
||||
const auto cfg = config_.get_encrypt_config();
|
||||
|
||||
std::uint64_t count{};
|
||||
try {
|
||||
for ([[maybe_unused]] const auto &dir_entry :
|
||||
std::filesystem::directory_iterator(source_path)) {
|
||||
count++;
|
||||
}
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(__FUNCTION__, ex, cfg.path,
|
||||
"failed to get directory item count");
|
||||
return 0U;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const
|
||||
-> api_error {
|
||||
bool exists{};
|
||||
auto res = is_file(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
if (exists) {
|
||||
return api_error::item_exists;
|
||||
}
|
||||
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return api_error::directory_not_found;
|
||||
}
|
||||
|
||||
std::string dir_api_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_, source_path, &dir_api_path);
|
||||
if (dir_api_path.empty()) {
|
||||
return api_error::directory_not_found;
|
||||
}
|
||||
|
||||
try {
|
||||
for (const auto &dir_entry :
|
||||
std::filesystem::directory_iterator(source_path)) {
|
||||
try {
|
||||
std::string api_path{};
|
||||
if (dir_entry.is_directory()) {
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_,
|
||||
dir_entry.path().string(), &api_path);
|
||||
if (api_path.empty()) {
|
||||
const auto cfg = config_.get_encrypt_config();
|
||||
for (const auto &child_dir_entry :
|
||||
std::filesystem::directory_iterator(dir_entry.path())) {
|
||||
if (process_directory_entry(child_dir_entry, cfg, api_path)) {
|
||||
api_path = utils::path::get_parent_api_path(api_path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (api_path.empty()) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
std::string api_path_data{};
|
||||
db_->Get(rocksdb::ReadOptions(), file_family_,
|
||||
dir_entry.path().string(), &api_path_data);
|
||||
if (api_path_data.empty()) {
|
||||
const auto cfg = config_.get_encrypt_config();
|
||||
if (not process_directory_entry(dir_entry, cfg, api_path)) {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
api_path =
|
||||
json::parse(api_path_data).at("api_path").get<std::string>();
|
||||
}
|
||||
}
|
||||
|
||||
auto file = create_api_file(api_path, dir_entry.is_directory(),
|
||||
dir_entry.path().string());
|
||||
|
||||
directory_item di{};
|
||||
di.api_parent = file.api_parent;
|
||||
di.api_path = file.api_path;
|
||||
di.directory = dir_entry.is_directory();
|
||||
di.resolved = true;
|
||||
di.size = file.file_size;
|
||||
create_item_meta(di.meta, di.directory, file);
|
||||
|
||||
list.emplace_back(std::move(di));
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(__FUNCTION__, ex, dir_entry.path().string(),
|
||||
"failed to process directory item");
|
||||
}
|
||||
}
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(__FUNCTION__, ex, source_path,
|
||||
"failed to get directory items");
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
std::sort(list.begin(), list.end(), [](const auto &a, const auto &b) -> bool {
|
||||
return (a.directory && not b.directory) ||
|
||||
(not(b.directory && not a.directory) &&
|
||||
(a.api_path.compare(b.api_path) < 0));
|
||||
});
|
||||
|
||||
list.insert(list.begin(), directory_item{
|
||||
"..",
|
||||
"",
|
||||
true,
|
||||
});
|
||||
list.insert(list.begin(), directory_item{
|
||||
".",
|
||||
"",
|
||||
true,
|
||||
});
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_file(const std::string &api_path,
|
||||
api_file &file) const -> api_error {
|
||||
bool exists{};
|
||||
auto res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
if (exists) {
|
||||
return api_error::directory_exists;
|
||||
}
|
||||
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
file = create_api_file(api_path, false, source_path);
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto encrypt_provider::process_directory_entry(
|
||||
const std::filesystem::directory_entry &dir_entry,
|
||||
const encrypt_config &cfg, std::string &api_path) const -> bool {
|
||||
if (dir_entry.is_regular_file() && not dir_entry.is_symlink() &&
|
||||
not dir_entry.is_directory()) {
|
||||
const auto relative_path = dir_entry.path().lexically_relative(cfg.path);
|
||||
|
||||
std::string api_path_data{};
|
||||
db_->Get(rocksdb::ReadOptions(), file_family_, dir_entry.path().string(),
|
||||
&api_path_data);
|
||||
|
||||
std::string api_parent{};
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_,
|
||||
dir_entry.path().parent_path().string(), &api_parent);
|
||||
|
||||
if (api_path_data.empty() || api_parent.empty()) {
|
||||
stop_type stop_requested = false;
|
||||
utils::encryption::encrypting_reader reader(
|
||||
relative_path.filename().string(), dir_entry.path().string(),
|
||||
stop_requested, cfg.encryption_token,
|
||||
relative_path.parent_path().string());
|
||||
if (api_parent.empty()) {
|
||||
auto encrypted_parts =
|
||||
utils::string::split(reader.get_encrypted_file_path(), '/', false);
|
||||
|
||||
std::size_t idx{1U};
|
||||
|
||||
std::string current_source_path{cfg.path};
|
||||
std::string current_encrypted_path{};
|
||||
for (const auto &part : relative_path.parent_path()) {
|
||||
if (part.string() == "/") {
|
||||
continue;
|
||||
}
|
||||
|
||||
current_source_path =
|
||||
utils::path::combine(current_source_path, {part.string()});
|
||||
|
||||
std::string parent_api_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_, current_source_path,
|
||||
&parent_api_path);
|
||||
if (parent_api_path.empty()) {
|
||||
parent_api_path = utils::path::create_api_path(
|
||||
current_encrypted_path + '/' + encrypted_parts[idx]);
|
||||
db_->Put(rocksdb::WriteOptions(), dir_family_, current_source_path,
|
||||
parent_api_path);
|
||||
db_->Put(rocksdb::WriteOptions(), source_family_, parent_api_path,
|
||||
current_source_path);
|
||||
event_system::instance().raise<filesystem_item_added>(
|
||||
parent_api_path,
|
||||
utils::path::get_parent_api_path(parent_api_path), true);
|
||||
} else {
|
||||
encrypted_parts[idx] =
|
||||
utils::string::split(parent_api_path, '/', false)[idx];
|
||||
}
|
||||
|
||||
current_encrypted_path = utils::path::create_api_path(
|
||||
current_encrypted_path + '/' + encrypted_parts[idx++]);
|
||||
}
|
||||
|
||||
api_parent = current_encrypted_path;
|
||||
}
|
||||
|
||||
if (api_path_data.empty()) {
|
||||
api_path = utils::path::create_api_path(
|
||||
api_parent + "/" + reader.get_encrypted_file_name());
|
||||
|
||||
auto iv_list = reader.get_iv_list();
|
||||
json data = {
|
||||
{"api_path", api_path},
|
||||
{"iv_list", iv_list},
|
||||
{"original_file_size", dir_entry.file_size()},
|
||||
};
|
||||
db_->Put(rocksdb::WriteOptions(), file_family_,
|
||||
dir_entry.path().string(), data.dump());
|
||||
db_->Put(rocksdb::WriteOptions(), source_family_, api_path,
|
||||
dir_entry.path().string());
|
||||
event_system::instance().raise<filesystem_item_added>(
|
||||
api_path, api_parent, false);
|
||||
} else {
|
||||
api_path = json::parse(api_path_data)["api_path"].get<std::string>();
|
||||
}
|
||||
} else {
|
||||
api_path = json::parse(api_path_data)["api_path"].get<std::string>();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Build the full file list by walking the source tree recursively and
// indexing every regular file via process_directory_entry().
auto encrypt_provider::get_file_list(api_file_list &list) const -> api_error {
  const auto cfg = config_.get_encrypt_config();

  try {
    for (const auto &dir_entry :
         std::filesystem::recursive_directory_iterator(cfg.path)) {
      std::string api_path{};
      if (not process_directory_entry(dir_entry, cfg, api_path)) {
        continue; // directories / symlinks are skipped
      }
      list.emplace_back(create_api_file(api_path, dir_entry.is_directory(),
                                        dir_entry.path().string()));
    }

    return api_error::success;
  } catch (const std::exception &ex) {
    utils::error::raise_error(__FUNCTION__, ex, cfg.path,
                              "failed to get file list");
  }

  return api_error::error;
}
|
||||
|
||||
auto encrypt_provider::get_file_size(const std::string &api_path,
|
||||
std::uint64_t &file_size) const
|
||||
-> api_error {
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
try {
|
||||
file_size = utils::encryption::encrypting_reader::calculate_encrypted_size(
|
||||
source_path);
|
||||
return api_error::success;
|
||||
} catch (const std::exception &ex) {
|
||||
utils::error::raise_error(__FUNCTION__, ex, api_path,
|
||||
"failed to get file size");
|
||||
}
|
||||
|
||||
return api_error::error;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_filesystem_item(const std::string &api_path,
|
||||
bool directory,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
if (directory) {
|
||||
std::string api_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), dir_family_, source_path, &api_path);
|
||||
if (api_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
fsi.api_parent = utils::path::get_parent_api_path(api_path);
|
||||
fsi.api_path = api_path;
|
||||
fsi.directory = true;
|
||||
fsi.size = 0U;
|
||||
fsi.source_path = source_path;
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
std::string api_path_data{};
|
||||
db_->Get(rocksdb::ReadOptions(), file_family_, source_path, &api_path_data);
|
||||
if (api_path_data.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
auto data = json::parse(api_path_data);
|
||||
fsi.api_path = data["api_path"].get<std::string>();
|
||||
fsi.api_parent = utils::path::get_parent_api_path(fsi.api_path);
|
||||
fsi.directory = false;
|
||||
fsi.size = utils::encryption::encrypting_reader::calculate_encrypted_size(
|
||||
source_path);
|
||||
fsi.source_path = source_path;
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_filesystem_item_from_source_path(
|
||||
const std::string &source_path, filesystem_item &fsi) const -> api_error {
|
||||
std::string api_path{};
|
||||
auto res = get_api_path_from_source(source_path, api_path);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
bool exists{};
|
||||
res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
if (exists) {
|
||||
return api_error::directory_exists;
|
||||
}
|
||||
|
||||
return get_filesystem_item(api_path, false, fsi);
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_filesystem_item_and_file(const std::string &api_path,
|
||||
api_file &file,
|
||||
filesystem_item &fsi) const
|
||||
-> api_error {
|
||||
bool exists{};
|
||||
auto res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
if (exists) {
|
||||
return api_error::directory_exists;
|
||||
}
|
||||
|
||||
auto ret = get_filesystem_item(api_path, exists, fsi);
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
file = create_api_file(api_path, false, fsi.source_path);
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
// Pinning is meaningless for a pass-through encrypt mount — always empty.
auto encrypt_provider::get_pinned_files() const -> std::vector<std::string> {
  return std::vector<std::string>{};
}
|
||||
|
||||
auto encrypt_provider::get_item_meta(const std::string &api_path,
|
||||
api_meta_map &meta) const -> api_error {
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
bool exists{};
|
||||
auto res = is_directory(api_path, exists);
|
||||
if (res != api_error::success) {
|
||||
return res;
|
||||
}
|
||||
|
||||
auto file = create_api_file(api_path, exists, source_path);
|
||||
create_item_meta(meta, exists, file);
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
// Convenience overload: fetch a single metadata value by key. A key absent
// from the map yields an empty string (matching map operator[] semantics).
auto encrypt_provider::get_item_meta(const std::string &api_path,
                                     const std::string &key,
                                     std::string &value) const -> api_error {
  api_meta_map meta{};
  const auto ret = get_item_meta(api_path, meta);
  if (ret != api_error::success) {
    return ret;
  }

  value = meta[key];
  return api_error::success;
}
|
||||
|
||||
auto encrypt_provider::get_total_drive_space() const -> std::uint64_t {
|
||||
const auto cfg = config_.get_encrypt_config();
|
||||
return utils::file::get_total_drive_space(cfg.path);
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_total_item_count() const -> std::uint64_t {
|
||||
std::uint64_t ret{};
|
||||
|
||||
auto iterator = std::unique_ptr<rocksdb::Iterator>(
|
||||
db_->NewIterator(rocksdb::ReadOptions(), source_family_));
|
||||
for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
|
||||
ret++;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto encrypt_provider::get_used_drive_space() const -> std::uint64_t {
|
||||
const auto cfg = config_.get_encrypt_config();
|
||||
return get_total_drive_space() - utils::file::get_free_drive_space(cfg.path);
|
||||
}
|
||||
|
||||
// `exists` is true only when the api path is indexed AND its source path is
// currently a directory on disk. Unknown paths are "not a directory", not
// an error.
auto encrypt_provider::is_directory(const std::string &api_path,
                                    bool &exists) const -> api_error {
  std::string source_path{};
  db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);

  exists =
      not source_path.empty() && utils::file::is_directory(source_path);
  return api_error::success;
}
|
||||
|
||||
auto encrypt_provider::is_file(const std::string &api_path, bool &exists) const
|
||||
-> api_error {
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
exists = false;
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
exists = utils::file::is_file(source_path);
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto encrypt_provider::is_file_writeable(const std::string & /*api_path*/) const
|
||||
-> bool {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto encrypt_provider::is_online() const -> bool {
|
||||
return std::filesystem::exists(config_.get_encrypt_config().path);
|
||||
}
|
||||
|
||||
// Renames are not supported by the read-only encrypt provider.
auto encrypt_provider::is_rename_supported() const -> bool { return false; }
|
||||
|
||||
auto encrypt_provider::read_file_bytes(const std::string &api_path,
|
||||
std::size_t size, std::uint64_t offset,
|
||||
data_buffer &data,
|
||||
stop_type &stop_requested) -> api_error {
|
||||
std::string source_path{};
|
||||
db_->Get(rocksdb::ReadOptions(), source_family_, api_path, &source_path);
|
||||
if (source_path.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
std::string api_path_data{};
|
||||
db_->Get(rocksdb::ReadOptions(), file_family_, source_path, &api_path_data);
|
||||
if (api_path_data.empty()) {
|
||||
return api_error::item_not_found;
|
||||
}
|
||||
|
||||
std::uint64_t file_size{};
|
||||
if (not utils::file::get_file_size(source_path, file_size)) {
|
||||
return api_error::os_error;
|
||||
}
|
||||
|
||||
std::vector<
|
||||
std::array<unsigned char, crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>
|
||||
iv_list{};
|
||||
|
||||
const auto cfg = config_.get_encrypt_config();
|
||||
|
||||
unique_recur_mutex_lock reader_lookup_lock(reader_lookup_mtx_);
|
||||
|
||||
auto file_data = json::parse(api_path_data);
|
||||
if (file_data.at("original_file_size").get<std::uint64_t>() != file_size) {
|
||||
const auto relative_path =
|
||||
std::filesystem::path(source_path).lexically_relative(cfg.path);
|
||||
|
||||
auto ri = std::make_shared<reader_info>();
|
||||
ri->reader = std::make_unique<utils::encryption::encrypting_reader>(
|
||||
relative_path.filename().string(), source_path, stop_requested,
|
||||
cfg.encryption_token, relative_path.parent_path().string());
|
||||
reader_lookup_[source_path] = ri;
|
||||
iv_list = ri->reader->get_iv_list();
|
||||
|
||||
file_data["original_file_size"] = file_size;
|
||||
file_data["iv_list"] = iv_list;
|
||||
auto res = db_->Put(rocksdb::WriteOptions(), file_family_, source_path,
|
||||
file_data.dump());
|
||||
if (not res.ok()) {
|
||||
utils::error::raise_error(__FUNCTION__, res.code(), source_path,
|
||||
"failed to update meta db");
|
||||
return api_error::error;
|
||||
}
|
||||
} else {
|
||||
iv_list =
|
||||
file_data["iv_list"]
|
||||
.get<std::vector<
|
||||
std::array<unsigned char,
|
||||
crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
|
||||
if (reader_lookup_.find(source_path) == reader_lookup_.end()) {
|
||||
auto ri = std::make_shared<reader_info>();
|
||||
ri->reader = std::make_unique<utils::encryption::encrypting_reader>(
|
||||
api_path, source_path, stop_requested, cfg.encryption_token,
|
||||
std::move(iv_list));
|
||||
reader_lookup_[source_path] = ri;
|
||||
}
|
||||
}
|
||||
|
||||
if (file_size == 0U || size == 0U) {
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
auto ri = reader_lookup_.at(source_path);
|
||||
ri->last_access_time = std::chrono::system_clock::now();
|
||||
reader_lookup_lock.unlock();
|
||||
|
||||
mutex_lock reader_lock(ri->reader_mtx);
|
||||
ri->reader->set_read_position(offset);
|
||||
data.resize(size);
|
||||
|
||||
const auto res = ri->reader->reader_function(data.data(), 1u, data.size(),
|
||||
ri->reader.get());
|
||||
if (res == 0) {
|
||||
return api_error::os_error;
|
||||
}
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
void encrypt_provider::remove_deleted_files() {
|
||||
struct removed_item {
|
||||
std::string api_path{};
|
||||
bool directory{};
|
||||
std::string source_path{};
|
||||
};
|
||||
|
||||
std::vector<removed_item> removed_list{};
|
||||
auto iterator = std::unique_ptr<rocksdb::Iterator>(
|
||||
db_->NewIterator(rocksdb::ReadOptions(), source_family_));
|
||||
for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
|
||||
auto source_path = iterator->value().ToString();
|
||||
if (not std::filesystem::exists(source_path)) {
|
||||
auto api_path =
|
||||
utils::string::split(iterator->key().ToString(), '|', false)[1U];
|
||||
|
||||
std::string value{};
|
||||
db_->Get(rocksdb::ReadOptions(), file_family_, source_path, &value);
|
||||
|
||||
removed_list.emplace_back(
|
||||
removed_item{api_path, value.empty(), source_path});
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto &item : removed_list) {
|
||||
if (not item.directory) {
|
||||
db_->Delete(rocksdb::WriteOptions(), source_family_, item.api_path);
|
||||
db_->Delete(rocksdb::WriteOptions(), file_family_, item.source_path);
|
||||
event_system::instance().raise<file_removed_externally>(item.api_path,
|
||||
item.source_path);
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto &item : removed_list) {
|
||||
if (item.directory) {
|
||||
db_->Delete(rocksdb::WriteOptions(), source_family_, item.api_path);
|
||||
db_->Delete(rocksdb::WriteOptions(), dir_family_, item.source_path);
|
||||
event_system::instance().raise<directory_removed_externally>(
|
||||
item.api_path, item.source_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bring the provider online: open the RocksDB index (dir/file/source column
// families), seed the root mappings if absent, and schedule the
// deleted-file sweep. Returns false when the source path is unreachable.
auto encrypt_provider::start(api_item_added_callback /*api_item_added*/,
                             i_file_manager * /*fm*/) -> bool {
  if (not is_online()) {
    return false;
  }

  std::vector<rocksdb::ColumnFamilyDescriptor> families{};
  families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                        rocksdb::ColumnFamilyOptions());
  families.emplace_back("dir", rocksdb::ColumnFamilyOptions());
  families.emplace_back("file", rocksdb::ColumnFamilyOptions());
  families.emplace_back("source", rocksdb::ColumnFamilyOptions());

  std::vector<rocksdb::ColumnFamilyHandle *> handles{};
  utils::db::create_rocksdb(config_, DB_NAME, families, handles, db_);

  // Handle order matches the descriptor order above (default CF excluded).
  std::size_t idx{};
  dir_family_ = handles[idx++];
  file_family_ = handles[idx++];
  source_family_ = handles[idx++];

  const auto cfg = config_.get_encrypt_config();

  // Seed the root "/" <-> cfg.path mappings on first start.
  std::string source_path{};
  db_->Get(rocksdb::ReadOptions(), source_family_, "/", &source_path);
  if (source_path.empty()) {
    db_->Put(rocksdb::WriteOptions(), source_family_, "/", cfg.path);
    source_path = cfg.path;
  }

  std::string dir_api_path{};
  db_->Get(rocksdb::ReadOptions(), dir_family_, source_path, &dir_api_path);
  if (dir_api_path.empty()) {
    db_->Put(rocksdb::WriteOptions(), dir_family_, source_path, "/");
  }

  polling::instance().set_callback({"check_deleted", polling::frequency::low,
                                    [this]() { remove_deleted_files(); }});

  event_system::instance().raise<service_started>("encrypt_provider");
  return true;
}
|
||||
|
||||
// Shut down: cancel the polling sweep and close the database, bracketed by
// shutdown events.
void encrypt_provider::stop() {
  event_system::instance().raise<service_shutdown_begin>("encrypt_provider");

  polling::instance().remove_callback("check_deleted");
  db_.reset(); // closes RocksDB and releases column-family handles

  event_system::instance().raise<service_shutdown_end>("encrypt_provider");
}
|
||||
} // namespace repertory
|
@@ -1,192 +0,0 @@
|
||||
/*
|
||||
Copyright <2018-2022> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
|
||||
associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or
|
||||
substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
|
||||
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
|
||||
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#if defined(REPERTORY_TESTING_NEW)
|
||||
#include "providers/passthrough/passthroughprovider.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/path_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
std::string CPassthroughProvider::ConstructFullPath(const std::string &api_path) const {
|
||||
return utils::path::combine(passthroughLocation_, {api_path});
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::CreateDirectory(const std::string &api_path,
|
||||
const api_meta_map &meta) {
|
||||
const auto fullPath = ConstructFullPath(api_path);
|
||||
auto ret = utils::file::create_full_directory_path(fullPath) ? api_error::OSErrorCode
|
||||
: api_error::Success;
|
||||
if (ret == api_error::Success) {
|
||||
ret = set_item_meta(api_path, meta);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::get_file_list(ApiFileList &fileList) const {
|
||||
const auto fullPath = ConstructFullPath("/");
|
||||
const auto fl = utils::file::get_directory_files(fullPath, false, true);
|
||||
for (const auto &file : fl) {
|
||||
const auto api_path = utils::path::create_api_path(file.substr(fullPath.length()));
|
||||
/*
|
||||
struct ApiFile {
|
||||
std::string ApiFilePath{};
|
||||
std::string ApiParent{};
|
||||
std::uint64_t AccessedDate = 0;
|
||||
std::uint64_t ChangedDate = 0;
|
||||
std::uint64_t CreationDate = 0;
|
||||
std::string EncryptionToken{};
|
||||
std::uint64_t FileSize = 0;
|
||||
std::uint64_t ModifiedDate = 0;
|
||||
bool Recoverable = false;
|
||||
double Redundancy = 0.0;
|
||||
std::string SourceFilePath{};
|
||||
};
|
||||
*/
|
||||
ApiFile apiFile{
|
||||
api_path,
|
||||
utils::path::get_parent_api_path(api_path),
|
||||
};
|
||||
// apiFile.Recoverable = not IsProcessing(api_path);
|
||||
apiFile.Redundancy = 3.0;
|
||||
apiFile.SourceFilePath = file;
|
||||
// utils::file::UpdateApiFileInfo(apiFile);
|
||||
fileList.emplace_back(apiFile);
|
||||
}
|
||||
|
||||
return api_error::Success;
|
||||
}
|
||||
|
||||
std::uint64_t CPassthroughProvider::get_directory_item_count(const std::string &api_path) const {
|
||||
const auto fullPath = ConstructFullPath(api_path);
|
||||
return 0;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::GetFile(const std::string &api_path, ApiFile &file) const {
|
||||
return api_error::Error;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::GetFileSize(const std::string &api_path,
|
||||
std::uint64_t &fileSize) const {
|
||||
return api_error::Error;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::get_filesystem_item(const std::string &api_path,
|
||||
const bool &directory,
|
||||
FileSystemItem &fileSystemItem) const {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error
|
||||
CPassthroughProvider::get_filesystem_item_from_source_path(const std::string &sourceFilePath,
|
||||
FileSystemItem &fileSystemItem) const {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::get_item_meta(const std::string &api_path,
|
||||
api_meta_map &meta) const {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::get_item_meta(const std::string &api_path,
|
||||
const std::string &key, std::string &value) const {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
std::uint64_t CPassthroughProvider::get_total_drive_space() const { return 0; }
|
||||
|
||||
std::uint64_t CPassthroughProvider::get_total_item_count() const { return 0; }
|
||||
|
||||
std::uint64_t CPassthroughProvider::get_used_drive_space() const { return 0; }
|
||||
|
||||
bool CPassthroughProvider::IsDirectory(const std::string &api_path) const {
|
||||
const auto fullPath = ConstructFullPath(api_path);
|
||||
return utils::file::is_directory(fullPath);
|
||||
}
|
||||
|
||||
bool CPassthroughProvider::IsFile(const std::string &api_path) const {
|
||||
const auto fullPath = ConstructFullPath(api_path);
|
||||
return utils::file::is_file(fullPath);
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::notify_file_added(const std::string &api_path,
|
||||
const std::string &api_parent,
|
||||
const std::uint64_t &size) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::read_file_bytes(const std::string &apiFilepath,
|
||||
const std::size_t &size,
|
||||
const std::uint64_t &offset,
|
||||
std::vector<char> &data,
|
||||
const bool &stop_requested) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::RemoveDirectory(const std::string &api_path) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::RemoveFile(const std::string &api_path) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::RenameFile(const std::string &fromApiPath,
|
||||
const std::string &toApiPath) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::remove_item_meta(const std::string &api_path,
|
||||
const std::string &key) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::set_item_meta(const std::string &api_path,
|
||||
const std::string &key, const std::string &value) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::set_item_meta(const std::string &api_path,
|
||||
const api_meta_map &meta) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
api_error CPassthroughProvider::set_source_path(const std::string &api_path,
|
||||
const std::string &sourcePath) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
|
||||
bool CPassthroughProvider::Start(ApiItemAdded apiItemAdded, i_open_file_table *openFileTable) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void CPassthroughProvider::Stop() {}
|
||||
|
||||
api_error CPassthroughProvider::upload_file(const std::string &api_path,
|
||||
const std::string &sourcePath,
|
||||
const std::string &encryptionToken) {
|
||||
return api_error::NotImplemented;
|
||||
}
|
||||
} // namespace repertory
|
||||
|
||||
#endif // defined(REPERTORY_TESTING_NEW)
|
@@ -1,76 +1,82 @@
|
||||
/*
|
||||
Copyright <2018-2022> <scott.e.graves@protonmail.com>
|
||||
Copyright <2018-2023> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
|
||||
associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or
|
||||
substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
|
||||
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
|
||||
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
#include "providers/provider.hpp"
|
||||
#include "comm/aws_s3/aws_s3_comm.hpp"
|
||||
#include "comm/curl/curl_comm.hpp"
|
||||
#include "comm/i_comm.hpp"
|
||||
#include "comm/i_s3_comm.hpp"
|
||||
|
||||
#include "app_config.hpp"
|
||||
#include "comm/curl/curl_comm.hpp"
|
||||
#include "comm/i_http_comm.hpp"
|
||||
#include "comm/i_s3_comm.hpp"
|
||||
#include "comm/s3/s3_comm.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "providers/passthrough/passthroughprovider.hpp"
|
||||
#include "providers/encrypt/encrypt_provider.hpp"
|
||||
#include "providers/s3/s3_provider.hpp"
|
||||
#include "providers/sia/sia_provider.hpp"
|
||||
#include "providers/skynet/skynet_provider.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
|
||||
namespace repertory {
|
||||
template <typename i, typename t>
|
||||
static void create_comm(std::unique_ptr<i> &comm, app_config &config) {
|
||||
namespace {
|
||||
template <typename intf_t, typename comm_t, typename config_t>
|
||||
inline void create_comm(std::unique_ptr<intf_t> &comm, const config_t &config) {
|
||||
if (comm) {
|
||||
throw startup_exception("'create_provider' should only be called once");
|
||||
throw repertory::startup_exception(
|
||||
"'create_provider' should only be called once");
|
||||
}
|
||||
comm = std::make_unique<t>(config);
|
||||
}
|
||||
|
||||
std::unique_ptr<i_provider> create_provider(const provider_type &pt, app_config &config) {
|
||||
comm = std::make_unique<comm_t>(config);
|
||||
}
|
||||
} // namespace
|
||||
|
||||
namespace repertory {
|
||||
auto create_provider(const provider_type &pt, app_config &config)
|
||||
-> std::unique_ptr<i_provider> {
|
||||
static std::mutex mutex;
|
||||
mutex_lock lock(mutex);
|
||||
|
||||
static std::unique_ptr<i_comm> comm;
|
||||
static std::unique_ptr<i_http_comm> comm;
|
||||
#if defined(REPERTORY_ENABLE_S3)
|
||||
static std::unique_ptr<i_s3_comm> s3_comm;
|
||||
static std::unique_ptr<i_s3_comm> s3_comm_;
|
||||
#endif // defined(REPERTORY_ENABLE_S3)
|
||||
|
||||
switch (pt) {
|
||||
case provider_type::sia:
|
||||
create_comm<i_comm, curl_comm>(comm, config);
|
||||
return std::unique_ptr<i_provider>(dynamic_cast<i_provider *>(new sia_provider(config, *comm)));
|
||||
case provider_type::sia: {
|
||||
create_comm<i_http_comm, curl_comm, host_config>(comm,
|
||||
config.get_host_config());
|
||||
return std::unique_ptr<i_provider>(
|
||||
dynamic_cast<i_provider *>(new sia_provider(config, *comm)));
|
||||
}
|
||||
#if defined(REPERTORY_ENABLE_S3)
|
||||
case provider_type::s3:
|
||||
create_comm<i_s3_comm, aws_s3_comm>(s3_comm, config);
|
||||
case provider_type::s3: {
|
||||
create_comm<i_s3_comm, s3_comm, app_config>(s3_comm_, config);
|
||||
return std::unique_ptr<i_provider>(
|
||||
dynamic_cast<i_provider *>(new s3_provider(config, *s3_comm)));
|
||||
dynamic_cast<i_provider *>(new s3_provider(config, *s3_comm_)));
|
||||
}
|
||||
#endif // defined(REPERTORY_ENABLE_S3)
|
||||
#if defined(REPERTORY_ENABLE_SKYNET)
|
||||
case provider_type::skynet:
|
||||
create_comm<i_comm, curl_comm>(comm, config);
|
||||
case provider_type::encrypt: {
|
||||
return std::unique_ptr<i_provider>(
|
||||
dynamic_cast<i_provider *>(new skynet_provider(config, *comm)));
|
||||
#endif // defined(REPERTORY_ENABLE_SKYNET)
|
||||
#if defined(REPERTORY_TESTING_NEW)
|
||||
case provider_type::passthrough:
|
||||
return std::unique_ptr<i_provider>(
|
||||
dynamic_cast<i_provider *>(new CPassthroughProvider(config)));
|
||||
#endif // defined(REPERTORY_TESTING_NEW)
|
||||
dynamic_cast<i_provider *>(new encrypt_provider(config)));
|
||||
}
|
||||
case provider_type::unknown:
|
||||
default:
|
||||
throw startup_exception("provider not supported: " + app_config::get_provider_display_name(pt));
|
||||
throw startup_exception("provider not supported: " +
|
||||
app_config::get_provider_display_name(pt));
|
||||
}
|
||||
}
|
||||
} // namespace repertory
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,649 +0,0 @@
|
||||
/*
|
||||
Copyright <2018-2022> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
|
||||
associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or
|
||||
substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
|
||||
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
|
||||
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#if defined(REPERTORY_ENABLE_SKYNET)
|
||||
|
||||
#include "providers/skynet/skynet_provider.hpp"
|
||||
#include "comm/i_comm.hpp"
|
||||
#include "app_config.hpp"
|
||||
#include "drives/i_open_file_table.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "types/skynet.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/encryption.hpp"
|
||||
#include "utils/encrypting_reader.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/global_data.hpp"
|
||||
#include "utils/path_utils.hpp"
|
||||
|
||||
namespace repertory {
|
||||
skynet_provider::skynet_provider(app_config &config, i_comm &comm)
|
||||
: base_provider(config),
|
||||
comm_(comm),
|
||||
directory_db_(config),
|
||||
upload_manager_(
|
||||
config, [this](const std::string &api_path) -> bool { return this->is_file(api_path); },
|
||||
[this](const upload_manager::upload &upload, json &data, json &error) -> api_error {
|
||||
return this->upload_handler(upload, data, error);
|
||||
},
|
||||
[this](const std::string &api_path, const std::string &source_path, const json &data) {
|
||||
return this->upload_completed(api_path, source_path, data);
|
||||
}) {
|
||||
next_download_index_ = 0u;
|
||||
next_upload_index_ = 0u;
|
||||
update_portal_list();
|
||||
E_SUBSCRIBE_EXACT(skynet_portal_list_changed,
|
||||
[this](const skynet_portal_list_changed &) { this->update_portal_list(); });
|
||||
|
||||
// Remove legacy encrypted files
|
||||
api_file_list list;
|
||||
get_file_list(list);
|
||||
for (const auto &file : list) {
|
||||
std::string token;
|
||||
get_item_meta(file.api_path, "token", token);
|
||||
if (not token.empty()) {
|
||||
remove_file(file.api_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
api_error skynet_provider::create_directory(const std::string &api_path, const api_meta_map &meta) {
|
||||
auto ret = api_error::success;
|
||||
if (utils::path::is_trash_directory(api_path)) {
|
||||
ret = api_error::access_denied;
|
||||
} else {
|
||||
#ifdef _WIN32
|
||||
ret = is_directory(api_path) ? api_error::directory_exists
|
||||
: is_file(api_path) ? api_error::file_exists
|
||||
: api_error::success;
|
||||
if (ret != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
if ((ret = directory_db_.create_directory(api_path)) != api_error::success) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
set_item_meta(api_path, meta);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::create_file(const std::string &api_path, api_meta_map &meta) {
|
||||
if (meta[META_SIZE].empty()) {
|
||||
meta[META_SIZE] = "0";
|
||||
}
|
||||
|
||||
// When META_ID is present, an external import is occurring.
|
||||
// Need to skip the encryption token in this scenario.
|
||||
if (meta[META_ID].empty() && meta[META_ENCRYPTION_TOKEN].empty()) {
|
||||
meta[META_ENCRYPTION_TOKEN] = get_config().get_skynet_config().encryption_token;
|
||||
}
|
||||
|
||||
auto ret = base_provider::create_file(api_path, meta);
|
||||
if (ret == api_error::success) {
|
||||
ret = directory_db_.create_file(api_path);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
json skynet_provider::export_all() const {
|
||||
json ret = {{"success", std::vector<json>()}, {"failed", std::vector<std::string>()}};
|
||||
|
||||
api_file_list list;
|
||||
get_file_list(list);
|
||||
for (const auto &file : list) {
|
||||
process_export(ret, file.api_path);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
json skynet_provider::export_list(const std::vector<std::string> &api_path_list) const {
|
||||
json ret = {{"success", std::vector<json>()}, {"failed", std::vector<std::string>()}};
|
||||
|
||||
for (const auto &api_path : api_path_list) {
|
||||
process_export(ret, api_path);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::uint64_t skynet_provider::get_directory_item_count(const std::string &api_path) const {
|
||||
return is_directory(api_path) ? directory_db_.get_directory_item_count(api_path) : 0u;
|
||||
}
|
||||
|
||||
api_error skynet_provider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const {
|
||||
if (is_file(api_path)) {
|
||||
return api_error::item_is_file;
|
||||
}
|
||||
|
||||
if (not is_directory(api_path)) {
|
||||
const_cast<skynet_provider *>(this)->remove_item_meta(api_path);
|
||||
return api_error::directory_not_found;
|
||||
}
|
||||
|
||||
directory_db_.populate_sub_directories(
|
||||
api_path,
|
||||
[this](directory_item &di, const bool &) {
|
||||
this->get_item_meta(di.api_path, di.meta);
|
||||
this->oft_->update_directory_item(di);
|
||||
},
|
||||
list);
|
||||
|
||||
directory_db_.populate_directory_files(
|
||||
api_path,
|
||||
[this](directory_item &di, const bool &) {
|
||||
di.api_parent = utils::path::get_parent_api_path(di.api_path);
|
||||
this->get_item_meta(di.api_path, di.meta);
|
||||
this->oft_->update_directory_item(di);
|
||||
},
|
||||
list);
|
||||
|
||||
std::sort(list.begin(), list.end(), [](const auto &a, const auto &b) -> bool {
|
||||
return (a.directory && not b.directory) ? true
|
||||
: (b.directory && not a.directory) ? false
|
||||
: (a.api_path.compare(b.api_path) < 0);
|
||||
});
|
||||
|
||||
list.insert(list.begin(), directory_item{
|
||||
"..",
|
||||
"",
|
||||
true,
|
||||
});
|
||||
list.insert(list.begin(), directory_item{
|
||||
".",
|
||||
"",
|
||||
true,
|
||||
});
|
||||
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
api_error skynet_provider::get_file(const std::string &api_path, api_file &file) const {
|
||||
const auto ret =
|
||||
directory_db_.get_file(api_path, file, [this](api_file &file) { populate_api_file(file); });
|
||||
|
||||
if (ret != api_error::success) {
|
||||
if (not is_directory(api_path)) {
|
||||
const_cast<skynet_provider *>(this)->remove_item_meta(api_path);
|
||||
}
|
||||
event_system::instance().raise<file_get_failed>(api_path,
|
||||
std::to_string(static_cast<int>(ret)));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::get_file_list(api_file_list &list) const {
|
||||
const auto ret =
|
||||
directory_db_.get_file_list(list, [this](api_file &file) { populate_api_file(file); });
|
||||
if (ret != api_error::success) {
|
||||
event_system::instance().raise<file_get_api_list_failed>(std::to_string(static_cast<int>(ret)));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::get_file_size(const std::string &api_path,
|
||||
std::uint64_t &file_size) const {
|
||||
api_file file{};
|
||||
const auto ret = get_file(api_path, file);
|
||||
if (ret == api_error::success) {
|
||||
file_size = file.file_size;
|
||||
} else {
|
||||
event_system::instance().raise<file_get_size_failed>(api_path,
|
||||
std::to_string(static_cast<int>(ret)));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
host_config skynet_provider::get_host_config(const bool &upload) {
|
||||
const auto format_host_config = [&](host_config hc) {
|
||||
hc.path = upload ? "/skynet/skyfile" : "/";
|
||||
return hc;
|
||||
};
|
||||
|
||||
unique_mutex_lock portal_lock(portal_mutex_);
|
||||
const auto portal_list = upload ? upload_list_ : download_list_;
|
||||
portal_lock.unlock();
|
||||
|
||||
auto &next_upload_index = upload ? next_upload_index_ : next_download_index_;
|
||||
auto idx = next_upload_index++;
|
||||
if (idx >= portal_list->size()) {
|
||||
idx = next_upload_index = 0u;
|
||||
}
|
||||
|
||||
return format_host_config((*portal_list)[idx]);
|
||||
}
|
||||
|
||||
std::size_t skynet_provider::get_retry_count() const {
|
||||
mutex_lock portal_lock(portal_mutex_);
|
||||
return std::max(download_list_->size(),
|
||||
static_cast<std::size_t>(get_config().get_retry_read_count()));
|
||||
}
|
||||
|
||||
api_error skynet_provider::get_skynet_metadata(const std::string &skylink, json &json_meta) {
|
||||
auto ret = api_error::error;
|
||||
|
||||
http_headers headers;
|
||||
const auto retry_count = get_retry_count();
|
||||
|
||||
for (std::size_t i = 0u; (ret != api_error::success) && (i < retry_count); i++) {
|
||||
json data, error;
|
||||
const auto hc = get_host_config(false);
|
||||
if (comm_.get(hc, "/skynet/metadata/" + skylink, data, error) == api_error::success) {
|
||||
headers["skynet-file-metadata"] = data.dump();
|
||||
ret = api_error::success;
|
||||
} else {
|
||||
std::vector<char> buffer;
|
||||
if (comm_.get_range_and_headers(hc, utils::path::create_api_path(skylink), 0u,
|
||||
{{"format", "concat"}}, "", buffer, {{0, 0}}, error, headers,
|
||||
stop_requested_) == api_error::success) {
|
||||
ret = api_error::success;
|
||||
} else if (not error.empty()) {
|
||||
event_system::instance().raise<repertory_exception>(__FUNCTION__, error.dump(2));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ret == api_error::success) {
|
||||
json_meta = json::parse(headers["skynet-file-metadata"]);
|
||||
if (json_meta["subfiles"].empty()) {
|
||||
auto sub_file = json_meta;
|
||||
sub_file["len"] =
|
||||
utils::string::to_uint64(utils::string::split(headers["content-range"], '/')[1]);
|
||||
json sub_files = {{json_meta["filename"], sub_file}};
|
||||
json_meta["subfiles"] = sub_files;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::import_skylink(const skylink_import &si) {
|
||||
json json_meta;
|
||||
auto ret = get_skynet_metadata(si.skylink, json_meta);
|
||||
if (ret == api_error::success) {
|
||||
const auto encrypted = not si.token.empty();
|
||||
for (auto sub_file : json_meta["subfiles"]) {
|
||||
const auto meta_file_name = sub_file["filename"].get<std::string>();
|
||||
auto file_name = meta_file_name;
|
||||
if (encrypted) {
|
||||
if ((ret = utils::encryption::decrypt_file_name(si.token, file_name)) !=
|
||||
api_error::success) {
|
||||
event_system::instance().raise<skynet_import_decryption_failed>(
|
||||
si.skylink, sub_file["filename"], ret);
|
||||
}
|
||||
}
|
||||
|
||||
if (ret == api_error::success) {
|
||||
const auto api_path =
|
||||
utils::path::create_api_path(utils::path::combine(si.directory, {file_name}));
|
||||
const auto api_parent = utils::path::get_parent_api_path(api_path);
|
||||
const auto parts = utils::string::split(api_parent, '/', false);
|
||||
|
||||
std::string sub_directory = "/";
|
||||
for (std::size_t i = 0u; (ret == api_error::success) && (i < parts.size()); i++) {
|
||||
sub_directory =
|
||||
utils::path::create_api_path(utils::path::combine(sub_directory, {parts[i]}));
|
||||
if (not is_directory(sub_directory)) {
|
||||
if ((ret = directory_db_.create_directory(sub_directory)) == api_error::success) {
|
||||
base_provider::notify_directory_added(
|
||||
sub_directory, utils::path::get_parent_api_path(sub_directory));
|
||||
} else {
|
||||
event_system::instance().raise<skynet_import_directory_failed>(si.skylink,
|
||||
sub_directory, ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ret == api_error::success) {
|
||||
auto file_size = sub_file["len"].get<std::uint64_t>();
|
||||
if (encrypted) {
|
||||
file_size = utils::encryption::encrypting_reader::calculate_decrypted_size(file_size);
|
||||
}
|
||||
|
||||
const auto skylink =
|
||||
si.skylink + ((json_meta["filename"].get<std::string>() == meta_file_name)
|
||||
? ""
|
||||
: "/" + meta_file_name);
|
||||
api_meta_map meta{};
|
||||
meta[META_ID] = json({{"skylink", skylink}}).dump();
|
||||
meta[META_ENCRYPTION_TOKEN] = si.token;
|
||||
if ((ret = create_file(api_path, meta)) == api_error::success) {
|
||||
const auto now = utils::get_file_time_now();
|
||||
api_item_added_(api_path, api_parent, "", false, now, now, now, now);
|
||||
|
||||
if (file_size > 0u) {
|
||||
set_item_meta(api_path, META_SIZE, std::to_string(file_size));
|
||||
global_data::instance().increment_used_drive_space(file_size);
|
||||
}
|
||||
} else {
|
||||
event_system::instance().raise<skynet_import_file_failed>(si.skylink, api_path, ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool skynet_provider::is_directory(const std::string &api_path) const {
|
||||
return (api_path == "/") || directory_db_.is_directory(api_path);
|
||||
}
|
||||
|
||||
bool skynet_provider::is_file(const std::string &api_path) const {
|
||||
return (api_path != "/") && directory_db_.is_file(api_path);
|
||||
}
|
||||
|
||||
bool skynet_provider::is_file_writeable(const std::string &api_path) const {
|
||||
auto ret = true;
|
||||
std::string id;
|
||||
get_item_meta(api_path, META_ID, id);
|
||||
if (not id.empty()) {
|
||||
try {
|
||||
const auto skynet_data = json::parse(id);
|
||||
const auto skylink = skynet_data["skylink"].get<std::string>();
|
||||
ret = not utils::string::contains(skylink, "/");
|
||||
} catch (const std::exception &e) {
|
||||
event_system::instance().raise<repertory_exception>(
|
||||
__FUNCTION__, e.what() ? e.what() : "exception occurred");
|
||||
ret = false;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool skynet_provider::is_processing(const std::string &api_path) const {
|
||||
return upload_manager_.is_processing(api_path);
|
||||
}
|
||||
|
||||
void skynet_provider::notify_directory_added(const std::string &api_path,
|
||||
const std::string &api_parent) {
|
||||
if (api_path == "/") {
|
||||
if (directory_db_.create_directory("/") == api_error::success) {
|
||||
base_provider::notify_directory_added(api_path, api_parent);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
api_error skynet_provider::notify_file_added(const std::string &, const std::string &,
|
||||
const std::uint64_t &) {
|
||||
return api_error::not_implemented;
|
||||
}
|
||||
|
||||
void skynet_provider::populate_api_file(api_file &file) const {
|
||||
api_meta_map meta{};
|
||||
this->get_item_meta(file.api_path, meta);
|
||||
|
||||
file.api_parent = utils::path::get_parent_api_path(file.api_path);
|
||||
file.accessed_date = utils::string::to_uint64(meta[META_ACCESSED]);
|
||||
file.changed_date = utils::string::to_uint64(meta[META_MODIFIED]);
|
||||
file.created_date = utils::string::to_uint64(meta[META_CREATION]);
|
||||
file.encryption_token = meta[META_ENCRYPTION_TOKEN].empty() ? "" : meta[META_ENCRYPTION_TOKEN];
|
||||
file.file_size = utils::string::to_uint64(meta[META_SIZE]);
|
||||
file.modified_date = utils::string::to_uint64(meta[META_MODIFIED]);
|
||||
file.recoverable = not is_processing(file.api_path);
|
||||
file.redundancy = 3.0;
|
||||
file.source_path = meta[META_SOURCE];
|
||||
}
|
||||
|
||||
void skynet_provider::process_export(json &result, const std::string &api_path) const {
|
||||
try {
|
||||
std::string id;
|
||||
std::string token;
|
||||
if (is_file(api_path) && (get_item_meta(api_path, META_ID, id) == api_error::success) &&
|
||||
(get_item_meta(api_path, META_ENCRYPTION_TOKEN, token) == api_error::success)) {
|
||||
auto directory = utils::path::get_parent_api_path(api_path);
|
||||
|
||||
const auto skylink = json::parse(id)["skylink"].get<std::string>();
|
||||
if (utils::string::contains(skylink, "/")) {
|
||||
const auto pos = skylink.find('/');
|
||||
const auto path =
|
||||
utils::path::create_api_path(utils::path::remove_file_name(skylink.substr(pos)));
|
||||
if (path != "/") {
|
||||
directory =
|
||||
utils::path::create_api_path(directory.substr(0, directory.length() - path.length()));
|
||||
}
|
||||
}
|
||||
|
||||
result["success"].emplace_back(
|
||||
json({{"skylink", skylink},
|
||||
{"token", token},
|
||||
{"directory", directory},
|
||||
{"filename", utils::path::strip_to_file_name(api_path)}}));
|
||||
} else {
|
||||
result["failed"].emplace_back(api_path);
|
||||
}
|
||||
} catch (const std::exception &e) {
|
||||
result["failed"].emplace_back(api_path);
|
||||
event_system::instance().raise<repertory_exception>(
|
||||
__FUNCTION__, e.what() ? e.what() : "export failed: " + api_path);
|
||||
}
|
||||
}
|
||||
|
||||
api_error skynet_provider::read_file_bytes(const std::string &api_path, const std::size_t &size,
|
||||
const std::uint64_t &offset, std::vector<char> &data,
|
||||
const bool &stop_requested) {
|
||||
if (size == 0u) {
|
||||
return api_error::success;
|
||||
}
|
||||
|
||||
std::string id;
|
||||
auto ret = get_item_meta(api_path, META_ID, id);
|
||||
if (ret == api_error::success) {
|
||||
ret = api_error::download_failed;
|
||||
|
||||
const auto skynet_data = json::parse(id);
|
||||
const auto path = utils::path::create_api_path(skynet_data["skylink"].get<std::string>());
|
||||
const auto ranges = http_ranges({{offset, offset + size - 1}});
|
||||
|
||||
std::string encryption_token;
|
||||
get_item_meta(api_path, META_ENCRYPTION_TOKEN, encryption_token);
|
||||
|
||||
std::uint64_t file_size{};
|
||||
{
|
||||
std::string temp;
|
||||
get_item_meta(api_path, META_SIZE, temp);
|
||||
file_size = utils::string::to_uint64(temp);
|
||||
}
|
||||
|
||||
const auto retry_count = get_retry_count();
|
||||
for (std::size_t i = 0u; not stop_requested && (ret != api_error::success) && (i < retry_count);
|
||||
i++) {
|
||||
json error;
|
||||
ret = (comm_.get_range(get_host_config(false), path, file_size, {{"format", "concat"}},
|
||||
encryption_token, data, ranges, error,
|
||||
stop_requested) == api_error::success)
|
||||
? api_error::success
|
||||
: api_error::download_failed;
|
||||
if (ret != api_error::success) {
|
||||
event_system::instance().raise<file_read_bytes_failed>(api_path, error.dump(2), i + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::remove_directory(const std::string &api_path) {
|
||||
const auto ret = directory_db_.remove_directory(api_path);
|
||||
if (ret == api_error::success) {
|
||||
remove_item_meta(api_path);
|
||||
event_system::instance().raise<directory_removed>(api_path);
|
||||
} else {
|
||||
event_system::instance().raise<directory_remove_failed>(api_path,
|
||||
std::to_string(static_cast<int>(ret)));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::remove_file(const std::string &api_path) {
|
||||
upload_manager_.remove_upload(api_path);
|
||||
const auto ret =
|
||||
directory_db_.remove_file(api_path) ? api_error::success : api_error::item_not_found;
|
||||
if (ret == api_error::success) {
|
||||
remove_item_meta(api_path);
|
||||
event_system::instance().raise<file_removed>(api_path);
|
||||
} else {
|
||||
event_system::instance().raise<file_remove_failed>(api_path,
|
||||
std::to_string(static_cast<int>(ret)));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::rename_file(const std::string &from_api_path,
|
||||
const std::string &to_api_path) {
|
||||
std::string id;
|
||||
auto ret = get_item_meta(from_api_path, META_ID, id);
|
||||
if (ret == api_error::success) {
|
||||
ret = api_error::access_denied;
|
||||
|
||||
const auto skynet_data = json::parse(id.empty() ? R"({"skylink":""})" : id);
|
||||
const auto skylink = skynet_data["skylink"].get<std::string>();
|
||||
if (utils::string::contains(skylink, "/")) {
|
||||
const auto pos = skylink.find('/');
|
||||
const auto len = skylink.size() - pos;
|
||||
if (to_api_path.size() >= len) {
|
||||
const auto comp1 = to_api_path.substr(to_api_path.size() - len);
|
||||
const auto comp2 = skylink.substr(pos);
|
||||
ret = (comp1 == comp2) ? api_error::success : ret;
|
||||
}
|
||||
} else {
|
||||
ret = (skylink.empty() || (utils::path::strip_to_file_name(from_api_path) ==
|
||||
utils::path::strip_to_file_name(to_api_path)))
|
||||
? api_error::success
|
||||
: ret;
|
||||
}
|
||||
|
||||
if (ret == api_error::success) {
|
||||
std::string current_source;
|
||||
if ((ret = get_item_meta(from_api_path, META_SOURCE, current_source)) == api_error::success) {
|
||||
if (not upload_manager_.execute_if_not_processing({from_api_path, to_api_path}, [&]() {
|
||||
if ((ret = directory_db_.rename_file(from_api_path, to_api_path)) ==
|
||||
api_error::success) {
|
||||
meta_db_.rename_item_meta(current_source, from_api_path, to_api_path);
|
||||
}
|
||||
})) {
|
||||
ret = api_error::file_in_use;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool skynet_provider::start(api_item_added_callback api_item_added, i_open_file_table *oft) {
|
||||
const auto ret = base_provider::start(api_item_added, oft);
|
||||
if (not ret) {
|
||||
api_file_list list;
|
||||
if (get_file_list(list) != api_error::success) {
|
||||
throw startup_exception("failed to determine used space");
|
||||
}
|
||||
|
||||
const auto total_size =
|
||||
std::accumulate(list.begin(), list.end(), std::uint64_t(0),
|
||||
[](std::uint64_t t, const api_file &file) { return t + file.file_size; });
|
||||
global_data::instance().initialize_used_drive_space(total_size);
|
||||
upload_manager_.start();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void skynet_provider::stop() {
|
||||
stop_requested_ = true;
|
||||
upload_manager_.stop();
|
||||
}
|
||||
|
||||
bool skynet_provider::update_portal_list() {
|
||||
auto portal_list = get_config().get_skynet_config().portal_list;
|
||||
auto download_portal_list = std::make_shared<std::vector<host_config>>();
|
||||
auto upload_portal_list = std::make_shared<std::vector<host_config>>();
|
||||
|
||||
std::copy_if(portal_list.begin(), portal_list.end(), std::back_inserter(*upload_portal_list),
|
||||
[](const auto &portal) -> bool {
|
||||
return not portal.auth_url.empty() && not portal.auth_user.empty();
|
||||
});
|
||||
for (const auto &portal : portal_list) {
|
||||
if (upload_portal_list->empty()) {
|
||||
upload_portal_list->emplace_back(portal);
|
||||
}
|
||||
download_portal_list->emplace_back(portal);
|
||||
}
|
||||
|
||||
unique_mutex_lock portal_lock(portal_mutex_);
|
||||
download_list_ = std::move(download_portal_list);
|
||||
upload_list_ = std::move(upload_portal_list);
|
||||
portal_lock.unlock();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void skynet_provider::upload_completed(const std::string &api_path, const std::string &,
|
||||
const json &data) {
|
||||
set_item_meta(api_path, META_ID, data.dump());
|
||||
}
|
||||
|
||||
api_error skynet_provider::upload_file(const std::string &api_path, const std::string &source_path,
|
||||
const std::string &encryption_token) {
|
||||
std::uint64_t file_size = 0u;
|
||||
utils::file::get_file_size(source_path, file_size);
|
||||
auto ret = set_source_path(api_path, source_path);
|
||||
if (ret == api_error::success) {
|
||||
if (((ret = set_item_meta(api_path, META_SIZE, std::to_string(file_size))) ==
|
||||
api_error::success) &&
|
||||
((ret = set_item_meta(api_path, META_ENCRYPTION_TOKEN, encryption_token)) ==
|
||||
api_error::success) &&
|
||||
(file_size != 0u)) {
|
||||
ret = upload_manager_.queue_upload(api_path, source_path, encryption_token);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error skynet_provider::upload_handler(const upload_manager::upload &upload, json &data,
|
||||
json &error) {
|
||||
const auto file_name = utils::path::strip_to_file_name(upload.api_path);
|
||||
|
||||
auto ret = api_error::upload_failed;
|
||||
if (not upload.cancel) {
|
||||
event_system::instance().raise<file_upload_begin>(upload.api_path, upload.source_path);
|
||||
ret = (comm_.post_multipart_file(get_host_config(true), "", file_name, upload.source_path,
|
||||
upload.encryption_token, data, error,
|
||||
upload.cancel) == api_error::success)
|
||||
? api_error::success
|
||||
: api_error::upload_failed;
|
||||
}
|
||||
|
||||
event_system::instance().raise<file_upload_end>(upload.api_path, upload.source_path, ret);
|
||||
return ret;
|
||||
}
|
||||
} // namespace repertory
|
||||
|
||||
#endif // defined(REPERTORY_ENABLE_SKYNET)
|
Reference in New Issue
Block a user