initial commit
src/providers/base_provider.cpp (new file, 209 lines)
@@ -0,0 +1,209 @@
/*
  Copyright <2018-2022> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
  associated documentation files (the "Software"), to deal in the Software without restriction,
  including without limitation the rights to use, copy, modify, merge, publish, distribute,
  sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all copies or
  substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
  NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
  OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "providers/base_provider.hpp"
#include "app_config.hpp"
#include "types/repertory.hpp"
#include "utils/file_utils.hpp"
#include "utils/global_data.hpp"
#include "utils/native_file.hpp"
#include "utils/path_utils.hpp"

namespace repertory {
base_provider::base_provider(app_config &config) : config_(config), meta_db_(config) {}

void base_provider::notify_directory_added(const std::string &api_path,
                                           const std::string &api_parent) {
  recur_mutex_lock l(notify_added_mutex_);

  const auto now = utils::get_file_time_now();
  api_item_added_(api_path, api_parent, "", true, now, now, now, now);
}

void base_provider::update_filesystem_item(const bool &directory, const api_error &error,
                                           const std::string &api_path,
                                           filesystem_item &fsi) const {
  if (error == api_error::success) {
    fsi.directory = directory;
    fsi.lock = fsi.lock ? fsi.lock : std::make_shared<std::recursive_mutex>();
    fsi.api_path = api_path;
    fsi.api_parent = utils::path::get_parent_api_path(api_path);
  } else {
    event_system::instance().raise<filesystem_item_get_failed>(
        api_path, std::to_string(static_cast<int>(error)));
  }
}

api_error base_provider::create_directory_clone_source_meta(const std::string &source_api_path,
                                                            const std::string &api_path) {
  api_meta_map meta{};
  auto ret = get_item_meta(source_api_path, meta);
  if (ret == api_error::success) {
    ret = create_directory(api_path, meta);
  }
  return ret;
}

api_error base_provider::create_file(const std::string &api_path, api_meta_map &meta) {
  const auto isDir = is_directory(api_path);
  const auto isFile = is_file(api_path);
  auto ret = isDir    ? api_error::directory_exists
             : isFile ? api_error::file_exists
                      : api_error::success;
  if (ret == api_error::success) {
    const auto source =
        utils::path::combine(get_config().get_cache_directory(), {utils::create_uuid_string()});

    native_file_ptr nf;
    if ((ret = native_file::create_or_open(source, nf)) == api_error::success) {
      nf->close();
    }

    if (ret == api_error::success) {
      if (((ret = set_item_meta(api_path, meta)) != api_error::success) ||
          ((ret = set_source_path(api_path, source)) != api_error::success) ||
          ((ret = upload_file(api_path, source, meta[META_ENCRYPTION_TOKEN])) !=
           api_error::success)) {
        meta_db_.remove_item_meta(format_api_path(api_path));
        utils::file::delete_file(source);
      }
    }
  }

  return ret;
}

api_error base_provider::get_api_path_from_source(const std::string &source_path,
                                                  std::string &api_path) const {
  const auto ret = meta_db_.get_api_path_from_source(source_path, api_path);
  restore_api_path(api_path);
  return ret;
}

api_error base_provider::get_filesystem_item(const std::string &api_path, const bool &directory,
                                             filesystem_item &fsi) const {
  auto ret = api_error::error;
  if (directory) {
    ret = is_directory(api_path) ? api_error::success : api_error::item_not_found;
    update_filesystem_item(true, ret, api_path, fsi);
  } else {
    api_file file{};
    ret = get_filesystem_item_and_file(api_path, file, fsi);
  }

  return ret;
}

api_error base_provider::get_filesystem_item_and_file(const std::string &api_path, api_file &file,
                                                      filesystem_item &fsi) const {
  auto ret = get_item_meta(api_path, META_SOURCE, fsi.source_path);
  if (ret == api_error::success) {
    ret = get_file(api_path, file);
    if (ret == api_error::success) {
      fsi.encryption_token = file.encryption_token;
      fsi.size = file.file_size;
    } else if (not is_file(api_path)) {
      ret = api_error::item_not_found;
    }
  }

  update_filesystem_item(false, ret, api_path, fsi);
  return ret;
}

api_error base_provider::get_filesystem_item_from_source_path(const std::string &source_path,
                                                              filesystem_item &fsi) const {
  auto ret = api_error::item_not_found;
  if (not source_path.empty()) {
    std::string api_path;
    if ((ret = get_api_path_from_source(source_path, api_path)) == api_error::success) {
      ret = get_filesystem_item(api_path, false, fsi);
    }
  }

  return ret;
}

api_error base_provider::get_item_meta(const std::string &api_path, api_meta_map &meta) const {
  auto ret = meta_db_.get_item_meta(format_api_path(api_path), meta);
  if (ret == api_error::item_not_found) {
    auto get_meta = false;
    if (is_directory(api_path)) {
      notify_directory_added(api_path, utils::path::get_parent_api_path(api_path));
      get_meta = true;
    } else if (is_file(api_path)) {
      std::uint64_t file_size = 0u;
      if ((ret = get_file_size(api_path, file_size)) == api_error::success) {
        get_meta = ((ret = notify_file_added(api_path, utils::path::get_parent_api_path(api_path),
                                             file_size)) == api_error::success);
      }
    }
    if (get_meta) {
      ret = meta_db_.get_item_meta(format_api_path(api_path), meta);
    }
  }

  return ret;
}

api_error base_provider::get_item_meta(const std::string &api_path, const std::string &key,
                                       std::string &value) const {
  auto ret = meta_db_.get_item_meta(format_api_path(api_path), key, value);
  if (ret == api_error::item_not_found) {
    auto get_meta = false;
    if (is_directory(api_path)) {
      notify_directory_added(api_path, utils::path::get_parent_api_path(api_path));
      get_meta = true;
    } else if (is_file(api_path)) {
      std::uint64_t file_size = 0u;
      if ((ret = get_file_size(api_path, file_size)) == api_error::success) {
        get_meta = ((ret = notify_file_added(api_path, utils::path::get_parent_api_path(api_path),
                                             file_size)) == api_error::success);
      }
    }
    if (get_meta) {
      ret = meta_db_.get_item_meta(format_api_path(api_path), key, value);
    }
  }

  return ret;
}

std::uint64_t base_provider::get_used_drive_space() const {
  return global_data::instance().get_used_drive_space();
}

bool base_provider::start(api_item_added_callback api_item_added, i_open_file_table *oft) {
  api_item_added_ = api_item_added;
  oft_ = oft;

  auto unmount_requested = false;
  {
    repertory::event_consumer ec("unmount_requested",
                                 [&unmount_requested](const event &) { unmount_requested = true; });
    for (std::uint16_t i = 0u; not unmount_requested && not is_online() &&
                               (i < get_config().get_online_check_retry_secs());
         i++) {
      event_system::instance().raise<provider_offline>(
          get_config().get_host_config().host_name_or_ip, get_config().get_host_config().api_port);
      std::this_thread::sleep_for(1s);
    }
  }
  return unmount_requested;
}
} // namespace repertory
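base_provider::create_file above stages a zero-byte file in the cache directory, records the item metadata and source path, then uploads it, removing the metadata entry and the cache file again if any step fails. A minimal caller-side sketch of that flow, assuming a concrete provider instance built on base_provider and the repository's headers; the path and metadata values are illustrative and not part of this commit:

    // Sketch only: "provider" is any i_provider implementation derived from base_provider.
    api_meta_map meta{};                        // concrete providers fill in their own defaults
    if (provider.create_file("/docs/readme.txt", meta) == api_error::success) {
      filesystem_item fsi{};
      // directory == false; on success fsi.source_path points at the staged cache copy
      if (provider.get_filesystem_item("/docs/readme.txt", false, fsi) == api_error::success) {
        // read or write through fsi.source_path, guarded by fsi.lock
      }
    }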
src/providers/passthrough/passthroughprovider.cpp (new file, 192 lines)
@@ -0,0 +1,192 @@
/*
  Copyright <2018-2022> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
  associated documentation files (the "Software"), to deal in the Software without restriction,
  including without limitation the rights to use, copy, modify, merge, publish, distribute,
  sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all copies or
  substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
  NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
  OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#if defined(REPERTORY_TESTING_NEW)
#include "providers/passthrough/passthroughprovider.hpp"
#include "utils/file_utils.hpp"
#include "utils/path_utils.hpp"

namespace repertory {
std::string CPassthroughProvider::ConstructFullPath(const std::string &api_path) const {
  return utils::path::combine(passthroughLocation_, {api_path});
}

api_error CPassthroughProvider::CreateDirectory(const std::string &api_path,
                                                const api_meta_map &meta) {
  const auto fullPath = ConstructFullPath(api_path);
  auto ret = utils::file::create_full_directory_path(fullPath) ? api_error::OSErrorCode
                                                               : api_error::Success;
  if (ret == api_error::Success) {
    ret = set_item_meta(api_path, meta);
  }

  return ret;
}

api_error CPassthroughProvider::get_file_list(ApiFileList &fileList) const {
  const auto fullPath = ConstructFullPath("/");
  const auto fl = utils::file::get_directory_files(fullPath, false, true);
  for (const auto &file : fl) {
    const auto api_path = utils::path::create_api_path(file.substr(fullPath.length()));
    /*
    struct ApiFile {
      std::string ApiFilePath{};
      std::string ApiParent{};
      std::uint64_t AccessedDate = 0;
      std::uint64_t ChangedDate = 0;
      std::uint64_t CreationDate = 0;
      std::string EncryptionToken{};
      std::uint64_t FileSize = 0;
      std::uint64_t ModifiedDate = 0;
      bool Recoverable = false;
      double Redundancy = 0.0;
      std::string SourceFilePath{};
    };
    */
    ApiFile apiFile{
        api_path,
        utils::path::get_parent_api_path(api_path),
    };
    // apiFile.Recoverable = not IsProcessing(api_path);
    apiFile.Redundancy = 3.0;
    apiFile.SourceFilePath = file;
    // utils::file::UpdateApiFileInfo(apiFile);
    fileList.emplace_back(apiFile);
  }

  return api_error::Success;
}

std::uint64_t CPassthroughProvider::get_directory_item_count(const std::string &api_path) const {
  const auto fullPath = ConstructFullPath(api_path);
  return 0;
}

api_error CPassthroughProvider::get_directory_items(const std::string &api_path,
                                                    directory_item_list &list) const {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::GetFile(const std::string &api_path, ApiFile &file) const {
  return api_error::Error;
}

api_error CPassthroughProvider::GetFileSize(const std::string &api_path,
                                            std::uint64_t &fileSize) const {
  return api_error::Error;
}

api_error CPassthroughProvider::get_filesystem_item(const std::string &api_path,
                                                    const bool &directory,
                                                    FileSystemItem &fileSystemItem) const {
  return api_error::NotImplemented;
}

api_error
CPassthroughProvider::get_filesystem_item_from_source_path(const std::string &sourceFilePath,
                                                           FileSystemItem &fileSystemItem) const {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::get_item_meta(const std::string &api_path,
                                              api_meta_map &meta) const {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::get_item_meta(const std::string &api_path,
                                              const std::string &key, std::string &value) const {
  return api_error::NotImplemented;
}

std::uint64_t CPassthroughProvider::get_total_drive_space() const { return 0; }

std::uint64_t CPassthroughProvider::get_total_item_count() const { return 0; }

std::uint64_t CPassthroughProvider::get_used_drive_space() const { return 0; }

bool CPassthroughProvider::IsDirectory(const std::string &api_path) const {
  const auto fullPath = ConstructFullPath(api_path);
  return utils::file::is_directory(fullPath);
}

bool CPassthroughProvider::IsFile(const std::string &api_path) const {
  const auto fullPath = ConstructFullPath(api_path);
  return utils::file::is_file(fullPath);
}

api_error CPassthroughProvider::notify_file_added(const std::string &api_path,
                                                  const std::string &api_parent,
                                                  const std::uint64_t &size) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::read_file_bytes(const std::string &apiFilepath,
                                                const std::size_t &size,
                                                const std::uint64_t &offset,
                                                std::vector<char> &data,
                                                const bool &stop_requested) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::RemoveDirectory(const std::string &api_path) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::RemoveFile(const std::string &api_path) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::RenameFile(const std::string &fromApiPath,
                                           const std::string &toApiPath) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::remove_item_meta(const std::string &api_path,
                                                 const std::string &key) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::set_item_meta(const std::string &api_path,
                                              const std::string &key, const std::string &value) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::set_item_meta(const std::string &api_path,
                                              const api_meta_map &meta) {
  return api_error::NotImplemented;
}

api_error CPassthroughProvider::set_source_path(const std::string &api_path,
                                                const std::string &sourcePath) {
  return api_error::NotImplemented;
}

bool CPassthroughProvider::Start(ApiItemAdded apiItemAdded, i_open_file_table *openFileTable) {
  return false;
}

void CPassthroughProvider::Stop() {}

api_error CPassthroughProvider::upload_file(const std::string &api_path,
                                            const std::string &sourcePath,
                                            const std::string &encryptionToken) {
  return api_error::NotImplemented;
}
} // namespace repertory

#endif // defined(REPERTORY_TESTING_NEW)
src/providers/provider.cpp (new file, 76 lines)
@@ -0,0 +1,76 @@
/*
  Copyright <2018-2022> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
  associated documentation files (the "Software"), to deal in the Software without restriction,
  including without limitation the rights to use, copy, modify, merge, publish, distribute,
  sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all copies or
  substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
  NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
  OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "providers/provider.hpp"
#include "comm/aws_s3/aws_s3_comm.hpp"
#include "comm/curl/curl_comm.hpp"
#include "comm/i_comm.hpp"
#include "comm/i_s3_comm.hpp"
#include "app_config.hpp"
#include "events/events.hpp"
#include "providers/passthrough/passthroughprovider.hpp"
#include "providers/s3/s3_provider.hpp"
#include "providers/sia/sia_provider.hpp"
#include "providers/skynet/skynet_provider.hpp"
#include "types/startup_exception.hpp"

namespace repertory {
template <typename i, typename t>
static void create_comm(std::unique_ptr<i> &comm, app_config &config) {
  if (comm) {
    throw startup_exception("'create_provider' should only be called once");
  }
  comm = std::make_unique<t>(config);
}

std::unique_ptr<i_provider> create_provider(const provider_type &pt, app_config &config) {
  static std::mutex mutex;
  mutex_lock lock(mutex);

  static std::unique_ptr<i_comm> comm;
#if defined(REPERTORY_ENABLE_S3)
  static std::unique_ptr<i_s3_comm> s3_comm;
#endif // defined(REPERTORY_ENABLE_S3)

  switch (pt) {
  case provider_type::sia:
    create_comm<i_comm, curl_comm>(comm, config);
    return std::unique_ptr<i_provider>(dynamic_cast<i_provider *>(new sia_provider(config, *comm)));
#if defined(REPERTORY_ENABLE_S3)
  case provider_type::s3:
    create_comm<i_s3_comm, aws_s3_comm>(s3_comm, config);
    return std::unique_ptr<i_provider>(
        dynamic_cast<i_provider *>(new s3_provider(config, *s3_comm)));
#endif // defined(REPERTORY_ENABLE_S3)
#if defined(REPERTORY_ENABLE_SKYNET)
  case provider_type::skynet:
    create_comm<i_comm, curl_comm>(comm, config);
    return std::unique_ptr<i_provider>(
        dynamic_cast<i_provider *>(new skynet_provider(config, *comm)));
#endif // defined(REPERTORY_ENABLE_SKYNET)
#if defined(REPERTORY_TESTING_NEW)
  case provider_type::passthrough:
    return std::unique_ptr<i_provider>(
        dynamic_cast<i_provider *>(new CPassthroughProvider(config)));
#endif // defined(REPERTORY_TESTING_NEW)
  case provider_type::unknown:
  default:
    throw startup_exception("provider not supported: " + app_config::get_provider_display_name(pt));
  }
}
} // namespace repertory
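create_provider pairs each backend with the communication layer it needs (curl_comm for Sia and Skynet, aws_s3_comm for S3), keeps those comm objects in function-local statics guarded by a mutex, and create_comm throws if asked to build one twice. A hedged usage sketch; the app_config constructor arguments and the callback handling are assumptions for illustration only:

    // Sketch only: build and start a Sia-backed provider.
    app_config config(provider_type::sia);   // assumed constructor; see app_config.hpp
    auto provider = create_provider(provider_type::sia, config);
    provider->start(
        [](auto &&... /* api_path, api_parent, source, directory, dates */) {
          // hypothetical api_item_added_callback body: record the discovered item
        },
        nullptr /* i_open_file_table *; a real drive passes its open file table */);
    // ... serve the mounted drive ...
    provider->stop();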
src/providers/s3/s3_provider.cpp (new file, 562 lines)
@@ -0,0 +1,562 @@
/*
  Copyright <2018-2022> <scott.e.graves@protonmail.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
  associated documentation files (the "Software"), to deal in the Software without restriction,
  including without limitation the rights to use, copy, modify, merge, publish, distribute,
  sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all copies or
  substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
  NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
  OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#if defined(REPERTORY_ENABLE_S3)
#include "providers/s3/s3_provider.hpp"
#include "comm/i_s3_comm.hpp"
#include "app_config.hpp"
#include "db/meta_db.hpp"
#include "drives/i_open_file_table.hpp"
#include "types/repertory.hpp"
#include "types/startup_exception.hpp"
#include "utils/encryption.hpp"
#include "utils/file_utils.hpp"
#include "utils/global_data.hpp"
#include "utils/path_utils.hpp"

namespace repertory {
s3_provider::s3_provider(app_config &config, i_s3_comm &s3_comm)
    : base_provider(config),
      s3_comm_(s3_comm),
      directory_db_(config),
      upload_manager_(
          config, [this](const std::string &api_path) -> bool { return this->is_file(api_path); },
          [this](const upload_manager::upload &upload, json &data, json &error) -> api_error {
            return this->upload_handler(upload, data, error);
          },
          [this](const std::string &api_path, const std::string &source_path, const json &data) {
            return this->upload_completed(api_path, source_path, data);
          }) {}

void s3_provider::build_directories() {
  api_file_list list{};
  s3_comm_.get_file_list([](const std::string & /*api_path*/) -> std::string { return ""; },
                         [](const std::string & /*key*/,
                            const std::string &object_name) -> std::string { return object_name; },
                         list);
  total_item_count_ = list.size();

  std::unordered_map<std::string, std::uint8_t> directories;
  for (const auto &file : list) {
    if (directories.find(file.api_parent) == directories.end()) {
      directories[file.api_parent] = 0u;

      const auto directory_parts = utils::string::split(file.api_parent, '/', false);

      std::string current_directory;
      for (std::size_t i = 0u; i < (directory_parts.size() - 1u); i++) {
        current_directory = current_directory.empty()
                                ? utils::path::create_api_path(directory_parts[0u])
                                : utils::path::combine(current_directory, {directory_parts[i]});
        directories[current_directory] = 0u;
      }
    }
  }
  list.clear();

  for (const auto &kv : directories) {
    if (not directory_db_.is_directory(kv.first)) {
      auto api_path = kv.first;
      restore_api_path(api_path);
      this->notify_directory_added(api_path, utils::path::get_parent_api_path(api_path));
    }
  }
}

api_error s3_provider::create_directory(const std::string &api_path, const api_meta_map &meta) {
#ifdef _WIN32
  auto ret = is_directory(api_path) ? api_error::directory_exists
             : is_file(api_path)    ? api_error::file_exists
                                    : api_error::success;
  if (ret != api_error::success) {
    return ret;
  }
#else
  auto ret = api_error::success;
#endif
  if (utils::path::is_trash_directory(api_path)) {
    ret = api_error::access_denied;
  } else {
    if ((utils::path::get_parent_api_path(api_path) == "/") &&
        s3_comm_.get_s3_config().bucket.empty()) {
      ret = s3_comm_.create_bucket(api_path);
    }

    if (ret == api_error::success) {
      ret = directory_db_.create_directory(format_api_path(api_path));
    }
  }

  if (ret == api_error::success) {
    set_item_meta(api_path, meta);
  }

  return ret;
}

api_error s3_provider::create_file(const std::string &api_path, api_meta_map &meta) {
  if (meta[META_SIZE].empty()) {
    meta[META_SIZE] = "0";
  }

  if (meta[META_ENCRYPTION_TOKEN].empty()) {
    meta[META_ENCRYPTION_TOKEN] = s3_comm_.get_s3_config().encryption_token;
  }

  return base_provider::create_file(api_path, meta);
}

std::string s3_provider::format_api_path(std::string api_path) const {
  return s3_comm_.get_s3_config().bucket.empty()
             ? api_path
             : utils::path::create_api_path(
                   utils::path::combine(s3_comm_.get_s3_config().bucket, {api_path}));
}

std::uint64_t s3_provider::get_directory_item_count(const std::string &api_path) const {
  return s3_comm_.get_directory_item_count(api_path, [this](directory_item &di,
                                                            const bool &format_path) {
           this->update_item_meta(di, format_path);
         }) + directory_db_.get_sub_directory_count(format_api_path(api_path));
}

api_error s3_provider::get_directory_items(const std::string &api_path,
                                           directory_item_list &list) const {
  const auto res = s3_comm_.get_directory_items(
      api_path,
      [this](directory_item &di, const bool &format_path) {
        this->update_item_meta(di, format_path);
      },
      list);

  directory_db_.populate_sub_directories(
      format_api_path(api_path),
      [this](directory_item &di, const bool &format_path) {
        if (format_path) {
          restore_api_path(di.api_path);
          di.api_parent = utils::path::get_parent_api_path(di.api_path);
        }
        this->get_item_meta(di.api_path, di.meta);
        this->oft_->update_directory_item(di);
      },
      list);

  std::sort(list.begin(), list.end(), [](const auto &a, const auto &b) -> bool {
    return (a.directory && not b.directory) ||
           (not(b.directory && not a.directory) && (a.api_path.compare(b.api_path) < 0));
  });

  list.insert(list.begin(), directory_item{
                                "..",
                                "",
                                true,
                            });
  list.insert(list.begin(), directory_item{
                                ".",
                                "",
                                true,
                            });

  return ((res == api_error::success) ? api_error::success : api_error::error);
}

api_error s3_provider::get_file(const std::string &api_path, api_file &file) const {
  auto real_api_path = format_api_path(api_path);
  const auto ret = s3_comm_.get_file(
      api_path,
      [this, &api_path, &real_api_path]() -> std::string {
        auto key = *(utils::string::split(api_path, '/', false).end() - 1u);
        std::string meta_api_path;
        meta_db_.get_api_path_from_key(key, meta_api_path);
        if (meta_api_path.empty()) {
          meta_db_.get_item_meta(real_api_path, META_KEY, key);
        }
        return key;
      },
      [this, &real_api_path](const std::string &key,
                             const std::string &object_name) -> std::string {
        if (key.empty()) {
          return object_name;
        }

        meta_db_.get_api_path_from_key(key, real_api_path);

        auto object_parts = utils::string::split(object_name, '/', false);
        object_parts[object_parts.size() - 1u] =
            *(utils::string::split(real_api_path, '/', false).end() - 1u);
        return utils::string::join(object_parts, '/');
      },
      [this, &real_api_path]() -> std::string {
        std::string encryption_token;
        meta_db_.get_item_meta(real_api_path, META_ENCRYPTION_TOKEN, encryption_token);
        return encryption_token;
      },
      file);
  if (ret == api_error::success) {
    restore_api_path(file.api_path);
    file.api_parent = utils::path::get_parent_api_path(file.api_path);
  } else {
    event_system::instance().raise<file_get_failed>(api_path,
                                                    std::to_string(static_cast<int>(ret)));
  }
  return ret;
}

api_error s3_provider::get_file_list(api_file_list &list) const {
  const auto ret = s3_comm_.get_file_list(
      [this](const std::string &api_path) -> std::string {
        std::string encryption_token;
        meta_db_.get_item_meta(api_path, META_ENCRYPTION_TOKEN, encryption_token);
        return encryption_token;
      },
      [this](const std::string &key, const std::string &object_name) -> std::string {
        std::string real_api_path;
        meta_db_.get_api_path_from_key(key, real_api_path);
        if (real_api_path.empty()) {
          return object_name;
        }

        auto object_parts = utils::string::split(object_name, '/', false);
        object_parts[object_parts.size() - 1u] =
            *(utils::string::split(real_api_path, '/', false).end() - 1u);
        return utils::string::join(object_parts, '/');
      },
      list);

  for (auto &file : list) {
    restore_api_path(file.api_path);
    file.api_parent = utils::path::get_parent_api_path(file.api_path);
  }

  return ret;
}

api_error s3_provider::get_file_size(const std::string &api_path, std::uint64_t &file_size) const {
  api_file file{};
  const auto ret = get_file(api_path, file);
  if (ret == api_error::success) {
    file_size = file.file_size;
  } else {
    event_system::instance().raise<file_get_size_failed>(api_path,
                                                         std::to_string(static_cast<int>(ret)));
  }

  return ret;
}

bool s3_provider::is_directory(const std::string &api_path) const {
  if (api_path == "/") {
    return true;
  }
  return directory_db_.is_directory(format_api_path(api_path));
}

bool s3_provider::is_file(const std::string &api_path) const {
  if (api_path == "/") {
    return false;
  }

  return not directory_db_.is_directory(format_api_path(api_path)) &&
         s3_comm_.exists(api_path, [&]() -> std::string {
           std::string key;
           meta_db_.get_item_meta(format_api_path(api_path), META_KEY, key);
           return key;
         });
}

bool s3_provider::is_online() const { return s3_comm_.is_online(); }

bool s3_provider::is_processing(const std::string &api_path) const {
  return upload_manager_.is_processing(api_path);
}

void s3_provider::notify_directory_added(const std::string &api_path,
                                         const std::string &api_parent) {
  base_provider::notify_directory_added(api_path, api_parent);
  directory_db_.create_directory(format_api_path(api_path), true);
}

api_error s3_provider::notify_file_added(const std::string &api_path,
                                         const std::string & /*api_parent*/,
                                         const std::uint64_t & /*size*/) {
  recur_mutex_lock l(notify_added_mutex_);

  api_file file{};
  auto decrypted = false;
  std::string key;
  std::string file_name;

  auto ret = s3_comm_.get_file(
      api_path,
      [this, &api_path, &decrypted, &file_name, &key]() -> std::string {
        const auto temp_key = *(utils::string::split(api_path, '/', false).end() - 1u);
        file_name = temp_key;
        auto decrypted_file_name = file_name;
        if (utils::encryption::decrypt_file_name(s3_comm_.get_s3_config().encryption_token,
                                                 decrypted_file_name) == api_error::success) {
          decrypted = true;
          key = temp_key;
          file_name = decrypted_file_name;
          return key;
        }

        return "";
      },
      [&file_name](const std::string &, const std::string &object_name) -> std::string {
        auto object_parts = utils::string::split(object_name, '/', false);
        object_parts[object_parts.size() - 1u] = file_name;
        return utils::string::join(object_parts, '/');
      },
      [this, &decrypted]() -> std::string {
        return decrypted ? s3_comm_.get_s3_config().encryption_token : "";
      },
      file);

  restore_api_path(file.api_path);

  file.api_parent = utils::path::get_parent_api_path(file.api_path);
  if (ret == api_error::success) {
    api_item_added_(file.api_path, file.api_parent, file.source_path, false, file.created_date,
                    file.accessed_date, file.modified_date, file.changed_date);

    set_item_meta(file.api_path, META_SIZE, std::to_string(file.file_size));
    set_item_meta(file.api_path, META_KEY, key);
    set_item_meta(file.api_path, META_ENCRYPTION_TOKEN,
                  decrypted ? s3_comm_.get_s3_config().encryption_token : "");

    if (file.file_size) {
      global_data::instance().increment_used_drive_space(file.file_size);
    }
  }

  return ret;
}

api_error s3_provider::read_file_bytes(const std::string &api_path, const std::size_t &size,
                                       const std::uint64_t &offset, std::vector<char> &data,
                                       const bool &stop_requested) {
  const auto res = s3_comm_.read_file_bytes(
      api_path, size, offset, data,
      [this, &api_path]() -> std::string {
        std::string key;
        meta_db_.get_item_meta(format_api_path(api_path), META_KEY, key);
        return key;
      },
      [this, &api_path]() -> std::uint64_t {
        std::string temp;
        meta_db_.get_item_meta(format_api_path(api_path), META_SIZE, temp);
        return utils::string::to_uint64(temp);
      },
      [this, &api_path]() -> std::string {
        std::string encryption_token;
        meta_db_.get_item_meta(format_api_path(api_path), META_ENCRYPTION_TOKEN, encryption_token);
        return encryption_token;
      },
      stop_requested);
  return ((res == api_error::success) ? api_error::success : api_error::download_failed);
}

api_error s3_provider::remove_directory(const std::string &api_path) {
  auto ret = directory_db_.remove_directory(format_api_path(api_path));
  if (ret == api_error::success) {
    if ((utils::path::get_parent_api_path(api_path) == "/") &&
        s3_comm_.get_s3_config().bucket.empty()) {
      ret = s3_comm_.remove_bucket(api_path);
    }
  }

  if (ret == api_error::success) {
    remove_item_meta(api_path);
    event_system::instance().raise<directory_removed>(api_path);
  } else {
    event_system::instance().raise<directory_remove_failed>(api_path,
                                                            std::to_string(static_cast<int>(ret)));
  }

  return ret;
}

api_error s3_provider::remove_file(const std::string &api_path) {
  upload_manager_.remove_upload(api_path);
  const auto res = s3_comm_.remove_file(api_path, [&]() -> std::string {
    std::string key;
    meta_db_.get_item_meta(format_api_path(api_path), META_KEY, key);
    return key;
  });

  if (res == api_error::success) {
    remove_item_meta(api_path);
    event_system::instance().raise<file_removed>(api_path);
    return api_error::success;
  }

  event_system::instance().raise<file_remove_failed>(api_path,
                                                     std::to_string(static_cast<int>(res)));
  return api_error::error;
}

api_error s3_provider::rename_file(const std::string & /*from_api_path*/,
                                   const std::string & /*to_api_path*/) {
  // TODO Pending encrypted file name support
  return api_error::not_implemented;
  /* std::string curSource; */
  /* auto ret = get_item_meta(fromApiPath, META_SOURCE, curSource); */
  /* if (ret == api_error::success) { */
  /*   ret = s3_comm_.RenameFile(fromApiPath, toApiPath); */
  /*   if (ret == api_error::success) { */
  /*     metaDb_.RenameItemMeta(curSource, FormatApiFilePath(fromApiPath), */
  /*                            FormatApiFilePath(toApiPath)); */
  /*   } else { */
  /*     event_system::instance().raise<FileRenameFailed>(fromApiPath, toApiPath, */
  /*                                                      std::to_string(static_cast<int>(ret))); */
  /*   } */
  /* } */
  /* return ret; */
}

std::string &s3_provider::restore_api_path(std::string &api_path) const {
  if (api_path != "/") {
    if (not s3_comm_.get_s3_config().bucket.empty()) {
      api_path = utils::path::create_api_path(
          api_path.substr(s3_comm_.get_s3_config().bucket.length() + 1u));
    }
  }
  return api_path;
}

bool s3_provider::start(api_item_added_callback api_item_added, i_open_file_table *oft) {
  const auto unmount_requested = base_provider::start(api_item_added, oft);
  if (not unmount_requested && is_online()) {
    build_directories();
    upload_manager_.start();
  }

  api_file_list list;
  if (get_file_list(list) != api_error::success) {
    throw startup_exception("failed to determine used space");
  }

  const auto total_size =
      std::accumulate(list.begin(), list.end(), std::uint64_t(0),
                      [](std::uint64_t t, const api_file &file) { return t + file.file_size; });
  global_data::instance().initialize_used_drive_space(total_size);

  return unmount_requested;
}

void s3_provider::stop() { upload_manager_.stop(); }

void s3_provider::update_item_meta(directory_item &di, const bool &format_path) const {
  if (format_path) {
    restore_api_path(di.api_path);
    di.api_parent = utils::path::get_parent_api_path(di.api_path);
  }

  if (not di.directory && not di.resolved) {
    std::string real_api_path{};
    const auto get_api_path = [&]() -> api_error {
      if (meta_db_.get_api_path_from_key(
              *(utils::string::split(di.api_path, '/', false).end() - 1u), real_api_path) ==
          api_error::success) {
        di.api_path = real_api_path;
        if (format_path) {
          restore_api_path(di.api_path);
          di.api_parent = utils::path::get_parent_api_path(di.api_path);
        }
        return api_error::success;
      }
      return api_error::item_not_found;
    };

    if (get_api_path() != api_error::success) {
      this->get_item_meta(di.api_path, di.meta);
      get_api_path();
    }

    di.resolved = true;
  }

  if (di.meta.empty()) {
    this->get_item_meta(di.api_path, di.meta);
  }

  if (not di.directory) {
    if (di.meta[META_SIZE].empty()) {
      const_cast<s3_provider *>(this)->set_item_meta(di.api_path, META_SIZE,
                                                     std::to_string(di.size));
    } else {
      di.size = utils::string::to_uint64(di.meta[META_SIZE]);
    }
    this->oft_->update_directory_item(di);
  }
}

api_error s3_provider::upload_file(const std::string &api_path, const std::string &source_path,
                                   const std::string &encryption_token) {
  std::uint64_t file_size = 0u;
  utils::file::get_file_size(source_path, file_size);

  auto ret = set_source_path(api_path, source_path);
  if (ret == api_error::success) {
    if (((ret = set_item_meta(api_path, META_SIZE, std::to_string(file_size))) ==
         api_error::success) &&
        ((ret = set_item_meta(api_path, META_ENCRYPTION_TOKEN, encryption_token)) ==
         api_error::success)) {
      if (file_size == 0) {
        ret = s3_comm_.upload_file(
            api_path, source_path, encryption_token,
            [this, &api_path]() -> std::string {
              std::string key;
              meta_db_.get_item_meta(format_api_path(api_path), META_KEY, key);
              return key;
            },
            [this, &api_path](const std::string &key) -> api_error {
              return set_item_meta(api_path, META_KEY, key);
            },
            false);
      } else {
        ret = upload_manager_.queue_upload(api_path, source_path, encryption_token);
      }
    }
  }

  return ret;
}

api_error s3_provider::upload_handler(const upload_manager::upload &upload, json &, json &) {
  event_system::instance().raise<file_upload_begin>(upload.api_path, upload.source_path);
  auto ret = api_error::upload_failed;
  if (not upload.cancel) {
    ret = (s3_comm_.upload_file(
               upload.api_path, upload.source_path, upload.encryption_token,
               [this, &upload]() -> std::string {
                 std::string key;
                 meta_db_.get_item_meta(format_api_path(upload.api_path), META_KEY, key);
                 return key;
               },
               [this, &upload](const std::string &key) -> api_error {
                 return set_item_meta(upload.api_path, META_KEY, key);
               },
               upload.cancel) == api_error::success)
              ? api_error::success
              : api_error::upload_failed;
  }
  return ret;
}
} // namespace repertory
#endif // REPERTORY_ENABLE_S3
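When an S3 bucket is configured, the provider stores metadata under bucket-prefixed keys (format_api_path) and strips the prefix again before paths are handed back to the drive (restore_api_path). A standalone sketch of that round trip with a hypothetical bucket name; it mirrors the logic above without the repository's path utilities:

    #include <cassert>
    #include <string>

    // Hypothetical stand-ins for s3_provider::format_api_path / restore_api_path.
    static std::string format_api_path(const std::string &api_path, const std::string &bucket) {
      return bucket.empty() ? api_path : "/" + bucket + api_path;
    }

    static std::string restore_api_path(std::string api_path, const std::string &bucket) {
      if ((api_path != "/") && !bucket.empty()) {
        api_path = api_path.substr(bucket.length() + 1u); // drop "/<bucket>"
      }
      return api_path;
    }

    int main() {
      const std::string bucket = "repertory";
      const auto stored = format_api_path("/photos/cat.jpg", bucket); // "/repertory/photos/cat.jpg"
      assert(restore_api_path(stored, bucket) == "/photos/cat.jpg");
      return 0;
    }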
src/providers/sia/sia_provider.cpp (new file, 700 lines)
@@ -0,0 +1,700 @@
|
||||
/*
|
||||
Copyright <2018-2022> <scott.e.graves@protonmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
|
||||
associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or
|
||||
substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
|
||||
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
|
||||
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include "providers/sia/sia_provider.hpp"
|
||||
#include "comm/i_comm.hpp"
|
||||
#include "app_config.hpp"
|
||||
#include "db/meta_db.hpp"
|
||||
#include "drives/i_open_file_table.hpp"
|
||||
#include "events/events.hpp"
|
||||
#include "types/repertory.hpp"
|
||||
#include "types/startup_exception.hpp"
|
||||
#include "utils/file_utils.hpp"
|
||||
#include "utils/global_data.hpp"
|
||||
#include "utils/path_utils.hpp"
|
||||
#include "utils/polling.hpp"
|
||||
|
||||
namespace repertory {
|
||||
sia_provider::sia_provider(app_config &config, i_comm &comm) : base_provider(config), comm_(comm) {}
|
||||
|
||||
void sia_provider::calculate_total_drive_space() {
|
||||
std::uint64_t ret = 0u;
|
||||
json result, error;
|
||||
auto storage_cost = get_config().get_storage_byte_month();
|
||||
const auto success = (get_comm().get("/renter/prices", result, error) == api_error::success);
|
||||
if (success || (storage_cost > 0)) {
|
||||
if (success) {
|
||||
storage_cost = result["storageterabytemonth"].get<std::string>();
|
||||
get_config().set_storage_byte_month(storage_cost);
|
||||
}
|
||||
if (get_comm().get("/renter", result, error) == api_error::success) {
|
||||
const auto funds = utils::hastings_string_to_api_currency(
|
||||
result["settings"]["allowance"]["funds"].get<std::string>());
|
||||
if ((storage_cost > 0) && (funds > 0)) {
|
||||
const auto funds_in_hastings = utils::api_currency_to_hastings(funds);
|
||||
ttmath::Parser<api_currency> parser;
|
||||
parser.Parse(funds_in_hastings.ToString() + " / " + storage_cost.ToString() + " * 1e12");
|
||||
ret = not parser.stack.empty() ? parser.stack[0u].value.ToUInt() : 0u;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
total_drive_space_ = ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::calculate_used_drive_space(std::uint64_t &used_space) {
|
||||
api_file_list list;
|
||||
const auto ret = get_file_list(list);
|
||||
used_space = std::accumulate(
|
||||
list.begin(), list.end(), std::uint64_t(0), [this](std::uint64_t a, const auto &v) {
|
||||
if (not meta_db_.get_item_meta_exists(v.api_path)) {
|
||||
this->notify_file_added(v.api_path, utils::path::get_parent_api_path(v.api_path), 0);
|
||||
}
|
||||
return a + v.file_size;
|
||||
});
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::check_file_exists(const std::string &api_path) const {
|
||||
json data, error;
|
||||
auto ret = get_comm().get("/renter/file" + api_path, data, error);
|
||||
if ((ret != api_error::success) && check_not_found(error)) {
|
||||
ret = api_error::item_not_found;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool sia_provider::check_directory_found(const json &error) const {
|
||||
return ((error.find("message") != error.end()) &&
|
||||
(utils::string::contains(error["message"].get<std::string>(),
|
||||
"a siadir already exists at that location")));
|
||||
}
|
||||
|
||||
bool sia_provider::check_not_found(const json &error) const {
|
||||
return (
|
||||
(error.find("message") != error.end()) &&
|
||||
(utils::string::contains(error["message"].get<std::string>(), "no file known") ||
|
||||
utils::string::contains(error["message"].get<std::string>(), "path does not exist") ||
|
||||
utils::string::contains(error["message"].get<std::string>(), "no such file or directory") ||
|
||||
utils::string::contains(error["message"].get<std::string>(), "cannot find the file") ||
|
||||
utils::string::contains(error["message"].get<std::string>(),
|
||||
"no siadir known with that path") ||
|
||||
utils::string::contains(error["message"].get<std::string>(), "cannot find the path")));
|
||||
}
|
||||
|
||||
void sia_provider::cleanup() {
|
||||
remove_deleted_files();
|
||||
remove_unknown_source_files();
|
||||
remove_expired_orphaned_files();
|
||||
}
|
||||
|
||||
void sia_provider::create_api_file(const json &json_file, const std::string &path_name,
|
||||
api_file &file) const {
|
||||
file.api_path = utils::path::create_api_path(json_file[path_name].get<std::string>());
|
||||
file.api_parent = utils::path::get_parent_api_path(file.api_path);
|
||||
file.file_size = json_file["filesize"].get<std::uint64_t>();
|
||||
file.recoverable = json_file["recoverable"].get<bool>();
|
||||
file.redundancy = json_file["redundancy"].get<double>();
|
||||
file.source_path = json_file["localpath"].get<std::string>();
|
||||
|
||||
set_api_file_dates(json_file, file);
|
||||
}
|
||||
|
||||
api_error sia_provider::create_directory(const std::string &api_path, const api_meta_map &meta) {
|
||||
#ifdef _WIN32
|
||||
auto ret = is_directory(api_path) ? api_error::directory_exists
|
||||
: is_file(api_path) ? api_error::file_exists
|
||||
: api_error::success;
|
||||
#else
|
||||
auto ret = api_error::success;
|
||||
#endif
|
||||
if (ret == api_error::success) {
|
||||
json result, error;
|
||||
ret = get_comm().post("/renter/dir" + api_path, {{"action", "create"}}, result, error);
|
||||
if (ret == api_error::success) {
|
||||
ret = set_item_meta(api_path, meta);
|
||||
} else if (check_directory_found(error)) {
|
||||
ret = api_error::directory_exists;
|
||||
} else {
|
||||
event_system::instance().raise<repertory_exception>(__FUNCTION__, error.dump(2));
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void sia_provider::drive_space_thread() {
|
||||
while (not stop_requested_) {
|
||||
unique_mutex_lock l(start_stop_mutex_);
|
||||
if (not stop_requested_) {
|
||||
start_stop_notify_.wait_for(l, 5s);
|
||||
}
|
||||
l.unlock();
|
||||
|
||||
if (not stop_requested_) {
|
||||
calculate_total_drive_space();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
api_error sia_provider::get_directory(const std::string &api_path, json &result,
|
||||
json &error) const {
|
||||
return get_comm().get("/renter/dir" + api_path, result, error);
|
||||
}
|
||||
|
||||
std::uint64_t sia_provider::get_directory_item_count(const std::string &api_path) const {
|
||||
std::uint64_t ret = 0u;
|
||||
|
||||
json result, error;
|
||||
const auto res = get_directory(api_path, result, error);
|
||||
if (res == api_error::success) {
|
||||
const auto directory_count = result["directories"].size() - 1;
|
||||
const auto file_count = result["files"].size();
|
||||
ret = file_count + directory_count;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::get_directory_items(const std::string &api_path,
|
||||
directory_item_list &list) const {
|
||||
list.clear();
|
||||
|
||||
json result, error;
|
||||
auto ret = get_directory(api_path, result, error);
|
||||
if (ret == api_error::success) {
|
||||
const auto create_directory_item = [this](const std::string &item_path, const bool &directory,
|
||||
const json &item, directory_item &di) {
|
||||
di.api_path = item_path;
|
||||
di.api_parent = utils::path::get_parent_api_path(di.api_path);
|
||||
di.directory = directory;
|
||||
if (directory) {
|
||||
const auto directory_count = item["numsubdirs"].get<std::uint64_t>();
|
||||
const auto file_count = item["numfiles"].get<std::uint64_t>();
|
||||
di.size = (directory_count + file_count);
|
||||
} else {
|
||||
di.size = item["filesize"].get<std::uint64_t>();
|
||||
}
|
||||
this->get_item_meta(di.api_path, di.meta);
|
||||
this->oft_->update_directory_item(di);
|
||||
};
|
||||
|
||||
const auto process_item = [&](const bool &directory, const json &item) {
|
||||
const auto item_path = utils::path::create_api_path(
|
||||
item[app_config::get_provider_path_name(get_config().get_provider_type())]
|
||||
.get<std::string>());
|
||||
const auto is_root_path = (item_path == api_path);
|
||||
|
||||
directory_item di{};
|
||||
create_directory_item(item_path, directory, item, di);
|
||||
if (is_root_path) {
|
||||
di.api_path = ".";
|
||||
}
|
||||
list.emplace_back(di);
|
||||
if (is_root_path) {
|
||||
if (item_path == "/") {
|
||||
di.api_path = "..";
|
||||
list.emplace_back(di);
|
||||
} else {
|
||||
directory_item dot_dot_di{};
|
||||
json result, error;
|
||||
const auto res = get_directory(di.api_parent, result, error);
|
||||
if (res == api_error::success) {
|
||||
create_directory_item(di.api_parent, true, result["directories"][0u], dot_dot_di);
|
||||
}
|
||||
dot_dot_di.api_path = "..";
|
||||
list.emplace_back(dot_dot_di);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
for (const auto &dir : result["directories"]) {
|
||||
process_item(true, dir);
|
||||
}
|
||||
|
||||
for (const auto &file : result["files"]) {
|
||||
process_item(false, file);
|
||||
}
|
||||
|
||||
ret = api_error::success;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::get_file(const std::string &api_path, api_file &file) const {
|
||||
json data, error;
|
||||
const auto ret = get_comm().get("/renter/file" + api_path, data, error);
|
||||
if (ret == api_error::success) {
|
||||
const std::string path_name =
|
||||
app_config::get_provider_path_name(get_config().get_provider_type());
|
||||
create_api_file(data["file"], path_name, file);
|
||||
} else {
|
||||
event_system::instance().raise<file_get_failed>(api_path, error.dump(2));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::get_file_list(api_file_list &list) const {
|
||||
auto ret = api_error::success;
|
||||
json data;
|
||||
json error;
|
||||
if ((ret = get_comm().get("/renter/files", data, error)) == api_error::success) {
|
||||
for (const auto &file_data : data["files"]) {
|
||||
const std::string path_name =
|
||||
app_config::get_provider_path_name(get_config().get_provider_type());
|
||||
api_file file{};
|
||||
create_api_file(file_data, path_name, file);
|
||||
|
||||
list.emplace_back(file);
|
||||
}
|
||||
} else {
|
||||
event_system::instance().raise<file_get_api_list_failed>(error.dump(2));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::get_file_size(const std::string &api_path, std::uint64_t &file_size) const {
|
||||
file_size = 0u;
|
||||
|
||||
json data, error;
|
||||
const auto ret = get_comm().get("/renter/file" + api_path, data, error);
|
||||
if (ret == api_error::success) {
|
||||
file_size = data["file"]["filesize"].get<std::uint64_t>();
|
||||
} else {
|
||||
event_system::instance().raise<file_get_size_failed>(api_path, error.dump(2));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::get_filesystem_item(const std::string &api_path, const bool &directory,
|
||||
filesystem_item &fsi) const {
|
||||
auto ret = api_error::error;
|
||||
if (directory) {
|
||||
json result, error;
|
||||
ret = get_directory(api_path, result, error);
|
||||
if (ret != api_error::success) {
|
||||
if (check_not_found(error)) {
|
||||
ret = api_error::item_not_found;
|
||||
} else {
|
||||
event_system::instance().raise<filesystem_item_get_failed>(api_path, error.dump(2));
|
||||
}
|
||||
}
|
||||
|
||||
update_filesystem_item(true, ret, api_path, fsi);
|
||||
} else {
|
||||
api_file file{};
|
||||
ret = get_filesystem_item_and_file(api_path, file, fsi);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
api_error sia_provider::get_filesystem_item_and_file(const std::string &api_path, api_file &file,
|
||||
filesystem_item &fsi) const {
|
||||
auto ret = get_item_meta(api_path, META_SOURCE, fsi.source_path);
|
||||
if (ret == api_error::success) {
|
||||
json data, error;
|
||||
ret = get_comm().get("/renter/file" + api_path, data, error);
|
||||
if (ret == api_error::success) {
|
||||
create_api_file(data["file"],
|
||||
app_config::get_provider_path_name(get_config().get_provider_type()), file);
|
||||
fsi.size = file.file_size;
|
||||
} else if (check_not_found(error)) {
|
||||
ret = api_error::item_not_found;
|
||||
} else {
|
||||
event_system::instance().raise<filesystem_item_get_failed>(api_path, error.dump(2));
|
||||
}
|
||||
}
|
||||
|
||||
update_filesystem_item(false, ret, api_path, fsi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::uint64_t sia_provider::get_total_item_count() const {
|
||||
std::uint64_t ret = 0u;
|
||||
|
||||
json result, error;
|
||||
const auto res = get_directory("/", result, error);
|
||||
if (res == api_error::success) {
|
||||
ret = result["directories"][0u]["aggregatenumfiles"].get<std::uint64_t>();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool sia_provider::is_directory(const std::string &api_path) const {
|
||||
auto ret = false;
|
||||
|
||||
json result, error;
|
||||
const auto res = (api_path == "/") ? api_error::success : get_directory(api_path, result, error);
|
||||
if (res == api_error::success) {
|
||||
ret = true;
|
||||
} else if (not check_not_found(error)) {
|
||||
event_system::instance().raise<repertory_exception>(__FUNCTION__, error.dump(2));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool sia_provider::is_file(const std::string &api_path) const {
|
||||
return (api_path != "/") && (check_file_exists(api_path) == api_error::success);
|
||||
}
|
||||
|
||||
bool sia_provider::is_online() const {
|
||||
// TODO Expand here for true online detection (i.e. wallet unlocked)
|
||||
// TODO Return error string for display
|
||||
json data, error;
|
||||
return (get_comm().get("/wallet", data, error) == api_error::success);
|
||||
}
|
||||
|
||||
api_error sia_provider::notify_file_added(const std::string &api_path,
                                          const std::string &api_parent,
                                          const std::uint64_t &size) {
  recur_mutex_lock l(notify_added_mutex_);

  json data;
  json error;
  auto ret = get_comm().get("/renter/file" + api_path, data, error);
  if (ret == api_error::success) {
    api_file file{};
    create_api_file(data["file"],
                    app_config::get_provider_path_name(get_config().get_provider_type()), file);

    get_api_item_added()(api_path, api_parent, file.source_path, false, file.created_date,
                         file.accessed_date, file.modified_date, file.changed_date);

    if (size) {
      global_data::instance().increment_used_drive_space(size);
    }
  } else {
    event_system::instance().raise<repertory_exception>(__FUNCTION__, error.dump(2));
  }

  return ret;
}

bool sia_provider::processed_orphaned_file(const std::string &source_path,
                                           const std::string &api_path) const {
  auto ret = false;
  if (not oft_->contains_restore(api_path)) {
    const auto orphaned_directory =
        utils::path::combine(get_config().get_data_directory(), {"orphaned"});

    event_system::instance().raise<orphaned_file_detected>(source_path);
    const auto orphaned_file = utils::path::combine(
        orphaned_directory,
        {utils::path::strip_to_file_name(api_path.empty() ? source_path : api_path)});

    if (utils::file::reset_modified_time(source_path) &&
        utils::file::move_file(source_path, orphaned_file)) {
      event_system::instance().raise<orphaned_file_processed>(source_path, orphaned_file);
      ret = true;
    } else {
      event_system::instance().raise<orphaned_file_processing_failed>(
          source_path, orphaned_file, std::to_string(utils::get_last_error_code()));
    }
  }

  return ret;
}

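// Example of the orphaned-file placement above (paths are hypothetical): with a data
// directory of "/home/user/.repertory/sia", a cache file backing api path
// "/docs/report.txt" is moved to "/home/user/.repertory/sia/orphaned/report.txt";
// when no api path is known, the source file's own name is used instead.
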
api_error sia_provider::read_file_bytes(const std::string &api_path, const std::size_t &size,
                                        const std::uint64_t &offset, std::vector<char> &buffer,
                                        const bool &stop_requested) {
  auto ret = api_error::download_failed;
  const auto retry_count = get_config().get_retry_read_count();
  for (std::uint16_t i = 0u; not stop_requested && (ret != api_error::success) && (i < retry_count);
       i++) {
    json error;
    ret = get_comm().get_raw("/renter/download" + api_path,
                             {{"httpresp", "true"},
                              {"async", "false"},
                              {"offset", utils::string::from_int64(offset)},
                              {"length", utils::string::from_int64(size)}},
                             buffer, error, stop_requested);
    if (ret != api_error::success) {
      event_system::instance().raise<file_read_bytes_failed>(api_path, error.dump(2), i + 1u);
      if (not stop_requested && ((i + 1) < retry_count)) {
        std::this_thread::sleep_for(1s);
      }
    }
  }

  return ret;
}

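// The read loop above retries failed range downloads up to get_retry_read_count() times,
// sleeping one second between attempts. A minimal sketch of that pattern as a standalone
// helper is shown below for illustration only; it is not referenced by the provider and
// the name retry_with_delay is hypothetical.
namespace {
template <typename operation_t>
api_error retry_with_delay(const operation_t &operation, const std::uint16_t &retry_count,
                           const bool &stop_requested) {
  auto ret = api_error::download_failed;
  for (std::uint16_t i = 0u; not stop_requested && (ret != api_error::success) && (i < retry_count);
       i++) {
    // Attempt the operation; pause briefly before the next attempt on failure.
    ret = operation();
    if ((ret != api_error::success) && not stop_requested && ((i + 1) < retry_count)) {
      std::this_thread::sleep_for(1s);
    }
  }
  return ret;
}
} // namespace
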
void sia_provider::remove_deleted_files() {
  std::vector<std::string> removed_files{};

  api_file_list list{};
  if (get_file_list(list) == api_error::success) {
    if (not list.empty()) {
      auto iterator = meta_db_.create_iterator(false);
      for (iterator->SeekToFirst(); not stop_requested_ && iterator->Valid(); iterator->Next()) {
        const auto api_path = iterator->key().ToString();
        if (api_path.empty()) {
          meta_db_.remove_item_meta(api_path);
        } else {
          auto it = std::find_if(list.begin(), list.end(), [&api_path](const auto &file) -> bool {
            return file.api_path == api_path;
          });
          if (it == list.end()) {
            removed_files.emplace_back(api_path);
          }
        }
      }
    }
  }

  while (not stop_requested_ && not removed_files.empty()) {
    const auto api_path = removed_files.back();
    removed_files.pop_back();

    std::string source_path;
    if (not is_directory(api_path) && (check_file_exists(api_path) == api_error::item_not_found) &&
        (meta_db_.get_item_meta(api_path, META_SOURCE, source_path) == api_error::success)) {
      if (not source_path.empty()) {
        oft_->perform_locked_operation(
            [this, &api_path, &source_path](i_open_file_table &, i_provider &) -> bool {
              if (oft_->has_no_open_file_handles()) {
                std::uint64_t used_space = 0u;
                if (calculate_used_drive_space(used_space) == api_error::success) {
                  meta_db_.remove_item_meta(api_path);
                  event_system::instance().raise<file_removed_externally>(api_path, source_path);

                  std::uint64_t file_size = 0u;
                  if (utils::file::get_file_size(source_path, file_size) &&
                      processed_orphaned_file(source_path, api_path)) {
                    global_data::instance().update_used_space(file_size, 0u, true);
                  }

                  global_data::instance().initialize_used_drive_space(used_space);
                }
              }

              return true;
            });
      }
    }
  }
}

api_error sia_provider::remove_directory(const std::string &api_path) {
  auto ret = api_error::directory_not_empty;
  if (get_directory_item_count(api_path) == 0u) {
    json result, error;
    ret = get_comm().post("/renter/dir" + api_path, {{"action", "delete"}}, result, error);
    if (ret == api_error::success) {
      meta_db_.remove_item_meta(api_path);
      event_system::instance().raise<directory_removed>(api_path);
      ret = api_error::success;
    } else if (check_not_found(error)) {
      meta_db_.remove_item_meta(api_path);
      ret = api_error::directory_not_found;
    } else {
      event_system::instance().raise<directory_remove_failed>(api_path, error.dump(2));
    }
  }

  return ret;
}

void sia_provider::remove_expired_orphaned_files() {
  const auto orphaned_directory =
      utils::path::combine(get_config().get_data_directory(), {"orphaned"});
  const auto files = utils::file::get_directory_files(orphaned_directory, true);
  for (const auto &file : files) {
    if (utils::file::is_modified_date_older_than(
            file, std::chrono::hours(get_config().get_orphaned_file_retention_days() * 24))) {
      if (utils::file::delete_file(file)) {
        event_system::instance().raise<orphaned_file_deleted>(file);
      }
    }
    if (stop_requested_) {
      break;
    }
  }
}

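// Retention example for remove_expired_orphaned_files() above: with an orphaned file
// retention setting of 15 days (an example value, not necessarily the default), any file
// in the "orphaned" directory whose modified time is older than
// std::chrono::hours(15 * 24) == 360 hours is deleted on the next pass.
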
api_error sia_provider::remove_file(const std::string &api_path) {
  json result, error;
  auto ret = get_comm().post("/renter/delete" + api_path, {{"root", "false"}}, result, error);
  auto not_found = false;
  if ((ret == api_error::success) || (not_found = check_not_found(error))) {
    if (not not_found) {
      event_system::instance().raise<file_removed>(api_path);
    }
    ret = not_found ? api_error::item_not_found : api_error::success;

    meta_db_.remove_item_meta(api_path);
  } else {
    event_system::instance().raise<file_remove_failed>(api_path, error.dump(2));
  }

  return ret;
}

void sia_provider::remove_unknown_source_files() {
  auto files = utils::file::get_directory_files(get_config().get_cache_directory(), true);
  while (not stop_requested_ && not files.empty()) {
    const auto file = files.front();
    files.pop_front();

    if (not meta_db_.get_source_path_exists(file)) {
      processed_orphaned_file(file);
    }
  }
}

api_error sia_provider::rename_file(const std::string &from_api_path,
                                    const std::string &to_api_path) {
  std::string current_source;
  auto ret = get_item_meta(from_api_path, META_SOURCE, current_source);
  if (ret == api_error::success) {
    json result, error;
    const auto property_name =
        "new" + app_config::get_provider_path_name(get_config().get_provider_type());
    const auto dest_api_path = to_api_path.substr(1);
    ret = get_comm().post("/renter/rename" + from_api_path, {{property_name, dest_api_path}},
                          result, error);
    if (ret == api_error::success) {
      meta_db_.rename_item_meta(current_source, from_api_path, to_api_path);
    } else if (check_not_found(error)) {
      ret = api_error::item_not_found;
    } else {
      event_system::instance().raise<file_rename_failed>(from_api_path, to_api_path, error.dump(2));
    }
  }

  return ret;
}

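// Example of the rename request built above (hedged; the exact name depends on what
// app_config::get_provider_path_name() returns): for the Sia provider the property is
// presumably "new" + "siapath" == "newsiapath", and the leading "/" is stripped from the
// destination, so renaming "/docs/a.txt" to "/docs/b.txt" posts {"newsiapath": "docs/b.txt"}
// to "/renter/rename/docs/a.txt".
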
void sia_provider::set_api_file_dates(const json &file_data, api_file &file) const {
  file.accessed_date = utils::convert_api_date(file_data["accesstime"].get<std::string>());
  file.changed_date = utils::convert_api_date(file_data["changetime"].get<std::string>());
  file.created_date = utils::convert_api_date(file_data["createtime"].get<std::string>());
  file.modified_date = utils::convert_api_date(file_data["modtime"].get<std::string>());
}

bool sia_provider::start(api_item_added_callback api_item_added, i_open_file_table *oft) {
  const auto unmount_requested = base_provider::start(api_item_added, oft);
  if (not unmount_requested && is_online()) {
    {
      json data, error;
      const auto res = comm_.get("/daemon/version", data, error);
      if (res == api_error::success) {
        if (utils::compare_version_strings(
                data["version"].get<std::string>(),
                app_config::get_provider_minimum_version(get_config().get_provider_type())) < 0) {
          throw startup_exception("incompatible daemon version: " +
                                  data["version"].get<std::string>());
        }
      } else {
        throw startup_exception("failed to get version from daemon");
      }
    }

    mutex_lock l(start_stop_mutex_);
    if (not drive_space_thread_) {
      stop_requested_ = false;

      calculate_total_drive_space();

      std::uint64_t used_space = 0u;
      if (calculate_used_drive_space(used_space) != api_error::success) {
        throw startup_exception("failed to determine used space");
      }
      global_data::instance().initialize_used_drive_space(used_space);

      drive_space_thread_ = std::make_unique<std::thread>([this] {
        cleanup();
        if (not stop_requested_) {
          polling::instance().set_callback({"check_deleted", true, [this]() {
                                              remove_deleted_files();
                                              remove_expired_orphaned_files();
                                            }});
          drive_space_thread();
          polling::instance().remove_callback("check_deleted");
        }
      });
    }
  } else {
    throw startup_exception("failed to connect to api");
  }
  return unmount_requested;
}

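// Startup order of the background thread created in start() above: cleanup() runs first;
// if shutdown has not been requested, a "check_deleted" polling callback is registered
// (running remove_deleted_files() and remove_expired_orphaned_files()), the
// drive_space_thread() loop runs until stop() is called, and the polling callback is
// removed on exit.
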
void sia_provider::stop() {
  unique_mutex_lock l(start_stop_mutex_);
  if (drive_space_thread_) {
    event_system::instance().raise<service_shutdown>("sia_provider");
    stop_requested_ = true;

    start_stop_notify_.notify_all();
    l.unlock();

    drive_space_thread_->join();
    drive_space_thread_.reset();
  }
}

api_error sia_provider::upload_file(const std::string &api_path, const std::string &source_path,
                                    const std::string &) {
  event_system::instance().raise<file_upload_begin>(api_path, source_path);

  auto ret = set_source_path(api_path, source_path);
  if (ret == api_error::success) {
    std::uint64_t file_size = 0u;
    if (utils::file::get_file_size(source_path, file_size)) {
      json result, error;
      ret = (get_comm().post("/renter/upload" + api_path,
                             {{"source", &source_path[0]}, {"force", "true"}}, result,
                             error) == api_error::success)
                ? api_error::success
                : api_error::upload_failed;
      if (ret != api_error::success) {
        if (check_not_found(error)) {
          ret = (get_comm().post("/renter/upload" + api_path, {{"source", &source_path[0]}}, result,
                                 error) == api_error::success)
                    ? api_error::success
                    : api_error::upload_failed;
        }
        if (ret != api_error::success) {
          event_system::instance().raise<file_upload_failed>(api_path, source_path, error.dump(2));
        }
      }
    } else {
      ret = api_error::os_error;
      event_system::instance().raise<file_upload_failed>(api_path, source_path,
                                                         "Failed to get source file size");
    }
  } else {
    event_system::instance().raise<file_upload_failed>(api_path, source_path,
                                                       "Failed to set source path");
  }

  event_system::instance().raise<file_upload_end>(api_path, source_path, ret);
  return ret;
}

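// Upload flow in upload_file() above: the first POST to /renter/upload passes
// "force" == "true" so an existing siapath is overwritten; if the daemon instead reports
// that the path does not exist, the request is retried once without the force flag before
// the upload is declared failed.
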
} // namespace repertory

649
src/providers/skynet/skynet_provider.cpp
Normal file
@@ -0,0 +1,649 @@
/*
Copyright <2018-2022> <scott.e.graves@protonmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#if defined(REPERTORY_ENABLE_SKYNET)

#include "providers/skynet/skynet_provider.hpp"
#include "comm/i_comm.hpp"
#include "app_config.hpp"
#include "drives/i_open_file_table.hpp"
#include "events/events.hpp"
#include "types/repertory.hpp"
#include "types/skynet.hpp"
#include "types/startup_exception.hpp"
#include "utils/encryption.hpp"
#include "utils/encrypting_reader.hpp"
#include "utils/file_utils.hpp"
#include "utils/global_data.hpp"
#include "utils/path_utils.hpp"

namespace repertory {
skynet_provider::skynet_provider(app_config &config, i_comm &comm)
    : base_provider(config),
      comm_(comm),
      directory_db_(config),
      upload_manager_(
          config, [this](const std::string &api_path) -> bool { return this->is_file(api_path); },
          [this](const upload_manager::upload &upload, json &data, json &error) -> api_error {
            return this->upload_handler(upload, data, error);
          },
          [this](const std::string &api_path, const std::string &source_path, const json &data) {
            return this->upload_completed(api_path, source_path, data);
          }) {
  next_download_index_ = 0u;
  next_upload_index_ = 0u;
  update_portal_list();
  E_SUBSCRIBE_EXACT(skynet_portal_list_changed,
                    [this](const skynet_portal_list_changed &) { this->update_portal_list(); });

  // Remove legacy encrypted files
  api_file_list list;
  get_file_list(list);
  for (const auto &file : list) {
    std::string token;
    get_item_meta(file.api_path, "token", token);
    if (not token.empty()) {
      remove_file(file.api_path);
    }
  }
}

api_error skynet_provider::create_directory(const std::string &api_path, const api_meta_map &meta) {
  auto ret = api_error::success;
  if (utils::path::is_trash_directory(api_path)) {
    ret = api_error::access_denied;
  } else {
#ifdef _WIN32
    ret = is_directory(api_path) ? api_error::directory_exists
          : is_file(api_path)    ? api_error::file_exists
                                 : api_error::success;
    if (ret != api_error::success) {
      return ret;
    }
#endif
    if ((ret = directory_db_.create_directory(api_path)) != api_error::success) {
      return ret;
    }

    set_item_meta(api_path, meta);
  }

  return ret;
}

api_error skynet_provider::create_file(const std::string &api_path, api_meta_map &meta) {
  if (meta[META_SIZE].empty()) {
    meta[META_SIZE] = "0";
  }

  // When META_ID is present, an external import is occurring, so the configured
  // encryption token must not be applied; the import supplies its own token.
  if (meta[META_ID].empty() && meta[META_ENCRYPTION_TOKEN].empty()) {
    meta[META_ENCRYPTION_TOKEN] = get_config().get_skynet_config().encryption_token;
  }

  auto ret = base_provider::create_file(api_path, meta);
  if (ret == api_error::success) {
    ret = directory_db_.create_file(api_path);
  }

  return ret;
}

json skynet_provider::export_all() const {
  json ret = {{"success", std::vector<json>()}, {"failed", std::vector<std::string>()}};

  api_file_list list;
  get_file_list(list);
  for (const auto &file : list) {
    process_export(ret, file.api_path);
  }

  return ret;
}

json skynet_provider::export_list(const std::vector<std::string> &api_path_list) const {
  json ret = {{"success", std::vector<json>()}, {"failed", std::vector<std::string>()}};

  for (const auto &api_path : api_path_list) {
    process_export(ret, api_path);
  }

  return ret;
}

std::uint64_t skynet_provider::get_directory_item_count(const std::string &api_path) const {
  return is_directory(api_path) ? directory_db_.get_directory_item_count(api_path) : 0u;
}

api_error skynet_provider::get_directory_items(const std::string &api_path,
                                               directory_item_list &list) const {
  if (is_file(api_path)) {
    return api_error::item_is_file;
  }

  if (not is_directory(api_path)) {
    const_cast<skynet_provider *>(this)->remove_item_meta(api_path);
    return api_error::directory_not_found;
  }

  directory_db_.populate_sub_directories(
      api_path,
      [this](directory_item &di, const bool &) {
        this->get_item_meta(di.api_path, di.meta);
        this->oft_->update_directory_item(di);
      },
      list);

  directory_db_.populate_directory_files(
      api_path,
      [this](directory_item &di, const bool &) {
        di.api_parent = utils::path::get_parent_api_path(di.api_path);
        this->get_item_meta(di.api_path, di.meta);
        this->oft_->update_directory_item(di);
      },
      list);

  std::sort(list.begin(), list.end(), [](const auto &a, const auto &b) -> bool {
    return (a.directory && not b.directory)   ? true
           : (b.directory && not a.directory) ? false
                                              : (a.api_path.compare(b.api_path) < 0);
  });

  list.insert(list.begin(), directory_item{
                                "..",
                                "",
                                true,
                            });
  list.insert(list.begin(), directory_item{
                                ".",
                                "",
                                true,
                            });

  return api_error::success;
}

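// Resulting order from get_directory_items() above: the listing is sorted with directories
// before files and each group ordered by api path, then "." and ".." placeholder entries
// are prepended, e.g. ".", "..", "/a/sub", "/a/zdir", "/a/file.txt".
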
api_error skynet_provider::get_file(const std::string &api_path, api_file &file) const {
  const auto ret =
      directory_db_.get_file(api_path, file, [this](api_file &file) { populate_api_file(file); });

  if (ret != api_error::success) {
    if (not is_directory(api_path)) {
      const_cast<skynet_provider *>(this)->remove_item_meta(api_path);
    }
    event_system::instance().raise<file_get_failed>(api_path,
                                                    std::to_string(static_cast<int>(ret)));
  }

  return ret;
}

api_error skynet_provider::get_file_list(api_file_list &list) const {
  const auto ret =
      directory_db_.get_file_list(list, [this](api_file &file) { populate_api_file(file); });
  if (ret != api_error::success) {
    event_system::instance().raise<file_get_api_list_failed>(std::to_string(static_cast<int>(ret)));
  }

  return ret;
}

api_error skynet_provider::get_file_size(const std::string &api_path,
                                         std::uint64_t &file_size) const {
  api_file file{};
  const auto ret = get_file(api_path, file);
  if (ret == api_error::success) {
    file_size = file.file_size;
  } else {
    event_system::instance().raise<file_get_size_failed>(api_path,
                                                         std::to_string(static_cast<int>(ret)));
  }
  return ret;
}

host_config skynet_provider::get_host_config(const bool &upload) {
  const auto format_host_config = [&](host_config hc) {
    hc.path = upload ? "/skynet/skyfile" : "/";
    return hc;
  };

  unique_mutex_lock portal_lock(portal_mutex_);
  const auto portal_list = upload ? upload_list_ : download_list_;
  portal_lock.unlock();

  auto &next_index = upload ? next_upload_index_ : next_download_index_;
  auto idx = next_index++;
  if (idx >= portal_list->size()) {
    idx = next_index = 0u;
  }

  return format_host_config((*portal_list)[idx]);
}

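// get_host_config() above rotates through the configured portals round-robin: with three
// download portals, successive calls for downloads return portal 0, 1, 2 and then wrap
// back to 0. Upload requests additionally have the host path set to "/skynet/skyfile",
// while downloads use "/".
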
std::size_t skynet_provider::get_retry_count() const {
  mutex_lock portal_lock(portal_mutex_);
  return std::max(download_list_->size(),
                  static_cast<std::size_t>(get_config().get_retry_read_count()));
}

api_error skynet_provider::get_skynet_metadata(const std::string &skylink, json &json_meta) {
  auto ret = api_error::error;

  http_headers headers;
  const auto retry_count = get_retry_count();

  for (std::size_t i = 0u; (ret != api_error::success) && (i < retry_count); i++) {
    json data, error;
    const auto hc = get_host_config(false);
    if (comm_.get(hc, "/skynet/metadata/" + skylink, data, error) == api_error::success) {
      headers["skynet-file-metadata"] = data.dump();
      ret = api_error::success;
    } else {
      std::vector<char> buffer;
      if (comm_.get_range_and_headers(hc, utils::path::create_api_path(skylink), 0u,
                                      {{"format", "concat"}}, "", buffer, {{0, 0}}, error, headers,
                                      stop_requested_) == api_error::success) {
        ret = api_error::success;
      } else if (not error.empty()) {
        event_system::instance().raise<repertory_exception>(__FUNCTION__, error.dump(2));
      }
    }
  }

  if (ret == api_error::success) {
    json_meta = json::parse(headers["skynet-file-metadata"]);
    if (json_meta["subfiles"].empty()) {
      auto sub_file = json_meta;
      sub_file["len"] =
          utils::string::to_uint64(utils::string::split(headers["content-range"], '/')[1]);
      json sub_files = {{json_meta["filename"], sub_file}};
      json_meta["subfiles"] = sub_files;
    }
  }

  return ret;
}

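// Example of the "subfiles" synthesis in get_skynet_metadata() above: a single-file
// skylink has no "subfiles" entry, so one is built from the metadata itself and the total
// length parsed from the content-range header, e.g. "bytes 0-0/1048576" splits on '/' to
// give a length of 1048576.
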
api_error skynet_provider::import_skylink(const skylink_import &si) {
  json json_meta;
  auto ret = get_skynet_metadata(si.skylink, json_meta);
  if (ret == api_error::success) {
    const auto encrypted = not si.token.empty();
    for (auto sub_file : json_meta["subfiles"]) {
      const auto meta_file_name = sub_file["filename"].get<std::string>();
      auto file_name = meta_file_name;
      if (encrypted) {
        if ((ret = utils::encryption::decrypt_file_name(si.token, file_name)) !=
            api_error::success) {
          event_system::instance().raise<skynet_import_decryption_failed>(
              si.skylink, sub_file["filename"], ret);
        }
      }

      if (ret == api_error::success) {
        const auto api_path =
            utils::path::create_api_path(utils::path::combine(si.directory, {file_name}));
        const auto api_parent = utils::path::get_parent_api_path(api_path);
        const auto parts = utils::string::split(api_parent, '/', false);

        std::string sub_directory = "/";
        for (std::size_t i = 0u; (ret == api_error::success) && (i < parts.size()); i++) {
          sub_directory =
              utils::path::create_api_path(utils::path::combine(sub_directory, {parts[i]}));
          if (not is_directory(sub_directory)) {
            if ((ret = directory_db_.create_directory(sub_directory)) == api_error::success) {
              base_provider::notify_directory_added(
                  sub_directory, utils::path::get_parent_api_path(sub_directory));
            } else {
              event_system::instance().raise<skynet_import_directory_failed>(si.skylink,
                                                                             sub_directory, ret);
            }
          }
        }

        if (ret == api_error::success) {
          auto file_size = sub_file["len"].get<std::uint64_t>();
          if (encrypted) {
            file_size = utils::encryption::encrypting_reader::calculate_decrypted_size(file_size);
          }

          const auto skylink =
              si.skylink + ((json_meta["filename"].get<std::string>() == meta_file_name)
                                ? ""
                                : "/" + meta_file_name);
          api_meta_map meta{};
          meta[META_ID] = json({{"skylink", skylink}}).dump();
          meta[META_ENCRYPTION_TOKEN] = si.token;
          if ((ret = create_file(api_path, meta)) == api_error::success) {
            const auto now = utils::get_file_time_now();
            api_item_added_(api_path, api_parent, "", false, now, now, now, now);

            if (file_size > 0u) {
              set_item_meta(api_path, META_SIZE, std::to_string(file_size));
              global_data::instance().increment_used_drive_space(file_size);
            }
          } else {
            event_system::instance().raise<skynet_import_file_failed>(si.skylink, api_path, ret);
          }
        }
      }
    }
  }

  return ret;
}

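// Illustrative usage sketch for import_skylink() above (values are hypothetical; the
// field names match those referenced in the code):
//   skylink_import si{};
//   si.skylink = "AAB...";      // skylink to import
//   si.token = "";              // encryption token, empty for unencrypted content
//   si.directory = "/imported"; // destination directory within the mount
//   provider.import_skylink(si);
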
bool skynet_provider::is_directory(const std::string &api_path) const {
  return (api_path == "/") || directory_db_.is_directory(api_path);
}

bool skynet_provider::is_file(const std::string &api_path) const {
  return (api_path != "/") && directory_db_.is_file(api_path);
}

bool skynet_provider::is_file_writeable(const std::string &api_path) const {
  auto ret = true;
  std::string id;
  get_item_meta(api_path, META_ID, id);
  if (not id.empty()) {
    try {
      const auto skynet_data = json::parse(id);
      const auto skylink = skynet_data["skylink"].get<std::string>();
      ret = not utils::string::contains(skylink, "/");
    } catch (const std::exception &e) {
      event_system::instance().raise<repertory_exception>(
          __FUNCTION__, e.what() ? e.what() : "exception occurred");
      ret = false;
    }
  }

  return ret;
}

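// Writeability rule from is_file_writeable() above: a file whose stored skylink contains
// a "/" was imported as a sub-file of a multi-file skylink and is treated as read-only;
// files backed by a plain skylink (or with no META_ID yet) remain writeable.
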
bool skynet_provider::is_processing(const std::string &api_path) const {
  return upload_manager_.is_processing(api_path);
}

void skynet_provider::notify_directory_added(const std::string &api_path,
                                             const std::string &api_parent) {
  if (api_path == "/") {
    if (directory_db_.create_directory("/") == api_error::success) {
      base_provider::notify_directory_added(api_path, api_parent);
    }
  }
}

api_error skynet_provider::notify_file_added(const std::string &, const std::string &,
                                             const std::uint64_t &) {
  return api_error::not_implemented;
}

void skynet_provider::populate_api_file(api_file &file) const {
  api_meta_map meta{};
  this->get_item_meta(file.api_path, meta);

  file.api_parent = utils::path::get_parent_api_path(file.api_path);
  file.accessed_date = utils::string::to_uint64(meta[META_ACCESSED]);
  file.changed_date = utils::string::to_uint64(meta[META_MODIFIED]);
  file.created_date = utils::string::to_uint64(meta[META_CREATION]);
  file.encryption_token = meta[META_ENCRYPTION_TOKEN].empty() ? "" : meta[META_ENCRYPTION_TOKEN];
  file.file_size = utils::string::to_uint64(meta[META_SIZE]);
  file.modified_date = utils::string::to_uint64(meta[META_MODIFIED]);
  file.recoverable = not is_processing(file.api_path);
  file.redundancy = 3.0;
  file.source_path = meta[META_SOURCE];
}

void skynet_provider::process_export(json &result, const std::string &api_path) const {
  try {
    std::string id;
    std::string token;
    if (is_file(api_path) && (get_item_meta(api_path, META_ID, id) == api_error::success) &&
        (get_item_meta(api_path, META_ENCRYPTION_TOKEN, token) == api_error::success)) {
      auto directory = utils::path::get_parent_api_path(api_path);

      const auto skylink = json::parse(id)["skylink"].get<std::string>();
      if (utils::string::contains(skylink, "/")) {
        const auto pos = skylink.find('/');
        const auto path =
            utils::path::create_api_path(utils::path::remove_file_name(skylink.substr(pos)));
        if (path != "/") {
          directory =
              utils::path::create_api_path(directory.substr(0, directory.length() - path.length()));
        }
      }

      result["success"].emplace_back(
          json({{"skylink", skylink},
                {"token", token},
                {"directory", directory},
                {"filename", utils::path::strip_to_file_name(api_path)}}));
    } else {
      result["failed"].emplace_back(api_path);
    }
  } catch (const std::exception &e) {
    result["failed"].emplace_back(api_path);
    event_system::instance().raise<repertory_exception>(
        __FUNCTION__, e.what() ? e.what() : "export failed: " + api_path);
  }
}

api_error skynet_provider::read_file_bytes(const std::string &api_path, const std::size_t &size,
                                           const std::uint64_t &offset, std::vector<char> &data,
                                           const bool &stop_requested) {
  if (size == 0u) {
    return api_error::success;
  }

  std::string id;
  auto ret = get_item_meta(api_path, META_ID, id);
  if (ret == api_error::success) {
    ret = api_error::download_failed;

    const auto skynet_data = json::parse(id);
    const auto path = utils::path::create_api_path(skynet_data["skylink"].get<std::string>());
    const auto ranges = http_ranges({{offset, offset + size - 1}});

    std::string encryption_token;
    get_item_meta(api_path, META_ENCRYPTION_TOKEN, encryption_token);

    std::uint64_t file_size{};
    {
      std::string temp;
      get_item_meta(api_path, META_SIZE, temp);
      file_size = utils::string::to_uint64(temp);
    }

    const auto retry_count = get_retry_count();
    for (std::size_t i = 0u; not stop_requested && (ret != api_error::success) && (i < retry_count);
         i++) {
      json error;
      ret = (comm_.get_range(get_host_config(false), path, file_size, {{"format", "concat"}},
                             encryption_token, data, ranges, error,
                             stop_requested) == api_error::success)
                ? api_error::success
                : api_error::download_failed;
      if (ret != api_error::success) {
        event_system::instance().raise<file_read_bytes_failed>(api_path, error.dump(2), i + 1);
      }
    }
  }

  return ret;
}

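// Range arithmetic used in read_file_bytes() above: a request for size == 1024 bytes at
// offset == 4096 produces the inclusive range {4096, 5119}, i.e. {offset, offset + size - 1}.
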
api_error skynet_provider::remove_directory(const std::string &api_path) {
  const auto ret = directory_db_.remove_directory(api_path);
  if (ret == api_error::success) {
    remove_item_meta(api_path);
    event_system::instance().raise<directory_removed>(api_path);
  } else {
    event_system::instance().raise<directory_remove_failed>(api_path,
                                                            std::to_string(static_cast<int>(ret)));
  }

  return ret;
}

api_error skynet_provider::remove_file(const std::string &api_path) {
  upload_manager_.remove_upload(api_path);
  const auto ret =
      directory_db_.remove_file(api_path) ? api_error::success : api_error::item_not_found;
  if (ret == api_error::success) {
    remove_item_meta(api_path);
    event_system::instance().raise<file_removed>(api_path);
  } else {
    event_system::instance().raise<file_remove_failed>(api_path,
                                                       std::to_string(static_cast<int>(ret)));
  }

  return ret;
}

api_error skynet_provider::rename_file(const std::string &from_api_path,
                                       const std::string &to_api_path) {
  std::string id;
  auto ret = get_item_meta(from_api_path, META_ID, id);
  if (ret == api_error::success) {
    ret = api_error::access_denied;

    const auto skynet_data = json::parse(id.empty() ? R"({"skylink":""})" : id);
    const auto skylink = skynet_data["skylink"].get<std::string>();
    if (utils::string::contains(skylink, "/")) {
      const auto pos = skylink.find('/');
      const auto len = skylink.size() - pos;
      if (to_api_path.size() >= len) {
        const auto comp1 = to_api_path.substr(to_api_path.size() - len);
        const auto comp2 = skylink.substr(pos);
        ret = (comp1 == comp2) ? api_error::success : ret;
      }
    } else {
      ret = (skylink.empty() || (utils::path::strip_to_file_name(from_api_path) ==
                                 utils::path::strip_to_file_name(to_api_path)))
                ? api_error::success
                : ret;
    }

    if (ret == api_error::success) {
      std::string current_source;
      if ((ret = get_item_meta(from_api_path, META_SOURCE, current_source)) == api_error::success) {
        if (not upload_manager_.execute_if_not_processing({from_api_path, to_api_path}, [&]() {
              if ((ret = directory_db_.rename_file(from_api_path, to_api_path)) ==
                  api_error::success) {
                meta_db_.rename_item_meta(current_source, from_api_path, to_api_path);
              }
            })) {
          ret = api_error::file_in_use;
        }
      }
    }
  }

  return ret;
}

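// Rename restrictions enforced above (example values are hypothetical): a file imported
// from a multi-file skylink such as "AAB.../docs/readme.md" can only be renamed to an api
// path that still ends in "/docs/readme.md", while a file backed by a plain skylink may
// move between directories but must keep its file name.
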
bool skynet_provider::start(api_item_added_callback api_item_added, i_open_file_table *oft) {
  const auto ret = base_provider::start(api_item_added, oft);
  if (not ret) {
    api_file_list list;
    if (get_file_list(list) != api_error::success) {
      throw startup_exception("failed to determine used space");
    }

    const auto total_size =
        std::accumulate(list.begin(), list.end(), std::uint64_t(0),
                        [](std::uint64_t t, const api_file &file) { return t + file.file_size; });
    global_data::instance().initialize_used_drive_space(total_size);
    upload_manager_.start();
  }
  return ret;
}

void skynet_provider::stop() {
  stop_requested_ = true;
  upload_manager_.stop();
}

bool skynet_provider::update_portal_list() {
  auto portal_list = get_config().get_skynet_config().portal_list;
  auto download_portal_list = std::make_shared<std::vector<host_config>>();
  auto upload_portal_list = std::make_shared<std::vector<host_config>>();

  std::copy_if(portal_list.begin(), portal_list.end(), std::back_inserter(*upload_portal_list),
               [](const auto &portal) -> bool {
                 return not portal.auth_url.empty() && not portal.auth_user.empty();
               });
  for (const auto &portal : portal_list) {
    if (upload_portal_list->empty()) {
      upload_portal_list->emplace_back(portal);
    }
    download_portal_list->emplace_back(portal);
  }

  unique_mutex_lock portal_lock(portal_mutex_);
  download_list_ = std::move(download_portal_list);
  upload_list_ = std::move(upload_portal_list);
  portal_lock.unlock();

  return true;
}

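// Portal selection in update_portal_list() above: every configured portal is added to the
// download list, while only portals with both an auth_url and auth_user are added to the
// upload list; if no portal is authenticated, the first configured portal is used for
// uploads as a fallback. Example: with portals [A (no auth), B (authenticated)], the
// download list is [A, B] and the upload list is [B].
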
void skynet_provider::upload_completed(const std::string &api_path, const std::string &,
                                       const json &data) {
  set_item_meta(api_path, META_ID, data.dump());
}

api_error skynet_provider::upload_file(const std::string &api_path, const std::string &source_path,
                                       const std::string &encryption_token) {
  std::uint64_t file_size = 0u;
  utils::file::get_file_size(source_path, file_size);
  auto ret = set_source_path(api_path, source_path);
  if (ret == api_error::success) {
    if (((ret = set_item_meta(api_path, META_SIZE, std::to_string(file_size))) ==
         api_error::success) &&
        ((ret = set_item_meta(api_path, META_ENCRYPTION_TOKEN, encryption_token)) ==
         api_error::success) &&
        (file_size != 0u)) {
      ret = upload_manager_.queue_upload(api_path, source_path, encryption_token);
    }
  }

  return ret;
}

api_error skynet_provider::upload_handler(const upload_manager::upload &upload, json &data,
                                          json &error) {
  const auto file_name = utils::path::strip_to_file_name(upload.api_path);

  auto ret = api_error::upload_failed;
  if (not upload.cancel) {
    event_system::instance().raise<file_upload_begin>(upload.api_path, upload.source_path);
    ret = (comm_.post_multipart_file(get_host_config(true), "", file_name, upload.source_path,
                                     upload.encryption_token, data, error,
                                     upload.cancel) == api_error::success)
              ? api_error::success
              : api_error::upload_failed;
  }

  event_system::instance().raise<file_upload_end>(upload.api_path, upload.source_path, ret);
  return ret;
}
} // namespace repertory

#endif // defined(REPERTORY_ENABLE_SKYNET)