33 Commits

Author SHA1 Message Date
1e4d675f76 added todo
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-25 15:02:26 -05:00
e02fe870e9 fix 2024-07-25 14:58:16 -05:00
9ef9250c18 moved event to debug 2024-07-25 14:46:51 -05:00
2e4b9d3787 added flush policy 2024-07-25 14:43:16 -05:00
fd2b3c3239 fix 2024-07-25 14:38:27 -05:00
6640394bd3 fix 2024-07-25 14:21:20 -05:00
84cc726c4f don't register loggers 2024-07-25 14:18:31 -05:00
5d99d21915 refactor 2024-07-25 13:58:27 -05:00
3b8636544a refactor logging 2024-07-25 13:55:30 -05:00
14ce60ab65 fix 2024-07-25 13:38:37 -05:00
7506431b34 fix 2024-07-25 13:31:48 -05:00
8621209eb3 fix 2024-07-25 13:30:48 -05:00
d1fe317fb9 refactor console consumer 2024-07-25 13:24:54 -05:00
a5c47d3f22 updated build system 2024-07-25 13:01:03 -05:00
267c272ce5 changed thread count 2024-07-25 12:31:12 -05:00
7a8ae32b85 refactor 2024-07-25 12:30:16 -05:00
921d404a5a switch to spdlog 2024-07-25 12:28:47 -05:00
de6a7beb5e refactor 2024-07-25 12:17:12 -05:00
5a928208fd added spdlog 2024-07-25 10:55:48 -05:00
92fc163341 fix 2024-07-25 10:47:26 -05:00
ca1e03f3ea logging changes 2024-07-25 10:46:29 -05:00
de8c3ad603 fix 2024-07-25 10:27:25 -05:00
f7d56bdd48 refactor 2024-07-25 10:19:50 -05:00
afc13b45f4 refactor 2024-07-25 10:16:17 -05:00
11d2c3c69e updated build system 2024-07-25 09:31:01 -05:00
356521e176 updated build system 2024-07-25 09:26:32 -05:00
468668e518 updated build system 2024-07-25 07:21:01 -05:00
e53dec7bab updated build system 2024-07-25 07:09:45 -05:00
26cd6de110 updated build system 2024-07-25 06:58:50 -05:00
d1157ca261 updated build system 2024-07-25 06:50:56 -05:00
0d49b49482 updated build system 2024-07-25 06:49:11 -05:00
753820bd81 updated build system 2024-07-25 06:45:38 -05:00
2a98e44a5c updated build system 2024-07-25 06:37:36 -05:00
59 changed files with 537 additions and 549 deletions

View File

@ -110,6 +110,7 @@ endif()
-DPROJECT_ENABLE_LIBSODIUM=${PROJECT_ENABLE_LIBSODIUM}
-DPROJECT_ENABLE_OPENSSL=${PROJECT_ENABLE_OPENSSL}
-DPROJECT_ENABLE_PUGIXML=${PROJECT_ENABLE_PUGIXML}
-DPROJECT_ENABLE_SPDLOG=${PROJECT_ENABLE_SPDLOG}
-DPROJECT_ENABLE_SQLITE=${PROJECT_ENABLE_SQLITE}
-DPROJECT_ENABLE_STDUUID=${PROJECT_ENABLE_STDUUID}
-DPROJECT_ENABLE_TESTING=${PROJECT_ENABLE_TESTING}

View File

@ -12,6 +12,7 @@ include(cmake/libraries/fuse.cmake)
include(cmake/libraries/json.cmake)
include(cmake/libraries/libsodium.cmake)
include(cmake/libraries/pugixml.cmake)
include(cmake/libraries/spdlog.cmake)
include(cmake/libraries/sqlite.cmake)
include(cmake/libraries/stduuid.cmake)
include(cmake/libraries/testing.cmake)

View File

@ -0,0 +1,33 @@
# Wires the spdlog logging library into the build when PROJECT_ENABLE_SPDLOG
# is set. Two modes:
#   - PROJECT_BUILD: the main project build; consume an already-installed
#     spdlog via find_package and expose it globally.
#   - otherwise: the superbuild/dependency phase; fetch and build spdlog as
#     an external project (skipped for MinGW cross-builds on non-Windows
#     hosts, per the elseif guard).
if(PROJECT_ENABLE_SPDLOG)
if(PROJECT_BUILD)
# Require the exact pinned version so the hash-verified tarball below and
# the found package cannot drift apart.
find_package(spdlog ${SPDLOG_VERSION} REQUIRED)
# Compile-time feature flag consumed by the C++ sources (#if defined(...)).
add_definitions(-DPROJECT_ENABLE_SPDLOG)
# SYSTEM suppresses warnings from spdlog headers; BEFORE ensures this copy
# wins over any other spdlog on the include path.
include_directories(BEFORE SYSTEM
${SPDLOG_INCLUDE_DIRS}
)
# Link every target in this project against spdlog.
link_libraries(
spdlog::spdlog
)
elseif(NOT PROJECT_IS_MINGW OR CMAKE_HOST_WIN32)
ExternalProject_Add(spdlog_project
PREFIX external
# Local pre-downloaded tarball; SHA256 pins the exact source contents.
URL ${PROJECT_3RD_PARTY_DIR}/spdlog-${SPDLOG_VERSION}.tar.gz
URL_HASH SHA256=1586508029a7d0670dfcb2d97575dcdc242d3868a259742b69f100801ab4e16b
# '|' keeps list-valued CMAKE_ARGS entries intact when forwarded.
LIST_SEPARATOR |
CMAKE_ARGS ${PROJECT_EXTERNAL_CMAKE_FLAGS}
-DBUILD_SHARED_LIBS=${PROJECT_BUILD_SHARED_LIBS}
-DBUILD_STATIC_LIBS=ON
-DSPDLOG_BUILD_EXAMPLE=OFF
# Use spdlog's bundled fmt rather than an external fmt package.
-DSPDLOG_FMT_EXTERNAL=OFF
-DSPDLOG_FMT_EXTERNAL_HO=OFF
)
# Record the external project so dependent targets order after it.
set(PROJECT_DEPENDENCIES
${PROJECT_DEPENDENCIES}
spdlog_project
)
endif()
endif()

View File

@ -6,6 +6,7 @@ option(PROJECT_ENABLE_JSON "Enable JSON for Modern C++ library" OFF)
option(PROJECT_ENABLE_LIBSODIUM "Enable libsodium library" OFF)
option(PROJECT_ENABLE_OPENSSL "Enable OpenSSL library" OFF)
option(PROJECT_ENABLE_PUGIXML "Enable PugiXML library" OFF)
option(PROJECT_ENABLE_SPDLOG "Enable spdlog library" OFF)
option(PROJECT_ENABLE_SQLITE "Enable SQLite" OFF)
option(PROJECT_ENABLE_STDUUID "Enable stduuid library" OFF)
option(PROJECT_ENABLE_TESTING "Enable building unit tests" OFF)

View File

@ -2,9 +2,9 @@
PROJECT_NAME="repertory"
PROJECT_URL="https://git.fifthgrid.com/blockstorage/repertory"
PROJECT_COMPANY_NAME="https://git.fifthgrid.com/blockstorage"
PROJECT_URL="${PROJECT_COMPANY_NAME}/repertory"
PROJECT_COPYRIGHT="Copyright <2018-2024> <MIT License> <${PROJECT_URL}>"
PROJECT_DESC="Mount utility for Sia and S3"
@ -27,6 +27,7 @@ PROJECT_ENABLE_JSON=ON
PROJECT_ENABLE_LIBSODIUM=ON
PROJECT_ENABLE_OPENSSL=ON
PROJECT_ENABLE_PUGIXML=ON
PROJECT_ENABLE_SPDLOG=ON
PROJECT_ENABLE_SQLITE=ON
PROJECT_ENABLE_STDUUID=ON
PROJECT_ENABLE_TESTING=ON

View File

@ -1,8 +1,3 @@
option(PROJECT_ENABLE_S3 "Enable S3 support" ON)
if (PROJECT_ENABLE_S3)
add_definitions(-DPROJECT_ENABLE_S3)
endif()
set(CMAKE_CXX_FLAGS "-include common.hpp ${CMAKE_CXX_FLAGS}")
add_project_library(lib${PROJECT_NAME} "" "" "${PROJECT_ADDITIONAL_SOURCES}")

View File

@ -32,7 +32,7 @@ E_SIMPLE3(fuse_event, debug, true,
int, result, res, E_FROM_INT32
);
E_SIMPLE1(fuse_args_parsed, normal, true,
E_SIMPLE1(fuse_args_parsed, info, true,
std::string, arguments, args, E_STRING
);
// clang-format on

View File

@ -24,29 +24,26 @@
#include "events/event_system.hpp"
namespace spdlog {
class logger;
}
namespace repertory {
class console_consumer final {
E_CONSUMER();
public:
console_consumer() { E_SUBSCRIBE_ALL(process_event); }
console_consumer();
public:
~console_consumer() { E_CONSUMER_RELEASE(); }
explicit console_consumer(event_level level);
~console_consumer();
private:
void process_event(const event &e) {
#if defined(_WIN32)
#if defined(_DEBUG)
OutputDebugString((e.get_single_line() + "\n").c_str());
#endif
#endif
if (e.get_event_level() == event_level::error) {
std::cerr << e.get_single_line() << std::endl;
} else {
std::cout << e.get_single_line() << std::endl;
}
}
std::shared_ptr<spdlog::logger> logger_;
private:
void process_event(const event &e) const;
};
} // namespace repertory

View File

@ -24,40 +24,29 @@
#include "events/event_system.hpp"
namespace spdlog {
class logger;
}
namespace repertory {
class logging_consumer {
E_CONSUMER();
public:
logging_consumer(const std::string &log_directory, const event_level &level);
logging_consumer(event_level level, std::string log_directory);
~logging_consumer();
private:
const std::uint8_t MAX_LOG_FILES = 5;
const std::uint64_t MAX_LOG_FILE_SIZE = (1024 * 1024 * 5);
static constexpr const std::uint8_t MAX_LOG_FILES{5U};
static constexpr const std::uint64_t MAX_LOG_FILE_SIZE{1024ULL * 1024ULL *
5ULL};
private:
event_level event_level_ = event_level::normal;
const std::string log_directory_;
const std::string log_path_;
bool logging_active_ = true;
std::mutex log_mutex_;
std::condition_variable log_notify_;
std::deque<std::shared_ptr<event>> event_queue_;
std::unique_ptr<std::thread> logging_thread_;
FILE *log_file_ = nullptr;
std::shared_ptr<spdlog::logger> logger_;
private:
void check_log_roll(std::size_t count);
void close_log_file();
void logging_thread(bool drain);
void process_event(const event &event);
void reopen_log_file();
void process_event(const event &event) const;
};
} // namespace repertory

View File

@ -24,16 +24,17 @@
namespace repertory {
enum class event_level {
critical,
error,
warn,
normal,
info,
debug,
verbose,
trace,
};
auto event_level_from_string(std::string level) -> event_level;
auto event_level_to_string(const event_level &level) -> std::string;
auto event_level_to_string(event_level level) -> std::string;
class event {
protected:

View File

@ -28,7 +28,7 @@
namespace repertory {
// clang-format off
E_SIMPLE2(curl_error, normal, true,
E_SIMPLE2(curl_error, info, true,
std::string, url, url, E_STRING,
CURLcode, res, res, E_FROM_CURL_CODE
);
@ -39,7 +39,7 @@ E_SIMPLE3(debug_log, debug, true,
std::string, data, data, E_STRING
);
E_SIMPLE1(directory_removed, normal, true,
E_SIMPLE1(directory_removed, info, true,
std::string, api_path, ap, E_STRING
);
@ -58,23 +58,23 @@ E_SIMPLE2(drive_mount_failed, error, true,
std::string, result, res, E_STRING
);
E_SIMPLE1(drive_mounted, normal, true,
E_SIMPLE1(drive_mounted, info, true,
std::string, location, loc, E_STRING
);
E_SIMPLE1(drive_mount_result, normal, true,
E_SIMPLE1(drive_mount_result, info, true,
std::string, result, res, E_STRING
);
E_SIMPLE1(drive_unmount_pending, normal, true,
E_SIMPLE1(drive_unmount_pending, info, true,
std::string, location, loc, E_STRING
);
E_SIMPLE1(drive_unmounted, normal, true,
E_SIMPLE1(drive_unmounted, info, true,
std::string, location, loc, E_STRING
);
E_SIMPLE1(event_level_changed, normal, true,
E_SIMPLE1(event_level_changed, info, true,
std::string, new_event_level, level, E_STRING
);
@ -86,7 +86,7 @@ E_SIMPLE1(failed_upload_removed, warn, true,
std::string, api_path, ap, E_STRING
);
E_SIMPLE1(failed_upload_retry, normal, true,
E_SIMPLE1(failed_upload_retry, info, true,
std::string, api_path, ap, E_STRING
);
@ -99,7 +99,7 @@ E_SIMPLE1(file_get_api_list_failed, error, true,
std::string, error, err, E_STRING
);
E_SIMPLE1(file_pinned, normal, true,
E_SIMPLE1(file_pinned, info, true,
std::string, api_path, ap, E_STRING
);
@ -109,7 +109,7 @@ E_SIMPLE3(file_read_bytes_failed, error, true,
std::size_t, retry, retry, E_FROM_SIZE_T
);
E_SIMPLE1(file_removed, normal, true,
E_SIMPLE1(file_removed, info, true,
std::string, api_path, ap, E_STRING
);
@ -134,20 +134,20 @@ E_SIMPLE2(file_get_size_failed, error, true,
std::string, error, err, E_STRING
);
E_SIMPLE3(filesystem_item_added, normal, true,
E_SIMPLE3(filesystem_item_added, info, true,
std::string, api_path, ap, E_STRING,
std::string, parent, parent, E_STRING,
bool, directory, dir, E_FROM_BOOL
);
E_SIMPLE4(filesystem_item_closed, verbose, true,
E_SIMPLE4(filesystem_item_closed, trace, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING,
bool, directory, dir, E_FROM_BOOL,
bool, changed, changed, E_FROM_BOOL
);
E_SIMPLE5(filesystem_item_handle_closed, verbose, true,
E_SIMPLE5(filesystem_item_handle_closed, trace, true,
std::string, api_path, ap, E_STRING,
std::uint64_t, handle, handle, E_FROM_UINT64,
std::string, source, src, E_STRING,
@ -155,14 +155,14 @@ E_SIMPLE5(filesystem_item_handle_closed, verbose, true,
bool, changed, changed, E_FROM_BOOL
);
E_SIMPLE4(filesystem_item_handle_opened, verbose, true,
E_SIMPLE4(filesystem_item_handle_opened, trace, true,
std::string, api_path, ap, E_STRING,
std::uint64_t, handle, handle, E_FROM_UINT64,
std::string, source, src, E_STRING,
bool, directory, dir, E_FROM_BOOL
);
E_SIMPLE2(filesystem_item_evicted, normal, true,
E_SIMPLE2(filesystem_item_evicted, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING
);
@ -172,17 +172,17 @@ E_SIMPLE2(filesystem_item_get_failed, error, true,
std::string, error, err, E_STRING
);
E_SIMPLE3(filesystem_item_opened, verbose, true,
E_SIMPLE3(filesystem_item_opened, trace, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING,
bool, directory, dir, E_FROM_BOOL
);
E_SIMPLE1(file_unpinned, normal, true,
E_SIMPLE1(file_unpinned, info, true,
std::string, api_path, ap, E_STRING
);
E_SIMPLE4(file_upload_completed, normal, true,
E_SIMPLE4(file_upload_completed, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING,
api_error, result, res, E_FROM_API_FILE_ERROR,
@ -200,7 +200,7 @@ E_SIMPLE2(file_upload_not_found, warn, true,
std::string, source, src, E_STRING
);
E_SIMPLE2(file_upload_queued, normal, true,
E_SIMPLE2(file_upload_queued, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING
);
@ -209,20 +209,20 @@ E_SIMPLE1(file_upload_removed, debug, true,
std::string, api_path, ap, E_STRING
);
E_SIMPLE3(file_upload_retry, normal, true,
E_SIMPLE3(file_upload_retry, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING,
api_error, result, res, E_FROM_API_FILE_ERROR
);
E_SIMPLE2(file_upload_started, normal, true,
E_SIMPLE2(file_upload_started, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING
);
E_SIMPLE(item_scan_begin, normal, true);
E_SIMPLE(item_scan_begin, info, true);
E_SIMPLE(item_scan_end, normal, true);
E_SIMPLE(item_scan_end, info, true);
E_SIMPLE1(orphaned_file_deleted, warn, true,
std::string, source, src, E_STRING
@ -256,12 +256,12 @@ E_SIMPLE2(provider_offline, error, true,
std::uint16_t, port, port, E_FROM_UINT16
);
E_SIMPLE2(provider_upload_begin, normal, true,
E_SIMPLE2(provider_upload_begin, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING
);
E_SIMPLE3(provider_upload_end, normal, true,
E_SIMPLE3(provider_upload_end, info, true,
std::string, api_path, ap, E_STRING,
std::string, source, src, E_STRING,
api_error, result, res, E_FROM_API_FILE_ERROR
@ -288,9 +288,9 @@ E_SIMPLE1(service_started, debug, true,
std::string, service, svc, E_STRING
);
E_SIMPLE(unmount_requested, normal, true);
E_SIMPLE(unmount_requested, info, true);
#if !defined(_WIN32)
E_SIMPLE2(unmount_result, normal, true,
E_SIMPLE2(unmount_result, info, true,
std::string, location, loc, E_STRING,
std::string, result, res, E_STRING
);

View File

@ -27,7 +27,7 @@
namespace repertory {
// clang-format off
E_SIMPLE2(download_begin, normal, true,
E_SIMPLE2(download_begin, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING
);
@ -49,19 +49,19 @@ E_SIMPLE6(download_chunk_end, debug, true,
api_error, result, result, E_FROM_API_FILE_ERROR
);
E_SIMPLE3(download_end, normal, true,
E_SIMPLE3(download_end, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING,
api_error, result, result, E_FROM_API_FILE_ERROR
);
E_SIMPLE3(download_progress, normal, true,
E_SIMPLE3(download_progress, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING,
double, progress, prog, E_DOUBLE_PRECISE
);
E_SIMPLE2(download_restored, normal, true,
E_SIMPLE2(download_restored, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING
);
@ -72,17 +72,17 @@ E_SIMPLE3(download_restore_failed, error, true,
std::string, error, err, E_STRING
);
E_SIMPLE2(download_resumed, normal, true,
E_SIMPLE2(download_resumed, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING
);
E_SIMPLE2(download_stored, normal, true,
E_SIMPLE2(download_stored, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING
);
E_SIMPLE2(download_stored_removed, normal, true,
E_SIMPLE2(download_stored_removed, info, true,
std::string, api_path, ap, E_STRING,
std::string, dest_path, dest, E_STRING
);
@ -93,7 +93,7 @@ E_SIMPLE3(download_stored_failed, error, true,
std::string, error, err, E_STRING
);
E_SIMPLE1(item_timeout, normal, true,
E_SIMPLE1(item_timeout, debug, true,
std::string, api_path, ap, E_STRING
);
// clang-format on

View File

@ -1,24 +1,3 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef LIBREPERTORY_INCLUDE_INITIALIZE_HPP_
#define LIBREPERTORY_INCLUDE_INITIALIZE_HPP_

View File

@ -21,7 +21,6 @@
*/
#ifndef INCLUDE_PROVIDERS_S3_S3_PROVIDER_HPP_
#define INCLUDE_PROVIDERS_S3_S3_PROVIDER_HPP_
#if defined(PROJECT_ENABLE_S3)
#include "providers/base_provider.hpp"
#include "types/repertory.hpp"
@ -138,5 +137,4 @@ public:
};
} // namespace repertory
#endif // PROJECT_ENABLE_S3
#endif // INCLUDE_PROVIDERS_S3_S3_PROVIDER_HPP_

View File

@ -21,7 +21,6 @@
*/
#ifndef INCLUDE_TYPES_S3_HPP_
#define INCLUDE_TYPES_S3_HPP_
#if defined(PROJECT_ENABLE_S3)
#include "types/repertory.hpp"
#include "utils/string_utils.hpp"
@ -86,8 +85,6 @@ struct head_object_result {
return *this;
}
};
} // namespace repertory
#endif
#endif // INCLUDE_TYPES_S3_HPP_

View File

@ -33,10 +33,8 @@ static const option display_config_option = {"-dc", "--display_config"};
static const option data_directory_option = {"-dd", "--data_directory"};
static const option encrypt_option = {"-en", "--encrypt"};
static const option drive_information_option = {"-di", "--drive_information"};
#if defined(PROJECT_ENABLE_S3)
static const option name_option = {"-na", "--name"};
static const option s3_option = {"-s3", "--s3"};
#endif // defined(PROJECT_ENABLE_S3)
static const option generate_config_option = {"-gc", "--generate_config"};
static const option get_option = {"-get", "--get"};
static const option get_directory_items_option = {"-gdi",
@ -62,10 +60,8 @@ static const std::vector<option> option_list = {
data_directory_option,
drive_information_option,
encrypt_option,
#if defined(PROJECT_ENABLE_S3)
s3_option,
name_option,
#endif // defined(PROJECT_ENABLE_S3)
generate_config_option,
get_option,
get_directory_items_option,

View File

@ -1,24 +1,3 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef LIBREPERTORY_INCLUDE_VERSION_HPP_
#define LIBREPERTORY_INCLUDE_VERSION_HPP_

View File

@ -72,7 +72,7 @@ app_config::app_config(const provider_type &prov,
enable_mount_manager_(false),
#endif
enable_remote_mount_(false),
event_level_(event_level::normal),
event_level_(event_level::info),
eviction_delay_mins_(default_eviction_delay_mins),
eviction_uses_accessed_time_(false),
high_freq_interval_secs_(default_high_freq_interval_secs),
@ -195,72 +195,71 @@ auto app_config::default_rpc_port(const provider_type &prov) -> std::uint16_t {
auto app_config::get_json() const -> json {
json ret = {
{"ApiAuth", api_auth_},
{"ApiPort", api_port_},
{"ApiUser", api_user_},
{"ChunkDownloaderTimeoutSeconds", download_timeout_secs_},
{"EnableChunkDownloaderTimeout", enable_chunk_downloader_timeout_},
{"EnableCommDurationEvents", enable_comm_duration_events_},
{"EnableDriveEvents", enable_drive_events_},
{"ApiAuth", api_auth_},
{"ApiPort", api_port_},
{"ApiUser", api_user_},
{"ChunkDownloaderTimeoutSeconds", download_timeout_secs_},
{"EnableChunkDownloaderTimeout", enable_chunk_downloader_timeout_},
{"EnableCommDurationEvents", enable_comm_duration_events_},
{"EnableDriveEvents", enable_drive_events_},
#if defined(_WIN32)
{"EnableMountManager", enable_mount_manager_},
{"EnableMountManager", enable_mount_manager_},
#endif
{"EnableMaxCacheSize", enable_max_cache_size_},
{"EncryptConfig",
{
{"EncryptionToken", encrypt_config_.encryption_token},
{"Path", encrypt_config_.path},
}},
{"EventLevel", event_level_to_string(event_level_)},
{"EvictionDelayMinutes", eviction_delay_mins_},
{"EvictionUsesAccessedTime", eviction_uses_accessed_time_},
{"HighFreqIntervalSeconds", high_freq_interval_secs_},
{"HostConfig",
{
{"AgentString", hc_.agent_string},
{"ApiPassword", hc_.api_password},
{"ApiPort", hc_.api_port},
{"HostNameOrIp", hc_.host_name_or_ip},
{"TimeoutMs", hc_.timeout_ms},
}},
{"LowFreqIntervalSeconds", low_freq_interval_secs_},
{"MaxCacheSizeBytes", max_cache_size_bytes_},
{"MaxUploadCount", max_upload_count_},
{"OnlineCheckRetrySeconds", online_check_retry_secs_},
{"OrphanedFileRetentionDays", orphaned_file_retention_days_},
{"PreferredDownloadType", preferred_download_type_},
{"ReadAheadCount", read_ahead_count_},
{
"RemoteMount",
{
{"EnableRemoteMount", enable_remote_mount_},
{"IsRemoteMount", is_remote_mount_},
{"RemoteClientPoolSize", remote_client_pool_size_},
{"RemoteMaxConnections", remote_max_connections_},
{"RemoteHostNameOrIp", remote_host_name_or_ip_},
{"RemotePort", remote_port_},
{"RemoteReceiveTimeoutSeconds", remote_receive_timeout_secs_},
{"RemoteSendTimeoutSeconds", remote_send_timeout_secs_},
{"RemoteToken", remote_token_},
},
},
{"RetryReadCount", retry_read_count_},
{"RingBufferFileSize", ring_buffer_file_size_},
{"S3Config",
{
{"AccessKey", s3_config_.access_key},
{"Bucket", s3_config_.bucket},
{"CacheTimeoutSeconds", s3_config_.cache_timeout_secs},
{"EncryptionToken", s3_config_.encryption_token},
{"Region", s3_config_.region},
{"SecretKey", s3_config_.secret_key},
{"TimeoutMs", s3_config_.timeout_ms},
{"URL", s3_config_.url},
{"UsePathStyle", s3_config_.use_path_style},
{"UseRegionInURL", s3_config_.use_region_in_url},
}},
{"Version", version_}
};
{"EnableMaxCacheSize", enable_max_cache_size_},
{"EncryptConfig",
{
{"EncryptionToken", encrypt_config_.encryption_token},
{"Path", encrypt_config_.path},
}},
{"EventLevel", event_level_to_string(event_level_)},
{"EvictionDelayMinutes", eviction_delay_mins_},
{"EvictionUsesAccessedTime", eviction_uses_accessed_time_},
{"HighFreqIntervalSeconds", high_freq_interval_secs_},
{"HostConfig",
{
{"AgentString", hc_.agent_string},
{"ApiPassword", hc_.api_password},
{"ApiPort", hc_.api_port},
{"HostNameOrIp", hc_.host_name_or_ip},
{"TimeoutMs", hc_.timeout_ms},
}},
{"LowFreqIntervalSeconds", low_freq_interval_secs_},
{"MaxCacheSizeBytes", max_cache_size_bytes_},
{"MaxUploadCount", max_upload_count_},
{"OnlineCheckRetrySeconds", online_check_retry_secs_},
{"OrphanedFileRetentionDays", orphaned_file_retention_days_},
{"PreferredDownloadType", preferred_download_type_},
{"ReadAheadCount", read_ahead_count_},
{
"RemoteMount",
{
{"EnableRemoteMount", enable_remote_mount_},
{"IsRemoteMount", is_remote_mount_},
{"RemoteClientPoolSize", remote_client_pool_size_},
{"RemoteMaxConnections", remote_max_connections_},
{"RemoteHostNameOrIp", remote_host_name_or_ip_},
{"RemotePort", remote_port_},
{"RemoteReceiveTimeoutSeconds", remote_receive_timeout_secs_},
{"RemoteSendTimeoutSeconds", remote_send_timeout_secs_},
{"RemoteToken", remote_token_},
},
},
{"RetryReadCount", retry_read_count_},
{"RingBufferFileSize", ring_buffer_file_size_},
{"S3Config",
{
{"AccessKey", s3_config_.access_key},
{"Bucket", s3_config_.bucket},
{"CacheTimeoutSeconds", s3_config_.cache_timeout_secs},
{"EncryptionToken", s3_config_.encryption_token},
{"Region", s3_config_.region},
{"SecretKey", s3_config_.secret_key},
{"TimeoutMs", s3_config_.timeout_ms},
{"URL", s3_config_.url},
{"UsePathStyle", s3_config_.use_path_style},
{"UseRegionInURL", s3_config_.use_region_in_url},
}},
{"Version", version_}};
if (prov_ == provider_type::encrypt) {
ret.erase("ChunkDownloaderTimeoutSeconds");

View File

@ -680,7 +680,7 @@ void fuse_base::raise_fuse_event(std::string function_name,
}
if (((config_.get_event_level() >= fuse_event::level) && (ret != 0)) ||
(config_.get_event_level() >= event_level::verbose)) {
(config_.get_event_level() >= event_level::trace)) {
event_system::instance().raise<fuse_event>(
utils::string::right_trim(function_name, '_'), api_path, ret);
}

View File

@ -536,11 +536,12 @@ void *fuse_drive::init_impl(struct fuse_conn_info *conn) {
#endif
if (console_enabled_) {
console_consumer_ = std::make_unique<console_consumer>();
console_consumer_ =
std::make_unique<console_consumer>(config_.get_event_level());
}
logging_consumer_ = std::make_unique<logging_consumer>(
config_.get_log_directory(), config_.get_event_level());
config_.get_event_level(), config_.get_log_directory());
event_system::instance().start();
was_mounted_ = true;

View File

@ -232,10 +232,11 @@ auto remote_fuse_drive::init_impl(struct fuse_conn_info *conn) -> void * {
was_mounted_ = true;
if (console_enabled_) {
console_consumer_ = std::make_shared<console_consumer>();
console_consumer_ =
std::make_shared<console_consumer>(config_.get_event_level());
}
logging_consumer_ = std::make_shared<logging_consumer>(
config_.get_log_directory(), config_.get_event_level());
config_.get_event_level(), config_.get_log_directory());
event_system::instance().start();
if (not lock_data_.set_mount_state(true, get_mount_location(), getpid())) {

View File

@ -45,7 +45,7 @@ namespace repertory::remote_fuse {
if (config_.get_enable_drive_events() && \
(((config_.get_event_level() >= remote_fuse_server_event::level) && \
(ret < 0)) || \
(config_.get_event_level() >= event_level::verbose))) \
(config_.get_event_level() >= event_level::trace))) \
event_system::instance().raise<remote_fuse_server_event>(func, file, ret)
// clang-format off

View File

@ -34,7 +34,7 @@ namespace repertory::remote_winfsp {
if (config_.get_enable_drive_events() && \
(((config_.get_event_level() >= remote_winfsp_client_event::level) && \
(ret != STATUS_SUCCESS)) || \
(config_.get_event_level() >= event_level::verbose))) \
(config_.get_event_level() >= event_level::trace))) \
event_system::instance().raise<remote_winfsp_client_event>(func, file, ret)
// clang-format off

View File

@ -52,7 +52,7 @@ namespace repertory::remote_winfsp {
if (config_.get_enable_drive_events() && \
(((config_.get_event_level() >= remote_winfsp_server_event::level) && \
(ret != STATUS_SUCCESS)) || \
(config_.get_event_level() >= event_level::verbose))) \
(config_.get_event_level() >= event_level::trace))) \
event_system::instance().raise<remote_winfsp_server_event>(func, file, ret)
// clang-format off

View File

@ -245,10 +245,10 @@ auto remote_winfsp_drive::mount(const std::vector<std::string> &drive_args)
}
}
logging_consumer l(config_.get_log_directory(), config_.get_event_level());
logging_consumer l(config_.get_event_level(), config_.get_log_directory());
std::unique_ptr<console_consumer> c;
if (enable_console) {
c = std::make_unique<console_consumer>();
c = std::make_unique<console_consumer>(config_.get_event_level());
}
event_system::instance().start();
const auto ret =

View File

@ -51,7 +51,7 @@ E_SIMPLE3(winfsp_event, debug, true,
if (config_.get_enable_drive_events() && \
(((config_.get_event_level() >= winfsp_event::level) && \
(ret != STATUS_SUCCESS)) || \
(config_.get_event_level() >= event_level::verbose))) \
(config_.get_event_level() >= event_level::trace))) \
event_system::instance().raise<winfsp_event>(func, file, ret)
winfsp_drive::winfsp_service::winfsp_service(
@ -573,10 +573,10 @@ auto winfsp_drive::mount(const std::vector<std::string> &drive_args) -> int {
}
}
logging_consumer log(config_.get_log_directory(), config_.get_event_level());
logging_consumer log(config_.get_event_level(), config_.get_log_directory());
std::unique_ptr<console_consumer> cons;
if (enable_console) {
cons = std::make_unique<console_consumer>();
cons = std::make_unique<console_consumer>(config_.get_event_level());
}
event_system::instance().start();
auto svc = winfsp_service(lock_, *this, parsed_drive_args, config_);

View File

@ -0,0 +1,98 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "events/consumers/console_consumer.hpp"
#include "events/events.hpp"
#include "spdlog/async.h"
#include "spdlog/sinks/stdout_color_sinks.h"
#include "spdlog/spdlog.h"
namespace repertory {
console_consumer::console_consumer() : console_consumer(event_level::info) {}
console_consumer::console_consumer(event_level level) {
static const auto set_level = [](auto next_level) {
switch (next_level) {
case event_level::critical:
spdlog::get("console")->set_level(spdlog::level::critical);
break;
case event_level::error:
spdlog::get("console")->set_level(spdlog::level::err);
break;
case event_level::warn:
spdlog::get("console")->set_level(spdlog::level::warn);
break;
case event_level::info:
spdlog::get("console")->set_level(spdlog::level::info);
break;
case event_level::debug:
spdlog::get("console")->set_level(spdlog::level::debug);
break;
case event_level::trace:
spdlog::get("console")->set_level(spdlog::level::trace);
break;
default:
spdlog::get("console")->set_level(spdlog::level::info);
break;
}
};
spdlog::drop("console");
logger_ =
spdlog::create_async<spdlog::sinks::stdout_color_sink_mt>("console");
set_level(level);
spdlog::register_logger(logger_);
E_SUBSCRIBE_ALL(process_event);
E_SUBSCRIBE_EXACT(event_level_changed,
[](const event_level_changed &changed) {
set_level(event_level_from_string(
changed.get_new_event_level().get<std::string>()));
});
}
console_consumer::~console_consumer() { E_CONSUMER_RELEASE(); }
void console_consumer::process_event(const event &event) const {
switch (event.get_event_level()) {
case event_level::critical:
spdlog::get("console")->critical(event.get_single_line());
break;
case event_level::error:
spdlog::get("console")->error(event.get_single_line());
break;
case event_level::warn:
spdlog::get("console")->warn(event.get_single_line());
break;
case event_level::info:
spdlog::get("console")->info(event.get_single_line());
break;
case event_level::debug:
spdlog::get("console")->debug(event.get_single_line());
break;
case event_level::trace:
default:
spdlog::get("console")->trace(event.get_single_line());
break;
}
}
} // namespace repertory

View File

@ -22,159 +22,80 @@
#include "events/consumers/logging_consumer.hpp"
#include "events/events.hpp"
#include "types/startup_exception.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "spdlog/async.h"
#include "spdlog/sinks/rotating_file_sink.h"
#include "spdlog/spdlog.h"
#include "utils/path_utils.hpp"
#include "utils/unix/unix_utils.hpp"
#include "utils/utils.hpp"
namespace repertory {
logging_consumer::logging_consumer(const std::string &log_directory,
const event_level &level)
: event_level_(level),
log_directory_(utils::path::absolute(log_directory)),
log_path_(utils::path::combine(log_directory, {"repertory.log"})) {
if (not utils::file::create_full_directory_path(log_directory_)) {
throw startup_exception("failed to create log directory|sp|" +
log_directory_ + "|err|" +
std::to_string(utils::get_last_error_code()));
}
logging_consumer::logging_consumer(event_level level,
std::string log_directory) {
log_directory = utils::path::absolute(log_directory);
static const auto set_level = [](auto next_level) {
switch (next_level) {
case event_level::critical:
spdlog::get("file")->set_level(spdlog::level::critical);
break;
case event_level::error:
spdlog::get("file")->set_level(spdlog::level::err);
break;
case event_level::warn:
spdlog::get("file")->set_level(spdlog::level::warn);
break;
case event_level::info:
spdlog::get("file")->set_level(spdlog::level::info);
break;
case event_level::debug:
spdlog::get("file")->set_level(spdlog::level::debug);
break;
case event_level::trace:
spdlog::get("file")->set_level(spdlog::level::trace);
break;
default:
spdlog::get("file")->set_level(spdlog::level::info);
break;
}
};
spdlog::drop("file");
logger_ = spdlog::create_async<spdlog::sinks::rotating_file_sink_mt>(
"file", utils::path::combine(log_directory, {"repertory.log"}),
MAX_LOG_FILE_SIZE, MAX_LOG_FILES);
set_level(level);
spdlog::register_logger(logger_);
check_log_roll(0);
reopen_log_file();
E_SUBSCRIBE_ALL(process_event);
E_SUBSCRIBE_EXACT(event_level_changed,
[this](const event_level_changed &changed) {
event_level_ = event_level_from_string(
changed.get_new_event_level().get<std::string>());
[](const event_level_changed &changed) {
set_level(event_level_from_string(
changed.get_new_event_level().get<std::string>()));
});
logging_thread_ =
std::make_unique<std::thread>([this] { logging_thread(false); });
}
logging_consumer::~logging_consumer() {
E_CONSUMER_RELEASE();
logging_consumer::~logging_consumer() { E_CONSUMER_RELEASE(); }
unique_mutex_lock l(log_mutex_);
logging_active_ = false;
log_notify_.notify_all();
l.unlock();
logging_thread_->join();
logging_thread_.reset();
logging_thread(true);
close_log_file();
}
void logging_consumer::check_log_roll(std::size_t count) {
constexpr const auto *function_name = static_cast<const char *>(__FUNCTION__);
std::uint64_t file_size{};
const auto success = utils::file::get_file_size(log_path_, file_size);
if (success && (file_size + count) >= MAX_LOG_FILE_SIZE) {
close_log_file();
for (std::uint8_t i = MAX_LOG_FILES; i > 0u; i--) {
const auto temp_log_path = utils::path::combine(
log_directory_, {"repertory." + std::to_string(i) + ".log"});
if (utils::file::is_file(temp_log_path)) {
if (i == MAX_LOG_FILES) {
if (not utils::file::retry_delete_file(temp_log_path)) {
}
} else {
const auto next_file_path = utils::path::combine(
log_directory_,
{"repertory." + std::to_string(i + std::uint8_t(1)) + ".log"});
if (not utils::file::move_file(temp_log_path, next_file_path)) {
utils::error::raise_error(function_name,
utils::get_last_error_code(),
temp_log_path + "|dest|" + next_file_path,
"failed to move file");
}
}
}
}
auto backup_log_path =
utils::path::combine(log_directory_, {"repertory.1.log"});
if (not utils::file::move_file(log_path_, backup_log_path)) {
utils::error::raise_error(function_name, utils::get_last_error_code(),
log_path_ + "|dest|" + backup_log_path,
"failed to move file");
}
reopen_log_file();
void logging_consumer::process_event(const event &event) const {
switch (event.get_event_level()) {
case event_level::critical:
spdlog::get("file")->critical(event.get_single_line());
break;
case event_level::error:
spdlog::get("file")->error(event.get_single_line());
break;
case event_level::warn:
spdlog::get("file")->warn(event.get_single_line());
break;
case event_level::info:
spdlog::get("file")->info(event.get_single_line());
break;
case event_level::debug:
spdlog::get("file")->debug(event.get_single_line());
break;
case event_level::trace:
default:
spdlog::get("file")->trace(event.get_single_line());
break;
}
}
void logging_consumer::close_log_file() {
if (log_file_) {
fclose(log_file_);
log_file_ = nullptr;
}
}
void logging_consumer::logging_thread(bool drain) {
do {
std::deque<std::shared_ptr<event>> events;
{
unique_mutex_lock l(log_mutex_);
if (event_queue_.empty() && not drain) {
log_notify_.wait_for(l, 2s);
} else {
events.insert(events.end(), event_queue_.begin(), event_queue_.end());
event_queue_.clear();
}
}
while (not events.empty()) {
auto event = events.front();
events.pop_front();
if (event->get_event_level() <= event_level_) {
const std::string msg = ([&]() -> std::string {
struct tm local_time {};
utils::get_local_time_now(local_time);
std::stringstream ss;
ss << std::put_time(&local_time, "%F %T") << "|"
<< event_level_to_string(event->get_event_level()).c_str() << "|"
<< event->get_single_line().c_str() << std::endl;
return ss.str();
})();
check_log_roll(msg.length());
auto retry = true;
for (int i = 0; retry && (i < 2); i++) {
retry = (not log_file_ || (fwrite(&msg[0], 1, msg.length(),
log_file_) != msg.length()));
if (retry) {
reopen_log_file();
}
}
if (log_file_) {
fflush(log_file_);
}
}
}
} while (logging_active_);
}
void logging_consumer::process_event(const event &event) {
{
mutex_lock l(log_mutex_);
event_queue_.push_back(event.clone());
log_notify_.notify_all();
}
}
void logging_consumer::reopen_log_file() {
close_log_file();
#if defined(_WIN32)
log_file_ = _fsopen(&log_path_[0], "a+", _SH_DENYWR);
#else
log_file_ = fopen(&log_path_[0], "a+");
#endif
}
} // namespace repertory

View File

@ -26,34 +26,49 @@
namespace repertory {
auto event_level_from_string(std::string level) -> event_level {
level = utils::string::to_lower(level);
if (level == "critical" || level == "event_level::critical") {
return event_level::critical;
}
if (level == "debug" || level == "event_level::debug") {
return event_level::debug;
} else if (level == "warn" || level == "event_level::warn") {
return event_level::warn;
} else if (level == "normal" || level == "event_level::normal") {
return event_level::normal;
} else if (level == "error" || level == "event_level::error") {
return event_level::error;
} else if (level == "verbose" || level == "event_level::verbose") {
return event_level::verbose;
}
return event_level::normal;
if (level == "warn" || level == "event_level::warn") {
return event_level::warn;
}
if (level == "info" || level == "event_level::info") {
return event_level::info;
}
if (level == "error" || level == "event_level::error") {
return event_level::error;
}
if (level == "trace" || level == "event_level::trace") {
return event_level::trace;
}
return event_level::info;
}
auto event_level_to_string(const event_level &level) -> std::string {
auto event_level_to_string(event_level level) -> std::string {
switch (level) {
case event_level::critical:
return "critical";
case event_level::debug:
return "debug";
case event_level::error:
return "error";
case event_level::normal:
return "normal";
case event_level::info:
return "info";
case event_level::warn:
return "warn";
case event_level::verbose:
return "verbose";
case event_level::trace:
return "trace";
default:
return "normal";
return "info";
}
}
} // namespace repertory

View File

@ -41,10 +41,14 @@
#include <sqlite3.h>
#endif // defined(PROJECT_ENABLE_SQLITE)
#include "spdlog/spdlog.h"
#include "initialize.hpp"
namespace repertory {
auto project_initialize() -> bool {
spdlog::flush_every(std::chrono::seconds(10));
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
{
static constexpr const auto guard_size{4096U};

View File

@ -744,8 +744,6 @@ auto encrypt_provider::process_directory_entry(
current_encrypted_path = utils::path::create_api_path(
current_encrypted_path + '/' + encrypted_parts.at(current_idx++));
std::cout << current_source_path << ':' << current_encrypted_path
<< std::endl;
}
return current_encrypted_path;
@ -883,6 +881,7 @@ auto encrypt_provider::read_file_bytes(const std::string &api_path,
file_data["original_file_size"] = file_size;
file_data["iv_list"] = iv_list;
auto ins_res = db::db_insert{*db_, file_table}
.or_replace()
.column_value("source_path", source_path)
.column_value("data", file_data.dump())
.go();

View File

@ -56,13 +56,11 @@ auto create_provider(const provider_type &prov,
config.get_host_config());
return std::make_unique<sia_provider>(config, *comm);
}
#if defined(PROJECT_ENABLE_S3)
case provider_type::s3: {
create_comm<i_http_comm, curl_comm, s3_config>(comm,
config.get_s3_config());
return std::make_unique<s3_provider>(config, *comm);
}
#endif // defined(PROJECT_ENABLE_S3)
case provider_type::encrypt: {
return std::make_unique<encrypt_provider>(config);
}

View File

@ -19,8 +19,6 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#if defined(PROJECT_ENABLE_S3)
#include "providers/s3/s3_provider.hpp"
#include "app_config.hpp"
@ -887,5 +885,3 @@ auto s3_provider::upload_file_impl(const std::string &api_path,
return api_error::success;
}
} // namespace repertory
#endif // PROJECT_ENABLE_S3

View File

@ -52,11 +52,9 @@ void get_api_authentication_data(std::string &user, std::string &password,
[[nodiscard]] auto
get_provider_type_from_args(std::vector<const char *> args) -> provider_type {
#if defined(PROJECT_ENABLE_S3)
if (has_option(args, options::s3_option)) {
return provider_type::s3;
}
#endif // defined(PROJECT_ENABLE_S3)
if (has_option(args, options::remote_mount_option)) {
return provider_type::remote;
}
@ -138,12 +136,9 @@ auto parse_drive_options(
if ((std::string(args.at(i)) == options::remote_mount_option.at(0U)) ||
(std::string(args.at(i)) == options::remote_mount_option.at(1U)) ||
(std::string(args.at(i)) == options::data_directory_option.at(0U)) ||
(std::string(args.at(i)) == options::data_directory_option.at(1U))
#if defined(PROJECT_ENABLE_S3)
|| (std::string(args.at(i)) == options::name_option.at(0U)) ||
(std::string(args.at(i)) == options::name_option.at(1U))
#endif // PROJECT_ENABLE_S3
) {
(std::string(args.at(i)) == options::data_directory_option.at(1U)) ||
(std::string(args.at(i)) == options::name_option.at(0U)) ||
(std::string(args.at(i)) == options::name_option.at(1U))) {
i++;
continue;
}
@ -164,12 +159,10 @@ auto parse_drive_options(
const auto fuse_option_list = utils::string::split(options, ',');
for (const auto &fuse_option : fuse_option_list) {
#if defined(PROJECT_ENABLE_S3)
if (fuse_option.find("s3") == 0) {
prov = provider_type::s3;
continue;
}
#endif // defined(PROJECT_ENABLE_S3)
if ((fuse_option.find("dd") == 0) ||
(fuse_option.find("data_directory") == 0)) {
const auto data = utils::string::split(fuse_option, '=');

View File

@ -38,12 +38,12 @@ void polling::frequency_thread(
if (kv.second.freq == freq) {
futures.emplace_back(
std::async(std::launch::async, [this, &freq, kv]() -> void {
if (config_->get_event_level() == event_level::verbose ||
if (config_->get_event_level() == event_level::trace ||
freq != frequency::second) {
event_system::instance().raise<polling_item_begin>(kv.first);
}
kv.second.action();
if (config_->get_event_level() == event_level::verbose ||
if (config_->get_event_level() == event_level::trace ||
freq != frequency::second) {
event_system::instance().raise<polling_item_end>(kv.first);
}

View File

@ -36,13 +36,11 @@ template <typename drive> inline void help(std::vector<const char *> args) {
std::cout << " -di,--drive_information Display mounted drive "
"information"
<< std::endl;
#if defined(PROJECT_ENABLE_S3)
std::cout << " -s3,--s3 Enables S3 mode"
<< std::endl;
std::cout << " -na,--name Unique name for S3 "
"instance [Required]"
<< std::endl;
#endif // defined(PROJECT_ENABLE_S3)
std::cout
<< " -gc,--generate_config Generate initial configuration"
<< std::endl;
@ -72,11 +70,9 @@ template <typename drive> inline void help(std::vector<const char *> args) {
std::cout << " -pw,--password Specify API password"
<< std::endl;
#if !defined(_WIN32)
#if defined(PROJECT_ENABLE_S3)
std::cout << " -o s3 Enables S3 mode for "
"'fstab' mounts"
<< std::endl;
#endif // defined(PROJECT_ENABLE_S3)
#endif // _WIN32
std::cout << " -set,--set [name] [value] Set configuration value"
<< std::endl;

View File

@ -92,7 +92,6 @@ auto main(int argc, char **argv) -> int {
}
}
#if defined(PROJECT_ENABLE_S3)
if ((res == exit_code::success) && (prov == provider_type::s3)) {
std::string data;
res = utils::cli::parse_string_option(
@ -111,7 +110,6 @@ auto main(int argc, char **argv) -> int {
}
}
}
#endif // PROJECT_ENABLE_S3
int mount_result{};
if (res == exit_code::success) {

View File

@ -47,7 +47,6 @@ protected:
void SetUp() override {
if (PROVIDER_INDEX != 0) {
if (PROVIDER_INDEX == 1) {
#if defined(PROJECT_ENABLE_S3)
EXPECT_TRUE(utils::file::delete_directory_recursively(
"./winfsp_test" + std::to_string(PROVIDER_INDEX)));
@ -79,14 +78,13 @@ protected:
.empty());
EXPECT_FALSE(
config->set_value_by_name("S3Config.Bucket", "repertory").empty());
config->set_event_level(event_level::verbose);
config->set_event_level(event_level::trace);
config->set_enable_drive_events(true);
event_system::instance().start();
comm = std::make_unique<curl_comm>(config->get_s3_config());
provider = std::make_unique<s3_provider>(*config, *comm);
drive = std::make_unique<winfsp_drive>(*config, lock_data_, *provider);
#endif
return;
}

View File

@ -535,23 +535,19 @@ TEST(fuse_drive, all_tests) {
switch (idx) {
case 0U: {
#if defined(PROJECT_ENABLE_S3)
config_ptr =
std::make_unique<app_config>(provider_type::s3, cfg_directory);
{
app_config src_cfg(provider_type::s3,
utils::path::combine(get_test_dir(), {"storj"}));
config_ptr->set_enable_drive_events(true);
config_ptr->set_event_level(event_level::verbose);
config_ptr->set_event_level(event_level::trace);
config_ptr->set_s3_config(src_cfg.get_s3_config());
}
comm_ptr = std::make_unique<curl_comm>(config_ptr->get_s3_config());
provider_ptr = std::make_unique<s3_provider>(*config_ptr, *comm_ptr);
drive_args = std::vector<std::string>({"-s3", "-na", "storj"});
#else
continue;
#endif
} break;
case 1U: {

View File

@ -602,6 +602,7 @@ static void run_tests(const app_config &cfg, i_provider &provider) {
get_file_fails_if_file_not_found(provider);
get_file_fails_if_item_is_directory(cfg, provider);
// TODO need to test read when file size changes for encrypt provider
/* get_file_list(provider);
get_file_size(provider);
get_filesystem_item(provider);
@ -669,7 +670,6 @@ TEST(providers, encrypt_provider) {
event_system::instance().stop();
}
#if defined(PROJECT_ENABLE_S3)
TEST(providers, s3_provider) {
const auto config_path =
utils::path::combine(get_test_dir(), {"s3_provider"});
@ -707,7 +707,6 @@ TEST(providers, s3_provider) {
}
event_system::instance().stop();
}
#endif
TEST(providers, sia_provider) {
const auto config_path =

View File

@ -926,7 +926,7 @@ TEST(remote_fuse, all_tests) {
config.set_remote_port(port);
config.set_remote_token("testtoken");
config.set_enable_drive_events(true);
config.set_event_level(event_level::verbose);
config.set_event_level(event_level::trace);
event_system::instance().start();
#if defined(_WIN32)

View File

@ -498,7 +498,7 @@ TEST(remote_winfsp, all_tests) {
config.set_remote_port(port);
config.set_remote_token("testtoken");
config.set_enable_drive_events(true);
config.set_event_level(event_level::verbose);
config.set_event_level(event_level::trace);
event_system::instance().start();
#if defined(_WIN32)

View File

@ -49,7 +49,7 @@ void launch_app(std::string cmd) {
EXPECT_EQ(0, code);
}
E_SIMPLE1(test_begin, normal, false, std::string, test_name, TN, E_STRING);
E_SIMPLE1(test_begin, info, false, std::string, test_name, TN, E_STRING);
#define TEST_HEADER(func) \
event_system::instance().raise<test_begin>( \
std::string(func) + \
@ -333,11 +333,9 @@ TEST_F(winfsp_test, all_tests) {
return;
}
#if !defined(PROJECT_ENABLE_S3)
if (PROVIDER_INDEX == 1U) {
return;
}
#endif
std::string mount_point;
const auto drive_args = mount_setup(mount_point);

View File

@ -1,18 +1,18 @@
#include <windows.h>
#define VER_FILEVERSION @PROJECT_MAJOR_VERSION@,@PROJECT_MINOR_VERSION@,@PROJECT_REVISION_VERSION@,@PROJECT_RELEASE_NUM@
#define VER_FILEVERSION_STR "@PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@-@PROJECT_GIT_REV@\0"
#define VER_FILEVERSION_STR "@PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@_@PROJECT_GIT_REV@\0"
#define VER_PRODUCTVERSION @PROJECT_MAJOR_VERSION@,@PROJECT_MINOR_VERSION@,@PROJECT_REVISION_VERSION@,@PROJECT_RELEASE_NUM@
#define VER_PRODUCTVERSION_STR "@PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@-@PROJECT_GIT_REV@\0"
#define VER_PRODUCTVERSION_STR "@PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@_@PROJECT_GIT_REV@\0"
#define VER_COMPANYNAME_STR "@PROJECT_COMPANY_NAME@\0"
#define VER_INTERNALNAME_STR "@PROJECT_NAME@ @PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@-@PROJECT_GIT_REV@\0"
#define VER_INTERNALNAME_STR "@PROJECT_NAME@ @PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@_@PROJECT_GIT_REV@\0"
#define VER_LEGALCOPYRIGHT_STR "@PROJECT_COPYRIGHT@\0"
#define VER_ORIGINALFILENAME_STR "@PROJECT_NAME@.exe\0"
#define VER_LEGALTRADEMARKS1_STR "\0"
#define VER_LEGALTRADEMARKS2_STR "\0"
#define VER_FILEDESCRIPTION_STR "@PROJECT_DESC@\0"
#define VER_PRODUCTNAME_STR "@PROJECT_NAME@ @PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@-@PROJECT_GIT_REV@\0"
#define VER_PRODUCTNAME_STR "@PROJECT_NAME@ @PROJECT_MAJOR_VERSION@.@PROJECT_MINOR_VERSION@.@PROJECT_REVISION_VERSION@-@PROJECT_RELEASE_ITER@_@PROJECT_GIT_REV@\0"
#ifdef DEBUG
#define VER_DEBUG VS_FF_DEBUG

View File

@ -1,11 +1,10 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
PROJECT_MINGW64_COPY_DEPENDENCIES=()
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1>/dev/null 2>&1
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1>/dev/null 2>&1
if [ "${PROJECT_IS_MINGW}" == "1" ] && [ "${PROJECT_STATIC_LINK}" == "OFF" ]; then
mkdir -p "${PROJECT_DIST_DIR}"

View File

@ -1,24 +1,23 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1 1>/dev/null 2>&1
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1 1>/dev/null 2>&1
if [ -f "${CURRENT_DIR}/cleanup.sh" ]; then
. "${CURRENT_DIR}/cleanup.sh" "$1" "$2" "$3" "$4" "$5"
rm ${CURRENT_DIR}/cleanup.*
if [ -f "${PROJECT_SCRIPTS_DIR}/cleanup.sh" ]; then
. "${PROJECT_SCRIPTS_DIR}/cleanup.sh" "$1" "$2" "$3" "$4" "$5"
rm ${PROJECT_SCRIPTS_DIR}/cleanup.*
fi
function create_containers() {
BUILD_TYPE=$1
for FILE in "${SOURCE_DIR}/docker/${BUILD_TYPE}/*"; do
for FILE in "${PROJECT_SOURCE_DIR}/docker/${BUILD_TYPE}/*"; do
DOCKER_CREATE_ONLY=1
DOCKER_NAME=$(basename ${FILE})
DOCKER_TAG=${PROJECT_NAME}:${DOCKER_NAME}
. "${CURRENT_DIR}/docker_common.sh"
. "${PROJECT_SCRIPTS_DIR}/docker_common.sh"
done
}

View File

@ -3,13 +3,9 @@
DEST_DIR=$1
DIST_DIR=$2
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
SOURCE_DIR=${CURRENT_DIR}/..
SOURCE_DIR=$(realpath ${SOURCE_DIR})
. "${CURRENT_DIR}/env.sh" "$3" "$4" "$5" "$6" "$7"
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$3" "$4" "$5" "$6" "$7"
function error_exit() {
echo $1
@ -21,7 +17,7 @@ if [ ! -d "${PROJECT_DIST_DIR}" ]; then
fi
if [ "${DIST_DIR}" == "" ]; then
DIST_DIR="${CURRENT_DIR}/../dist"
DIST_DIR="${PROJECT_DIST_DIR}"
fi
DIST_DIR=$(realpath "${DIST_DIR}")
@ -37,7 +33,7 @@ if [ ! -d "${DEST_DIR}" ]; then
error_exit "dest directory not found: ${DEST_DIR}" 2
fi
pushd "${SOURCE_DIR}"
pushd "${PROJECT_SOURCE_DIR}"
BRANCH=$(git branch --show-current)
RELEASE=$(grep set\(PROJECT_RELEASE_ITER ./config.sh | sed s/\)//g | awk '{print $2}')
popd
@ -48,15 +44,14 @@ else
DEST_DIR=${DEST_DIR}/nightly
fi
FILE_PART=${PROJECT_NAME}_${PROJECT_MAJOR_VERSION}.${PROJECT_MINOR_VERSION}.${PROJECT_REVISION_VERSION}-${PROJECT_RELEASE_ITER}-${PROJECT_GIT_REV}_${PROJECT_BUILD_ARCH}_${PROJECT_OS}
echo ${DEST_DIR}
pushd "${DIST_DIR}"
cp -f ./${FILE_PART}.tar.gz ${DEST_DIR} || error_exit "failed to deliver file: ${FILE_PART}.tar.gz" 1
cp -f ./${FILE_PART}.tar.gz.sha256 ${DEST_DIR} || error_exit "failed to deliver file: ${FILE_PART}.tar.gz.sha256" 1
cp -f ./${PROJECT_OUT_FILE} ${DEST_DIR} || error_exit "failed to deliver file: ${PROJECT_OUT_FILE}" 1
cp -f ./${PROJECT_OUT_FILE}.sha256 ${DEST_DIR} || error_exit "failed to deliver file: ${PROJECT_OUT_FILE}.sha256" 1
if [ "${PROJECT_PRIVATE_KEY}" != "" ]; then
cp -f ./${FILE_PART}.tar.gz.sig ${DEST_DIR} || error_exit "failed to deliver file: ${FILE_PART}.tar.gz.sig" 1
cp -f ./${PROJECT_OUT_FILE}.sig ${DEST_DIR} || error_exit "failed to deliver file: ${PROJECT_OUT_FILE}.sig" 1
fi
popd
error_exit "delivered ${FILE_PART}" 0
error_exit "delivered ${PROJECT_FILE_PART}" 0

View File

@ -18,8 +18,8 @@ if [ "${DOCKER_CREATE_ONLY}" != "1" ]; then
docker rm ${DOCKER_CONTAINER}
fi
pushd "${SOURCE_DIR}/support"
cp -f ${SOURCE_DIR}/docker/${PROJECT_BUILD_ARCH}/${DOCKER_NAME} Dockerfile
pushd "${PROJECT_SOURCE_DIR}/support"
cp -f ${PROJECT_SOURCE_DIR}/docker/${PROJECT_BUILD_ARCH}/${DOCKER_NAME} Dockerfile
docker build ${APP_VERSION_BUILD_ARGS} \
--build-arg NUM_JOBS=${NUM_JOBS} \
-t ${DOCKER_TAG} . || exit 1
@ -31,8 +31,8 @@ if [ "${DOCKER_CREATE_ONLY}" != "1" ]; then
--env MY_NUM_JOBS=${NUM_JOBS} \
--name ${DOCKER_CONTAINER} \
-u $(id -u):$(id -g) \
-v ${SOURCE_DIR}:${SOURCE_DIR}:rw,z \
-w ${SOURCE_DIR} \
-v ${PROJECT_SOURCE_DIR}:${PROJECT_SOURCE_DIR}:rw,z \
-w ${PROJECT_SOURCE_DIR} \
${DOCKER_TAG} || exit 1
if [ "${DOCKER_SHELL}" == "1" ]; then
@ -41,7 +41,7 @@ if [ "${DOCKER_CREATE_ONLY}" != "1" ]; then
docker exec \
${DOCKER_CONTAINER} \
/bin/bash -c \
"${SOURCE_DIR}/scripts/make_common.sh \"${PROJECT_BUILD_ARCH}\" \"${PROJECT_CMAKE_BUILD_TYPE}\" \"${PROJECT_BUILD_CLEAN}\" ${IS_MINGW} ${IS_UNIX}" || exit 1
"${PROJECT_SOURCE_DIR}/scripts/make_common.sh \"${PROJECT_BUILD_ARCH}\" \"${PROJECT_CMAKE_BUILD_TYPE}\" \"${PROJECT_BUILD_CLEAN}\" ${IS_MINGW} ${IS_UNIX}" || exit 1
fi
docker stop ${DOCKER_CONTAINER}

View File

@ -7,11 +7,8 @@ PROJECT_IS_MINGW=$4
PROJECT_IS_MINGW_UNIX=$5
DISABLE_CREATE_DIRS=$6
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
SOURCE_DIR=${CURRENT_DIR}/..
SOURCE_DIR=$(realpath ${SOURCE_DIR})
PROJECT_SOURCE_DIR=${PROJECT_SCRIPTS_DIR}/..
PROJECT_SOURCE_DIR=$(realpath ${PROJECT_SOURCE_DIR})
NUM_JOBS=${MY_NUM_JOBS}
if [[ -z "${NUM_JOBS}" ]]; then
@ -24,11 +21,11 @@ if [[ -z "${NUM_JOBS}" ]]; then
fi
fi
pushd "${SOURCE_DIR}"
pushd "${PROJECT_SOURCE_DIR}"
PROJECT_GIT_REV=$(git rev-parse --short HEAD)
. "${CURRENT_DIR}/options.sh"
. "${PROJECT_SCRIPTS_DIR}/options.sh"
for PROJECT_LIBRARY in "${PROJECT_LIBRARIES[@]}"; do
ENABLE_NAME=PROJECT_ENABLE_${PROJECT_LIBRARY}
@ -61,7 +58,7 @@ if [ "${PROJECT_IS_MINGW_UNIX}" == "" ]; then
PROJECT_IS_MINGW_UNIX=0
fi
. "${SOURCE_DIR}/config.sh"
. "${PROJECT_SOURCE_DIR}/config.sh"
if [ "${PROJECT_ENABLE_SFML}" == "ON" ]; then
PROJECT_ENABLE_FLAC=ON
@ -120,6 +117,12 @@ elif [ "${PROJECT_BUILD_ARCH}" == "aarch64" ]; then
PROJECT_IS_ARM64=1
fi
if [ "${PROJECT_BUILD_ARCH}" == "x86_64" ]; then
PROJECT_BUILD_ARCH2="x86-64"
else
PROJECT_BUILD_ARCH2="${PROJECT_BUILD_ARCH}"
fi
if [ "${PROJECT_IS_MINGW}" == "1" ] && [ "${PROJECT_IS_MINGW_UNIX}" != "1" ]; then
MSYS=winsymlinks:nativestrict
fi
@ -143,8 +146,8 @@ else
PROJECT_REQUIRE_ALPINE=OFF
fi
if [ -f "${SOURCE_DIR}/cmake/versions.cmake" ]; then
VERSIONS=($(sed -e s/\ /=/g -e s/set\(//g -e s/\)//g "${SOURCE_DIR}/cmake/versions.cmake"))
if [ -f "${PROJECT_SOURCE_DIR}/cmake/versions.cmake" ]; then
VERSIONS=($(sed -e s/\ /=/g -e s/set\(//g -e s/\)//g "${PROJECT_SOURCE_DIR}/cmake/versions.cmake"))
PROJECT_MINGW64_DOCKER_BUILD_ARGS=()
@ -159,8 +162,8 @@ if [ -f "${SOURCE_DIR}/cmake/versions.cmake" ]; then
PROJECT_MINGW64_DOCKER_BUILD_ARGS=${PROJECT_MINGW64_DOCKER_BUILD_ARGS[*]}
fi
PROJECT_BUILD_DIR=${SOURCE_DIR}/build/${PROJECT_CMAKE_BUILD_TYPE_LOWER}/${PROJECT_LINK_TYPE}
PROJECT_DIST_DIR=${SOURCE_DIR}/dist/${PROJECT_CMAKE_BUILD_TYPE_LOWER}/${PROJECT_LINK_TYPE}
PROJECT_BUILD_DIR=${PROJECT_SOURCE_DIR}/build/${PROJECT_CMAKE_BUILD_TYPE_LOWER}/${PROJECT_LINK_TYPE}
PROJECT_DIST_DIR=${PROJECT_SOURCE_DIR}/dist/${PROJECT_CMAKE_BUILD_TYPE_LOWER}/${PROJECT_LINK_TYPE}
if [ "${PROJECT_IS_MINGW}" == "1" ]; then
PROJECT_DIST_DIR=${PROJECT_DIST_DIR}/win32
@ -181,7 +184,7 @@ fi
PROJECT_DIST_DIR=${PROJECT_DIST_DIR}/${PROJECT_NAME}
PROJECT_EXTERNAL_BUILD_ROOT=${PROJECT_BUILD_DIR}/deps
PROJECT_3RD_PARTY_DIR=${SOURCE_DIR}/support/3rd_party
PROJECT_3RD_PARTY_DIR=${PROJECT_SOURCE_DIR}/support/3rd_party
if [ "${PROJECT_ENABLE_OPENSSL}" == "ON" ]; then
if [ "${PROJECT_IS_MINGW}" == "1" ] && [ "${PROJECT_IS_MINGW_UNIX}" == "1" ]; then
@ -242,11 +245,14 @@ fi
PATH="${PROJECT_EXTERNAL_BUILD_ROOT}/bin:${PATH}"
if [ "${PROJECT_IS_MINGW}" == "1" ]; then
PROJECT_OS=win32
PROJECT_OS=windows
else
PROJECT_OS=linux
fi
PROJECT_FILE_PART=${PROJECT_NAME}_${PROJECT_MAJOR_VERSION}.${PROJECT_MINOR_VERSION}.${PROJECT_REVISION_VERSION}-${PROJECT_RELEASE_ITER}_${PROJECT_GIT_REV}_${PROJECT_OS}_${PROJECT_BUILD_ARCH2}
PROJECT_OUT_FILE=${PROJECT_FILE_PART}.tar.gz
export MSYS
export NUM_JOBS
export OPENSSL_ROOT_DIR
@ -255,6 +261,7 @@ export PKG_CONFIG_PATH
export PROJECT_3RD_PARTY_DIR
export PROJECT_APP_LIST
export PROJECT_BUILD_ARCH
export PROJECT_BUILD_ARCH2
export PROJECT_BUILD_CLEAN
export PROJECT_BUILD_DIR
export PROJECT_BUILD_SHARED_LIBS
@ -265,6 +272,7 @@ export PROJECT_COMPANY_NAME
export PROJECT_COPYRIGHT
export PROJECT_DESC
export PROJECT_DIST_DIR
export PROJECT_FILE_PART
export PROJECT_GIT_REV
export PROJECT_IS_ALPINE
export PROJECT_IS_ARM64
@ -278,17 +286,18 @@ export PROJECT_MINOR_VERSION
export PROJECT_MSYS2_PACKAGE_LIST
export PROJECT_NAME
export PROJECT_OS
export PROJECT_OUT_FILE
export PROJECT_PRIVATE_KEY
export PROJECT_PUBLIC_KEY
export PROJECT_RELEASE_ITER
export PROJECT_RELEASE_NUM
export PROJECT_REQUIRE_ALPINE
export PROJECT_REVISION_VERSION
export PROJECT_SOURCE_DIR
export PROJECT_STATIC_LINK
export PROJECT_TOOLCHAIN_FILE_CMAKE
export PROJECT_TOOLCHAIN_FILE_MESON
export PROJECT_URL
export SOURCE_DIR
for PROJECT_LIBRARY in "${PROJECT_LIBRARIES[@]}"; do
ENABLE_NAME=PROJECT_ENABLE_${PROJECT_LIBRARY}
@ -300,6 +309,7 @@ done
echo "-=[Settings]=-"
echo " App list: ${PROJECT_APP_LIST[*]}"
echo " Build arch: ${PROJECT_BUILD_ARCH}"
echo " Build arch2: ${PROJECT_BUILD_ARCH2}"
echo " Build clean: ${PROJECT_BUILD_CLEAN}"
echo " Build dir: ${PROJECT_BUILD_DIR}"
echo " Build shared libraries: ${PROJECT_BUILD_SHARED_LIBS}"
@ -311,6 +321,7 @@ echo " Copyright: ${PROJECT_COPYRIGHT}"
echo " Description: ${PROJECT_DESC}"
echo " Dist dir: ${PROJECT_DIST_DIR}"
echo " External build root: ${PROJECT_EXTERNAL_BUILD_ROOT}"
echo " File part: ${PROJECT_FILE_PART}"
echo " Is ARM64: ${PROJECT_IS_ARM64}"
echo " Is Alpine: ${PROJECT_IS_ALPINE}"
echo " Is MINGW on Unix: ${PROJECT_IS_MINGW_UNIX}"
@ -322,6 +333,7 @@ if [ "${PROJECT_IS_MINGW}" == "1" ] && [ "${PROJECT_IS_MINGW_UNIX}" == "1" ]; th
echo " MinGW docker build args: ${PROJECT_MINGW64_DOCKER_BUILD_ARGS}"
fi
echo " OPENSSL_ROOT_DIR: ${OPENSSL_ROOT_DIR}"
echo " Out file: ${PROJECT_OUT_FILE}"
echo " PATH: ${PATH}"
echo " PKG_CONFIG_PATH: ${PKG_CONFIG_PATH}"
echo " Require Alpine: ${PROJECT_REQUIRE_ALPINE}"

View File

@ -1,9 +1,5 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
SOURCE_DIR=${CURRENT_DIR}/..
SOURCE_DIR=$(realpath ${SOURCE_DIR})
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1

View File

@ -1,9 +1,8 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5"
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5"
mkdir -p "${PROJECT_BUILD_DIR}/build"
@ -12,8 +11,8 @@ if [ -d "${PROJECT_DIST_DIR}" ]; then
mkdir -p "${PROJECT_DIST_DIR}"
fi
if [ -f "${SOURCE_DIR}/pre_build.sh" ]; then
"${SOURCE_DIR}/pre_build.sh"
if [ -f "${PROJECT_SOURCE_DIR}/pre_build.sh" ]; then
"${PROJECT_SOURCE_DIR}/pre_build.sh"
fi
if [ "${PROJECT_IS_MINGW}" == "1" ]; then
@ -21,7 +20,7 @@ if [ "${PROJECT_IS_MINGW}" == "1" ]; then
fi
pushd "${PROJECT_BUILD_DIR}"
cmake "${SOURCE_DIR}" \
cmake "${PROJECT_SOURCE_DIR}" \
-G"Unix Makefiles" \
-DPROJECT_COMPANY_NAME="${PROJECT_COMPANY_NAME}" \
-DPROJECT_COPYRIGHT="${PROJECT_COPYRIGHT}" \
@ -65,12 +64,12 @@ for APP in ${PROJECT_APP_LIST[@]}; do
rsync -av --progress "${PROJECT_BUILD_DIR}/build/${APP}${APP_BINARY_EXT}" "${PROJECT_DIST_DIR}/bin/"
cat <<EOF >>"${PROJECT_DIST_DIR}/${APP}${APP_BINARY_EXT}"
#!/bin/sh
CURRENT_DIR=\$(dirname "\$0")
CURRENT_DIR=\$(realpath \${CURRENT_DIR})
PROJECT_SCRIPTS_DIR=\$(dirname "\$0")
PROJECT_SCRIPTS_DIR=\$(realpath \${PROJECT_SCRIPTS_DIR})
export LD_LIBRARY_PATH="\${CURRENT_DIR}/lib:\${CURRENT_DIR}/lib64:\${LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="\${PROJECT_SCRIPTS_DIR}/lib:\${PROJECT_SCRIPTS_DIR}/lib64:\${LD_LIBRARY_PATH}"
\${CURRENT_DIR}/bin/${APP}${APP_BINARY_EXT} \$*
\${PROJECT_SCRIPTS_DIR}/bin/${APP}${APP_BINARY_EXT} \$*
EOF
chmod +x "${PROJECT_DIST_DIR}/${APP}${APP_BINARY_EXT}"
else
@ -82,26 +81,11 @@ EOF
done
if [ "${PROJECT_IS_MINGW}" == "1" ]; then
. "${CURRENT_DIR}/copy_mingw64_deps.sh" "$1" "$2" "$3" "$4" "$5"
. "${PROJECT_SCRIPTS_DIR}/copy_mingw64_deps.sh" "$1" "$2" "$3" "$4" "$5"
fi
ln -sf "${PROJECT_BUILD_DIR}/build/compile_commands.json" "${SOURCE_DIR}/compile_commands.json"
ln -sf "${PROJECT_BUILD_DIR}/build/compile_commands.json" "${PROJECT_SOURCE_DIR}/compile_commands.json"
if [ -f "${SOURCE_DIR}/post_build.sh" ]; then
"${SOURCE_DIR}/post_build.sh"
if [ -f "${PROJECT_SOURCE_DIR}/post_build.sh" ]; then
"${PROJECT_SOURCE_DIR}/post_build.sh"
fi
pushd "${PROJECT_DIST_DIR}"
IFS=$'\n'
set -f
FILE_LIST=$(find . -type f)
for FILE in ${FILE_LIST}; do
sha256sum ${FILE} >${FILE}.sha256
if [ "${PROJECT_PRIVATE_KEY}" != "" ]; then
openssl dgst -sha256 -sign "${PROJECT_PRIVATE_KEY}" -out "${FILE}.sig" "${FILE}"
openssl dgst -sha256 -verify "${PROJECT_PUBLIC_KEY}" -signature "${FILE}.sig" "${FILE}" || exit 1
fi
done
unset IFS
set +f
popd

View File

@ -2,11 +2,11 @@
TEMP_DIR=$(mktemp -d)
DIST_DIR=$1
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
DEST_DIR=$1
. "${CURRENT_DIR}/env.sh" "$2" "$3" "$4" "$5" "$6"
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$2" "$3" "$4" "$5" "$6"
function error_exit() {
echo $1
@ -14,34 +14,56 @@ function error_exit() {
exit $2
}
function create_file_validations() {
local SOURCE_FILE=$1
sha256sum ${SOURCE_FILE} >${SOURCE_FILE}.sha256 || error_exit "failed to create sha256 for file: ${SOURCE_FILE}" 1
if [ "${PROJECT_PRIVATE_KEY}" != "" ]; then
openssl dgst -sha256 -sign "${PROJECT_PRIVATE_KEY}" -out "${SOURCE_FILE}.sig" "${SOURCE_FILE}" || error_exit "failed to create signature for file: ${SOURCE_FILE}" 1
openssl dgst -sha256 -verify "${PROJECT_PUBLIC_KEY}" -signature "${SOURCE_FILE}.sig" "${SOURCE_FILE}" || error_exit "failed to validate signature for file: ${SOURCE_FILE}" 1
fi
}
if [ ! -d "${PROJECT_DIST_DIR}" ]; then
error_exit "dist directory not found: ${PROJECT_DIST_DIR}" 2
fi
if [ "${DIST_DIR}" == "" ]; then
DIST_DIR="${CURRENT_DIR}/../dist"
if [ "${DEST_DIR}" == "" ]; then
DEST_DIR="${PROJECT_DIST_DIR}"
fi
DIST_DIR=$(realpath "${DIST_DIR}")
DEST_DIR=$(realpath "${DEST_DIR}")
if [ ! -d "${DIST_DIR}" ]; then
error_exit "dest directory not found: ${DIST_DIR}" 1
if [ ! -d "${DEST_DIR}" ]; then
error_exit "dest directory not found: ${DEST_DIR}" 1
fi
PACKAGE_ROOT="${PROJECT_DIST_DIR}/.."
PACKAGE_ROOT=$(realpath "${PACKAGE_ROOT}")
rsync -av --progress ${PROJECT_DIST_DIR}/ ${TEMP_DIR}/${PROJECT_NAME}/ || error_exit "failed to rsync" 1
FILE_PART=${PROJECT_NAME}_${PROJECT_MAJOR_VERSION}.${PROJECT_MINOR_VERSION}.${PROJECT_REVISION_VERSION}-${PROJECT_RELEASE_ITER}-${PROJECT_GIT_REV}_${PROJECT_BUILD_ARCH}_${PROJECT_OS}
pushd "${DIST_DIR}"
OUT_FILE=${FILE_PART}.tar.gz
tar cvzf "${OUT_FILE}" -C ${TEMP_DIR} . || error_exit "failed to create archive" 1
sha256sum "${OUT_FILE}" >"${OUT_FILE}.sha256" || error_exit "failed to create sha256 file" 1
if [ "${PROJECT_PRIVATE_KEY}" != "" ]; then
openssl dgst -sha256 -sign "${PROJECT_PRIVATE_KEY}" -out "${OUT_FILE}.sig" "${OUT_FILE}" || error_exit "failed to create signature file" 1
openssl dgst -sha256 -verify "${PROJECT_PUBLIC_KEY}" -signature "${OUT_FILE}.sig" "${OUT_FILE}" || error_exit "failed to validate signature file" 1
pushd "${DEST_DIR}"
if [ -f "${PROJECT_OUT_FILE}" ]; then
rm -f "${PROJECT_OUT_FILE}" || error_exit "failed to delete file: ${PROJECT_OUT_FILE}" 1
fi
if [ -f "${PROJECT_OUT_FILE}.sha256" ]; then
rm -f "${PROJECT_OUT_FILE}.sha256" || error_exit "failed to delete file: ${PROJECT_OUT_FILE}.sha256" 1
fi
if [ -f "${PROJECT_OUT_FILE}.sig" ]; then
rm -f "${PROJECT_OUT_FILE}.sig" || error_exit "failed to delete file: ${PROJECT_OUT_FILE}.sig" 1
fi
popd
error_exit "created package ${FILE_PART}" 0
rsync -av --progress ${PROJECT_DIST_DIR}/ ${TEMP_DIR}/${PROJECT_NAME}/ || error_exit "failed to rsync" 1
pushd "${TEMP_DIR}/${PROJECT_NAME}/"
IFS=$'\n'
set -f
FILE_LIST=$(find . -type f)
for FILE in ${FILE_LIST}; do
create_file_validations "${FILE}"
done
unset IFS
set +f
popd
pushd "${DEST_DIR}"
tar cvzf "${PROJECT_OUT_FILE}" -C ${TEMP_DIR} . || error_exit "failed to create archive: ${PROJECT_OUT_FILE}" 1
create_file_validations "${PROJECT_OUT_FILE}"
popd
error_exit "created package ${PROJECT_FILE_PART}" 0

View File

@ -1,13 +1,12 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" 0 0
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" 0 0
if [ -f "${CURRENT_DIR}/cleanup.sh" ]; then
. "${CURRENT_DIR}/cleanup.sh" "$1" "$2" "$3" 0 0
rm ${CURRENT_DIR}/cleanup.*
if [ -f "${PROJECT_SCRIPTS_DIR}/cleanup.sh" ]; then
. "${PROJECT_SCRIPTS_DIR}/cleanup.sh" "$1" "$2" "$3" 0 0
rm ${PROJECT_SCRIPTS_DIR}/cleanup.*
fi
if [ "${PROJECT_REQUIRE_ALPINE}" == "ON" ]; then
@ -15,7 +14,7 @@ if [ "${PROJECT_REQUIRE_ALPINE}" == "ON" ]; then
DOCKER_CONTAINER=${PROJECT_NAME}_${DOCKER_NAME}_${PROJECT_BUILD_ARCH}
DOCKER_TAG=${PROJECT_NAME}:${DOCKER_NAME}
. "${CURRENT_DIR}/docker_common.sh" || exit 1
. "${PROJECT_SCRIPTS_DIR}/docker_common.sh" || exit 1
else
"${SOURCE_DIR}/scripts/make_common.sh" "${PROJECT_BUILD_ARCH}" "${PROJECT_CMAKE_BUILD_TYPE}" "${PROJECT_BUILD_CLEAN}" 0 0 || exit 1
"${PROJECT_SOURCE_DIR}/scripts/make_common.sh" "${PROJECT_BUILD_ARCH}" "${PROJECT_CMAKE_BUILD_TYPE}" "${PROJECT_BUILD_CLEAN}" 0 0 || exit 1
fi

View File

@ -1,17 +1,16 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" 1 1
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" 1 1
if [ -f "${CURRENT_DIR}/cleanup.sh" ]; then
. "${CURRENT_DIR}/cleanup.sh" "$1" "$2" "$3" 1 1
rm ${CURRENT_DIR}/cleanup.*
if [ -f "${PROJECT_SCRIPTS_DIR}/cleanup.sh" ]; then
. "${PROJECT_SCRIPTS_DIR}/cleanup.sh" "$1" "$2" "$3" 1 1
rm ${PROJECT_SCRIPTS_DIR}/cleanup.*
fi
DOCKER_NAME=mingw64
DOCKER_CONTAINER=${PROJECT_NAME}_${DOCKER_NAME}
DOCKER_TAG=${PROJECT_NAME}:${DOCKER_NAME}
. "${CURRENT_DIR}/docker_common.sh" || exit 1
. "${PROJECT_SCRIPTS_DIR}/docker_common.sh" || exit 1

View File

@ -8,6 +8,7 @@ PROJECT_LIBRARIES=(
LIBSODIUM
OPENSSL
PUGIXML
SPDLOG
SQLITE
STDUUID
TESTING
@ -21,6 +22,7 @@ PROJECT_CLEANUP[JSON]="json-*"
PROJECT_CLEANUP[LIBSODIUM]="libsodium*"
PROJECT_CLEANUP[OPENSSL]="openssl-*"
PROJECT_CLEANUP[PUGIXML]="pugixml-*"
PROJECT_CLEANUP[SPDLOG]="spdlog-*"
PROJECT_CLEANUP[SQLITE]="sqlite-*"
PROJECT_CLEANUP[STDUUID]="stduuid-*"
PROJECT_CLEANUP[TESTING]="googletest-*"

View File

@ -1,15 +1,18 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
DOCKER_NAME=$1
shift
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1 1>/dev/null 2>&1
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$2" "$3" "$4" "$5" "$6" 1 1>/dev/null 2>&1
if [ -f "${PROJECT_SCRIPTS_DIR}/cleanup.sh" ]; then
. "${PROJECT_SCRIPTS_DIR}/cleanup.sh" "$2" "$3" "$4" "$5" "$6"
rm ${PROJECT_SCRIPTS_DIR}/cleanup.*
fi
DOCKER_CONTAINER=${PROJECT_NAME}_${DOCKER_NAME}_${PROJECT_BUILD_ARCH}_shell
DOCKER_TAG=${PROJECT_NAME}:${DOCKER_NAME}
DOCKER_SHELL=1
. "${CURRENT_DIR}/docker_common.sh" || exit 1
. "${PROJECT_SCRIPTS_DIR}/docker_common.sh" || exit 1

View File

@ -1,11 +1,10 @@
#!/bin/bash
CURRENT_DIR=$(dirname "$0")
CURRENT_DIR=$(realpath ${CURRENT_DIR})
PROJECT_MSYS2_PACKAGE_LIST=()
. "${CURRENT_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1 1> /dev/null 2>&1
PROJECT_SCRIPTS_DIR=$(dirname "$0")
PROJECT_SCRIPTS_DIR=$(realpath ${PROJECT_SCRIPTS_DIR})
. "${PROJECT_SCRIPTS_DIR}/env.sh" "$1" "$2" "$3" "$4" "$5" 1 1>/dev/null 2>&1
PROJECT_MSYS2_PACKAGE_LIST+=(
mingw64/mingw-w64-x86_64-autotools

BIN
support/3rd_party/spdlog-1.14.1.tar.gz vendored Normal file

Binary file not shown.

View File

@ -0,0 +1 @@
1586508029a7d0670dfcb2d97575dcdc242d3868a259742b69f100801ab4e16b *spdlog-1.14.1.tar.gz