1047 Commits

Author SHA1 Message Date
8c9c7254b4 file mgr db unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-08 20:35:04 -06:00
55a88e7576 file mgr db unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-08 12:02:55 -06:00
9c9c7cdf8b file mgr db unit tests and fixes 2024-12-08 11:55:32 -06:00
fa72388a8b file mgr db unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-08 11:39:52 -06:00
9eb6a377fd file mgr db unit tests and fixes 2024-12-08 11:39:17 -06:00
7567e3289c refactor file manager db 2024-12-08 10:29:53 -06:00
f276356172 added medium frequency
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-07 13:53:17 -06:00
7d39fc0667 added medium frequency 2024-12-07 13:44:49 -06:00
56350c8704 added medium frequency 2024-12-07 13:44:03 -06:00
7bd31b1c0a added medium frequency 2024-12-07 13:40:11 -06:00
cf5bb87b6c refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-07 10:48:39 -06:00
31df328be7 added TaskWaitMillis to config
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-07 10:43:12 -06:00
ff8d037474 removed event
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-07 07:27:10 -06:00
031682051f refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-07 07:21:15 -06:00
0e8824a8d2 refactor 2024-12-07 07:13:13 -06:00
f0ddbe7a8c refactor 2024-12-07 07:12:52 -06:00
18c5948e3f use new tasks interface
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-06 17:40:26 -06:00
660bc28f0c refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-06 14:31:29 -06:00
9c5166b921 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-06 13:59:34 -06:00
4a7c76cc1c refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-06 13:55:05 -06:00
1e2fd53b86 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-05 13:59:40 -06:00
ab09407c3d refactor 2024-12-05 13:26:58 -06:00
9b2310a3e7 refactor 2024-12-05 12:40:41 -06:00
56a56e57c8 meta db unit tests and fixes 2024-12-05 11:09:16 -06:00
7908e7e982 meta db unit tests and fixes 2024-12-05 11:08:53 -06:00
92b32b838c meta db unit tests and fixes 2024-12-05 10:50:59 -06:00
65efa8590f meta db unit tests and fixes 2024-12-05 10:46:30 -06:00
d70aa968d6 meta db unit tests and fixes 2024-12-05 09:15:22 -06:00
591cd0ad2d meta db unit tests and fixes 2024-12-05 09:13:31 -06:00
1bdc78e5e0 meta db unit tests and fixes 2024-12-05 09:08:47 -06:00
7100708dfd meta db unit tests and fixes 2024-12-05 08:34:35 -06:00
08379ea622 meta db unit tests and fixes 2024-12-05 08:08:40 -06:00
161208a1fd meta db unit tests and fixes 2024-12-05 07:00:16 -06:00
3959067f22 meta db unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-04 18:40:50 -06:00
65096f60b1 updates
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-04 17:58:52 -06:00
4b32664e46 meta db unit tests and fixes 2024-12-04 14:53:01 -06:00
cf1ee8db02 meta db unit tests and fixes 2024-12-04 14:44:41 -06:00
89d4b4245d meta db unit tests and fixes 2024-12-04 14:21:03 -06:00
1d7d221da1 meta db unit tests and fixes 2024-12-04 13:56:06 -06:00
b4621f6a4e meta db unit tests and fixes 2024-12-04 13:51:41 -06:00
c6b895ced2 meta db unit tests and fixes 2024-12-04 12:16:42 -06:00
5f51a9384e meta db unit tests and fixes 2024-12-04 12:08:19 -06:00
88736fc58a meta db unit tests and fixes 2024-12-04 11:38:02 -06:00
be96d79281 added rocksdb meta db 2024-12-04 10:11:09 -06:00
2a28eed7e8 added rocksdb 2024-12-04 08:07:37 -06:00
443aaff217 added task scheduler and refactored remove deleted items 2024-12-04 07:38:59 -06:00
b4d3bb083d refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-03 17:39:30 -06:00
b9ce21853f refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-03 16:38:38 -06:00
31e20e9af0 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-03 14:32:28 -06:00
cfb7a74841 refactor 2024-12-03 14:28:01 -06:00
c8f2485ff0 refactor meta db to allow alternate implementations 2024-12-03 14:25:35 -06:00
62857e1372 refactor 2024-12-03 14:02:51 -06:00
ae3b592cf6 move event to trace 2024-12-03 10:44:40 -06:00
3365363d23 updated build system 2024-12-03 10:27:46 -06:00
f480720665 refactor 2024-12-03 10:08:19 -06:00
d0a8f9df58 refactor 2024-12-03 10:03:42 -06:00
28dc153822 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-02 19:20:17 -06:00
b265dcf73f refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-02 19:18:23 -06:00
4b9b095104 refactor stop 2024-12-02 19:16:52 -06:00
ea8eb53a34 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-02 18:26:14 -06:00
412b807d3f refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-02 14:54:58 -06:00
14cbde2586 refactor 2024-12-02 14:37:49 -06:00
429751e1fc refactor 2024-12-02 13:57:25 -06:00
04f50f7453 refactor 2024-12-02 13:56:08 -06:00
3814b9797d refactor 2024-12-02 13:54:02 -06:00
44d810c398 refactor used drive space and get_file_list 2024-12-02 13:46:49 -06:00
c982972ee8 refactor used drive space and get_file_list 2024-12-02 13:45:41 -06:00
cfba3d3082 refactor used drive space and get_file_list 2024-12-02 13:44:04 -06:00
587527e418 evict 0 byte files 2024-12-02 12:12:05 -06:00
bb4a3bc6cb refactor 2024-12-02 12:03:26 -06:00
29615d61eb [bug] Incorrect file size displayed while upload is pending #23 2024-12-02 11:10:40 -06:00
86f910c865 [bug] Incorrect file size displayed while upload is pending #23 2024-12-02 11:05:28 -06:00
2f6a691f65 [bug] Incorrect file size displayed while upload is pending #23 2024-12-02 11:03:25 -06:00
ea59ce6dc3 [bug] Incorrect file size displayed while upload is pending #23 2024-12-02 10:57:56 -06:00
3cbe39784f update
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-01 19:17:00 -06:00
cb2089ccfe updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-12-01 18:52:39 -06:00
c2bdb884c2 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-01 12:12:28 -06:00
7daec9d0f5 moved to debug
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-12-01 09:18:58 -06:00
a05c642f5e updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-12-01 09:04:57 -06:00
4c70641d8f refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-30 18:57:42 -06:00
e1939d4d11 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-30 17:41:49 -06:00
6da5d9f4f0 refactor
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-11-30 17:40:48 -06:00
401344ac5a s3 fixes 2024-11-30 17:40:22 -06:00
329ba1d5e4 s3 fixes 2024-11-30 17:38:24 -06:00
7faf648919 s3 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-30 17:21:41 -06:00
8b5369d120 fix drive space 2024-11-30 16:59:11 -06:00
8af198f7a4 fix drive space 2024-11-30 16:58:26 -06:00
b1735ab0af fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-29 06:47:21 -06:00
1ab36272f6 fuse unit tests and fixes 2024-11-29 06:44:26 -06:00
86e5f0359d revert
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-25 16:44:40 -06:00
ff007bca7f future support 2024-11-24 17:25:05 -06:00
d03e6e8201 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-14 13:25:35 -06:00
d481aeae66 cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-14 12:41:21 -06:00
c832d4ce21 updated changelog
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-14 09:54:10 -06:00
a5a0e690ce updates
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-11-14 09:52:47 -06:00
74496016f0 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-14 09:42:42 -06:00
bb8ff0de59 fix aarch64
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-13 17:33:38 -06:00
c66369f302 fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 17:17:32 -06:00
de800f79e9 fuse unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 14:31:53 -06:00
c0111d83ae refactor
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 12:44:11 -06:00
596b171c2c refactor 2024-11-13 12:31:10 -06:00
99785c0b41 refactor
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 12:02:57 -06:00
f2eafe0f14 fuse unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 11:58:30 -06:00
5ea9f27645 fuse unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 10:10:01 -06:00
6300270a21 fuse unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 10:03:10 -06:00
2e587fd897 fuse unit tests and fixes 2024-11-13 09:42:57 -06:00
f022be6fb9 fuse unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-13 08:41:35 -06:00
8ebf66a686 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-11 19:54:56 -06:00
ee695eb738 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-11 18:55:14 -06:00
80d8d6f32f fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-11 18:38:47 -06:00
0b7a9c6a56 fuse unit tests and fixes 2024-11-11 18:05:59 -06:00
54b70f99cc refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-11 14:15:00 -06:00
fe0fef2f21 refactor 2024-11-11 14:12:48 -06:00
1eec19c583 cleanup 2024-11-11 13:59:20 -06:00
6b40658eac refactor 2024-11-11 13:58:47 -06:00
bd836b9ecb fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-11 12:58:02 -06:00
074a026d64 fuse unit tests and fixes 2024-11-10 17:21:57 -06:00
60864649c0 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 17:14:19 -06:00
d2a26f0c09 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 17:10:29 -06:00
74546807f4 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 16:53:00 -06:00
d7d9199f8e fuse unit tests and fixes 2024-11-10 16:33:44 -06:00
77299455f9 fuse unit tests and fixes 2024-11-10 16:32:38 -06:00
0d5ac30e49 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 16:21:26 -06:00
2c55e243f5 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 16:09:31 -06:00
cb2362432e cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 15:48:29 -06:00
9c3e464ce4 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 15:45:25 -06:00
4e8e188d24 fuse unit tests and fixes 2024-11-10 15:18:05 -06:00
e476b4f0c6 fuse unit tests and fixes 2024-11-10 10:26:38 -06:00
eec286845e fuse unit tests and fixes 2024-11-10 10:25:53 -06:00
c276cb8979 fuse unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-10 10:24:13 -06:00
58107e6ed8 fuse unit tests and fixes 2024-11-10 10:18:08 -06:00
eb4c8c6cc0 fuse unit tests and fixes 2024-11-10 10:13:02 -06:00
0f7dfc6ce7 cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-09 17:06:08 -06:00
a4693f8acc refactor 2024-11-09 17:02:26 -06:00
fc7c6b9705 fuse unit tests and fixes 2024-11-09 16:58:38 -06:00
43ce2d13ec fuse unit tests and fixes 2024-11-09 16:21:13 -06:00
6f6f8d4d52 temporarily disable sia testing
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-11-09 16:18:38 -06:00
54fdcf6d80 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-09 16:15:00 -06:00
fd46d3ef96 fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 15:56:52 -06:00
2a15d6b4a3 fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 15:54:34 -06:00
acdc165102 remove tests
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 15:44:10 -06:00
bb4e3c26f4 unit test fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 15:39:08 -06:00
8e4a8f0f4b refactor
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 15:20:02 -06:00
2fd6f68c97 winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 15:14:32 -06:00
f7ca4f8fa3 spelling
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 14:18:30 -06:00
4c3d759837 winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 14:11:44 -06:00
04daf393d7 winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 12:07:00 -06:00
e62477a38e winfsp unit tests and fixes 2024-11-09 12:06:17 -06:00
3338fcf91f winfsp unit tests and fixes 2024-11-09 11:50:27 -06:00
136f76c575 winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 09:36:17 -06:00
b94c124869 winfsp unit tests and fixes 2024-11-09 09:21:52 -06:00
1fe7b68f0e winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 09:19:01 -06:00
3d9ed2dfe8 spelling
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-09 09:17:04 -06:00
c8cc0feae5 winfsp unit tests and fixes 2024-11-09 09:16:17 -06:00
1d53cd8e8b refactor 2024-11-09 07:22:33 -06:00
73ec7f2252 winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-08 14:49:22 -06:00
5450ffc280 winfsp unit tests and fixes 2024-11-08 14:41:59 -06:00
a9125196ce winfsp unit tests and fixes 2024-11-08 14:35:55 -06:00
ab9765582a winfsp unit tests and fixes 2024-11-08 14:27:10 -06:00
060b2b70dc winfsp unit tests and fixes 2024-11-08 13:10:21 -06:00
739a31538f winfsp unit tests and fixes 2024-11-08 13:05:53 -06:00
67595b4d45 winfsp unit tests and fixes 2024-11-08 13:04:15 -06:00
c5003e0ee6 winfsp unit tests and fixes 2024-11-08 13:01:28 -06:00
984657a5dd fix 2024-11-08 12:41:21 -06:00
2930933f19 winfsp unit tests and fixes 2024-11-08 12:37:15 -06:00
72a6f5ae08 refactor 2024-11-08 12:33:38 -06:00
7a96a8cbf3 winfsp unit tests and fixes 2024-11-08 12:32:00 -06:00
0b4befd823 winfsp unit tests and fixes 2024-11-08 12:26:48 -06:00
151b6775b0 winfsp unit tests and fixes 2024-11-08 12:24:01 -06:00
47a6bdbcd2 winfsp unit tests and fixes 2024-11-08 11:53:21 -06:00
20ab95380a winfsp unit tests and fixes 2024-11-08 11:09:03 -06:00
73afdaedb9 winfsp unit tests and fixes 2024-11-08 10:54:23 -06:00
a231c2afaf fuse unit tests and fixes 2024-11-08 10:32:37 -06:00
74109d1195 fuse unit tests and fixes 2024-11-08 10:31:20 -06:00
2bd847b833 winfsp unit tests and fixes 2024-11-08 10:17:43 -06:00
2d74fb30de remote fixes 2024-11-08 09:40:56 -06:00
af339e6086 debugging
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-08 05:55:52 -06:00
ae5c4a11a8 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-07 16:50:31 -06:00
4ce7d76500 refactor 2024-11-07 16:39:13 -06:00
a031f9d867 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-07 14:34:01 -06:00
2f6fa792df refactor 2024-11-07 14:32:57 -06:00
be0f977dc7 refactor 2024-11-07 14:32:12 -06:00
73efae7c2f refactor 2024-11-07 14:31:56 -06:00
6b67cd676d refactor 2024-11-07 14:29:47 -06:00
2db38a87e3 refactor 2024-11-07 14:28:25 -06:00
213511a77d fix 2024-11-07 14:27:13 -06:00
a38a033143 refactor 2024-11-07 14:22:49 -06:00
619d5939b3 refactor 2024-11-07 14:22:03 -06:00
68a26d2bc6 refactor 2024-11-07 14:20:14 -06:00
2661885cf2 refactor 2024-11-07 14:19:26 -06:00
6a820837cc refactor 2024-11-07 14:16:27 -06:00
02b74402f4 refactor 2024-11-07 14:13:30 -06:00
3535a61844 refactor 2024-11-07 14:12:44 -06:00
eb4fe4ff60 refactor 2024-11-07 14:12:17 -06:00
793ec5b4a5 refactor 2024-11-07 14:08:08 -06:00
c14b637536 winfsp unit tests and fixes 2024-11-07 14:06:39 -06:00
489d9b1960 winfsp unit tests and fixes 2024-11-07 12:37:35 -06:00
2f60890d29 winfsp unit tests and fixes 2024-11-07 12:36:24 -06:00
19d4b0a247 winfsp unit tests and fixes 2024-11-07 12:33:15 -06:00
69d190e485 winfsp unit tests and fixes 2024-11-07 12:32:07 -06:00
2df84f53ed winfsp unit tests and fixes 2024-11-07 11:14:38 -06:00
876a1e9cd8 refactor 2024-11-07 09:57:21 -06:00
2945793de9 fix 2024-11-07 09:25:51 -06:00
0de0e511ee winfsp unit tests and fixes 2024-11-07 09:22:24 -06:00
c16d9f9712 winfsp unit tests and fixes 2024-11-07 09:21:00 -06:00
0903d4b83e winfsp unit tests and fixes 2024-11-07 09:17:32 -06:00
0bd1f72017 winfsp unit tests and fixes 2024-11-07 08:56:18 -06:00
7bc1440b8b winfsp unit tests and fixes 2024-11-07 08:31:39 -06:00
9562ac2c62 winfsp unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-06 11:51:39 -06:00
9da78b82f3 debugging
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-05 14:02:30 -06:00
8018ce51a0 winfsp unit tests and fixes 2024-11-05 13:52:05 -06:00
3871e44732 winfsp unit tests and fixes 2024-11-05 13:20:08 -06:00
beb1058a51 winfsp unit tests and fixes 2024-11-05 13:07:50 -06:00
259efff707 winfsp unit tests and fixes 2024-11-05 13:07:22 -06:00
3c4a4d8976 winfsp unit tests and fixes 2024-11-05 13:01:01 -06:00
e3ad402853 winfsp unit tests and fixes 2024-11-05 13:00:34 -06:00
470e7b0b07 winfsp unit tests and fixes 2024-11-05 12:53:19 -06:00
cda89d0588 winfsp unit tests and fixes - support remote mount 2024-11-05 12:46:13 -06:00
f5993d472c winfsp unit tests and fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-11-04 14:17:59 -06:00
0d01862441 winfsp unit tests and fixes 2024-11-04 13:54:04 -06:00
ec2ff87ac7 winfsp unit tests and fixes 2024-11-04 13:43:59 -06:00
fd16a55745 winfsp unit tests and fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-11-01 09:41:50 -05:00
f1f9e4547d updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-31 17:22:12 -05:00
3238b6d4de fix
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-31 17:06:43 -05:00
43f0a4b646 fix
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-31 16:53:04 -05:00
c760b7328b fix
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-31 16:51:27 -05:00
7d9db55d5d refactor
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-31 16:50:29 -05:00
0741e307cc fix
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-31 16:49:17 -05:00
38f5374e49 winfsp unit tests and fixes
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-31 14:46:42 -05:00
db1320dd3a updated build system 2024-10-31 14:12:32 -05:00
2f8f38b6a2 winfsp unit tests and fixes 2024-10-31 12:14:09 -05:00
0303dcf16a updated build system 2024-10-31 10:55:31 -05:00
4c4db56a82 winfsp unit tests and fixes 2024-10-31 10:43:41 -05:00
11c58c4afb winfsp unit tests and fixes 2024-10-30 15:28:33 -05:00
07d0eb0616 winfsp unit tests and fixes 2024-10-30 14:56:50 -05:00
7d74d192f9 winfsp unit tests and fixes 2024-10-30 12:37:26 -05:00
da60c39216 winfsp unit tests and fixes 2024-10-30 11:25:08 -05:00
5ca67b28cf winfsp unit tests and fixes 2024-10-30 11:01:45 -05:00
8247669d3b winfsp unit tests and fixes 2024-10-30 09:49:28 -05:00
55762f9c92 winfsp unit tests and fixes 2024-10-30 09:26:45 -05:00
2a1a48bc68 show logging 2024-10-30 07:49:01 -05:00
d5b47a803c winfsp unit tests and fixes 2024-10-29 14:53:30 -05:00
695a7b0195 winfsp unit tests and fixes 2024-10-29 14:45:19 -05:00
ff13142093 winfsp unit tests and fixes 2024-10-29 14:45:05 -05:00
d6aace8c51 winfsp unit tests and fixes 2024-10-29 14:29:10 -05:00
ec2b8e1854 winfsp unit tests and fixes 2024-10-29 14:21:15 -05:00
884d2d6890 winfsp unit tests 2024-10-29 14:15:04 -05:00
72899173a0 fix 2024-10-28 15:15:10 -05:00
79c72d1aef added test 2024-10-28 14:36:38 -05:00
ab0b1b645b fix 2024-10-28 14:20:02 -05:00
7eb12d09f9 fixes 2024-10-28 14:18:24 -05:00
ed8ec257cc fix 2024-10-28 14:09:52 -05:00
c7a28ae526 refactor 2024-10-28 13:57:06 -05:00
c22c68ef28 refactor 2024-10-28 13:51:24 -05:00
d07e1f9cce refactor 2024-10-28 12:50:53 -05:00
158cd55b1a fixes 2024-10-28 12:50:20 -05:00
5fcc59434b refactor 2024-10-28 12:35:46 -05:00
0650790c43 fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-26 21:27:35 -05:00
55debcb643 fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-26 21:17:58 -05:00
ded55057cc updated changelog
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-26 21:15:24 -05:00
7cb78cab40 fix 2024-10-26 21:11:20 -05:00
468aba1399 fixes 2024-10-26 20:53:41 -05:00
eba242de7f fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-26 20:39:01 -05:00
fa92540bb9 fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-25 18:38:56 -05:00
b508d98dd7 windows fixes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-25 17:42:32 -05:00
4b68e5e4b7 fixes 2024-10-25 13:19:02 -05:00
3f6121839a fix 2024-10-25 12:03:45 -05:00
5e1745ebfb fix 2024-10-25 11:59:29 -05:00
b20bc6c28a refactor 2024-10-25 11:50:19 -05:00
9a1483377c refactor 2024-10-25 11:45:10 -05:00
4c97f6b098 remove logging 2024-10-25 11:37:52 -05:00
f9af43309d fixes 2024-10-25 11:24:47 -05:00
a77fd75687 refactor 2024-10-25 10:18:29 -05:00
f5b4928818 fix windows upload 2024-10-25 10:15:35 -05:00
25d61b5cd4 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-24 20:41:21 -05:00
e65e14f796 fixes
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-24 20:33:20 -05:00
6aea801779 fix
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-24 20:28:14 -05:00
21fd9fe227 fixes
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-24 20:25:06 -05:00
948b0fdd99 fix
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-24 20:14:43 -05:00
b5cf2de900 refactor 2024-10-24 20:08:40 -05:00
18d05ca635 fixes
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-24 20:07:04 -05:00
b8cd42e235 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-24 19:06:05 -05:00
4e8ff7815e refactor
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-24 19:02:11 -05:00
57b007759e fixes
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-24 19:01:33 -05:00
8692541e7f fuse test fixes 2024-10-24 18:27:36 -05:00
f5b827a039 test fixes 2024-10-24 17:32:07 -05:00
3a79cee2f9 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-24 14:58:06 -05:00
3b68543e32 fix 2024-10-24 14:47:13 -05:00
7c2a245944 fix 2024-10-24 14:42:47 -05:00
56e27e3184 fix 2024-10-24 14:41:45 -05:00
b7c87bed82 refactor 2024-10-24 12:44:52 -05:00
3cfda97c05 refactor 2024-10-24 12:42:56 -05:00
52df12493b refactor 2024-10-24 12:40:30 -05:00
6e7b030afa fix 2024-10-24 08:14:43 -05:00
afeeb97f05 refactor 2024-10-24 08:10:46 -05:00
5f925d3e71 refactor 2024-10-24 08:08:40 -05:00
26e1975edc added winfsp test executable 2024-10-24 08:02:39 -05:00
2169456d6a added winfsp test executable 2024-10-24 07:45:20 -05:00
cca53b2d26 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-23 11:19:59 -05:00
2a6ecf61b2 refactor 2024-10-23 11:13:56 -05:00
18c53fad9a refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-22 19:11:00 -05:00
84fc05acc0 fixes 2024-10-22 19:00:08 -05:00
c960df8f70 refactor
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-22 15:08:50 -05:00
9d0baf30b8 refactor 2024-10-22 15:07:15 -05:00
312e4bc0f1 fix 2024-10-22 15:05:54 -05:00
1d7f5b7ef1 fix 2024-10-22 15:04:38 -05:00
08e381a307 refactor 2024-10-22 15:01:04 -05:00
49e518ac19 refactor 2024-10-22 13:10:01 -05:00
8e2ebf05b8 refactor 2024-10-22 13:06:02 -05:00
924b79add2 refactor 2024-10-22 13:02:17 -05:00
857dcc5535 cleanup 2024-10-22 12:22:58 -05:00
7c0d583435 temp disable drive and provider unit tests 2024-10-22 11:53:24 -05:00
2b6a88f8cb continue refactor drive tests 2024-10-22 10:47:51 -05:00
fbf31c77ed fixes 2024-10-22 10:45:08 -05:00
9c2aa62f1f continue refactor drive tests 2024-10-22 10:36:39 -05:00
b6456abf0d continue refactor drive tests 2024-10-22 10:27:31 -05:00
5138b0d1ab refactor 2024-10-22 09:25:55 -05:00
0f60a5a467 continue refactor drive tests 2024-10-22 09:18:09 -05:00
982e5357a5 continue refactor drive tests 2024-10-22 09:14:33 -05:00
0ad0ff508b continue refactor drive tests 2024-10-22 08:18:53 -05:00
b4d61649cb refactor winfsp test 2024-10-22 08:00:16 -05:00
cdfbaa47b6 grammar 2024-10-21 11:57:38 -05:00
211805e460 updated CHANGELOG.md 2024-10-21 11:55:32 -05:00
841d57cf13 fuse permission fixes 2024-10-21 11:52:21 -05:00
bd25904371 fix 2024-10-21 08:30:26 -05:00
3c001c11ae updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-20 18:48:01 -05:00
1f6036ec18 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-20 12:01:23 -05:00
104e101158 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-19 20:45:55 -05:00
b281b4c105 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-19 20:06:39 -05:00
cf6a370eea updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-19 15:58:51 -05:00
8feb244dc9 updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-19 11:39:55 -05:00
f9e2e72d84 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-19 11:34:41 -05:00
32ef8ba3c3 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-19 11:33:28 -05:00
2fb53e34af updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-19 11:10:36 -05:00
c72dec6369 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-18 15:15:27 -05:00
c0b0c5d397 fixes 2024-10-18 14:56:44 -05:00
d34ccc424d refactor 2024-10-18 14:53:31 -05:00
663c89ac53 refactor 2024-10-18 14:30:13 -05:00
414f18a518 fix 2024-10-18 14:28:22 -05:00
9bfdece859 fixes 2024-10-18 14:27:50 -05:00
ad79c5daf5 refactor 2024-10-18 14:22:24 -05:00
48a1bef1ae fix 2024-10-18 13:28:21 -05:00
09cb5d8f19 updated build system 2024-10-18 11:46:59 -05:00
c216df9b73 refactor 2024-10-18 11:38:27 -05:00
a0a5ca3390 refactor 2024-10-18 07:36:52 -05:00
ae0a921ba8 updated build system 2024-10-18 06:50:09 -05:00
3ce03dabf5 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-17 18:49:50 -05:00
4a530c4703 refactor 2024-10-17 18:26:24 -05:00
c9f9c2a24c fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-17 17:13:20 -05:00
cd538566d3 updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-17 16:34:32 -05:00
f0fe0a1962 fix 2024-10-17 14:41:01 -05:00
68e3f6fc00 fix 2024-10-17 13:43:45 -05:00
a23adf6db6 fixes 2024-10-17 13:35:00 -05:00
da5752e971 refactor 2024-10-17 13:14:40 -05:00
40682e37c0 refactor 2024-10-17 13:13:13 -05:00
cbd36efd91 refactor 2024-10-17 12:52:40 -05:00
96c712ae9c refactor 2024-10-17 12:51:31 -05:00
28065c2c8e refactor 2024-10-17 12:49:00 -05:00
9677ebb44e disable tests 2024-10-17 12:41:16 -05:00
172ea8bc00 disable tests 2024-10-17 12:40:24 -05:00
d2ec6f1f10 Add support for bucket name in Sia provider #16 2024-10-17 12:37:22 -05:00
bc7a74e432 Add support for bucket name in Sia provider #16 2024-10-17 12:29:46 -05:00
8561278539 fixes 2024-10-17 12:24:11 -05:00
f932799efa refactor 2024-10-17 12:14:31 -05:00
67dedc3fb5 refactor 2024-10-17 11:23:20 -05:00
7120856407 refactor 2024-10-17 11:20:59 -05:00
a1b138ccd1 refactor 2024-10-17 11:19:14 -05:00
d192904f8c updated build system 2024-10-17 11:18:08 -05:00
0bd7070ec5 refactor 2024-10-17 10:21:22 -05:00
63ca3089da updated build system 2024-10-17 10:20:07 -05:00
39d644e115 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-12 17:59:32 -05:00
ac89796d5d updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-12 17:48:29 -05:00
4e1250a832 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-12 17:06:31 -05:00
4049af94dc updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-12 13:17:59 -05:00
ab40037cb4 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-12 13:12:47 -05:00
66305c3c86 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-12 13:06:42 -05:00
989d14072d cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-11 21:29:21 -05:00
21d4f4b78d refactor 2024-10-11 21:27:32 -05:00
94b7950cf1 fix 2024-10-11 21:26:48 -05:00
1fe3f62be4 fix
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-11 21:24:06 -05:00
43df4356b6 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-11 18:11:48 -05:00
5440dd3dac updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-11 18:03:30 -05:00
88718de96e updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-11 16:15:07 -05:00
d8d86f3f0c cleanup 2024-10-11 16:09:40 -05:00
1ff62fb2b5 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-10-11 16:03:42 -05:00
284e2a3ead updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-10-11 15:56:46 -05:00
3c97c2d953 updated build system 2024-10-10 17:58:32 -05:00
196aeae11f updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-10 15:16:26 -05:00
ba2850ea21 updated build system 2024-10-10 15:07:46 -05:00
1f4872769d updated build system 2024-10-10 14:33:37 -05:00
24c647966b updated build system 2024-10-10 14:22:17 -05:00
d3f3048568 updated build system 2024-10-10 14:20:19 -05:00
ea249723f9 updated build system 2024-10-10 14:10:27 -05:00
52a2df2576 updated build system 2024-10-10 13:36:55 -05:00
6645b322bd updated build system 2024-10-10 12:52:51 -05:00
7a683a46a9 updated build system 2024-10-10 11:37:01 -05:00
cb24252286 updated build system 2024-10-10 11:24:35 -05:00
fff3dc4685 updated build system 2024-10-10 09:57:47 -05:00
9c01d51334 updated build system 2024-10-10 08:30:53 -05:00
9aafb62961 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-09 20:50:08 -05:00
891040479b fix 2024-10-09 20:46:44 -05:00
55bcf082ec fixes 2024-10-09 19:50:05 -05:00
67053645e1 fixes 2024-10-09 19:36:35 -05:00
854caffea8 fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-09 16:58:27 -05:00
36e952606b fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-10-09 14:20:50 -05:00
eba93484c1 fix 2024-10-09 14:15:02 -05:00
d09d55951d refactor 2024-10-09 14:08:38 -05:00
b35e100bb2 fix 2024-10-09 14:03:06 -05:00
27b37eb570 refactor 2024-10-09 13:58:41 -05:00
92295506a7 refactor 2024-10-09 13:56:40 -05:00
d20ed07066 fix 2024-10-09 13:41:32 -05:00
3fa16fd846 updated build system 2024-10-09 12:50:41 -05:00
481dfd5ff5 fix 2024-10-09 12:50:17 -05:00
e5a99943a5 fix 2024-10-09 12:23:17 -05:00
dbaf379f19 refactor 2024-10-09 11:32:03 -05:00
e1bd3bb8ec refactor 2024-10-09 11:29:27 -05:00
ca834dd119 refactor 2024-10-09 11:11:07 -05:00
163549098d cleanup 2024-10-09 10:46:22 -05:00
4800db00d9 fix 2024-10-09 10:45:12 -05:00
675707b46c fix 2024-10-09 10:37:43 -05:00
14ebdab034 fix 2024-10-09 10:27:51 -05:00
8541e292cd fix 2024-10-09 09:51:09 -05:00
9adec02640 fix 2024-10-09 09:47:35 -05:00
6a97ad664b fix 2024-10-09 09:18:25 -05:00
8bb291bbd9 updated build system 2024-10-09 09:15:18 -05:00
13a55bff61 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-08 19:14:39 -05:00
d6efc19ba8 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-08 18:04:56 -05:00
517e603539 added test 2024-10-08 18:02:02 -05:00
d144101a7e refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-08 17:50:57 -05:00
b84202a689 refactor 2024-10-08 17:45:14 -05:00
572351067c refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-08 17:32:11 -05:00
9a3d6e725e refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-08 17:01:17 -05:00
d6ac8cfeca fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-08 14:26:06 -05:00
e697363574 fix 2024-10-08 14:17:32 -05:00
8cc6868480 fix 2024-10-08 14:15:53 -05:00
37d393c1f9 fix 2024-10-08 13:23:19 -05:00
ec8b7783f3 fix 2024-10-08 13:20:34 -05:00
7b375eb912 fix 2024-10-08 11:22:41 -05:00
ffdea97de3 fix 2024-10-08 11:08:47 -05:00
7c02d79a5c added enhanced where support 2024-10-08 11:03:38 -05:00
46bbc4bf92 future where support
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-07 19:48:26 -05:00
08104c97f7 future where handling
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-07 16:55:44 -05:00
8cf169849c future group support
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-03 13:54:48 -05:00
67ac6a6c6e [Unit Test] SQLite mini-ORM unit tests and cleanup #14
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-02 15:16:21 -05:00
ef270887fc [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 15:12:57 -05:00
d61cb3f0f3 [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 15:08:37 -05:00
12d3fbe9a7 [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 14:46:28 -05:00
bf9d6157f5 [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 14:42:42 -05:00
c4d7868381 [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 14:41:21 -05:00
de0a88e45b [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 14:36:02 -05:00
fed4f40cfb [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 14:25:14 -05:00
d1b4aab952 [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-02 13:11:05 -05:00
072a5356b6 fix 2024-10-02 12:25:21 -05:00
781278df06 fix 2024-10-02 11:40:12 -05:00
42a81b1c8e fixes 2024-10-02 11:38:45 -05:00
3e6ed45562 refactor 2024-10-02 11:11:57 -05:00
366fe60e2f fixed log 2024-10-02 10:39:33 -05:00
be30230422 added error handling 2024-10-02 10:39:00 -05:00
a523d1ca66 [Unit Test] SQLite mini-ORM unit tests and cleanup #14
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-10-01 14:21:23 -05:00
62a75bcddd [Unit Test] SQLite mini-ORM unit tests and cleanup #14 2024-10-01 14:10:21 -05:00
bd754a7ee8 fix 2024-10-01 10:16:39 -05:00
ae98d199e7 continue sqlite3 mini-orm 2024-10-01 09:31:20 -05:00
95ff5dd5eb refactor 2024-10-01 08:46:29 -05:00
3f3e9b5735 refactor 2024-10-01 08:45:54 -05:00
82a48fa3b4 continue sqlite3 mini-orm 2024-10-01 08:44:54 -05:00
97ce74b798 sqlite3 mini-orm work 2024-10-01 07:49:42 -05:00
91704df08d fix test
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-29 20:19:33 -05:00
4add037dc7 fix config
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-29 20:18:44 -05:00
79314a11b5 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-29 14:50:52 -05:00
0ff87a0ac3 fix 2024-09-29 14:50:07 -05:00
1aef67368b cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-29 14:47:58 -05:00
ba2a669be6 fix tests
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-29 09:56:32 -05:00
fd721c5dc4 [bug] Rename file is broken for files that are existing #19 2024-09-29 09:51:25 -05:00
34070bba89 [bug] Rename file is broken for files that are existing #19 2024-09-29 09:49:28 -05:00
1b1fc0fc09 [bug] Rename file is broken for files that are existing #19
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-29 09:16:37 -05:00
44a1547d2e refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 17:32:22 -05:00
439f9cce3d [bug] Rename file is broken for files that are existing #19 2024-09-28 17:28:47 -05:00
9aecec2d96 updates
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 15:03:45 -05:00
26fffdd3e8 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 15:01:06 -05:00
7bf69ec350 updated changelog
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 13:21:56 -05:00
49f884bb68 [bug] Rename file is broken for files that are existing #19
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-09-28 13:19:35 -05:00
a58fcc7f14 [bug] Rename file is broken for files that are existing #19
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 13:16:37 -05:00
113b5e7258 updated changelog
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 11:54:50 -05:00
5b337d4c65 updated changelog
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 11:45:35 -05:00
0b80e82721 Add support for bucket name in Sia provider #16
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 09:58:02 -05:00
7e45fa9708 Add support for bucket name in Sia provider #16
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-09-28 09:54:01 -05:00
aa92f3d401 Add support for bucket name in Sia provider #16
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 09:52:57 -05:00
692c92b0df fix time
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-28 08:26:01 -05:00
51cb2c0b9a refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-27 14:07:13 -05:00
6e52474953 refactor 2024-09-27 14:03:36 -05:00
35db1cd0c4 refactor
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-09-27 14:01:26 -05:00
739a1103f0 refactor 2024-09-27 13:54:14 -05:00
bb8cbb49f5 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-27 13:36:39 -05:00
3be29f5b73 updated changelog
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-27 09:27:47 -05:00
d944569fb2 updated changelog 2024-09-27 09:27:01 -05:00
3ca27ec566 cleanup
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-09-27 09:22:17 -05:00
779bfc762e fix times
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-27 09:16:59 -05:00
54828600a8 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-26 20:44:38 -05:00
d5410b88a3 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-26 20:22:14 -05:00
e86d9fd29b updated test
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-09-26 20:20:33 -05:00
cbebcfae82 fix linux
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-26 20:17:03 -05:00
3dc16db278 changes
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-09-26 17:05:02 -05:00
e81227e5f7 fixes 2024-09-26 16:16:02 -05:00
fd83c263e1 fix 2024-09-26 15:52:06 -05:00
9ca857c98e fixes 2024-09-26 15:50:37 -05:00
afcba6b086 fix 2024-09-26 15:41:59 -05:00
ef50acc867 fix 2024-09-26 11:43:39 -05:00
f12833f36c refactor 2024-09-26 11:20:07 -05:00
b26788819e fix 2024-09-26 10:47:07 -05:00
f905de7c42 update test 2024-09-26 09:38:55 -05:00
8466a8850f refactor 2024-09-26 09:23:52 -05:00
af8e2cddcb fix time conversion 2024-09-26 09:20:13 -05:00
0089866142 removed test 2024-09-26 09:00:32 -05:00
ca892c7f11 fix tests 2024-09-26 08:46:02 -05:00
2a33000ace refactor 2024-09-26 08:38:41 -05:00
bd8da9b987 Merge branch 'development' of https://git.fifthgrid.com/blockstorage/repertory into development 2024-09-26 08:37:42 -05:00
5bd780ef07 updates
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-25 21:24:56 -05:00
25c445b889 removed is_processing check
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-25 18:47:36 -05:00
ca4111ac77 updated readme 2024-09-25 07:33:56 -05:00
868e8ae124 refactor 2024-09-25 07:18:48 -05:00
51358c7110 remove check for processing 2024-09-25 07:13:11 -05:00
ad97741e1e updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-24 17:03:41 -05:00
5d281e0fd0 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-24 11:04:01 -05:00
c1a1242398 updated build system 2024-09-24 07:44:58 -05:00
5000215973 updated build system 2024-09-24 07:32:27 -05:00
ceda21830f refactor 2024-09-24 07:30:18 -05:00
b80b7b482c refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-23 20:43:50 -05:00
8e41f71e70 refactor 2024-09-23 20:42:46 -05:00
72286c865f revert 2024-09-23 20:42:06 -05:00
2d435457e6 fixes 2024-09-23 20:41:44 -05:00
cef1ff7067 refactor 2024-09-23 20:38:52 -05:00
393dc07de8 fix
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-09-23 20:35:45 -05:00
49f0425e56 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-23 20:34:27 -05:00
7973e523c3 refactor 2024-09-23 20:24:49 -05:00
c4326520cd refactor 2024-09-23 20:22:43 -05:00
dfb9d78448 clean cache directory 2024-09-23 20:08:35 -05:00
26714a836c added failure event
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-16 11:17:59 -05:00
59e2986080 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-16 11:15:34 -05:00
65fc484fa4 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-14 19:31:21 -05:00
253978bc5f update 2024-09-14 19:12:42 -05:00
4aad60f69d fallback to stat
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-09-09 15:05:04 -05:00
72f7aaf9e4 move events to debug 2024-09-06 12:15:17 -05:00
d12b5f7b05 fix 2024-09-06 11:24:50 -05:00
a7239558bd new_build_system (#18)
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
Reviewed-on: #18
2024-09-06 15:05:48 +00:00
9d3e4b8767 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-05 19:51:50 -05:00
cbb99c7b47 fixes 2024-08-05 19:50:48 -05:00
dca0752189 updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-05 19:38:13 -05:00
760a1e4322 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 19:51:35 -05:00
56bc78f63c updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-04 19:18:39 -05:00
60b89c5c08 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 19:12:42 -05:00
fa0f648a0b cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 18:34:15 -05:00
5592c663a4 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 17:06:00 -05:00
c847353baf updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 16:58:28 -05:00
a140f59acb updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 14:11:39 -05:00
b2eae73fb0 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 13:57:00 -05:00
a34dc8f1f8 updated build system 2024-08-04 13:38:48 -05:00
92e2937a6b updated build system 2024-08-04 13:29:19 -05:00
47757f69bd updated build system 2024-08-04 13:27:55 -05:00
b642294193 updated build system 2024-08-04 13:21:29 -05:00
8075af364f updated build system 2024-08-04 13:18:42 -05:00
3d7285063a updated build system 2024-08-04 13:13:45 -05:00
232420621a updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-04 13:12:16 -05:00
a0432be819 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 12:42:23 -05:00
468093227d updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-04 12:31:36 -05:00
32facdc2ff updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 11:02:17 -05:00
3a719272cb updated build system 2024-08-04 10:37:31 -05:00
9ce4c5a494 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-04 10:30:49 -05:00
15cc7b0b87 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-04 09:55:01 -05:00
6d152181d3 fixes
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-08-03 16:11:25 -05:00
97082fa42c fixes 2024-08-03 16:09:14 -05:00
becd137cf5 updated build system 2024-08-03 15:55:38 -05:00
89fc0fb372 fixes
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-03 15:50:16 -05:00
68d79c73cf updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-03 14:10:15 -05:00
d0e9ed4181 fix 2024-08-03 14:00:14 -05:00
535d2f85ca updated build system 2024-08-03 13:54:18 -05:00
5ec20fbcbd updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-03 13:44:02 -05:00
0225fb8e5d updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-03 12:24:22 -05:00
035f830b71 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-03 12:15:08 -05:00
7a1fb4a790 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-08-03 11:00:57 -05:00
4939b05ca7 updated build system 2024-08-03 10:57:56 -05:00
2e1f0c8f5f updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-03 10:55:25 -05:00
d8ac596687 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-08-03 10:33:39 -05:00
1f43ab3bcc updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-03 10:28:17 -05:00
ef9fbf775c fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-03 07:37:32 -05:00
d6ff728a4d updated build system 2024-08-03 07:33:30 -05:00
f6ea8f077b updated build system 2024-08-03 07:28:10 -05:00
3e96764fc4 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-03 07:20:37 -05:00
45b21ae0af updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-03 07:19:32 -05:00
490e49406f updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-03 00:04:04 -05:00
36e7d06b39 updated build system 2024-08-03 00:02:12 -05:00
b26b52fd2d updated build system 2024-08-02 23:28:33 -05:00
b35041f1f3 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 23:27:05 -05:00
bf7ba3991f updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 23:11:38 -05:00
30806c6ebd updated build system 2024-08-02 23:07:48 -05:00
29af6e5b7c updated build system 2024-08-02 23:02:32 -05:00
cde0e92591 updated build system 2024-08-02 22:58:05 -05:00
85b28f7ee3 cleanup 2024-08-02 22:45:35 -05:00
df24b99303 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 22:41:42 -05:00
d659a5e04d updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-08-02 22:34:01 -05:00
99c0246720 updated build system 2024-08-02 21:41:44 -05:00
c0dba76ecd fixes 2024-08-02 21:30:17 -05:00
e37a375c18 fix 2024-08-02 21:01:21 -05:00
6718eb374d fix 2024-08-02 20:59:41 -05:00
bba7f10703 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build started...
2024-08-02 19:46:19 -05:00
df947db055 updated build system 2024-08-02 19:43:21 -05:00
256e873e7b updated build system 2024-08-02 19:42:49 -05:00
34bc3b759e updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 19:42:05 -05:00
853aa2784c updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 19:23:42 -05:00
4011ab8080 fix 2024-08-02 19:21:05 -05:00
07eaad7226 update 2024-08-02 18:59:30 -05:00
c743d3c341 updated build system 2024-08-02 18:57:35 -05:00
05a5645f4c update 2024-08-02 18:52:33 -05:00
7aa71fb202 updated build system 2024-08-02 18:52:00 -05:00
6410faa5b4 update
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 18:45:52 -05:00
a3e578240b updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 18:45:17 -05:00
dd492ff52d updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 17:18:09 -05:00
e7b576bc45 updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 16:33:52 -05:00
7fcc40d0e1 updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 16:22:46 -05:00
e6fc6093b2 added test 2024-08-02 16:10:07 -05:00
73ee70a862 fix 2024-08-02 16:04:04 -05:00
b647053fb9 updated build system 2024-08-02 15:43:36 -05:00
d518e55a67 updated build system 2024-08-02 15:34:01 -05:00
fcd38db2ef updated build system 2024-08-02 15:22:01 -05:00
89cd8c8fd0 test fixes 2024-08-02 15:10:59 -05:00
88c7e7f192 fixes 2024-08-02 14:55:29 -05:00
a00e77d554 refactor 2024-08-02 14:54:29 -05:00
05d5bd9fe5 fixes 2024-08-02 14:46:01 -05:00
e14e7e96c7 updated build system 2024-08-02 14:39:45 -05:00
4b8ac300c8 update 2024-08-02 14:23:36 -05:00
ee9c5fffad fix
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 14:15:56 -05:00
55caf049b1 updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 14:09:57 -05:00
4bdaeaa30d refactor 2024-08-02 14:09:20 -05:00
7c58d9c569 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 14:06:25 -05:00
d6b2c1d81e updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 14:02:14 -05:00
4a2acf99a7 updated build system 2024-08-02 13:14:29 -05:00
3567941139 updated build system 2024-08-02 13:10:05 -05:00
50e7fb612c refactor 2024-08-02 13:08:03 -05:00
03dd9ee579 refactor 2024-08-02 13:05:15 -05:00
2a3c6f3522 updated build system 2024-08-02 13:01:00 -05:00
2e46f2fead cleanup 2024-08-02 12:59:29 -05:00
b00b72d70f updated build system 2024-08-02 12:58:44 -05:00
7dbaf9b8d9 updated build system 2024-08-02 12:57:40 -05:00
6b463adbb9 updated build system 2024-08-02 12:53:12 -05:00
cb819d7da6 updated build system 2024-08-02 12:51:39 -05:00
841ba37841 updated build system 2024-08-02 12:50:19 -05:00
7cfefbc512 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 12:47:29 -05:00
9ebba7252c updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 12:37:55 -05:00
835c56330a update
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 12:19:01 -05:00
3e5c70e6b4 refactor 2024-08-02 12:15:15 -05:00
95b88b6ac6 updated build system 2024-08-02 12:11:45 -05:00
c1f39afaa3 updated build system 2024-08-02 12:10:55 -05:00
1893f7204e refactor 2024-08-02 12:05:51 -05:00
4a062abfde updated build system 2024-08-02 12:03:33 -05:00
fb26aa04b6 updated build system 2024-08-02 12:01:25 -05:00
7c538b471d updated build system 2024-08-02 11:56:58 -05:00
c4c1941fc8 updated build system 2024-08-02 11:46:51 -05:00
0ce2be24a3 updated build system 2024-08-02 11:44:51 -05:00
3cf60d0d26 updated build system 2024-08-02 11:34:45 -05:00
7d33ee80d0 update 2024-08-02 11:32:54 -05:00
52ba2de51f refactor 2024-08-02 11:31:59 -05:00
a1b8b57d34 cleanup
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-08-02 11:30:12 -05:00
cf9e0a356c updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-08-02 11:25:14 -05:00
e66b783df8 updated build system 2024-08-02 11:24:30 -05:00
b695200123 updated build system 2024-08-02 11:14:44 -05:00
a383fb405e updated build system 2024-08-02 11:14:28 -05:00
1cebaf83e1 updated build system 2024-08-02 10:57:34 -05:00
4976825bd6 updated build system 2024-08-02 10:56:40 -05:00
098d172f40 updated build system 2024-08-02 10:53:11 -05:00
f4a3c52428 refactor 2024-08-02 10:29:31 -05:00
2d60b637ec refactor 2024-08-02 10:00:14 -05:00
79b16d9a34 refactor 2024-08-02 09:58:16 -05:00
d69f2fd321 refactor 2024-08-02 09:52:12 -05:00
4015c1bb6e refactor 2024-08-02 09:50:53 -05:00
9f76f20ea8 refactor 2024-08-02 09:32:26 -05:00
b399ff3291 refactor 2024-08-02 09:30:41 -05:00
4fafc17b90 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-28 13:31:30 -05:00
f1bb88624b updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-28 12:40:51 -05:00
f950e3192f updated build system 2024-07-28 12:33:26 -05:00
bef2f078e0 updated build system 2024-07-28 12:10:26 -05:00
218053a2be updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-07-28 11:50:19 -05:00
baca904079 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-28 11:48:01 -05:00
b5175cb82b updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-28 09:52:58 -05:00
39888a1ebc updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-28 09:27:35 -05:00
ffea22c71b updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 16:31:45 -05:00
dd15bb2205 updated build system 2024-07-27 15:54:01 -05:00
4b53b5544f updated build system 2024-07-27 15:53:45 -05:00
24ca2d3531 updated build system 2024-07-27 15:27:36 -05:00
5587e3430b updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-07-27 15:15:05 -05:00
0c36669fef updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 15:10:30 -05:00
bbce86cb9d updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 11:24:19 -05:00
ed3a45bfc6 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 10:22:56 -05:00
144fd59c1c updated build system 2024-07-27 10:17:14 -05:00
bdc54d7115 updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-07-27 10:11:59 -05:00
aa04371663 updated build system
Some checks reported errors
BlockStorage/repertory/pipeline/head Something is wrong with the build of this commit
2024-07-27 10:00:59 -05:00
78ef412516 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 09:32:01 -05:00
28f7f5cb14 cleanup
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 09:17:34 -05:00
624b92bc97 enable backward-cpp for test 2024-07-27 09:14:51 -05:00
ca799298f4 switch to backward-cpp 2024-07-27 09:13:43 -05:00
42b67a7b61 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 09:00:11 -05:00
4de4386142 fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-27 08:42:14 -05:00
509eeca760 fix test dir
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-07-27 08:38:50 -05:00
1cb9663776 fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-07-27 08:36:59 -05:00
26667fdcd6 fix
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-07-27 08:28:23 -05:00
681a3eec53 fixes 2024-07-27 08:23:20 -05:00
2a5df890f7 fix sign 2024-07-27 08:13:23 -05:00
ae0059591c fix directory caching 2024-07-27 08:12:43 -05:00
8655becf1e fix directory caching 2024-07-27 07:54:46 -05:00
2a3a0aa689 refactor
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 17:11:48 -05:00
ce417ec7c8 fix
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-07-26 17:08:49 -05:00
4e848a998e fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 16:56:58 -05:00
d89f35775e fix crash
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 16:54:01 -05:00
88ebf72003 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 15:15:28 -05:00
9437b175a2 updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 15:08:41 -05:00
302d73a078 unmount if initialization fails 2024-07-26 14:54:05 -05:00
ba7c36e30e updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 14:43:06 -05:00
b4da43c87f added override.sh support
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 12:28:21 -05:00
f5e078ea3d updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 12:21:42 -05:00
8aba39c955 optional stack trace support
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 09:23:21 -05:00
68a5f90715 added cpptrace 2024-07-26 08:07:10 -05:00
f3dfadddfa added cpptrace 2024-07-26 07:54:29 -05:00
f1ec1fd4a6 format log message
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-26 07:22:15 -05:00
cf59db9cee fix logging
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-25 20:42:57 -05:00
e21fbe84ba fix
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-25 19:02:34 -05:00
1e4d675f76 added todo
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-25 15:02:26 -05:00
e02fe870e9 fix 2024-07-25 14:58:16 -05:00
9ef9250c18 moved event to debug 2024-07-25 14:46:51 -05:00
2e4b9d3787 added flush policy 2024-07-25 14:43:16 -05:00
fd2b3c3239 fix 2024-07-25 14:38:27 -05:00
6640394bd3 fix 2024-07-25 14:21:20 -05:00
84cc726c4f don't register loggers 2024-07-25 14:18:31 -05:00
5d99d21915 refactor 2024-07-25 13:58:27 -05:00
3b8636544a refactor logging 2024-07-25 13:55:30 -05:00
14ce60ab65 fix 2024-07-25 13:38:37 -05:00
7506431b34 fix 2024-07-25 13:31:48 -05:00
8621209eb3 fix 2024-07-25 13:30:48 -05:00
d1fe317fb9 refactor console consumer 2024-07-25 13:24:54 -05:00
a5c47d3f22 updated build system 2024-07-25 13:01:03 -05:00
267c272ce5 changed thread count 2024-07-25 12:31:12 -05:00
7a8ae32b85 refactor 2024-07-25 12:30:16 -05:00
921d404a5a switch to spdlog 2024-07-25 12:28:47 -05:00
de6a7beb5e refactor 2024-07-25 12:17:12 -05:00
5a928208fd added spdlog 2024-07-25 10:55:48 -05:00
92fc163341 fix 2024-07-25 10:47:26 -05:00
ca1e03f3ea logging changes 2024-07-25 10:46:29 -05:00
de8c3ad603 fix 2024-07-25 10:27:25 -05:00
f7d56bdd48 refactor 2024-07-25 10:19:50 -05:00
afc13b45f4 refactor 2024-07-25 10:16:17 -05:00
11d2c3c69e updated build system 2024-07-25 09:31:01 -05:00
356521e176 updated build system 2024-07-25 09:26:32 -05:00
468668e518 updated build system 2024-07-25 07:21:01 -05:00
e53dec7bab updated build system 2024-07-25 07:09:45 -05:00
26cd6de110 updated build system 2024-07-25 06:58:50 -05:00
d1157ca261 updated build system 2024-07-25 06:50:56 -05:00
0d49b49482 updated build system 2024-07-25 06:49:11 -05:00
753820bd81 updated build system 2024-07-25 06:45:38 -05:00
2a98e44a5c updated build system 2024-07-25 06:37:36 -05:00
9650d3f56b updated build system
All checks were successful
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-24 19:38:51 -05:00
aff308f67d updated build system
Some checks are pending
BlockStorage/repertory/pipeline/head Build queued...
2024-07-24 19:35:43 -05:00
be76d663b5 update
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-07-24 18:27:10 -05:00
160863561f updated build system
Some checks failed
BlockStorage/repertory/pipeline/head There was a failure building this commit
2024-07-23 11:49:23 -05:00
de02fdb812 updated build system 2024-07-23 10:57:31 -05:00
1dda98e337 fix test data location 2024-07-23 09:20:56 -05:00
ccc6afdfa4 updated build system 2024-07-23 09:14:36 -05:00
6b166551af updated build system 2024-07-23 09:11:01 -05:00
90694f816f updated build system 2024-07-23 08:15:44 -05:00
51636fbac3 fix 2024-07-23 08:00:48 -05:00
3c7aa417a9 updated build system 2024-07-23 07:51:09 -05:00
04d560739d updated build system 2024-07-23 07:48:34 -05:00
2c1842f06c updated build system 2024-07-23 07:13:55 -05:00
aba487277c updated build system 2024-07-22 15:26:32 -05:00
1b95d5dfed updated build system 2024-07-22 15:19:43 -05:00
7590fc4f45 fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory/pipeline/head This commit looks good
2024-07-17 17:27:48 -05:00
a409408ac0 fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-07-17 17:21:27 -05:00
b121c9778f fix 2024-07-17 17:19:30 -05:00
d452761ff9 fix permissions
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-07-17 17:08:38 -05:00
dbc5725779 fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-07-17 13:58:31 -05:00
8b6536c55b fix 2024-07-17 13:57:08 -05:00
c7318b0383 const refactor 2024-07-17 13:53:56 -05:00
26fa85e421 updated build system 2024-07-17 12:28:20 -05:00
939742a991 updated build system 2024-07-17 12:22:44 -05:00
6ab7386a79 updated build system 2024-07-17 09:18:04 -05:00
a750cdc3cb updated build system 2024-07-17 09:02:14 -05:00
2f8af0fb08 updated build system 2024-07-17 08:59:06 -05:00
7a9802ccb7 updated build system 2024-07-17 08:05:37 -05:00
d356d04bf5 updated build system 2024-07-17 07:54:51 -05:00
c99c4988ff fix 2024-07-16 14:29:02 -05:00
6c564eb98b updated version.rc 2024-07-16 14:28:17 -05:00
786d8634cf updated build system 2024-07-16 14:17:00 -05:00
70b5204a9f updated build system 2024-07-16 14:00:12 -05:00
1d70309a16 updated build system 2024-07-16 13:50:25 -05:00
b2aba8c447 updated build system 2024-07-16 13:19:07 -05:00
8757b47b35 updated build system 2024-07-16 13:04:21 -05:00
b1a7632536 updated readme 2024-07-16 10:58:53 -05:00
97c73d5358 fix 2024-07-16 10:52:42 -05:00
bb3f05aef1 fix 2024-07-16 10:50:46 -05:00
2001ffc27c fix 2024-07-16 09:51:46 -05:00
bd2dd4035e isolate test directory 2024-07-16 09:46:13 -05:00
7eb793f577 fix test 2024-07-16 09:27:26 -05:00
b05cedb509 refactor 2024-07-16 09:17:59 -05:00
cd484aa7bb isolate test directory 2024-07-16 09:16:59 -05:00
fbfb528adc fix 2024-07-16 09:01:05 -05:00
d9cba7f4cc fix 2024-07-16 08:58:22 -05:00
7d5c5e18c6 isolate test directory 2024-07-16 08:54:41 -05:00
2c0935a771 isolate test directory 2024-07-16 08:13:13 -05:00
5312797b97 refactor 2024-07-16 08:04:49 -05:00
801af6b799 fix test 2024-07-16 07:52:49 -05:00
b42379b519 updated script 2024-07-16 07:51:35 -05:00
8b328b92b1 added post_build.sh 2024-07-16 07:49:16 -05:00
2e6ecf8ddb added test data to distribution 2024-07-16 07:48:13 -05:00
12c7c48b20 updated build system 2024-07-15 13:22:06 -05:00
732a42a073 refactor preprocessor directives 2024-07-15 13:19:27 -05:00
5638d5c010 fix 2024-07-15 13:12:14 -05:00
6a984ac84c refactor preprocessor directives 2024-07-15 12:20:15 -05:00
c1eb6b94ed updated build system 2024-07-15 11:11:11 -05:00
85135b9267 updated build system
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-07-11 20:38:02 -05:00
670c5bac17 updated build system
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-07-11 20:27:02 -05:00
2dab1426b4 fix permissions
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-07-11 19:54:31 -05:00
43004b19c3 updated build system
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-07-11 14:10:43 -05:00
4044d95aaa updated build system 2024-07-11 13:56:36 -05:00
c6024c06ce fix 2024-07-11 13:01:06 -05:00
e56bcb6489 updated build system 2024-07-11 12:31:50 -05:00
d9e0164080 updated build system 2024-07-11 11:42:06 -05:00
b588cd645e updated build system 2024-07-11 09:28:07 -05:00
2c163d7219 updated build system 2024-07-11 09:25:32 -05:00
39c53fe903 updated public key 2024-07-11 09:12:49 -05:00
7b12d8f9e9 updated build system 2024-07-11 07:42:28 -05:00
4649f66c00 updated build system 2024-07-11 07:35:44 -05:00
9648abf4b9 fix
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-07-10 15:16:18 -05:00
cc1246d0f5 moved version.rc.in 2024-07-10 14:59:33 -05:00
726aa43a95 fix 2024-07-10 14:56:09 -05:00
224f983ca8 fix 2024-07-10 14:48:31 -05:00
f962884341 updated build system 2024-07-10 14:46:17 -05:00
6f35cc8bfa remove unused file 2024-07-10 10:14:25 -05:00
c08c9bc216 updated build system 2024-07-09 15:04:45 -05:00
239652be7c updated build system 2024-07-09 14:58:56 -05:00
68c9ec0f82 updated build system 2024-07-09 14:28:39 -05:00
da07122997 updated build system 2024-07-09 13:30:49 -05:00
5fb66dc6ee updated build system 2024-07-09 13:25:46 -05:00
0c414fc8a6 updated build system 2024-07-09 13:12:02 -05:00
097507a2d9 version handling 2024-07-09 12:38:10 -05:00
466b3fcead update build system 2024-07-09 12:22:47 -05:00
81dc6d1269 updated build system 2024-07-09 12:09:52 -05:00
b6c020d35d updated build system 2024-07-09 11:42:30 -05:00
dfbe97988f fixes 2024-07-09 08:37:31 -05:00
cd36679a1c update build system 2024-06-27 12:12:09 -05:00
4d555bce96 updated build system 2024-06-27 12:04:37 -05:00
d96c6012c5 update build system 2024-06-27 08:33:54 -05:00
15793e714d update build system
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 20:17:07 -05:00
437cb94dda fix
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 18:32:21 -05:00
544765e408 refactor 2024-06-07 18:27:29 -05:00
472b1f0da1 fix data directory
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
2024-06-07 18:20:55 -05:00
867b2eee40 added back s3 enable/disable option
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 18:04:33 -05:00
5a30b15d1e update build system
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 16:15:22 -05:00
0f2c507c30 fix
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 16:09:46 -05:00
c545916332 update build system
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 14:42:23 -05:00
ac81f5f624 fix
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 11:48:28 -05:00
50ae7ab558 updated build system 2024-06-07 11:36:15 -05:00
a5e29ed745 build script changes 2024-06-07 11:26:37 -05:00
6d29170342 removed temporary scripts 2024-06-07 11:25:39 -05:00
410314022f updated build system
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
2024-06-07 10:01:58 -05:00
815e00d389 updated cmake
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 09:37:09 -05:00
fbf5455f89 renamed project 2024-06-07 09:34:44 -05:00
8925704555 build system update
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-07 09:32:43 -05:00
462365dc97 updated build system
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-06 18:22:05 -05:00
e73dd52973 updated changelog
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-06-06 14:32:36 -05:00
881eec4e02 updates 2024-06-06 14:31:41 -05:00
bcdfc73767 remove rc 2024-06-06 14:26:16 -05:00
0520aa3d40 fixes 2024-06-06 14:24:28 -05:00
c562d17b78 update 2024-06-06 14:19:30 -05:00
aee68520b3 move to new build system 2024-06-06 14:17:47 -05:00
88d8bf63f5 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-04-21 14:25:00 -05:00
303887ea37 update
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-03-15 18:06:27 -05:00
51d3d33175 changed sqlite db options
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-03-11 09:15:46 -05:00
cdb5ea279d cleanup
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-02-08 22:12:20 -06:00
311e1b318a updated mingw64 docker 2024-02-08 22:06:54 -06:00
31c5b6f1db Merge branch 'development' of https://git.fifthgrid.com/blockstorage/repertory into development
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-02-05 07:21:45 -06:00
a024f81e5d fix runaway thread
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2024-02-03 10:41:38 -06:00
4089fca87c msys2 support 2024-01-30 13:23:14 -06:00
99533a9687 revert
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2024-01-29 11:36:26 -06:00
d175a38ad1 revert
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-01-29 11:30:39 -06:00
28e918be18 fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-01-29 11:22:41 -06:00
b4403ff959 Merge branch 'development' of ssh://git.fifthgrid.com:3022/blockstorage/repertory into development
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-01-29 11:20:47 -06:00
8c548bad92 Merge branch 'development' of ssh://git.fifthgrid.com:3022/blockstorage/repertory into development 2024-01-29 11:18:24 -06:00
6ecad9af72 fix windows build
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2024-01-29 10:54:57 -06:00
df73f9a18f fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-01-29 09:48:02 -06:00
9b453327a8 fix download path
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-01-29 08:00:50 -06:00
071552ffd5 updated zlib
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2024-01-29 07:57:33 -06:00
33a0b4b295 fix unittests link on mingw64
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2024-01-27 12:34:43 -06:00
21bcd0780c fix unittests build on mingw64 2024-01-27 12:31:21 -06:00
f6cdf2914c mingw64 build fixes 2024-01-27 12:21:37 -06:00
7698a62c1f updated mingw64 build 2024-01-27 11:51:35 -06:00
2ae22cdb30 fixes
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-22 21:44:52 -06:00
7705432820 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-20 13:13:52 -06:00
f150832676 default to release 2023-12-20 12:12:22 -06:00
80c9375d65 fix 2023-12-20 12:05:59 -06:00
48ddff9e85 default to release 2023-12-20 11:59:41 -06:00
6f64fe02d5 address compiler warning 2023-12-18 09:15:28 -06:00
61ab4d8d58 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-15 18:52:14 -06:00
3bb962a266 cleanup
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-15 18:46:35 -06:00
c286dc4f5c refactoring
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-15 18:35:07 -06:00
9d36ded2b3 fix
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-15 18:30:33 -06:00
71686405e0 refactoring
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-12-15 17:58:49 -06:00
34c4a9c508 refactoring
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-15 09:01:45 -06:00
effafd3e91 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-14 13:51:48 -06:00
5a61bdf2b0 fix compiler warnings
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-14 13:45:08 -06:00
d21fcfd1c6 windows compiler warnings
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-14 09:37:58 -06:00
fff9a27362 cleanup
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
2023-12-14 09:17:15 -06:00
ad7872a0e3 refactor
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-14 09:11:49 -06:00
f3ea2ccc21 fix warnings 2023-12-14 09:06:30 -06:00
e959a9e795 fix 2023-12-14 09:00:21 -06:00
3fe5eac56d fix 2023-12-14 08:58:33 -06:00
67191be78d fix encryption provider 2023-12-14 08:55:04 -06:00
00cfb67b64 fix file read 2023-12-14 08:46:44 -06:00
35aa8f5a94 fix scripts
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-13 20:12:18 -06:00
f44972b8b3 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-13 15:32:30 -06:00
94675a3011 refactor 2023-12-13 15:22:19 -06:00
7112fbee7e refactor 2023-12-13 15:21:21 -06:00
ff13633962 refactor 2023-12-13 14:57:08 -06:00
cb93e34de0 refactor 2023-12-13 14:53:39 -06:00
883968f53e refactor 2023-12-13 14:50:19 -06:00
d6d4b579c9 fix encryption provider 2023-12-13 13:34:21 -06:00
0e83d84360 fix encryption provider 2023-12-13 10:02:05 -06:00
63a6b3bdba address windows compiler warnings
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-12 14:48:52 -06:00
aafa7e112a fix 2023-12-12 13:14:59 -06:00
804eb7aa5a fix 2023-12-12 13:11:46 -06:00
eeba641732 windows build script 2023-12-12 13:06:31 -06:00
932ea6cb1d fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-10 20:04:49 -06:00
dd4724e374 refactor
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-12-10 19:40:38 -06:00
e652c9956e address warnings
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-12-10 17:02:02 -06:00
d11767776e fix
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 16:59:48 -06:00
daefa58c0c code reorg
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-12-10 16:53:54 -06:00
7fe7299e7c refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-10 16:21:01 -06:00
0af9c6e8b0 updated json
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-10 14:27:34 -06:00
b9c2113d43 test refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 14:09:53 -06:00
4fd23de9b6 prepare for 2024
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 14:06:54 -06:00
3b372ebe86 prepare for 2024 2023-12-10 14:06:17 -06:00
d14d149a3b fix test 2023-12-10 14:03:09 -06:00
44acf7dace fix test 2023-12-10 14:02:10 -06:00
da7efdb1a8 cleanup
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 13:41:18 -06:00
444f39f9ff refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 13:38:44 -06:00
fd9c26f016 updated changelog
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 13:03:22 -06:00
7ef9f5cd65 removed binaries
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-12-10 13:02:10 -06:00
d3321b3c57 updated changelog 2023-12-10 12:22:34 -06:00
bbe546008f updated version
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-10 12:19:55 -06:00
5ec91e2d20 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-12-01 11:12:58 -06:00
fbe4df0251 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 17:36:26 -06:00
e5c690eb89 added curl error event
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 16:30:09 -06:00
aeedd114a6 refactoring
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 16:07:16 -06:00
dfa5e0e005 unit tests and fixes
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 13:32:06 -06:00
5911d370e5 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 13:15:16 -06:00
baf769c462 close all files in background
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 13:10:32 -06:00
33e8e0890b test fixes
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 12:54:00 -06:00
e2e82edefb script changes 2023-11-30 10:52:11 -06:00
da1b5c6953 refactor boost build
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 10:44:40 -06:00
629725e262 build fixes
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-30 10:40:29 -06:00
6ba4c0091b fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-30 09:24:51 -06:00
99d5261aab updates 2023-11-30 09:19:09 -06:00
97487cf0c4 updates 2023-11-30 09:18:10 -06:00
8436f2e2bb refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-19 18:02:28 -06:00
310e436163 fix crash
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-19 17:58:51 -06:00
b64477fd7e updated CHANGELOG.md
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-19 11:43:06 -06:00
782da43114 updated CHANGELOG.md
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-19 11:40:40 -06:00
37854b693b refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-19 00:19:28 -06:00
7360b72c7b fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 23:54:51 -06:00
6f68f268c0 fix
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-18 23:54:13 -06:00
983587aeb7 fix deadlock
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 23:53:03 -06:00
ee415d2e4a refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 23:01:49 -06:00
0c075d7fe4 fix
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-18 22:55:50 -06:00
eec3653c6b refactor
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 22:47:07 -06:00
5ac2a24611 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 20:05:34 -06:00
144683d7b3 cleanup
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-18 19:23:52 -06:00
9c917a9119 enable s3 test
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-18 19:08:14 -06:00
dc48b84191 fix truncate
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-18 19:07:53 -06:00
8555d31ddf refactor
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-18 18:19:57 -06:00
cb0dde2a80 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 17:38:48 -06:00
5f5b1f751b fix
Some checks reported errors
BlockStorage/repertory_osx_builds/pipeline/head Something is wrong with the build of this commit
BlockStorage/repertory_linux_builds/pipeline/head Something is wrong with the build of this commit
2023-11-18 17:18:03 -06:00
24c5dad929 fix lib64 directory issue 2023-11-18 17:14:38 -06:00
f0046fcd57 updated ignore list 2023-11-18 17:04:10 -06:00
3c3e415175 refactor 2023-11-18 17:03:42 -06:00
00d3355f43 updated openssl 2023-11-18 16:56:37 -06:00
907a14faff fix tests
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 16:34:40 -06:00
4348e89f99 refactor tests
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-18 15:34:43 -06:00
7a24cc54f8 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-16 08:08:03 -06:00
80f6e3c272 extract common behavior
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-15 20:54:28 -06:00
edb1297c4a fix boost version
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-15 19:56:44 -06:00
1ee157f943 switch to storj for binary hosting
All checks were successful
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-15 19:17:21 -06:00
1b2981b06e removed logs
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-14 20:34:43 -06:00
7203fefd33 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-14 20:28:47 -06:00
d808b2dd19 refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-14 20:09:50 -06:00
ee34a1e361 extract common behavior
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-14 19:49:46 -06:00
1766f91697 extract common behavior
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-14 18:35:18 -06:00
b3aa28d085 refactor
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-14 18:26:00 -06:00
a605d7af11 Merge branch 'development' of ssh://git.fifthgrid.com:3022/BlockStorage/repertory into development
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-14 18:24:27 -06:00
19b1a33c48 extract common behavior 2023-11-14 18:20:11 -06:00
04aa511448 Address compiler warnings #10
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 19:44:58 -06:00
8dc3d4466b refactor 2023-11-12 19:43:52 -06:00
0716a58ff0 Address compiler warnings #10
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 19:36:00 -06:00
60052081b4 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 14:36:02 -06:00
f11d49b264 fix test
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-12 14:34:58 -06:00
103dae6d08 unit tests and fixes
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-12 14:33:19 -06:00
821ed7b25e fix test 2023-11-12 14:27:11 -06:00
87f83b6f30 fix test
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 14:08:02 -06:00
ace81d797e cleanup 2023-11-12 14:01:53 -06:00
57ca2c7c6d refactor
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 12:41:18 -06:00
8360d9e045 extract common behavior
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 12:36:44 -06:00
560ffbbb6a fix events
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-12 11:49:03 -06:00
72314606f3 extract common behavior 2023-11-12 11:45:54 -06:00
db009b69dd fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-11 21:24:15 -06:00
3ed99dc0ce cleanup
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-11 21:17:07 -06:00
762a7c99d5 refactor s3 provider
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-11 21:15:08 -06:00
4e62156b70 added read retry to s3 provider
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-11 21:04:21 -06:00
cc49536755 fix
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-11 20:33:57 -06:00
54b844dc3b fix
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
2023-11-11 20:24:21 -06:00
1e8ba13f66 s3 provider fixes
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-11-11 20:21:12 -06:00
93011cee9c refactor s3 provider
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-11-11 19:05:27 -06:00
a474a5c73c refactor s3 provider
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-11-11 18:49:15 -06:00
17b98ca99d refactor
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-11-11 18:31:49 -06:00
281eedb71e refactor s3 provider
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-11-11 16:42:38 -06:00
1ee533591c refactor s3 provider
Some checks failed
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-11-11 16:28:00 -06:00
b87e1df140 refactor s3 provider 2023-11-11 15:35:35 -06:00
f88239a13e updated changelog 2023-11-11 11:52:19 -06:00
68476cbc00 refactor s3 provider 2023-11-11 11:32:14 -06:00
f2c1f64f02 Address compiler warnings #10
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-08 19:53:12 -06:00
a7209184c8 changed number of jobs
All checks were successful
BlockStorage/repertory_osx_builds/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-11-01 13:59:36 -05:00
ba59e29499 fix build
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
BlockStorage/repertory/pipeline/head There was a failure building this commit
BlockStorage/repertory_osx_builds/pipeline/head There was a failure building this commit
2023-10-31 16:03:03 -05:00
f94196d865 Address compiler warnings #10
Some checks failed
BlockStorage/repertory_osx/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-30 19:33:18 -05:00
bb5a9f9737 fix
Some checks failed
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx/pipeline/head There was a failure building this commit
2023-10-30 19:08:35 -05:00
4bc5cf7c64 [require c++20] [moved to stduuid]
Some checks failed
BlockStorage/repertory_osx/pipeline/head There was a failure building this commit
BlockStorage/repertory_linux_builds/pipeline/head Build started...
2023-10-30 19:03:12 -05:00
639d14452b Address compiler warnings #10
Some checks failed
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-10-30 13:31:52 -05:00
e7413fb741 Address compiler warnings #10
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-30 12:06:07 -05:00
c0e720498d Address compiler warnings #10
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-30 11:49:56 -05:00
383c3b4be6 Address compiler warnings #10 [Wconversion] 2023-10-30 11:41:45 -05:00
e9b202f5c8 #10 Address compiler warnings
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-30 11:07:42 -05:00
bc3005a6a4 #10 Address compiler warnings 2023-10-30 10:59:04 -05:00
8cf19e0594 #10 Address compiler warnings 2023-10-30 10:54:35 -05:00
b137b57dbc #10 Address compiler warnings 2023-10-30 10:36:31 -05:00
5dff8927da build win32 last
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-30 10:04:00 -05:00
197e79dd07 fix mingw64 jenkins build
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-30 10:02:55 -05:00
6262aca761 cmake build cleanup
Some checks failed
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-10-29 20:38:13 -05:00
c156ae704b cmake build refactor
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
2023-10-29 20:21:37 -05:00
a67979ec40 cleanup build
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
2023-10-29 20:05:38 -05:00
54bfc11620 fix erroneous nodiscard
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
2023-10-29 19:45:21 -05:00
d33c2cd3a2 removed msvc compilation support
Some checks are pending
BlockStorage/repertory_linux_builds/pipeline/head Build queued...
BlockStorage/repertory_osx/pipeline/head Build queued...
2023-10-29 19:40:29 -05:00
3a5f428fb6 [boost to v1.83.0] [curl to v8.4.0] [libsodium to v1.0.19] [rocksdb to v8.6.7]
Some checks reported errors
BlockStorage/repertory_windows/pipeline/head Something is wrong with the build of this commit
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head There was a failure building this commit
2023-10-29 19:26:09 -05:00
0331152569 add zlib as project dependency
All checks were successful
BlockStorage/repertory_windows/pipeline/head This commit looks good
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-29 19:12:16 -05:00
1b7e854f5f added zlib
All checks were successful
BlockStorage/repertory_windows/pipeline/head This commit looks good
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-29 19:05:40 -05:00
12a945d863 updated version
All checks were successful
BlockStorage/repertory_osx/pipeline/head This commit looks good
BlockStorage/repertory_windows/pipeline/head This commit looks good
BlockStorage/repertory_linux_builds/pipeline/head This commit looks good
2023-10-29 01:59:00 -05:00
109 changed files with 4344 additions and 8005 deletions

View File

@ -161,7 +161,6 @@ openssldir
pkgconfig
plarge_integer
plex
println
project_enable_fontconfig
project_enable_gtkmm
project_enable_libdsm

View File

@ -2,10 +2,6 @@
## v2.0.2-rc
### BREAKING CHANGES
* Refactored `config.json` - will need to verify configuration settings prior to mounting
### Issues
* #12 [Unit Test] Complete all providers unit tests
@ -18,18 +14,13 @@
* MSYS2 is required for building Windows binaries on Windows
* OS X support is temporarily disabled
* #19 [bug] Rename file is broken for files that are existing
* #23 [bug] Incorrect file size displayed while upload is pending
* #24 RocksDB implementations should be transactional
* #25 Writes should block when maximum cache size is reached
* #26 Complete ring buffer and direct download support
### Changes from v2.0.1-rc
* Ability to choose between RocksDB and SQLite databases
* Added direct reads and implemented download fallback
* Corrected file times on S3 and Sia providers
* Corrected handling of `chown()` and `chmod()`
* Fixed erroneous download of chunks after resize
* Comprehensive WinFSP and FUSE unit tests, including remote testing
## v2.0.1-rc

View File

@ -1,5 +1,5 @@
set(BINUTILS_HASH ae9a5789e23459e59606e6714723f2d3ffc31c03174191ef0d015bdf06007450)
set(BOOST_HASH f55c340aa49763b1925ccf02b2e83f35fdcf634c9d5164a2acb87540173c741d)
set(BOOST_HASH 2575e74ffc3ef1cd0babac2c1ee8bdb5782a0ee672b1912da40e5b4b591ca01f)
set(BOOST2_HASH 7bd7ddceec1a1dfdcbdb3e609b60d01739c38390a5f956385a12f3122049f0ca)
set(CPP_HTTPLIB_HASH 405abd8170f2a446fc8612ac635d0db5947c0d2e156e32603403a4496255ff00)
set(CURL_HASH 5a231145114589491fc52da118f9c7ef8abee885d1cb1ced99c7290e9a352f07)

View File

@ -1,7 +1,9 @@
if(PROJECT_ENABLE_ROCKSDB)
if(PROJECT_BUILD)
add_definitions(-DPROJECT_ENABLE_ROCKSDB)
find_library(ROCKSDB_LIBRARY NAMES librocksdb.a REQUIRED)
link_libraries(${ROCKSDB_LIBRARY})
elseif(NOT PROJECT_IS_MINGW OR CMAKE_HOST_WIN32)
ExternalProject_Add(rocksdb_project
@ -10,11 +12,11 @@ if(PROJECT_ENABLE_ROCKSDB)
URL_HASH SHA256=${ROCKSDB_HASH}
LIST_SEPARATOR |
CMAKE_ARGS ${PROJECT_EXTERNAL_CMAKE_FLAGS}
-DBUILD_SHARED_LIBS=OFF
-DBUILD_SHARED_LIBS=${PROJECT_BUILD_SHARED_LIBS}
-DBUILD_STATIC_LIBS=ON
-DFAIL_ON_WARNINGS=OFF
-DPORTABLE=1
-DROCKSDB_BUILD_SHARED=OFF
-DROCKSDB_BUILD_SHARED=${PROJECT_BUILD_SHARED_LIBS}
-DROCKSDB_INSTALL_ON_WINDOWS=ON
-DWITH_BENCHMARK=OFF
-DWITH_BENCHMARK_TOOLS=OFF
@ -31,4 +33,4 @@ if(PROJECT_ENABLE_ROCKSDB)
list(APPEND PROJECT_DEPENDENCIES rocksdb_project)
endif()
endif()
endif()

View File

@ -1,15 +1,15 @@
set(BINUTILS_VERSION 2.41)
set(BOOST_MAJOR_VERSION 1)
set(BOOST_MINOR_VERSION 86)
set(BOOST_PATCH_VERSION 0)
set(BOOST2_MAJOR_VERSION 1)
set(BOOST2_MINOR_VERSION 76)
set(BOOST2_PATCH_VERSION 0)
set(BOOST_MAJOR_VERSION 1)
set(BOOST_MINOR_VERSION 87)
set(BOOST_PATCH_VERSION 0)
set(CPP_HTTPLIB_VERSION 0.18.1)
set(CURL2_VERSION 8_11_0)
set(CURL_VERSION 8.11.0)
set(EXPAT2_VERSION 2_6_4)
set(CURL2_VERSION 8_11_0)
set(EXPAT_VERSION 2.6.4)
set(EXPAT2_VERSION 2_6_4)
set(GCC_VERSION 14.2.0)
set(GTEST_VERSION 1.15.2)
set(ICU_VERSION 75-1)
@ -22,7 +22,7 @@ set(PKG_CONFIG_VERSION 0.29.2)
set(PUGIXML_VERSION 1.14)
set(ROCKSDB_VERSION 9.7.4)
set(SPDLOG_VERSION 1.15.0)
set(SQLITE2_VERSION 3.46.1)
set(SQLITE_VERSION 3460100)
set(SQLITE2_VERSION 3.46.1)
set(STDUUID_VERSION 1.2.3)
set(ZLIB_VERSION 1.3.1)

View File

@ -22,9 +22,10 @@
#ifndef REPERTORY_INCLUDE_APP_CONFIG_HPP_
#define REPERTORY_INCLUDE_APP_CONFIG_HPP_
#include "events/event.hpp"
#include "types/remote.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "types/repertory.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
class app_config final {
@ -39,7 +40,7 @@ public:
default_data_directory(const provider_type &prov) -> std::string;
[[nodiscard]] static auto
default_remote_api_port(const provider_type &prov) -> std::uint16_t;
default_remote_port(const provider_type &prov) -> std::uint16_t;
[[nodiscard]] static auto
default_rpc_port(const provider_type &prov) -> std::uint16_t;
@ -53,202 +54,427 @@ public:
public:
app_config(const provider_type &prov, std::string_view data_directory = "");
app_config() = delete;
app_config(app_config &&) = delete;
app_config(const app_config &) = delete;
~app_config() { save(); }
auto operator=(const app_config &) -> app_config & = delete;
auto operator=(app_config &&) -> app_config & = delete;
private:
provider_type prov_;
atomic<std::string> api_auth_;
std::atomic<std::uint16_t> api_port_;
atomic<std::string> api_user_;
std::atomic<bool> config_changed_;
std::atomic<database_type> db_type_{database_type::rocksdb};
std::atomic<std::uint8_t> download_timeout_secs_;
std::atomic<bool> enable_download_timeout_;
std::atomic<bool> enable_drive_events_;
std::string api_auth_;
std::uint16_t api_port_;
std::string api_user_;
bool config_changed_;
std::string data_directory_;
std::uint8_t download_timeout_secs_;
bool enable_chunk_downloader_timeout_;
bool enable_comm_duration_events_;
bool enable_drive_events_;
bool enable_max_cache_size_;
#if defined(_WIN32)
std::atomic<bool> enable_mount_manager_;
bool enable_mount_manager_;
#endif // defined(_WIN32)
std::atomic<event_level> event_level_;
std::atomic<std::uint32_t> eviction_delay_mins_;
std::atomic<bool> eviction_uses_accessed_time_;
std::atomic<std::uint16_t> high_freq_interval_secs_;
std::atomic<std::uint16_t> low_freq_interval_secs_;
std::atomic<std::uint64_t> max_cache_size_bytes_;
std::atomic<std::uint8_t> max_upload_count_;
std::atomic<std::uint16_t> med_freq_interval_secs_;
std::atomic<std::uint16_t> online_check_retry_secs_;
std::atomic<std::uint16_t> orphaned_file_retention_days_;
std::atomic<download_type> preferred_download_type_;
std::atomic<std::uint16_t> retry_read_count_;
std::atomic<std::uint16_t> ring_buffer_file_size_;
std::atomic<std::uint16_t> task_wait_ms_;
bool enable_remote_mount_;
encrypt_config encrypt_config_;
event_level event_level_;
std::uint32_t eviction_delay_mins_;
bool eviction_uses_accessed_time_;
std::uint16_t high_freq_interval_secs_;
bool is_remote_mount_;
std::uint16_t low_freq_interval_secs_;
std::uint64_t max_cache_size_bytes_;
std::uint8_t max_upload_count_;
std::uint16_t med_freq_interval_secs_;
std::uint8_t min_download_timeout_secs_;
std::uint16_t online_check_retry_secs_;
std::uint16_t orphaned_file_retention_days_;
std::string preferred_download_type_;
std::uint8_t read_ahead_count_;
std::uint8_t remote_client_pool_size_;
std::string remote_host_name_or_ip_;
std::uint8_t remote_max_connections_;
std::uint16_t remote_port_;
std::uint16_t remote_receive_timeout_secs_;
std::uint16_t remote_send_timeout_secs_;
std::string remote_token_;
std::uint16_t retry_read_count_;
std::uint16_t ring_buffer_file_size_;
std::uint16_t task_wait_ms_;
private:
std::string cache_directory_;
std::string data_directory_;
atomic<encrypt_config> encrypt_config_;
atomic<host_config> host_config_;
host_config hc_{};
s3_config s3_config_{};
sia_config sia_config_{};
std::uint64_t version_{REPERTORY_CONFIG_VERSION};
std::string log_directory_;
mutable std::recursive_mutex read_write_mutex_;
atomic<remote::remote_config> remote_config_;
atomic<remote::remote_mount> remote_mount_;
atomic<s3_config> s3_config_;
atomic<sia_config> sia_config_;
std::unordered_map<std::string, std::function<std::string()>>
value_get_lookup_;
std::unordered_map<std::string,
std::function<std::string(const std::string &)>>
value_set_lookup_;
std::uint64_t version_{REPERTORY_CONFIG_VERSION};
mutable std::recursive_mutex remote_mount_mutex_;
private:
[[nodiscard]] auto load() -> bool;
template <typename dest>
auto get_value(const json &json_document, const std::string &name, dest &dst,
bool &success_flag) -> bool {
REPERTORY_USES_FUNCTION_NAME();
auto ret{false};
try {
if (json_document.find(name) != json_document.end()) {
dst = json_document[name].get<dest>();
ret = true;
} else {
success_flag = false;
}
} catch (const json::exception &ex) {
utils::error::raise_error(function_name, ex, "exception occurred");
success_flag = false;
ret = false;
}
return ret;
}
template <typename dest, typename source>
auto set_value(dest &dst, const source &src) -> bool;
auto set_value(dest &dst, const source &src) -> bool {
auto ret{false};
recur_mutex_lock lock(read_write_mutex_);
if (dst != src) {
dst = src;
config_changed_ = true;
save();
ret = true;
}
return ret;
}
public:
[[nodiscard]] auto get_api_auth() const -> std::string;
[[nodiscard]] auto get_api_auth() const -> std::string { return api_auth_; }
[[nodiscard]] auto get_api_port() const -> std::uint16_t;
[[nodiscard]] auto get_api_port() const -> std::uint16_t { return api_port_; }
[[nodiscard]] auto get_api_user() const -> std::string;
[[nodiscard]] auto get_api_user() const -> std::string { return api_user_; }
[[nodiscard]] auto get_cache_directory() const -> std::string;
[[nodiscard]] auto get_cache_directory() const -> std::string {
return cache_directory_;
}
[[nodiscard]] auto get_chunk_downloader_timeout_secs() const -> std::uint8_t {
return std::max(min_download_timeout_secs_, download_timeout_secs_);
}
[[nodiscard]] auto get_config_file_path() const -> std::string;
[[nodiscard]] auto get_database_type() const -> database_type;
[[nodiscard]] auto get_data_directory() const -> std::string {
return data_directory_;
}
[[nodiscard]] auto get_data_directory() const -> std::string;
[[nodiscard]] auto get_enable_chunk_download_timeout() const -> bool {
return enable_chunk_downloader_timeout_;
}
[[nodiscard]] auto get_download_timeout_secs() const -> std::uint8_t;
[[nodiscard]] auto get_enable_comm_duration_events() const -> bool {
return enable_comm_duration_events_;
}
[[nodiscard]] auto get_enable_download_timeout() const -> bool;
[[nodiscard]] auto get_enable_drive_events() const -> bool {
return enable_drive_events_;
}
[[nodiscard]] auto get_enable_drive_events() const -> bool;
[[nodiscard]] auto get_encrypt_config() const -> encrypt_config;
[[nodiscard]] auto get_encrypt_config() const -> encrypt_config {
return encrypt_config_;
}
#if defined(_WIN32)
[[nodiscard]] auto get_enable_mount_manager() const -> bool;
[[nodiscard]] auto get_enable_mount_manager() const -> bool {
return enable_mount_manager_;
}
#endif // defined(_WIN32)
[[nodiscard]] auto get_event_level() const -> event_level;
[[nodiscard]] auto get_enable_max_cache_size() const -> bool {
return enable_max_cache_size_;
}
[[nodiscard]] auto get_eviction_delay_mins() const -> std::uint32_t;
[[nodiscard]] auto get_enable_remote_mount() const -> bool {
return enable_remote_mount_;
}
[[nodiscard]] auto get_eviction_uses_accessed_time() const -> bool;
[[nodiscard]] auto get_event_level() const -> event_level {
return event_level_;
}
[[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint16_t;
[[nodiscard]] auto get_eviction_delay_mins() const -> std::uint32_t {
return eviction_delay_mins_;
}
[[nodiscard]] auto get_host_config() const -> host_config;
[[nodiscard]] auto get_eviction_uses_accessed_time() const -> bool {
return eviction_uses_accessed_time_;
}
[[nodiscard]] auto get_high_frequency_interval_secs() const -> std::uint16_t {
return std::max(static_cast<std::uint16_t>(1U), high_freq_interval_secs_);
}
[[nodiscard]] auto get_host_config() const -> host_config { return hc_; }
[[nodiscard]] auto get_is_remote_mount() const -> bool {
return is_remote_mount_;
}
[[nodiscard]] auto get_json() const -> json;
[[nodiscard]] auto get_log_directory() const -> std::string;
[[nodiscard]] auto get_log_directory() const -> std::string {
return log_directory_;
}
[[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint16_t;
[[nodiscard]] auto get_low_frequency_interval_secs() const -> std::uint16_t {
return std::max(static_cast<std::uint16_t>(1U), low_freq_interval_secs_);
}
[[nodiscard]] auto get_max_cache_size_bytes() const -> std::uint64_t;
[[nodiscard]] auto get_max_upload_count() const -> std::uint8_t;
[[nodiscard]] auto get_max_upload_count() const -> std::uint8_t {
return std::max(std::uint8_t(1U), max_upload_count_);
}
[[nodiscard]] auto get_med_frequency_interval_secs() const -> std::uint16_t;
[[nodiscard]] auto get_med_frequency_interval_secs() const -> std::uint16_t {
return std::max(static_cast<std::uint16_t>(1U), med_freq_interval_secs_);
}
[[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t;
[[nodiscard]] auto get_online_check_retry_secs() const -> std::uint16_t {
return std::max(std::uint16_t(15U), online_check_retry_secs_);
}
[[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t;
[[nodiscard]] auto get_orphaned_file_retention_days() const -> std::uint16_t {
return std::min(static_cast<std::uint16_t>(31U),
std::max(static_cast<std::uint16_t>(1U),
orphaned_file_retention_days_));
}
[[nodiscard]] auto get_preferred_download_type() const -> download_type;
[[nodiscard]] auto get_preferred_download_type() const -> download_type {
return download_type_from_string(preferred_download_type_,
download_type::fallback);
}
[[nodiscard]] auto get_provider_type() const -> provider_type;
[[nodiscard]] auto get_provider_type() const -> provider_type {
return prov_;
}
[[nodiscard]] auto get_remote_config() const -> remote::remote_config;
[[nodiscard]] auto get_read_ahead_count() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(1U), read_ahead_count_);
}
[[nodiscard]] auto get_remote_mount() const -> remote::remote_mount;
[[nodiscard]] auto get_remote_client_pool_size() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(5U), remote_client_pool_size_);
}
[[nodiscard]] auto get_retry_read_count() const -> std::uint16_t;
[[nodiscard]] auto get_remote_host_name_or_ip() const -> std::string {
return remote_host_name_or_ip_;
}
[[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t;
[[nodiscard]] auto get_remote_max_connections() const -> std::uint8_t {
return std::max(static_cast<std::uint8_t>(1U), remote_max_connections_);
}
[[nodiscard]] auto get_s3_config() const -> s3_config;
[[nodiscard]] auto get_remote_port() const -> std::uint16_t {
return remote_port_;
}
[[nodiscard]] auto get_sia_config() const -> sia_config;
[[nodiscard]] auto get_remote_receive_timeout_secs() const -> std::uint16_t {
return remote_receive_timeout_secs_;
}
[[nodiscard]] auto get_task_wait_ms() const -> std::uint16_t;
[[nodiscard]] auto get_remote_send_timeout_secs() const -> std::uint16_t {
return remote_send_timeout_secs_;
}
[[nodiscard]] auto
get_value_by_name(const std::string &name) const -> std::string;
[[nodiscard]] auto get_remote_token() const -> std::string {
return remote_token_;
}
[[nodiscard]] auto get_version() const -> std::uint64_t;
[[nodiscard]] auto get_retry_read_count() const -> std::uint16_t {
return std::max(std::uint16_t(2), retry_read_count_);
}
[[nodiscard]] auto get_ring_buffer_file_size() const -> std::uint16_t {
return std::max(
static_cast<std::uint16_t>(64U),
std::min(static_cast<std::uint16_t>(1024U), ring_buffer_file_size_));
}
[[nodiscard]] auto get_s3_config() const -> s3_config { return s3_config_; }
[[nodiscard]] auto get_sia_config() const -> sia_config {
return sia_config_;
}
[[nodiscard]] auto get_task_wait_ms() const -> std::uint16_t {
return std::max(static_cast<std::uint16_t>(50U), task_wait_ms_);
}
[[nodiscard]] auto get_value_by_name(const std::string &name) -> std::string;
[[nodiscard]] auto get_version() const -> std::uint64_t { return version_; }
void save();
void set_api_auth(const std::string &value);
void set_api_auth(const std::string &api_auth) {
set_value(api_auth_, api_auth);
}
void set_api_port(std::uint16_t value);
void set_api_port(std::uint16_t api_port) { set_value(api_port_, api_port); }
void set_api_user(const std::string &value);
void set_api_user(const std::string &api_user) {
set_value(api_user_, api_user);
}
void set_download_timeout_secs(std::uint8_t value);
void set_chunk_downloader_timeout_secs(
std::uint8_t chunk_downloader_timeout_secs) {
set_value(download_timeout_secs_, chunk_downloader_timeout_secs);
}
void set_database_type(const database_type &value);
void
set_enable_chunk_downloader_timeout(bool enable_chunk_downloader_timeout) {
set_value(enable_chunk_downloader_timeout_,
enable_chunk_downloader_timeout);
}
void set_enable_download_timeout(bool value);
void set_enable_comm_duration_events(bool enable_comm_duration_events) {
set_value(enable_comm_duration_events_, enable_comm_duration_events);
}
void set_enable_drive_events(bool value);
void set_enable_drive_events(bool enable_drive_events) {
set_value(enable_drive_events_, enable_drive_events);
}
void set_enable_max_cache_size(bool enable_max_cache_size) {
set_value(enable_max_cache_size_, enable_max_cache_size);
}
#if defined(_WIN32)
void set_enable_mount_manager(bool value);
void set_enable_mount_manager(bool enable_mount_manager) {
set_value(enable_mount_manager_, enable_mount_manager);
}
#endif // defined(_WIN32)
void set_event_level(const event_level &value);
void set_enable_remote_mount(bool enable_remote_mount);
void set_encrypt_config(encrypt_config value);
void set_event_level(const event_level &level) {
if (set_value(event_level_, level)) {
event_system::instance().raise<event_level_changed>(
event_level_to_string(level));
}
}
void set_eviction_delay_mins(std::uint32_t value);
void set_eviction_delay_mins(std::uint32_t eviction_delay_mins) {
set_value(eviction_delay_mins_, eviction_delay_mins);
}
void set_eviction_uses_accessed_time(bool value);
void set_eviction_uses_accessed_time(bool eviction_uses_accessed_time) {
set_value(eviction_uses_accessed_time_, eviction_uses_accessed_time);
}
void set_high_frequency_interval_secs(std::uint16_t value);
void
set_high_frequency_interval_secs(std::uint16_t high_frequency_interval_secs) {
set_value(high_freq_interval_secs_, high_frequency_interval_secs);
}
void set_host_config(host_config value);
#if defined(PROJECT_TESTING)
void set_host_config(host_config hc) {
config_changed_ = true;
hc_ = std::move(hc);
save();
}
void set_low_frequency_interval_secs(std::uint16_t value);
void set_s3_config(s3_config s3) {
config_changed_ = true;
s3_config_ = std::move(s3);
save();
}
void set_max_cache_size_bytes(std::uint64_t value);
void set_sia_config(sia_config sia) {
config_changed_ = true;
sia_config_ = std::move(sia);
save();
}
#endif // defined(PROJECT_TESTING)
void set_max_upload_count(std::uint8_t value);
void set_is_remote_mount(bool is_remote_mount);
void set_med_frequency_interval_secs(std::uint16_t value);
void
set_low_frequency_interval_secs(std::uint16_t low_frequency_interval_secs) {
set_value(low_freq_interval_secs_, low_frequency_interval_secs);
}
void set_online_check_retry_secs(std::uint16_t value);
void set_max_cache_size_bytes(std::uint64_t max_cache_size_bytes) {
set_value(max_cache_size_bytes_, max_cache_size_bytes);
}
void set_orphaned_file_retention_days(std::uint16_t value);
void set_max_upload_count(std::uint8_t max_upload_count) {
set_value(max_upload_count_, max_upload_count);
}
void set_preferred_download_type(const download_type &value);
void
set_med_frequency_interval_secs(std::uint16_t med_frequency_interval_secs) {
set_value(med_freq_interval_secs_, med_frequency_interval_secs);
}
void set_remote_config(remote::remote_config value);
void set_online_check_retry_secs(std::uint16_t online_check_retry_secs) {
set_value(online_check_retry_secs_, online_check_retry_secs);
}
void set_remote_mount(remote::remote_mount value);
void
set_orphaned_file_retention_days(std::uint16_t orphaned_file_retention_days) {
set_value(orphaned_file_retention_days_, orphaned_file_retention_days);
}
void set_retry_read_count(std::uint16_t value);
void set_preferred_download_type(const download_type &dt) {
set_value(preferred_download_type_, download_type_to_string(dt));
}
void set_ring_buffer_file_size(std::uint16_t value);
void set_read_ahead_count(std::uint8_t read_ahead_count) {
set_value(read_ahead_count_, read_ahead_count);
}
void set_s3_config(s3_config value);
void set_remote_client_pool_size(std::uint8_t remote_client_pool_size) {
set_value(remote_client_pool_size_, remote_client_pool_size);
}
void set_sia_config(sia_config value);
void set_ring_buffer_file_size(std::uint16_t ring_buffer_file_size) {
set_value(ring_buffer_file_size_, ring_buffer_file_size);
}
void set_task_wait_ms(std::uint16_t value);
void set_remote_host_name_or_ip(const std::string &remote_host_name_or_ip) {
set_value(remote_host_name_or_ip_, remote_host_name_or_ip);
}
void set_remote_max_connections(std::uint8_t remote_max_connections) {
set_value(remote_max_connections_, remote_max_connections);
}
void set_remote_port(std::uint16_t remote_port) {
set_value(remote_port_, remote_port);
}
void
set_remote_receive_timeout_secs(std::uint16_t remote_receive_timeout_secs) {
set_value(remote_receive_timeout_secs_, remote_receive_timeout_secs);
}
void set_remote_send_timeout_secs(std::uint16_t remote_send_timeout_secs) {
set_value(remote_send_timeout_secs_, remote_send_timeout_secs);
}
void set_remote_token(const std::string &remote_token) {
set_value(remote_token_, remote_token);
}
void set_retry_read_count(std::uint16_t retry_read_count) {
set_value(retry_read_count_, retry_read_count);
}
void set_task_wait_ms(std::uint16_t task_wait_ms) {
set_value(task_wait_ms_, task_wait_ms);
}
[[nodiscard]] auto set_value_by_name(const std::string &name,
const std::string &value) -> std::string;

View File

@ -52,23 +52,23 @@ public:
~packet() = default;
private:
data_buffer buffer_{};
std::size_t decode_offset_{0U};
data_buffer buffer_;
std::size_t decode_offset_ = 0U;
public:
[[nodiscard]] static auto decode_json(packet &response, json &json_data)
-> int;
[[nodiscard]] static auto decode_json(packet &response,
json &json_data) -> int;
public:
void clear();
[[nodiscard]] auto current_pointer() -> unsigned char * {
return (decode_offset_ < buffer_.size()) ? &buffer_.at(decode_offset_)
return (decode_offset_ < buffer_.size()) ? &buffer_[decode_offset_]
: nullptr;
}
[[nodiscard]] auto current_pointer() const -> const unsigned char * {
return (decode_offset_ < buffer_.size()) ? &buffer_.at(decode_offset_)
return (decode_offset_ < buffer_.size()) ? &buffer_[decode_offset_]
: nullptr;
}
@ -206,7 +206,7 @@ public:
return static_cast<std::uint32_t>(buffer_.size());
}
void to_buffer(data_buffer &buffer);
void transfer_into(data_buffer &buffer);
public:
auto operator=(const data_buffer &buffer) noexcept -> packet &;
@ -226,6 +226,8 @@ public:
return buffer_.at(index);
}
};
using packet = packet;
} // namespace repertory
#endif // REPERTORY_INCLUDE_COMM_PACKET_PACKET_HPP_

View File

@ -23,7 +23,6 @@
#define REPERTORY_INCLUDE_COMM_PACKET_PACKET_CLIENT_HPP_
#include "comm/packet/packet.hpp"
#include "types/remote.hpp"
using boost::asio::ip::tcp;
@ -37,7 +36,9 @@ private:
};
public:
packet_client(remote::remote_config cfg);
packet_client(std::string host_name_or_ip, std::uint8_t max_connections,
std::uint16_t port, std::uint16_t receive_timeout,
std::uint16_t send_timeout, std::string encryption_token);
~packet_client();
@ -48,7 +49,12 @@ public:
private:
boost::asio::io_context io_context_;
remote::remote_config cfg_;
std::string host_name_or_ip_;
std::uint8_t max_connections_;
std::uint16_t port_;
std::uint16_t receive_timeout_;
std::uint16_t send_timeout_;
std::string encryption_token_;
std::string unique_id_;
private:
@ -69,21 +75,21 @@ private:
void put_client(std::shared_ptr<client> &cli);
[[nodiscard]] auto read_packet(client &cli, packet &response)
-> packet::error_type;
[[nodiscard]] auto read_packet(client &cli,
packet &response) -> packet::error_type;
void resolve();
public:
[[nodiscard]] auto send(std::string_view method, std::uint32_t &service_flags)
-> packet::error_type;
[[nodiscard]] auto send(std::string_view method,
std::uint32_t &service_flags) -> packet::error_type;
[[nodiscard]] auto send(std::string_view method, packet &request,
std::uint32_t &service_flags) -> packet::error_type;
[[nodiscard]] auto send(std::string_view method, packet &request,
packet &response, std::uint32_t &service_flags)
-> packet::error_type;
packet &response,
std::uint32_t &service_flags) -> packet::error_type;
};
} // namespace repertory

View File

@ -52,8 +52,8 @@ public:
private:
struct connection {
connection(io_context &ctx, tcp::acceptor &acceptor_)
: socket(ctx), acceptor(acceptor_) {}
connection(boost::asio::io_service &io_service, tcp::acceptor &acceptor_)
: socket(io_service), acceptor(acceptor_) {}
tcp::socket socket;
tcp::acceptor &acceptor;
@ -68,7 +68,7 @@ private:
std::string encryption_token_;
closed_callback closed_;
message_handler_callback message_handler_;
io_context io_context_;
boost::asio::io_context io_context_;
std::unique_ptr<std::thread> server_thread_;
std::vector<std::thread> service_threads_;
std::recursive_mutex connection_mutex_;

View File

@ -1,34 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_FILE_DB_HPP_
#include "db/i_file_db.hpp"
namespace repertory {
class app_config;
[[nodiscard]] auto create_file_db(const app_config &cfg)
-> std::unique_ptr<i_file_db>;
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_FILE_DB_HPP_

View File

@ -1,95 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_I_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_I_FILE_DB_HPP_
#include "types/repertory.hpp"
namespace repertory {
class i_file_db {
INTERFACE_SETUP(i_file_db);
public:
struct file_info final {
std::string api_path;
bool directory;
std::string source_path;
};
struct file_data final {
std::string api_path;
std::uint64_t file_size{};
std::vector<
std::array<unsigned char, crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>
iv_list{};
std::string source_path;
};
public:
[[nodiscard]] virtual auto add_directory(const std::string &api_path,
const std::string &source_path)
-> api_error = 0;
[[nodiscard]] virtual auto add_or_update_file(const file_data &data)
-> api_error = 0;
virtual void clear() = 0;
[[nodiscard]] virtual auto count() const -> std::uint64_t = 0;
[[nodiscard]] virtual auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error = 0;
[[nodiscard]] virtual auto
get_directory_api_path(const std::string &source_path,
std::string &api_path) const -> api_error = 0;
[[nodiscard]] virtual auto
get_directory_source_path(const std::string &api_path,
std::string &source_path) const -> api_error = 0;
[[nodiscard]] virtual auto get_file_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error = 0;
[[nodiscard]] virtual auto get_file_data(const std::string &api_path,
file_data &data) const
-> api_error = 0;
[[nodiscard]] virtual auto
get_file_source_path(const std::string &api_path,
std::string &source_path) const -> api_error = 0;
[[nodiscard]] virtual auto get_item_list() const
-> std::vector<file_info> = 0;
[[nodiscard]] virtual auto get_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error = 0;
[[nodiscard]] virtual auto remove_item(const std::string &api_path)
-> api_error = 0;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_I_FILE_DB_HPP_

View File

@ -43,43 +43,44 @@ public:
struct upload_entry final {
std::string api_path;
std::uint64_t date_time{};
std::string source_path;
};
public:
[[nodiscard]] virtual auto add_resume(const resume_entry &entry) -> bool = 0;
[[nodiscard]] virtual auto add_resume(resume_entry entry) -> bool = 0;
[[nodiscard]] virtual auto add_upload(const upload_entry &entry) -> bool = 0;
[[nodiscard]] virtual auto add_upload(upload_entry entry) -> bool = 0;
[[nodiscard]] virtual auto add_upload_active(const upload_active_entry &entry)
-> bool = 0;
[[nodiscard]] virtual auto
add_upload_active(upload_active_entry entry) -> bool = 0;
virtual void clear() = 0;
[[nodiscard]] virtual auto get_next_upload() const
-> std::optional<upload_entry> = 0;
[[nodiscard]] virtual auto
get_next_upload() const -> std::optional<upload_entry> = 0;
[[nodiscard]] virtual auto get_resume_list() const
-> std::vector<resume_entry> = 0;
[[nodiscard]] virtual auto
get_resume_list() const -> std::vector<resume_entry> = 0;
[[nodiscard]] virtual auto get_upload(const std::string &api_path) const
-> std::optional<upload_entry> = 0;
[[nodiscard]] virtual auto get_upload_active_list() const
-> std::vector<upload_active_entry> = 0;
[[nodiscard]] virtual auto
get_upload_active_list() const -> std::vector<upload_active_entry> = 0;
[[nodiscard]] virtual auto remove_resume(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto
remove_resume(const std::string &api_path) -> bool = 0;
[[nodiscard]] virtual auto remove_upload(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto
remove_upload(const std::string &api_path) -> bool = 0;
[[nodiscard]] virtual auto remove_upload_active(const std::string &api_path)
-> bool = 0;
[[nodiscard]] virtual auto
remove_upload_active(const std::string &api_path) -> bool = 0;
[[nodiscard]] virtual auto rename_resume(const std::string &from_api_path,
const std::string &to_api_path)
-> bool = 0;
[[nodiscard]] virtual auto
rename_resume(const std::string &from_api_path,
const std::string &to_api_path) -> bool = 0;
};
} // namespace repertory

View File

@ -1,117 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_DB_HPP_
#include "db/i_file_db.hpp"
namespace repertory {
class app_config;
class rdb_file_db final : public i_file_db {
public:
rdb_file_db(const app_config &cfg);
~rdb_file_db() override;
rdb_file_db(const rdb_file_db &) = delete;
rdb_file_db(rdb_file_db &&) = delete;
auto operator=(const rdb_file_db &) -> rdb_file_db & = delete;
auto operator=(rdb_file_db &&) -> rdb_file_db & = delete;
private:
const app_config &cfg_;
private:
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
rocksdb::ColumnFamilyHandle *directory_family_{};
rocksdb::ColumnFamilyHandle *file_family_{};
rocksdb::ColumnFamilyHandle *path_family_{};
rocksdb::ColumnFamilyHandle *source_family_{};
private:
void create_or_open(bool clear);
[[nodiscard]] auto create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator>;
[[nodiscard]] static auto
perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action) -> api_error;
[[nodiscard]] auto perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error;
[[nodiscard]] auto remove_item(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn) -> rocksdb::Status;
public:
[[nodiscard]] auto
add_directory(const std::string &api_path,
const std::string &source_path) -> api_error override;
[[nodiscard]] auto
add_or_update_file(const i_file_db::file_data &data) -> api_error override;
void clear() override;
[[nodiscard]] auto count() const -> std::uint64_t override;
[[nodiscard]] auto
get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error override;
[[nodiscard]] auto
get_directory_api_path(const std::string &source_path,
std::string &api_path) const -> api_error override;
[[nodiscard]] auto get_directory_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto
get_file_api_path(const std::string &source_path,
std::string &api_path) const -> api_error override;
[[nodiscard]] auto
get_file_data(const std::string &api_path,
i_file_db::file_data &data) const -> api_error override;
[[nodiscard]] auto
get_file_source_path(const std::string &api_path,
std::string &source_path) const -> api_error override;
[[nodiscard]] auto
get_item_list() const -> std::vector<i_file_db::file_info> override;
[[nodiscard]] auto
get_source_path(const std::string &api_path,
std::string &source_path) const -> api_error override;
[[nodiscard]] auto
remove_item(const std::string &api_path) -> api_error override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_DB_HPP_

View File

@ -1,92 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_DB_HPP_
#include "db/i_file_db.hpp"
#include "utils/db/sqlite/db_common.hpp"
namespace repertory {
class app_config;
class sqlite_file_db final : public i_file_db {
public:
sqlite_file_db(const app_config &cfg);
~sqlite_file_db() override;
sqlite_file_db(const sqlite_file_db &) = delete;
sqlite_file_db(sqlite_file_db &&) = delete;
auto operator=(const sqlite_file_db &) -> sqlite_file_db & = delete;
auto operator=(sqlite_file_db &&) -> sqlite_file_db & = delete;
private:
utils::db::sqlite::db3_t db_;
public:
[[nodiscard]] auto add_directory(const std::string &api_path,
const std::string &source_path)
-> api_error override;
[[nodiscard]] auto add_or_update_file(const i_file_db::file_data &data)
-> api_error override;
void clear() override;
[[nodiscard]] auto count() const -> std::uint64_t override;
[[nodiscard]] auto get_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_directory_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_directory_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto get_file_api_path(const std::string &source_path,
std::string &api_path) const
-> api_error override;
[[nodiscard]] auto get_file_data(const std::string &api_path,
i_file_db::file_data &data) const
-> api_error override;
[[nodiscard]] auto get_file_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto get_item_list() const
-> std::vector<i_file_db::file_info> override;
[[nodiscard]] auto get_source_path(const std::string &api_path,
std::string &source_path) const
-> api_error override;
[[nodiscard]] auto remove_item(const std::string &api_path)
-> api_error override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_DB_HPP_

View File

@ -19,8 +19,8 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_MGR_DB_HPP_
#ifndef REPERTORY_INCLUDE_DB_RDB_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_RDB_FILE_MGR_DB_HPP_
#include "db/i_file_mgr_db.hpp"
@ -41,7 +41,7 @@ private:
const app_config &cfg_;
private:
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
std::unique_ptr<rocksdb::DB> db_;
std::atomic<std::uint64_t> id_{0U};
rocksdb::ColumnFamilyHandle *resume_family_{};
rocksdb::ColumnFamilyHandle *upload_active_family_{};
@ -57,53 +57,42 @@ private:
perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action) -> bool;
[[nodiscard]] auto perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool;
[[nodiscard]] auto remove_resume(const std::string &api_path,
rocksdb::Transaction *txn)
-> rocksdb::Status;
[[nodiscard]] auto add_resume(const resume_entry &entry,
rocksdb::Transaction *txn) -> rocksdb::Status;
public:
[[nodiscard]] auto add_resume(const resume_entry &entry) -> bool override;
[[nodiscard]] auto add_resume(resume_entry entry) -> bool override;
[[nodiscard]] auto add_upload(const upload_entry &entry) -> bool override;
[[nodiscard]] auto add_upload(upload_entry entry) -> bool override;
[[nodiscard]] auto add_upload_active(const upload_active_entry &entry)
-> bool override;
[[nodiscard]] auto
add_upload_active(upload_active_entry entry) -> bool override;
void clear() override;
[[nodiscard]] auto get_next_upload() const
-> std::optional<upload_entry> override;
[[nodiscard]] auto
get_next_upload() const -> std::optional<upload_entry> override;
[[nodiscard]] auto get_resume_list() const
-> std::vector<resume_entry> override;
[[nodiscard]] auto
get_resume_list() const -> std::vector<resume_entry> override;
[[nodiscard]] auto get_upload(const std::string &api_path) const
-> std::optional<upload_entry> override;
[[nodiscard]] auto get_upload_active_list() const
-> std::vector<upload_active_entry> override;
[[nodiscard]] auto
get_upload_active_list() const -> std::vector<upload_active_entry> override;
[[nodiscard]] auto remove_resume(const std::string &api_path)
-> bool override;
[[nodiscard]] auto
remove_resume(const std::string &api_path) -> bool override;
[[nodiscard]] auto remove_upload(const std::string &api_path)
-> bool override;
[[nodiscard]] auto
remove_upload(const std::string &api_path) -> bool override;
[[nodiscard]] auto remove_upload_active(const std::string &api_path)
-> bool override;
[[nodiscard]] auto
remove_upload_active(const std::string &api_path) -> bool override;
[[nodiscard]] auto rename_resume(const std::string &from_api_path,
const std::string &to_api_path)
-> bool override;
[[nodiscard]] auto
rename_resume(const std::string &from_api_path,
const std::string &to_api_path) -> bool override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_RDB_FILE_MGR_DB_HPP_
#endif // REPERTORY_INCLUDE_DB_RDB_FILE_MGR_DB_HPP_

View File

@ -19,8 +19,8 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_RDB_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_RDB_META_DB_HPP_
#ifndef REPERTORY_INCLUDE_DB_RDB_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_RDB_META_DB_HPP_
#include "db/i_meta_db.hpp"
#include "types/repertory.hpp"
@ -40,10 +40,8 @@ public:
private:
const app_config &cfg_;
private:
std::unique_ptr<rocksdb::TransactionDB> db_{nullptr};
rocksdb::ColumnFamilyHandle *meta_family_{};
std::unique_ptr<rocksdb::DB> db_{nullptr};
rocksdb::ColumnFamilyHandle *default_family_{};
rocksdb::ColumnFamilyHandle *pinned_family_{};
rocksdb::ColumnFamilyHandle *size_family_{};
rocksdb::ColumnFamilyHandle *source_family_{};
@ -61,21 +59,8 @@ private:
perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action) -> api_error;
[[nodiscard]] auto perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error;
[[nodiscard]] auto remove_api_path(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn)
-> rocksdb::Status;
[[nodiscard]] auto update_item_meta(const std::string &api_path,
json json_data,
rocksdb::Transaction *base_txn = nullptr,
rocksdb::Status *status = nullptr)
-> api_error;
json json_data) -> api_error;
public:
void clear() override;
@ -124,4 +109,4 @@ public:
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_RDB_META_DB_HPP_
#endif // REPERTORY_INCLUDE_DB_RDB_META_DB_HPP_

View File

@ -19,8 +19,8 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_MGR_DB_HPP_
#ifndef REPERTORY_INCLUDE_DB_SQLITE_FILE_MGR_DB_HPP_
#define REPERTORY_INCLUDE_DB_SQLITE_FILE_MGR_DB_HPP_
#include "db/i_file_mgr_db.hpp"
#include "utils/db/sqlite/db_common.hpp"
@ -42,41 +42,41 @@ private:
utils::db::sqlite::db3_t db_;
public:
[[nodiscard]] auto add_resume(const resume_entry &entry) -> bool override;
[[nodiscard]] auto add_resume(resume_entry entry) -> bool override;
[[nodiscard]] auto add_upload(const upload_entry &entry) -> bool override;
[[nodiscard]] auto add_upload(upload_entry entry) -> bool override;
[[nodiscard]] auto add_upload_active(const upload_active_entry &entry)
-> bool override;
[[nodiscard]] auto
add_upload_active(upload_active_entry entry) -> bool override;
void clear() override;
[[nodiscard]] auto get_next_upload() const
-> std::optional<upload_entry> override;
[[nodiscard]] auto
get_next_upload() const -> std::optional<upload_entry> override;
[[nodiscard]] auto get_resume_list() const
-> std::vector<resume_entry> override;
[[nodiscard]] auto
get_resume_list() const -> std::vector<resume_entry> override;
[[nodiscard]] auto get_upload(const std::string &api_path) const
-> std::optional<upload_entry> override;
[[nodiscard]] auto get_upload_active_list() const
-> std::vector<upload_active_entry> override;
[[nodiscard]] auto
get_upload_active_list() const -> std::vector<upload_active_entry> override;
[[nodiscard]] auto remove_resume(const std::string &api_path)
-> bool override;
[[nodiscard]] auto
remove_resume(const std::string &api_path) -> bool override;
[[nodiscard]] auto remove_upload(const std::string &api_path)
-> bool override;
[[nodiscard]] auto
remove_upload(const std::string &api_path) -> bool override;
[[nodiscard]] auto remove_upload_active(const std::string &api_path)
-> bool override;
[[nodiscard]] auto
remove_upload_active(const std::string &api_path) -> bool override;
[[nodiscard]] auto rename_resume(const std::string &from_api_path,
const std::string &to_api_path)
-> bool override;
[[nodiscard]] auto
rename_resume(const std::string &from_api_path,
const std::string &to_api_path) -> bool override;
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_SQLITE_FILE_MGR_DB_HPP_
#endif // REPERTORY_INCLUDE_DB_SQLITE_FILE_MGR_DB_HPP_

View File

@ -19,8 +19,8 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_DB_IMPL_SQLITE_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_IMPL_SQLITE_META_DB_HPP_
#ifndef REPERTORY_INCLUDE_DB_SQLITE_META_DB_HPP_
#define REPERTORY_INCLUDE_DB_SQLITE_META_DB_HPP_
#include "db/i_meta_db.hpp"
#include "types/repertory.hpp"
@ -94,4 +94,4 @@ public:
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_DB_IMPL_SQLITE_META_DB_HPP_
#endif // REPERTORY_INCLUDE_DB_SQLITE_META_DB_HPP_

View File

@ -34,7 +34,7 @@ public:
private:
struct open_directory final {
std::shared_ptr<directory_iterator> iterator;
std::vector<std::uint64_t> handles;
std::vector<std::uint64_t> handles{};
std::chrono::system_clock::time_point last_update{
std::chrono::system_clock::now()};
};
@ -60,8 +60,8 @@ public:
void execute_action(const std::string &api_path,
const execute_callback &execute);
[[nodiscard]] auto get_directory(std::uint64_t handle)
-> std::shared_ptr<directory_iterator>;
[[nodiscard]] auto
get_directory(std::uint64_t handle) -> std::shared_ptr<directory_iterator>;
[[nodiscard]] auto remove_directory(const std::string &api_path)
-> std::shared_ptr<directory_iterator>;

View File

@ -31,23 +31,22 @@ class i_provider;
class eviction final : public single_thread_service_base {
public:
eviction(i_provider &provider, const app_config &config,
i_file_manager &file_mgr)
eviction(i_provider &provider, const app_config &config, i_file_manager &fm)
: single_thread_service_base("eviction"),
provider_(provider),
config_(config),
file_mgr_(file_mgr),
provider_(provider) {}
fm_(fm) {}
~eviction() override = default;
private:
const app_config &config_;
i_file_manager &file_mgr_;
i_provider &provider_;
const app_config &config_;
i_file_manager &fm_;
private:
[[nodiscard]] auto check_minimum_requirements(const std::string &file_path)
-> bool;
[[nodiscard]] auto
check_minimum_requirements(const std::string &file_path) -> bool;
[[nodiscard]] auto get_filtered_cached_files() -> std::deque<std::string>;

View File

@ -29,8 +29,6 @@
#include "drives/fuse/remotefuse/i_remote_instance.hpp"
#include "drives/remote/remote_open_file_table.hpp"
#include "drives/winfsp/remotewinfsp/i_remote_instance.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "types/remote.hpp"
#include "types/repertory.hpp"
#include "utils/base64.hpp"
@ -54,7 +52,7 @@ public:
: config_(config),
drive_(drv),
mount_location_(std::move(mount_location)),
client_pool_(config.get_remote_mount().client_pool_size) {
client_pool_(config.get_remote_client_pool_size()) {
event_system::instance().raise<service_started>("remote_server_base");
handler_lookup_.insert(
{"::winfsp_can_delete",
@ -1359,8 +1357,7 @@ public:
}});
packet_server_ = std::make_unique<packet_server>(
config_.get_remote_mount().api_port,
config_.get_remote_mount().encryption_token, 10,
config_.get_remote_port(), config_.get_remote_token(), 10,
[this](const std::string &client_id) {
return this->closed_handler(client_id);
},

View File

@ -32,12 +32,9 @@ enum class event_level {
trace,
};
[[nodiscard]] auto
event_level_from_string(std::string level,
event_level default_level = event_level::info)
-> event_level;
auto event_level_from_string(std::string level) -> event_level;
[[nodiscard]] auto event_level_to_string(event_level level) -> std::string;
auto event_level_to_string(event_level level) -> std::string;
class event {
protected:
@ -75,18 +72,4 @@ public:
};
} // namespace repertory
NLOHMANN_JSON_NAMESPACE_BEGIN
template <> struct adl_serializer<std::atomic<repertory::event_level>> {
static void to_json(json &data,
const std::atomic<repertory::event_level> &value) {
data = repertory::event_level_to_string(value.load());
}
static void from_json(const json &data,
std::atomic<repertory::event_level> &value) {
value.store(repertory::event_level_from_string(data.get<std::string>()));
}
};
NLOHMANN_JSON_NAMESPACE_END
#endif // REPERTORY_INCLUDE_EVENTS_EVENT_HPP_

View File

@ -44,7 +44,6 @@ using event_consumer = event_system::event_consumer;
#define E_FROM_STRING(t) t
#define E_FROM_UINT16(t) std::to_string(t)
#define E_FROM_UINT64(t) std::to_string(t)
#define E_FROM_DOWNLOAD_TYPE(t) download_type_to_string(t)
#define E_PROP(type, name, short_name, ts) \
private: \

View File

@ -1,67 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_
#include "types/repertory.hpp"
namespace repertory {
class app_config;
class cache_size_mgr final {
public:
cache_size_mgr(const cache_size_mgr &) = delete;
cache_size_mgr(cache_size_mgr &&) = delete;
auto operator=(const cache_size_mgr &) -> cache_size_mgr & = delete;
auto operator=(cache_size_mgr &&) -> cache_size_mgr & = delete;
protected:
cache_size_mgr() = default;
~cache_size_mgr() { stop(); }
private:
static cache_size_mgr instance_;
private:
app_config *cfg_{nullptr};
std::uint64_t cache_size_{0U};
mutable std::mutex mtx_;
std::condition_variable notify_;
stop_type stop_requested_{false};
public:
[[nodiscard]] auto expand(std::uint64_t size) -> api_error;
void initialize(app_config *cfg);
[[nodiscard]] static auto instance() -> cache_size_mgr & { return instance_; }
[[nodiscard]] auto shrink(std::uint64_t size) -> api_error;
[[nodiscard]] auto size() const -> std::uint64_t;
void stop();
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_FILE_MANAGER_CACHE_SIZE_MGR_HPP_

View File

@ -1,83 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_
#include "file_manager/ring_buffer_base.hpp"
#include "types/repertory.hpp"
namespace repertory {
class i_provider;
class i_upload_manager;
// Read-only open-file implementation that streams chunks straight from the
// provider into a fixed-size, in-memory ring of buffers (ring_data_); no
// local cache file is involved.  Chunk download/rotation logic lives in the
// ring_buffer_base parent; this class only supplies the buffer storage and
// the provider read hooks.
class direct_open_file final : public ring_buffer_base {
public:
  // chunk_size:    size in bytes of one download chunk
  // chunk_timeout: idle timeout for the open file (units not visible here --
  //                presumably seconds, matching ring_buffer_base; confirm)
  // fsi:           filesystem item describing the remote file
  // provider:      provider the chunk data is read from
  direct_open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
                   filesystem_item fsi, i_provider &provider);

  ~direct_open_file() override;

public:
  // Non-copyable and non-movable: instances own threads/synchronization
  // state via the base class.
  direct_open_file() = delete;
  direct_open_file(const direct_open_file &) noexcept = delete;
  direct_open_file(direct_open_file &&) noexcept = delete;
  auto operator=(direct_open_file &&) noexcept -> direct_open_file & = delete;
  auto
  operator=(const direct_open_file &) noexcept -> direct_open_file & = delete;

private:
  // In-memory ring of chunk buffers; sized by the base class's minimum ring
  // size, so at most min_ring_size chunks are resident at once.
  std::array<data_buffer, min_ring_size> ring_data_;

protected:
  [[nodiscard]] auto on_check_start() -> bool override;

  // No post-download persistence step: the chunk already lives in ring_data_,
  // so this hook simply reports success.
  [[nodiscard]] auto
  on_chunk_downloaded(std::size_t /* chunk */,
                      const data_buffer & /* buffer */) -> api_error override {
    return api_error::success;
  }

  // Copies `read_size` bytes starting at `read_offset` within `chunk` into
  // `data`, reporting the actual count via `bytes_read`.
  [[nodiscard]] auto
  on_read_chunk(std::size_t chunk, std::size_t read_size,
                std::uint64_t read_offset, data_buffer &data,
                std::size_t &bytes_read) -> api_error override;

  // Runs `func` against the ring buffer slot that backs `chunk`.
  [[nodiscard]] auto use_buffer(std::size_t chunk,
                                std::function<api_error(data_buffer &)> func)
      -> api_error override;

public:
  // Native (OS-level) file operations are meaningless for a memory-backed
  // stream, so both overloads report not_supported.
  [[nodiscard]] auto native_operation(native_operation_callback /* callback */)
      -> api_error override {
    return api_error::not_supported;
  }

  [[nodiscard]] auto native_operation(std::uint64_t /* new_file_size */,
                                      native_operation_callback /* callback */)
      -> api_error override {
    return api_error::not_supported;
  }
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_FILE_MANAGER_DIRECT_OPEN_FILE_HPP_

View File

@ -32,6 +32,23 @@ E_SIMPLE2(download_begin, info, true,
std::string, dest_path, dest, E_FROM_STRING
);
E_SIMPLE5(download_chunk_begin, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING,
std::size_t, chunk, chunk, E_FROM_SIZE_T,
std::size_t, total, total, E_FROM_SIZE_T,
std::size_t, complete, complete, E_FROM_SIZE_T
);
E_SIMPLE6(download_chunk_end, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING,
std::size_t, chunk, chunk, E_FROM_SIZE_T,
std::size_t, total, total, E_FROM_SIZE_T,
std::size_t, complete, complete, E_FROM_SIZE_T,
api_error, result, result, E_FROM_API_FILE_ERROR
);
E_SIMPLE3(download_end, info, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, dest_path, dest, E_FROM_STRING,
@ -74,12 +91,6 @@ E_SIMPLE2(download_resume_removed, debug, true,
E_SIMPLE1(item_timeout, trace, true,
std::string, api_path, ap, E_FROM_STRING
);
E_SIMPLE3(download_type_selected, debug, true,
std::string, api_path, ap, E_FROM_STRING,
std::string, source, src, E_FROM_STRING,
download_type, download_type, type, E_FROM_DOWNLOAD_TYPE
);
// clang-format on
} // namespace repertory

View File

@ -68,7 +68,7 @@ private:
std::unique_ptr<std::thread> upload_thread_;
private:
[[nodiscard]] auto close_all(const std::string &api_path) -> bool;
void close_all(const std::string &api_path);
void close_timed_out_files();
@ -85,9 +85,6 @@ private:
void queue_upload(const std::string &api_path, const std::string &source_path,
bool no_lock);
void remove_resume(const std::string &api_path,
const std::string &source_path, bool no_lock);
void remove_upload(const std::string &api_path, bool no_lock);
void swap_renamed_items(std::string from_api_path, std::string to_api_path,
@ -108,11 +105,6 @@ public:
void remove_resume(const std::string &api_path,
const std::string &source_path) override;
static auto remove_source_and_shrink_cache(const std::string &api_path,
const std::string &source_path,
std::uint64_t file_size,
bool allocated) -> bool;
void remove_upload(const std::string &api_path) override;
void store_resume(const i_open_file &file) override;
@ -139,13 +131,13 @@ public:
[[nodiscard]] auto get_open_handle_count() const -> std::size_t;
[[nodiscard]] auto get_stored_downloads() const
-> std::vector<i_file_mgr_db::resume_entry>;
[[nodiscard]] auto
get_stored_downloads() const -> std::vector<i_file_mgr_db::resume_entry>;
[[nodiscard]] auto has_no_open_file_handles() const -> bool override;
[[nodiscard]] auto is_processing(const std::string &api_path) const
-> bool override;
[[nodiscard]] auto
is_processing(const std::string &api_path) const -> bool override;
#if defined(PROJECT_TESTING)
[[nodiscard]] auto open(std::shared_ptr<i_closeable_open_file> of,
@ -158,13 +150,13 @@ public:
[[nodiscard]] auto remove_file(const std::string &api_path) -> api_error;
[[nodiscard]] auto rename_directory(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error;
[[nodiscard]] auto
rename_directory(const std::string &from_api_path,
const std::string &to_api_path) -> api_error;
[[nodiscard]] auto rename_file(const std::string &from_api_path,
const std::string &to_api_path, bool overwrite)
-> api_error;
const std::string &to_api_path,
bool overwrite) -> api_error;
void start();

View File

@ -62,12 +62,8 @@ public:
[[nodiscard]] virtual auto get_source_path() const -> std::string = 0;
[[nodiscard]] virtual auto is_complete() const -> bool = 0;
[[nodiscard]] virtual auto is_directory() const -> bool = 0;
[[nodiscard]] virtual auto is_write_supported() const -> bool = 0;
[[nodiscard]] virtual auto has_handle(std::uint64_t handle) const -> bool = 0;
[[nodiscard]] virtual auto
@ -97,8 +93,6 @@ class i_closeable_open_file : public i_open_file {
public:
virtual void add(std::uint64_t handle, open_file_data ofd) = 0;
[[nodiscard]] virtual auto get_allocated() const -> bool = 0;
[[nodiscard]] virtual auto can_close() const -> bool = 0;
virtual auto close() -> bool = 0;
@ -106,8 +100,12 @@ public:
[[nodiscard]] virtual auto get_handles() const
-> std::vector<std::uint64_t> = 0;
[[nodiscard]] virtual auto is_complete() const -> bool = 0;
[[nodiscard]] virtual auto is_modified() const -> bool = 0;
[[nodiscard]] virtual auto is_write_supported() const -> bool = 0;
virtual void remove(std::uint64_t handle) = 0;
virtual void remove_all() = 0;

View File

@ -29,14 +29,14 @@ class i_upload_manager {
INTERFACE_SETUP(i_upload_manager);
public:
virtual void queue_upload(const i_open_file &file) = 0;
virtual void queue_upload(const i_open_file &o) = 0;
virtual void remove_resume(const std::string &api_path,
const std::string &source_path) = 0;
virtual void remove_upload(const std::string &api_path) = 0;
virtual void store_resume(const i_open_file &file) = 0;
virtual void store_resume(const i_open_file &o) = 0;
};
} // namespace repertory

View File

@ -25,7 +25,6 @@
#include "file_manager/open_file_base.hpp"
#include "types/repertory.hpp"
#include "utils/types/file/i_file.hpp"
namespace repertory {
class i_provider;
@ -68,55 +67,45 @@ private:
i_upload_manager &mgr_;
private:
bool allocated{false};
std::unique_ptr<utils::file::i_file> nf_;
bool notified_{false};
bool notified_ = false;
std::size_t read_chunk_{};
boost::dynamic_bitset<> read_state_;
std::unique_ptr<std::thread> reader_thread_;
mutable std::recursive_mutex rw_mtx_;
stop_type stop_requested_{false};
std::unique_ptr<std::thread> download_thread_;
stop_type stop_requested_ = false;
private:
[[nodiscard]] auto adjust_cache_size(std::uint64_t file_size,
bool shrink) -> api_error;
[[nodiscard]] auto check_start() -> api_error;
void download_chunk(std::size_t chunk, bool skip_active, bool should_reset);
void download_range(std::size_t begin_chunk, std::size_t end_chunk,
void download_range(std::size_t start_chunk, std::size_t end_chunk,
bool should_reset);
void set_modified();
void set_read_state(std::size_t chunk);
void update_background_reader(std::size_t read_chunk);
void set_read_state(boost::dynamic_bitset<> read_state);
void update_reader(std::size_t chunk);
protected:
auto is_download_complete() const -> bool override {
return read_state_.all();
}
public:
auto close() -> bool override;
[[nodiscard]] auto get_allocated() const -> bool override;
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
[[nodiscard]] auto is_complete() const -> bool override;
[[nodiscard]] auto is_write_supported() const -> bool override {
return true;
}
auto is_write_supported() const -> bool override { return true; }
[[nodiscard]] auto
native_operation(native_operation_callback callback) -> api_error override;
[[nodiscard]] auto native_operation(native_operation_callback callback)
-> api_error override;
[[nodiscard]] auto
native_operation(std::uint64_t new_file_size,
native_operation_callback callback) -> api_error override;
[[nodiscard]] auto native_operation(std::uint64_t new_file_size,
native_operation_callback callback)
-> api_error override;
void remove(std::uint64_t handle) override;

View File

@ -24,18 +24,20 @@
#include "file_manager/i_open_file.hpp"
#include "utils/types/file/i_file.hpp"
namespace repertory {
class i_provider;
class open_file_base : public i_closeable_open_file {
public:
open_file_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider, bool disable_io);
filesystem_item fsi, i_provider &provider);
open_file_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
filesystem_item fsi,
std::map<std::uint64_t, open_file_data> open_data,
i_provider &provider, bool disable_io);
i_provider &provider);
~open_file_base() override = default;
@ -96,7 +98,7 @@ public:
[[nodiscard]] auto get_result() -> api_error;
};
private:
protected:
std::uint64_t chunk_size_;
std::uint8_t chunk_timeout_;
filesystem_item fsi_;
@ -105,19 +107,21 @@ private:
i_provider &provider_;
private:
std::unordered_map<std::size_t, std::shared_ptr<download>> active_downloads_;
api_error error_{api_error::success};
mutable std::mutex error_mtx_;
mutable std::recursive_mutex file_mtx_;
stop_type io_stop_requested_{false};
std::unique_ptr<std::thread> io_thread_;
protected:
std::unordered_map<std::size_t, std::shared_ptr<download>> active_downloads_;
mutable std::recursive_mutex file_mtx_;
std::atomic<std::chrono::system_clock::time_point> last_access_{
std::chrono::system_clock::now()};
bool modified_{false};
std::unique_ptr<utils::file::i_file> nf_;
mutable std::mutex io_thread_mtx_;
std::condition_variable io_thread_notify_;
std::deque<std::shared_ptr<io_item>> io_thread_queue_;
std::atomic<std::chrono::system_clock::time_point> last_access_{
std::chrono::system_clock::now(),
};
bool modified_{false};
bool removed_{false};
private:
@ -126,42 +130,11 @@ private:
protected:
[[nodiscard]] auto do_io(std::function<api_error()> action) -> api_error;
[[nodiscard]] auto get_active_downloads()
-> std::unordered_map<std::size_t, std::shared_ptr<download>> & {
return active_downloads_;
}
[[nodiscard]] auto get_mutex() const -> std::recursive_mutex & {
return file_mtx_;
}
[[nodiscard]] auto get_last_chunk_size() const -> std::size_t;
[[nodiscard]] auto get_provider() -> i_provider & { return provider_; }
[[nodiscard]] auto get_provider() const -> const i_provider & {
return provider_;
}
[[nodiscard]] auto is_removed() const -> bool;
void notify_io();
virtual auto is_download_complete() const -> bool = 0;
void reset_timeout();
auto set_api_error(const api_error &err) -> api_error;
void set_file_size(std::uint64_t size);
void set_last_chunk_size(std::size_t size);
void set_modified(bool modified);
void set_removed(bool removed);
void set_source_path(std::string source_path);
void wait_for_io(stop_type &stop_requested);
auto set_api_error(const api_error &e) -> api_error;
public:
void add(std::uint64_t handle, open_file_data ofd) override;
@ -170,8 +143,6 @@ public:
auto close() -> bool override;
[[nodiscard]] auto get_allocated() const -> bool override { return false; }
[[nodiscard]] auto get_api_error() const -> api_error;
[[nodiscard]] auto get_api_path() const -> std::string override;
@ -186,23 +157,27 @@ public:
[[nodiscard]] auto get_handles() const -> std::vector<std::uint64_t> override;
[[nodiscard]] auto
get_open_data() -> std::map<std::uint64_t, open_file_data> & override;
[[nodiscard]] auto get_open_data()
-> std::map<std::uint64_t, open_file_data> & override;
[[nodiscard]] auto get_open_data() const
-> const std::map<std::uint64_t, open_file_data> & override;
[[nodiscard]] auto
get_open_data(std::uint64_t handle) -> open_file_data & override;
[[nodiscard]] auto get_open_data(std::uint64_t handle)
-> open_file_data & override;
[[nodiscard]] auto
get_open_data(std::uint64_t handle) const -> const open_file_data & override;
[[nodiscard]] auto get_open_data(std::uint64_t handle) const
-> const open_file_data & override;
[[nodiscard]] auto get_open_file_count() const -> std::size_t override;
[[nodiscard]] auto get_source_path() const -> std::string override;
[[nodiscard]] auto get_source_path() const -> std::string override {
return fsi_.source_path;
}
[[nodiscard]] auto has_handle(std::uint64_t handle) const -> bool override;
[[nodiscard]] auto has_handle(std::uint64_t handle) const -> bool override {
return open_data_.find(handle) != open_data_.end();
}
[[nodiscard]] auto is_directory() const -> bool override {
return fsi_.directory;

View File

@ -1,150 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_
#include "file_manager/open_file_base.hpp"
#include "types/repertory.hpp"
#include "utils/file.hpp"
namespace repertory {
class i_provider;
class i_upload_manager;
// Base class for open files that expose a sliding window ("ring") of
// downloaded chunks instead of a fully cached copy.  It tracks the window
// position (ring_begin_/ring_end_/ring_pos_), which slots are populated
// (read_state_), and runs a background reader thread; subclasses provide the
// actual chunk storage and I/O via the on_* / use_buffer hooks.
class ring_buffer_base : public open_file_base {
public:
  // ring_size:   number of chunks kept resident at once
  // disable_io:  forwarded to open_file_base (exact effect defined there)
  ring_buffer_base(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
                   filesystem_item fsi, i_provider &provider,
                   std::size_t ring_size, bool disable_io);

  ~ring_buffer_base() override = default;

public:
  // Non-copyable and non-movable: owns a thread and mutexes.
  ring_buffer_base() = delete;
  ring_buffer_base(const ring_buffer_base &) noexcept = delete;
  ring_buffer_base(ring_buffer_base &&) noexcept = delete;
  auto operator=(ring_buffer_base &&) noexcept -> ring_buffer_base & = delete;
  auto
  operator=(const ring_buffer_base &) noexcept -> ring_buffer_base & = delete;

public:
  // Smallest ring a subclass may request.
  static constexpr const auto min_ring_size{5U};

private:
  // One bit per ring slot; a set bit marks a slot whose chunk has been
  // downloaded (see is_download_complete-style checks and get_read_state).
  boost::dynamic_bitset<> read_state_;
  // Total chunk count of the remote file.
  std::size_t total_chunks_;

private:
  std::condition_variable chunk_notify_;
  mutable std::mutex chunk_mtx_;
  std::mutex read_mtx_;
  std::unique_ptr<std::thread> reader_thread_;
  // First, last, and current chunk indices of the active window.
  std::size_t ring_begin_{};
  std::size_t ring_end_{};
  std::size_t ring_pos_{};
  stop_type stop_requested_{false};

private:
  [[nodiscard]] auto check_start() -> api_error;

  auto download_chunk(std::size_t chunk, bool skip_active) -> api_error;

  // Background loop body driven by reader_thread_.
  void reader_thread();

  // Slides the window by `count` chunks, forward or backward.
  void update_position(std::size_t count, bool is_forward);

protected:
  [[nodiscard]] auto has_reader_thread() const -> bool {
    return reader_thread_ != nullptr;
  }

  // Ring size equals the bit count of read_state_.
  [[nodiscard]] auto get_ring_size() const -> std::size_t {
    return read_state_.size();
  }

  // Subclass hooks --------------------------------------------------------
  // on_check_start:      pre-start check before the reader begins
  // on_chunk_downloaded: persist/accept a freshly downloaded chunk
  // on_read_chunk:       copy bytes out of a resident chunk
  // use_buffer:          run `func` against the storage backing `chunk`
  [[nodiscard]] virtual auto on_check_start() -> bool = 0;

  [[nodiscard]] virtual auto
  on_chunk_downloaded(std::size_t chunk,
                      const data_buffer &buffer) -> api_error = 0;

  [[nodiscard]] virtual auto
  on_read_chunk(std::size_t chunk, std::size_t read_size,
                std::uint64_t read_offset, data_buffer &data,
                std::size_t &bytes_read) -> api_error = 0;

  [[nodiscard]] virtual auto
  use_buffer(std::size_t chunk,
             std::function<api_error(data_buffer &)> func) -> api_error = 0;

public:
  auto close() -> bool override;

  // Advance the window by `count` chunks.
  void forward(std::size_t count);

  [[nodiscard]] auto get_current_chunk() const -> std::size_t {
    return ring_pos_;
  }

  [[nodiscard]] auto get_first_chunk() const -> std::size_t {
    return ring_begin_;
  }

  [[nodiscard]] auto get_last_chunk() const -> std::size_t { return ring_end_; }

  [[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;

  [[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;

  [[nodiscard]] auto get_total_chunks() const -> std::size_t {
    return total_chunks_;
  }

  // A ring buffer never holds the whole file locally, so it never reports
  // the download as complete.
  [[nodiscard]] auto is_complete() const -> bool override { return false; }

  // Ring-buffered files are read-only.
  [[nodiscard]] auto is_write_supported() const -> bool override {
    return false;
  }

  [[nodiscard]] auto read(std::size_t read_size, std::uint64_t read_offset,
                          data_buffer &data) -> api_error override;

  [[nodiscard]] auto resize(std::uint64_t /* size */) -> api_error override {
    return api_error::not_supported;
  }

  // Move the window back by `count` chunks.
  void reverse(std::size_t count);

  // Reposition the window to begin at `first_chunk` with `current_chunk`
  // as the active read position.
  void set(std::size_t first_chunk, std::size_t current_chunk);

  void set_api_path(const std::string &api_path) override;

  [[nodiscard]] auto
  write(std::uint64_t /* write_offset */, const data_buffer & /* data */,
        std::size_t & /* bytes_written */) -> api_error override {
    return api_error::not_supported;
  }
};
} // namespace repertory
#endif // REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_BASE_HPP_

View File

@ -22,17 +22,20 @@
#ifndef REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_OPEN_FILE_HPP_
#define REPERTORY_INCLUDE_FILE_MANAGER_RING_BUFFER_OPEN_FILE_HPP_
#include "file_manager/ring_buffer_base.hpp"
#include "file_manager/open_file_base.hpp"
#include "types/repertory.hpp"
#include "utils/file.hpp"
namespace repertory {
class i_provider;
class i_upload_manager;
class ring_buffer_open_file final : public ring_buffer_base {
class ring_buffer_open_file final : public open_file_base {
public:
ring_buffer_open_file(std::string buffer_directory, std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider);
ring_buffer_open_file(std::string buffer_directory, std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider, std::size_t ring_size);
@ -43,49 +46,85 @@ public:
ring_buffer_open_file() = delete;
ring_buffer_open_file(const ring_buffer_open_file &) noexcept = delete;
ring_buffer_open_file(ring_buffer_open_file &&) noexcept = delete;
auto operator=(ring_buffer_open_file &&) noexcept -> ring_buffer_open_file & =
delete;
auto operator=(ring_buffer_open_file &&) noexcept
-> ring_buffer_open_file & = delete;
auto operator=(const ring_buffer_open_file &) noexcept
-> ring_buffer_open_file & = delete;
private:
std::string source_path_;
boost::dynamic_bitset<> ring_state_;
std::size_t total_chunks_;
private:
std::unique_ptr<utils::file::i_file> nf_;
std::unique_ptr<std::thread> chunk_forward_thread_;
std::unique_ptr<std::thread> chunk_reverse_thread_;
std::condition_variable chunk_notify_;
mutable std::mutex chunk_mtx_;
std::size_t current_chunk_{};
std::size_t first_chunk_{};
std::size_t last_chunk_;
private:
auto download_chunk(std::size_t chunk) -> api_error;
void forward_reader_thread(std::size_t count);
void reverse_reader_thread(std::size_t count);
protected:
[[nodiscard]] auto on_check_start() -> bool override;
[[nodiscard]] auto
on_chunk_downloaded(std::size_t chunk,
const data_buffer &buffer) -> api_error override;
[[nodiscard]] auto
on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset, data_buffer &data,
std::size_t &bytes_read) -> api_error override;
[[nodiscard]] auto use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func)
-> api_error override;
auto is_download_complete() const -> bool override;
public:
[[nodiscard]] static auto can_handle_file(std::uint64_t file_size,
std::size_t chunk_size,
std::size_t ring_size) -> bool;
void forward(std::size_t count);
[[nodiscard]] auto
native_operation(native_operation_callback callback) -> api_error override;
[[nodiscard]] auto get_current_chunk() const -> std::size_t {
return current_chunk_;
}
[[nodiscard]] auto native_operation(std::uint64_t /* new_file_size */,
native_operation_callback /* callback */)
[[nodiscard]] auto get_first_chunk() const -> std::size_t {
return first_chunk_;
}
[[nodiscard]] auto get_last_chunk() const -> std::size_t {
return last_chunk_;
}
[[nodiscard]] auto get_read_state() const -> boost::dynamic_bitset<> override;
[[nodiscard]] auto get_read_state(std::size_t chunk) const -> bool override;
[[nodiscard]] auto get_total_chunks() const -> std::size_t {
return total_chunks_;
}
[[nodiscard]] auto is_complete() const -> bool override { return true; }
auto is_write_supported() const -> bool override { return false; }
[[nodiscard]] auto native_operation(native_operation_callback callback)
-> api_error override;
[[nodiscard]] auto native_operation(std::uint64_t, native_operation_callback)
-> api_error override {
return api_error::not_supported;
}
[[nodiscard]] auto get_source_path() const -> std::string override {
return source_path_;
[[nodiscard]] auto read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error override;
[[nodiscard]] auto resize(std::uint64_t) -> api_error override {
return api_error::not_supported;
}
void reverse(std::size_t count);
void set(std::size_t first_chunk, std::size_t current_chunk);
void set_api_path(const std::string &api_path) override;
[[nodiscard]] auto write(std::uint64_t, const data_buffer &, std::size_t &)
-> api_error override {
return api_error::not_supported;
}
};
} // namespace repertory

View File

@ -96,9 +96,7 @@ protected:
return api_item_added_;
}
[[nodiscard]] auto get_comm() -> i_http_comm & { return comm_; }
[[nodiscard]] auto get_comm() const -> const i_http_comm & { return comm_; }
[[nodiscard]] auto get_comm() const -> i_http_comm & { return comm_; }
[[nodiscard]] auto get_config() -> app_config & { return config_; }

View File

@ -23,8 +23,8 @@
#define REPERTORY_INCLUDE_PROVIDERS_ENCRYPT_ENCRYPT_PROVIDER_HPP_
#include "app_config.hpp"
#include "db/i_file_db.hpp"
#include "providers/i_provider.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/encrypting_reader.hpp"
namespace repertory {
@ -45,22 +45,21 @@ public:
private:
struct reader_info final {
std::chrono::system_clock::time_point last_access_time{
std::chrono::system_clock::now(),
};
std::unique_ptr<utils::encryption::encrypting_reader> reader;
std::chrono::system_clock::time_point last_access_time =
std::chrono::system_clock::now();
std::unique_ptr<utils::encryption::encrypting_reader> reader{};
std::mutex reader_mtx;
};
private:
app_config &config_;
encrypt_config encrypt_config_;
utils::db::sqlite::db3_t db_;
private:
std::unique_ptr<i_file_db> db_{nullptr};
i_file_manager *fm_{nullptr};
std::unordered_map<std::string, std::shared_ptr<reader_info>> reader_lookup_;
std::recursive_mutex reader_lookup_mtx_;
i_file_manager *fm_ = nullptr;
std::unordered_map<std::string, std::shared_ptr<reader_info>>
reader_lookup_{};
std::recursive_mutex reader_lookup_mtx_{};
private:
static auto create_api_file(const std::string &api_path, bool directory,
@ -74,10 +73,6 @@ private:
const std::string &source_path)>
callback) const -> api_error;
[[nodiscard]] auto get_encrypt_config() const -> const encrypt_config & {
return encrypt_config_;
}
auto process_directory_entry(const utils::file::i_fs_item &dir_entry,
const encrypt_config &cfg,
std::string &api_path) const -> bool;
@ -88,68 +83,62 @@ public:
[[nodiscard]] auto create_directory(const std::string &api_path,
api_meta_map &meta) -> api_error override;
[[nodiscard]] auto
create_directory_clone_source_meta(const std::string & /*source_api_path*/,
const std::string & /*api_path*/)
-> api_error override {
return api_error::not_implemented;
}
[[nodiscard]] auto create_file(const std::string & /*api_path*/,
api_meta_map & /*meta*/)
-> api_error override {
[[nodiscard]] auto create_directory_clone_source_meta(
const std::string & /*source_api_path*/,
const std::string & /*api_path*/) -> api_error override {
return api_error::not_implemented;
}
[[nodiscard]] auto
get_api_path_from_source(const std::string & /*source_path*/,
std::string & /*api_path*/) const
-> api_error override;
create_file(const std::string & /*api_path*/,
api_meta_map & /*meta*/) -> api_error override {
return api_error::not_implemented;
}
[[nodiscard]] auto get_api_path_from_source(
const std::string & /*source_path*/,
std::string & /*api_path*/) const -> api_error override;
[[nodiscard]] auto get_directory_item_count(const std::string &api_path) const
-> std::uint64_t override;
[[nodiscard]] auto get_directory_items(const std::string &api_path,
directory_item_list &list) const
-> api_error override;
[[nodiscard]] auto
get_directory_items(const std::string &api_path,
directory_item_list &list) const -> api_error override;
[[nodiscard]] auto get_file(const std::string &api_path, api_file &file) const
-> api_error override;
[[nodiscard]] auto get_file_list(api_file_list &list,
std::string &marker) const
-> api_error override;
[[nodiscard]] auto get_file_size(const std::string &api_path,
std::uint64_t &file_size) const
-> api_error override;
[[nodiscard]] auto get_filesystem_item(const std::string &api_path,
bool directory,
filesystem_item &fsi) const
-> api_error override;
[[nodiscard]] auto get_filesystem_item_and_file(const std::string &api_path,
api_file &file,
filesystem_item &fsi) const
-> api_error override;
[[nodiscard]] auto get_file(const std::string &api_path,
api_file &file) const -> api_error override;
[[nodiscard]] auto
get_filesystem_item_from_source_path(const std::string &source_path,
filesystem_item &fsi) const
-> api_error override;
get_file_list(api_file_list &list,
std::string &marker) const -> api_error override;
[[nodiscard]] auto get_pinned_files() const
-> std::vector<std::string> override;
[[nodiscard]] auto
get_file_size(const std::string &api_path,
std::uint64_t &file_size) const -> api_error override;
[[nodiscard]] auto get_item_meta(const std::string &api_path,
api_meta_map &meta) const
-> api_error override;
[[nodiscard]] auto
get_filesystem_item(const std::string &api_path, bool directory,
filesystem_item &fsi) const -> api_error override;
[[nodiscard]] auto get_item_meta(const std::string &api_path,
const std::string &key,
std::string &value) const
-> api_error override;
[[nodiscard]] auto get_filesystem_item_and_file(
const std::string &api_path, api_file &file,
filesystem_item &fsi) const -> api_error override;
[[nodiscard]] auto get_filesystem_item_from_source_path(
const std::string &source_path,
filesystem_item &fsi) const -> api_error override;
[[nodiscard]] auto
get_pinned_files() const -> std::vector<std::string> override;
[[nodiscard]] auto
get_item_meta(const std::string &api_path,
api_meta_map &meta) const -> api_error override;
[[nodiscard]] auto
get_item_meta(const std::string &api_path, const std::string &key,
std::string &value) const -> api_error override;
[[nodiscard]] auto get_total_drive_space() const -> std::uint64_t override;
@ -164,11 +153,11 @@ public:
[[nodiscard]] auto is_directory(const std::string &api_path,
bool &exists) const -> api_error override;
[[nodiscard]] auto is_file(const std::string &api_path, bool &exists) const
-> api_error override;
[[nodiscard]] auto is_file(const std::string &api_path,
bool &exists) const -> api_error override;
[[nodiscard]] auto is_file_writeable(const std::string &api_path) const
-> bool override;
[[nodiscard]] auto
is_file_writeable(const std::string &api_path) const -> bool override;
[[nodiscard]] auto is_online() const -> bool override;
@ -178,44 +167,42 @@ public:
return false;
}
[[nodiscard]] auto read_file_bytes(const std::string &api_path,
std::size_t size, std::uint64_t offset,
data_buffer &data,
stop_type &stop_requested)
-> api_error override;
[[nodiscard]] auto
read_file_bytes(const std::string &api_path, std::size_t size,
std::uint64_t offset, data_buffer &data,
stop_type &stop_requested) -> api_error override;
[[nodiscard]] auto remove_directory(const std::string & /*api_path*/)
-> api_error override {
[[nodiscard]] auto
remove_directory(const std::string & /*api_path*/) -> api_error override {
return api_error::not_implemented;
}
[[nodiscard]] auto remove_file(const std::string & /*api_path*/)
-> api_error override {
[[nodiscard]] auto
remove_file(const std::string & /*api_path*/) -> api_error override {
return api_error::not_implemented;
}
[[nodiscard]] auto remove_item_meta(const std::string & /*api_path*/,
const std::string & /*key*/)
-> api_error override {
[[nodiscard]] auto
remove_item_meta(const std::string & /*api_path*/,
const std::string & /*key*/) -> api_error override {
return api_error::success;
}
[[nodiscard]] auto rename_file(const std::string & /*from_api_path*/,
const std::string & /*to_api_path*/)
-> api_error override {
[[nodiscard]] auto
rename_file(const std::string & /*from_api_path*/,
const std::string & /*to_api_path*/) -> api_error override {
return api_error::not_implemented;
}
[[nodiscard]] auto set_item_meta(const std::string & /*api_path*/,
const std::string & /*key*/,
const std::string & /*value*/)
-> api_error override {
[[nodiscard]] auto
set_item_meta(const std::string & /*api_path*/, const std::string & /*key*/,
const std::string & /*value*/) -> api_error override {
return api_error::success;
}
[[nodiscard]] auto set_item_meta(const std::string & /*api_path*/,
const api_meta_map & /*meta*/)
-> api_error override {
[[nodiscard]] auto
set_item_meta(const std::string & /*api_path*/,
const api_meta_map & /*meta*/) -> api_error override {
return api_error::success;
}
@ -224,10 +211,10 @@ public:
void stop() override;
[[nodiscard]] auto upload_file(const std::string & /*api_path*/,
const std::string & /*source_path*/,
stop_type & /*stop_requested*/)
-> api_error override {
[[nodiscard]] auto
upload_file(const std::string & /*api_path*/,
const std::string & /*source_path*/,
stop_type & /*stop_requested*/) -> api_error override {
return api_error::not_implemented;
}
};

View File

@ -46,9 +46,6 @@ public:
auto operator=(const s3_provider &) -> s3_provider & = delete;
auto operator=(s3_provider &&) -> s3_provider & = delete;
private:
s3_config s3_config_;
private:
[[nodiscard]] auto add_if_not_found(api_file &file,
const std::string &object_name) const
@ -81,10 +78,6 @@ private:
std::optional<std::string> token = std::nullopt) const
-> bool;
[[nodiscard]] auto get_s3_config() const -> const s3_config & {
return s3_config_;
}
protected:
[[nodiscard]] auto create_directory_impl(const std::string &api_path,
api_meta_map &meta)

View File

@ -45,9 +45,6 @@ public:
auto operator=(const sia_provider &) -> sia_provider & = delete;
auto operator=(sia_provider &&) -> sia_provider & = delete;
private:
sia_config sia_config_;
private:
[[nodiscard]] auto get_object_info(const std::string &api_path,
json &object_info) const -> api_error;
@ -55,10 +52,6 @@ private:
[[nodiscard]] auto get_object_list(const std::string &api_path,
nlohmann::json &object_list) const -> bool;
[[nodiscard]] auto get_sia_config() const -> const auto & {
return sia_config_;
}
protected:
[[nodiscard]] auto create_directory_impl(const std::string &api_path,
api_meta_map &meta)

View File

@ -22,8 +22,6 @@
#ifndef REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
#define REPERTORY_INCLUDE_TYPES_REMOTE_HPP_
#include "types/repertory.hpp"
inline constexpr const auto PACKET_SERVICE_FUSE{1U};
inline constexpr const auto PACKET_SERVICE_WINFSP{2U};
@ -33,67 +31,7 @@ inline constexpr const auto PACKET_SERVICE_FLAGS{PACKET_SERVICE_WINFSP};
inline constexpr const auto PACKET_SERVICE_FLAGS{PACKET_SERVICE_FUSE};
#endif // defined(_WIN32)
constexpr const auto default_remote_client_pool_size{20U};
constexpr const auto default_remote_max_connections{20U};
constexpr const auto default_remote_receive_timeout_ms{120U * 1000U};
constexpr const auto default_remote_send_timeout_ms{30U * 1000U};
namespace repertory::remote {
// Connection settings used when this instance acts as a client of a remote
// repertory mount.
struct remote_config final {
  std::uint16_t api_port{};
  std::string encryption_token;
  std::string host_name_or_ip;
  std::uint8_t max_connections{default_remote_max_connections};
  std::uint32_t recv_timeout_ms{default_remote_receive_timeout_ms};
  std::uint32_t send_timeout_ms{default_remote_send_timeout_ms};

  // Field-wise equality; an object always compares equal to itself.
  auto operator==(const remote_config &cfg) const noexcept -> bool {
    if (this == &cfg) {
      return true;
    }

    return api_port == cfg.api_port &&
           encryption_token == cfg.encryption_token &&
           host_name_or_ip == cfg.host_name_or_ip &&
           max_connections == cfg.max_connections &&
           recv_timeout_ms == cfg.recv_timeout_ms &&
           send_timeout_ms == cfg.send_timeout_ms;
  }

  // Defined as the negation of operator== (self-comparison yields false).
  auto operator!=(const remote_config &cfg) const noexcept -> bool {
    return not(*this == cfg);
  }
};
// Server-side settings that expose this mount to remote clients.
struct remote_mount final {
  std::uint16_t api_port{};
  std::uint8_t client_pool_size{default_remote_client_pool_size};
  bool enable{false};
  std::string encryption_token;

  auto operator==(const remote_mount &rhs) const noexcept -> bool {
    // Self-comparison short-circuits; otherwise compare member-wise.
    if (this == &rhs) {
      return true;
    }

    return api_port == rhs.api_port &&
           client_pool_size == rhs.client_pool_size && enable == rhs.enable &&
           encryption_token == rhs.encryption_token;
  }

  auto operator!=(const remote_mount &rhs) const noexcept -> bool {
    return not(*this == rhs);
  }
};
using block_count = std::uint64_t;
using block_size = std::uint32_t;
using file_handle = std::uint64_t;
@ -222,46 +160,4 @@ create_os_open_flags(const open_flags &flags) -> std::uint32_t;
#endif // !defined(_WIN32)
} // namespace repertory::remote
NLOHMANN_JSON_NAMESPACE_BEGIN
// nlohmann::json (de)serializer for remote_config; JSON key names come
// from the JSON_* constants declared in types/repertory.hpp.
template <> struct adl_serializer<repertory::remote::remote_config> {
  static void to_json(json &data,
                      const repertory::remote::remote_config &value) {
    data[repertory::JSON_API_PORT] = value.api_port;
    data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
    data[repertory::JSON_HOST_NAME_OR_IP] = value.host_name_or_ip;
    data[repertory::JSON_MAX_CONNECTIONS] = value.max_connections;
    data[repertory::JSON_RECV_TIMEOUT_MS] = value.recv_timeout_ms;
    data[repertory::JSON_SEND_TIMEOUT_MS] = value.send_timeout_ms;
  }

  // Uses json::at(), so every key is required; a missing key throws.
  static void from_json(const json &data,
                        repertory::remote::remote_config &value) {
    data.at(repertory::JSON_API_PORT).get_to(value.api_port);
    data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
    data.at(repertory::JSON_HOST_NAME_OR_IP).get_to(value.host_name_or_ip);
    data.at(repertory::JSON_MAX_CONNECTIONS).get_to(value.max_connections);
    data.at(repertory::JSON_RECV_TIMEOUT_MS).get_to(value.recv_timeout_ms);
    data.at(repertory::JSON_SEND_TIMEOUT_MS).get_to(value.send_timeout_ms);
  }
};
// nlohmann::json (de)serializer for remote_mount; JSON key names come
// from the JSON_* constants declared in types/repertory.hpp.
template <> struct adl_serializer<repertory::remote::remote_mount> {
  static void to_json(json &data,
                      const repertory::remote::remote_mount &value) {
    data[repertory::JSON_API_PORT] = value.api_port;
    data[repertory::JSON_CLIENT_POOL_SIZE] = value.client_pool_size;
    data[repertory::JSON_ENABLE_REMOTE_MOUNT] = value.enable;
    data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
  }

  // Uses json::at(), so every key is required; a missing key throws.
  static void from_json(const json &data,
                        repertory::remote::remote_mount &value) {
    data.at(repertory::JSON_API_PORT).get_to(value.api_port);
    data.at(repertory::JSON_CLIENT_POOL_SIZE).get_to(value.client_pool_size);
    data.at(repertory::JSON_ENABLE_REMOTE_MOUNT).get_to(value.enable);
    data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
  }
};
NLOHMANN_JSON_NAMESPACE_END
#endif // REPERTORY_INCLUDE_TYPES_REMOTE_HPP_

View File

@ -1,655 +1,293 @@
/*
Copyright <2018-2023> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
#define REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
namespace repertory {
// Default values and allowed bounds for app_config settings.
constexpr const auto default_api_auth_size{48U};
// Fix: renamed from `default_download_timeout_ces` (typo) to use the same
// `_secs` suffix as every other seconds-based constant in this header
// (see min_download_timeout_secs below).
constexpr const auto default_download_timeout_secs{30U};
constexpr const auto default_eviction_delay_mins{1U};
constexpr const auto default_high_freq_interval_secs{30U};
// Fix: was `0U * 60U` (i.e. zero seconds), which would make the
// low-frequency timer fire continuously; one hour restores the
// high (30s) / medium (2min) / low (1h) cadence.
constexpr const auto default_low_freq_interval_secs{60U * 60U};
constexpr const auto default_max_cache_size_bytes{
    std::uint64_t(20UL * 1024UL * 1024UL * 1024UL),
};
constexpr const auto default_max_upload_count{5U};
constexpr const auto default_med_freq_interval_secs{2U * 60U};
constexpr const auto default_online_check_retry_secs{60U};
constexpr const auto default_orphaned_file_retention_days{15U};
constexpr const auto default_retry_read_count{6U};
constexpr const auto default_ring_buffer_file_size{512U};
constexpr const auto default_task_wait_ms{100U};
constexpr const auto default_timeout_ms{60000U};
constexpr const auto max_orphaned_file_retention_days{std::uint16_t(31U)};
constexpr const auto max_ring_buffer_file_size{std::uint16_t(1024U)};
constexpr const auto min_cache_size_bytes{
    std::uint64_t(100UL * 1024UL * 1024UL)};
constexpr const auto min_download_timeout_secs{std::uint8_t(5U)};
constexpr const auto min_online_check_retry_secs{std::uint16_t(15U)};
constexpr const auto min_orphaned_file_retention_days{std::uint16_t(1U)};
constexpr const auto min_retry_read_count{std::uint16_t(2U)};
constexpr const auto min_ring_buffer_file_size{std::uint16_t(64U)};
constexpr const auto min_task_wait_ms{std::uint16_t(50U)};
// Mutex-guarded, copyable wrapper around a value of `data_t`.
//
// The mutex is held via shared_ptr so copies remain individually usable.
// NOTE(review): the defaulted move operations leave the moved-from object
// with a null mutex pointer; a moved-from `atomic` must only be assigned
// to or destroyed — confirm callers respect this.
template <typename data_t> class atomic final {
public:
  atomic() : mtx_(std::make_shared<std::mutex>()) {}

  atomic(const atomic &at_data)
      : data_(at_data.load()), mtx_(std::make_shared<std::mutex>()) {}

  atomic(data_t data)
      : data_(std::move(data)), mtx_(std::make_shared<std::mutex>()) {}

  atomic(atomic &&) = default;

  ~atomic() = default;

private:
  data_t data_;
  std::shared_ptr<std::mutex> mtx_;

public:
  // Returns a copy of the current value.
  [[nodiscard]] auto load() const -> data_t {
    std::lock_guard<std::mutex> lock(*mtx_);
    return data_;
  }

  // Replaces the current value; returns a copy of the stored value.
  auto store(data_t data) -> data_t {
    std::lock_guard<std::mutex> lock(*mtx_);
    data_ = std::move(data);
    return data_;
  }

  auto operator=(const atomic &at_data) -> atomic & {
    if (&at_data == this) {
      return *this;
    }

    store(at_data.load());
    return *this;
  }

  auto operator=(atomic &&) -> atomic & = default;

  auto operator=(data_t data) -> atomic & {
    // `data` is a by-value copy so it can never alias `data_`; the
    // original `&data == &data_` self-check was always false and has
    // been removed.
    store(std::move(data));
    return *this;
  }

  // Fix: comparisons against another `atomic` now take independent
  // snapshots via load(), one lock at a time.  The original held this
  // object's mutex while calling at_data.load() (which locks the other
  // object's mutex) — an unordered double-lock that could deadlock when
  // two threads compared the same pair in opposite orders.
  [[nodiscard]] auto operator==(const atomic &at_data) const -> bool {
    if (&at_data == this) {
      return true;
    }

    return at_data.load() == load();
  }

  [[nodiscard]] auto operator==(const data_t &data) const -> bool {
    std::lock_guard<std::mutex> lock(*mtx_);
    return data == data_;
  }

  [[nodiscard]] auto operator!=(const atomic &at_data) const -> bool {
    if (&at_data == this) {
      return false;
    }

    return at_data.load() != load();
  }

  [[nodiscard]] auto operator!=(const data_t &data) const -> bool {
    std::lock_guard<std::mutex> lock(*mtx_);
    return data != data_;
  }

  [[nodiscard]] operator data_t() const { return load(); }
};
// Sentinel "no expiry / forever" timestamp value.
inline constexpr const auto max_time{
    std::numeric_limits<std::uint64_t>::max(),
};

// Keys used in the per-item metadata map (api_meta_map).
inline constexpr const std::string META_ACCESSED{"accessed"};
inline constexpr const std::string META_ATTRIBUTES{"attributes"};
inline constexpr const std::string META_BACKUP{"backup"};
inline constexpr const std::string META_CHANGED{"changed"};
inline constexpr const std::string META_CREATION{"creation"};
inline constexpr const std::string META_DIRECTORY{"directory"};
inline constexpr const std::string META_GID{"gid"};
inline constexpr const std::string META_KEY{"key"};
inline constexpr const std::string META_MODE{"mode"};
inline constexpr const std::string META_MODIFIED{"modified"};
inline constexpr const std::string META_OSXFLAGS{"flags"};
inline constexpr const std::string META_PINNED{"pinned"};
inline constexpr const std::string META_SIZE{"size"};
inline constexpr const std::string META_SOURCE{"source"};
inline constexpr const std::string META_UID{"uid"};
inline constexpr const std::string META_WRITTEN{"written"};

// All recognized metadata keys; keep the 16U size in sync with the list.
inline constexpr const std::array<std::string, 16U> META_USED_NAMES = {
    META_ACCESSED, META_ATTRIBUTES, META_BACKUP, META_CHANGED,
    META_CREATION, META_DIRECTORY, META_GID, META_KEY,
    META_MODE, META_MODIFIED, META_OSXFLAGS, META_PINNED,
    META_SIZE, META_SOURCE, META_UID, META_WRITTEN,
};

// Metadata is stored as string key -> string value.
using api_meta_map = std::map<std::string, std::string>;
// Error codes returned throughout the provider / drive / file-manager
// layers.  ERROR_COUNT must remain last — it is the enumerator count.
enum class api_error {
  success = 0,
  access_denied,
  bad_address,
  buffer_overflow,
  buffer_too_small,
  cache_not_initialized,
  comm_error,
  decryption_error,
  directory_end_of_files,
  directory_exists,
  directory_not_empty,
  directory_not_found,
  download_failed,
  download_incomplete,
  download_stopped,
  empty_ring_buffer_chunk_size,
  empty_ring_buffer_size,
  error,
  file_in_use,
  file_size_mismatch,
  incompatible_version,
  invalid_handle,
  invalid_operation,
  invalid_ring_buffer_multiple,
  invalid_ring_buffer_position,
  invalid_ring_buffer_size,
  invalid_version,
  item_exists,
  item_not_found,
  more_data,
  no_disk_space,
  not_implemented,
  not_supported,
  os_error,
  out_of_memory,
  permission_denied,
  upload_failed,
  xattr_buffer_small,
  xattr_exists,
  xattr_not_found,
  xattr_too_big,
  ERROR_COUNT // number of enumerators; not a real error
};
// String <-> api_error conversion helpers (implemented elsewhere).
[[nodiscard]] auto api_error_from_string(std::string_view str) -> api_error;
[[nodiscard]] auto
api_error_to_string(const api_error &error) -> const std::string &;

// Backing key/value database implementation.
enum class database_type {
  rocksdb,
  sqlite,
};
// Unrecognized strings fall back to `default_type`.
[[nodiscard]] auto database_type_from_string(
    std::string type,
    database_type default_type = database_type::rocksdb) -> database_type;
[[nodiscard]] auto
database_type_to_string(const database_type &type) -> std::string;

// Strategy used to download file data.
enum class download_type {
  default_,
  direct,
  ring_buffer,
};
// Unrecognized strings fall back to `default_type`.
[[nodiscard]] auto download_type_from_string(
    std::string type,
    download_type default_type = download_type::default_) -> download_type;
[[nodiscard]] auto
download_type_to_string(const download_type &type) -> std::string;
// Process exit codes; 0 is success, all failures are negative.
enum class exit_code : std::int32_t {
  success = 0,
  communication_error = -1,
  file_creation_failed = -2,
  incompatible_version = -3,
  invalid_syntax = -4,
  lock_failed = -5,
  mount_active = -6,
  mount_result = -7,
  not_mounted = -8,
  startup_exception = -9,
  failed_to_get_mount_state = -10,
  export_failed = -11,
  import_failed = -12,
  option_not_found = -13,
  invalid_provider_type = -14,
  set_option_not_found = -15,
  pin_failed = -16,
  unpin_failed = -17,
  init_failed = -18,
};
// Subset of HTTP status codes the code compares against.
// NOTE(review): intentionally unscoped (plain enum) — callers may rely on
// implicit conversion to int; do not convert to `enum class` casually.
enum http_error_codes : std::int32_t {
  ok = 200,
  multiple_choices = 300,
  not_found = 404,
};

// Result of attempting to acquire the mount/instance lock.
enum class lock_result {
  success,
  locked,
  failure,
};

// Supported storage provider back-ends.
enum class provider_type : std::size_t {
  sia,
  remote,
  s3,
  encrypt,
  unknown,
};

#if defined(_WIN32)
// Per-open-file state used on Windows (WinFSP directory buffering).
struct open_file_data final {
  PVOID directory_buffer{nullptr};
};
#else
// On non-Windows platforms open-file data is a plain int.
using open_file_data = int;
#endif
// Provider-agnostic description of a single remote file.
struct api_file final {
  std::string api_path;
  std::string api_parent;
  std::uint64_t accessed_date{};
  std::uint64_t changed_date{};
  std::uint64_t creation_date{};
  std::uint64_t file_size{};
  std::string key;
  std::uint64_t modified_date{};
  std::string source_path;
};

// One entry of a directory listing; `meta` holds string metadata
// key/value pairs (see the META_* constants).
struct directory_item final {
  std::string api_path;
  std::string api_parent;
  bool directory{false};
  std::uint64_t size{};
  api_meta_map meta;
  bool resolved{false};
};
// Settings for the local "encrypt" provider.
struct encrypt_config final {
  std::string encryption_token;
  std::string path;

  auto operator==(const encrypt_config &rhs) const noexcept -> bool {
    // Self-comparison short-circuits; otherwise compare member-wise.
    if (this == &rhs) {
      return true;
    }

    return encryption_token == rhs.encryption_token && path == rhs.path;
  }

  auto operator!=(const encrypt_config &rhs) const noexcept -> bool {
    return not(*this == rhs);
  }
};
// Minimal file/directory record used by the filesystem layer; `source_path`
// is the local backing file for the remote item at `api_path`.
struct filesystem_item final {
  std::string api_path;
  std::string api_parent;
  bool directory{false};
  std::uint64_t size{};
  std::string source_path;
};
// Connection settings for an HTTP API host (e.g. the Sia daemon).
struct host_config final {
  std::string agent_string;
  std::string api_password;
  std::string api_user;
  // Fix: value-initialize api_port — it was the only member here without
  // an initializer, leaving it indeterminate for default-constructed
  // instances (every sibling config struct zero-initializes its port).
  std::uint16_t api_port{};
  std::string host_name_or_ip{"localhost"};
  std::string path;
  std::string protocol{"http"};
  std::uint32_t timeout_ms{default_timeout_ms};

  auto operator==(const host_config &cfg) const noexcept -> bool {
    if (this == &cfg) {
      return true;
    }

    return agent_string == cfg.agent_string &&
           api_password == cfg.api_password && api_user == cfg.api_user &&
           api_port == cfg.api_port &&
           host_name_or_ip == cfg.host_name_or_ip && path == cfg.path &&
           protocol == cfg.protocol && timeout_ms == cfg.timeout_ms;
  }

  auto operator!=(const host_config &cfg) const noexcept -> bool {
    return not(*this == cfg);
  }
};
// Settings for the S3 provider.
struct s3_config final {
  std::string access_key;
  std::string bucket;
  std::string encryption_token;
  std::string region{"any"};
  std::string secret_key;
  std::uint32_t timeout_ms{default_timeout_ms};
  std::string url;
  bool use_path_style{false};
  bool use_region_in_url{false};

  auto operator==(const s3_config &rhs) const noexcept -> bool {
    // Self-comparison short-circuits; otherwise compare member-wise.
    if (this == &rhs) {
      return true;
    }

    return access_key == rhs.access_key && bucket == rhs.bucket &&
           encryption_token == rhs.encryption_token && region == rhs.region &&
           secret_key == rhs.secret_key && timeout_ms == rhs.timeout_ms &&
           url == rhs.url && use_path_style == rhs.use_path_style &&
           use_region_in_url == rhs.use_region_in_url;
  }

  auto operator!=(const s3_config &rhs) const noexcept -> bool {
    return not(*this == rhs);
  }
};
// Settings for the Sia provider.
struct sia_config final {
  std::string bucket;

  auto operator==(const sia_config &rhs) const noexcept -> bool {
    // Self-comparison short-circuits; otherwise compare the only member.
    if (this == &rhs) {
      return true;
    }

    return bucket == rhs.bucket;
  }

  auto operator!=(const sia_config &rhs) const noexcept -> bool {
    return not(*this == rhs);
  }
};
// Common callback / collection aliases used by the provider interfaces.
using api_file_list = std::vector<api_file>;
using api_file_provider_callback = std::function<void(api_file &)>;
using api_item_added_callback = std::function<api_error(bool, api_file &)>;
using directory_item_list = std::vector<directory_item>;
using meta_provider_callback = std::function<void(directory_item &)>;

// Canonical JSON key names used by the config file and API payloads.
// Serializers below reference these instead of string literals so a key
// rename only happens in one place.
inline constexpr const auto JSON_ACCESS_KEY{"AccessKey"};
inline constexpr const auto JSON_AGENT_STRING{"AgentString"};
inline constexpr const auto JSON_API_AUTH{"ApiAuth"};
inline constexpr const auto JSON_API_PARENT{"ApiParent"};
inline constexpr const auto JSON_API_PASSWORD{"ApiPassword"};
inline constexpr const auto JSON_API_PATH{"ApiPath"};
inline constexpr const auto JSON_API_PORT{"ApiPort"};
inline constexpr const auto JSON_API_USER{"ApiUser"};
inline constexpr const auto JSON_BUCKET{"Bucket"};
inline constexpr const auto JSON_CLIENT_POOL_SIZE{"ClientPoolSize"};
inline constexpr const auto JSON_DATABASE_TYPE{"DatabaseType"};
inline constexpr const auto JSON_DIRECTORY{"Directory"};
inline constexpr const auto JSON_DOWNLOAD_TIMEOUT_SECS{
    "DownloadTimeoutSeconds"};
inline constexpr const auto JSON_ENABLE_DRIVE_EVENTS{"EnableDriveEvents"};
inline constexpr const auto JSON_ENABLE_DOWNLOAD_TIMEOUT{
    "EnableDownloadTimeout"};
inline constexpr const auto JSON_ENABLE_MOUNT_MANAGER{"EnableMountManager"};
// Key is literally "Enable" (scoped by its parent object).
inline constexpr const auto JSON_ENABLE_REMOTE_MOUNT{"Enable"};
inline constexpr const auto JSON_ENCRYPTION_TOKEN{"EncryptionToken"};
inline constexpr const auto JSON_ENCRYPT_CONFIG{"EncryptConfig"};
inline constexpr const auto JSON_EVENT_LEVEL{"EventLevel"};
inline constexpr const auto JSON_EVICTION_DELAY_MINS{"EvictionDelayMinutes"};
inline constexpr const auto JSON_EVICTION_USE_ACCESS_TIME{
    "EvictionUseAccessedTime"};
inline constexpr const auto JSON_HIGH_FREQ_INTERVAL_SECS{
    "HighFreqIntervalSeconds"};
inline constexpr const auto JSON_HOST_CONFIG{"HostConfig"};
inline constexpr const auto JSON_HOST_NAME_OR_IP{"HostNameOrIp"};
inline constexpr const auto JSON_LOW_FREQ_INTERVAL_SECS{
    "LowFreqIntervalSeconds"};
inline constexpr const auto JSON_MAX_CACHE_SIZE_BYTES{"MaxCacheSizeBytes"};
inline constexpr const auto JSON_MAX_CONNECTIONS{"MaxConnections"};
inline constexpr const auto JSON_MAX_UPLOAD_COUNT{"MaxUploadCount"};
inline constexpr const auto JSON_MED_FREQ_INTERVAL_SECS{
    "MedFreqIntervalSeconds"};
inline constexpr const auto JSON_META{"Meta"};
inline constexpr const auto JSON_ONLINE_CHECK_RETRY_SECS{
    "OnlineCheckRetrySeconds"};
inline constexpr const auto JSON_ORPHANED_FILE_RETENTION_DAYS{
    "OrphanedFileRetentionDays"};
inline constexpr const auto JSON_PATH{"Path"};
inline constexpr const auto JSON_PREFERRED_DOWNLOAD_TYPE{
    "PreferredDownloadType"};
inline constexpr const auto JSON_PROTOCOL{"Protocol"};
inline constexpr const auto JSON_RECV_TIMEOUT_MS{"ReceiveTimeoutMs"};
inline constexpr const auto JSON_REGION{"Region"};
inline constexpr const auto JSON_REMOTE_CONFIG{"RemoteConfig"};
inline constexpr const auto JSON_REMOTE_MOUNT{"RemoteMount"};
inline constexpr const auto JSON_RETRY_READ_COUNT{"RetryReadCount"};
inline constexpr const auto JSON_RING_BUFFER_FILE_SIZE{"RingBufferFileSize"};
inline constexpr const auto JSON_S3_CONFIG{"S3Config"};
inline constexpr const auto JSON_SECRET_KEY{"SecretKey"};
inline constexpr const auto JSON_SEND_TIMEOUT_MS{"SendTimeoutMs"};
inline constexpr const auto JSON_SIA_CONFIG{"SiaConfig"};
inline constexpr const auto JSON_SIZE{"Size"};
inline constexpr const auto JSON_TASK_WAIT_MS{"TaskWaitMs"};
inline constexpr const auto JSON_TIMEOUT_MS{"TimeoutMs"};
inline constexpr const auto JSON_URL{"URL"};
inline constexpr const auto JSON_USE_PATH_STYLE{"UsePathStyle"};
inline constexpr const auto JSON_USE_REGION_IN_URL{"UseRegionInURL"};
inline constexpr const auto JSON_VERSION{"Version"};
} // namespace repertory
NLOHMANN_JSON_NAMESPACE_BEGIN
// nlohmann::json (de)serializer for directory_item.
// Note: the `resolved` member is intentionally not serialized.
template <> struct adl_serializer<repertory::directory_item> {
  static void to_json(json &data, const repertory::directory_item &value) {
    data[repertory::JSON_API_PARENT] = value.api_parent;
    data[repertory::JSON_API_PATH] = value.api_path;
    data[repertory::JSON_DIRECTORY] = value.directory;
    data[repertory::JSON_META] = value.meta;
    data[repertory::JSON_SIZE] = value.size;
  }

  // Uses json::at(), so every key is required; a missing key throws.
  static void from_json(const json &data, repertory::directory_item &value) {
    data.at(repertory::JSON_API_PARENT).get_to<std::string>(value.api_parent);
    data.at(repertory::JSON_API_PATH).get_to<std::string>(value.api_path);
    data.at(repertory::JSON_DIRECTORY).get_to<bool>(value.directory);
    data.at(repertory::JSON_META).get_to<repertory::api_meta_map>(value.meta);
    data.at(repertory::JSON_SIZE).get_to<std::uint64_t>(value.size);
  }
};
// nlohmann::json (de)serializer for encrypt_config.
template <> struct adl_serializer<repertory::encrypt_config> {
  static void to_json(json &data, const repertory::encrypt_config &value) {
    data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
    data[repertory::JSON_PATH] = value.path;
  }

  // Uses json::at(), so every key is required; a missing key throws.
  static void from_json(const json &data, repertory::encrypt_config &value) {
    data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
    data.at(repertory::JSON_PATH).get_to(value.path);
  }
};
// nlohmann::json (de)serializer for host_config.
template <> struct adl_serializer<repertory::host_config> {
  static void to_json(json &data, const repertory::host_config &value) {
    data[repertory::JSON_AGENT_STRING] = value.agent_string;
    data[repertory::JSON_API_PASSWORD] = value.api_password;
    data[repertory::JSON_API_PORT] = value.api_port;
    data[repertory::JSON_API_USER] = value.api_user;
    data[repertory::JSON_HOST_NAME_OR_IP] = value.host_name_or_ip;
    data[repertory::JSON_PATH] = value.path;
    data[repertory::JSON_PROTOCOL] = value.protocol;
    data[repertory::JSON_TIMEOUT_MS] = value.timeout_ms;
  }

  // Uses json::at(), so every key is required; a missing key throws.
  static void from_json(const json &data, repertory::host_config &value) {
    data.at(repertory::JSON_AGENT_STRING).get_to(value.agent_string);
    data.at(repertory::JSON_API_PASSWORD).get_to(value.api_password);
    data.at(repertory::JSON_API_PORT).get_to(value.api_port);
    data.at(repertory::JSON_API_USER).get_to(value.api_user);
    data.at(repertory::JSON_HOST_NAME_OR_IP).get_to(value.host_name_or_ip);
    data.at(repertory::JSON_PATH).get_to(value.path);
    data.at(repertory::JSON_PROTOCOL).get_to(value.protocol);
    data.at(repertory::JSON_TIMEOUT_MS).get_to(value.timeout_ms);
  }
};
// nlohmann::json (de)serializer for s3_config.
template <> struct adl_serializer<repertory::s3_config> {
  static void to_json(json &data, const repertory::s3_config &value) {
    data[repertory::JSON_ACCESS_KEY] = value.access_key;
    data[repertory::JSON_BUCKET] = value.bucket;
    data[repertory::JSON_ENCRYPTION_TOKEN] = value.encryption_token;
    data[repertory::JSON_REGION] = value.region;
    data[repertory::JSON_SECRET_KEY] = value.secret_key;
    data[repertory::JSON_TIMEOUT_MS] = value.timeout_ms;
    data[repertory::JSON_URL] = value.url;
    data[repertory::JSON_USE_PATH_STYLE] = value.use_path_style;
    data[repertory::JSON_USE_REGION_IN_URL] = value.use_region_in_url;
  }

  // Uses json::at(), so every key is required; a missing key throws.
  static void from_json(const json &data, repertory::s3_config &value) {
    data.at(repertory::JSON_ACCESS_KEY).get_to(value.access_key);
    data.at(repertory::JSON_BUCKET).get_to(value.bucket);
    data.at(repertory::JSON_ENCRYPTION_TOKEN).get_to(value.encryption_token);
    data.at(repertory::JSON_REGION).get_to(value.region);
    data.at(repertory::JSON_SECRET_KEY).get_to(value.secret_key);
    data.at(repertory::JSON_TIMEOUT_MS).get_to(value.timeout_ms);
    data.at(repertory::JSON_URL).get_to(value.url);
    data.at(repertory::JSON_USE_PATH_STYLE).get_to(value.use_path_style);
    data.at(repertory::JSON_USE_REGION_IN_URL).get_to(value.use_region_in_url);
  }
};
// nlohmann::json (de)serializer for sia_config.
template <> struct adl_serializer<repertory::sia_config> {
  static void to_json(json &data, const repertory::sia_config &value) {
    data[repertory::JSON_BUCKET] = value.bucket;
  }

  // Uses json::at(); a missing "Bucket" key throws.
  static void from_json(const json &data, repertory::sia_config &value) {
    data.at(repertory::JSON_BUCKET).get_to(value.bucket);
  }
};
// Serialize repertory::atomic<T> transparently as its contained value.
template <typename data_t> struct adl_serializer<repertory::atomic<data_t>> {
  static void to_json(json &data, const repertory::atomic<data_t> &value) {
    data = value.load();
  }

  static void from_json(const json &data, repertory::atomic<data_t> &value) {
    value.store(data.get<data_t>());
  }
};

// Serialize std::atomic<T> transparently as its contained value.
template <typename primitive_t>
struct adl_serializer<std::atomic<primitive_t>> {
  static void to_json(json &data, const std::atomic<primitive_t> &value) {
    data = value.load();
  }

  static void from_json(const json &data, std::atomic<primitive_t> &value) {
    value.store(data.get<primitive_t>());
  }
};
// database_type / download_type (plain and std::atomic-wrapped) are
// serialized as their string names via the *_to_string / *_from_string
// helpers declared in this header.
template <> struct adl_serializer<std::atomic<repertory::database_type>> {
  static void to_json(json &data,
                      const std::atomic<repertory::database_type> &value) {
    data = repertory::database_type_to_string(value.load());
  }

  static void from_json(const json &data,
                        std::atomic<repertory::database_type> &value) {
    value.store(repertory::database_type_from_string(data.get<std::string>()));
  }
};

template <> struct adl_serializer<std::atomic<repertory::download_type>> {
  static void to_json(json &data,
                      const std::atomic<repertory::download_type> &value) {
    data = repertory::download_type_to_string(value.load());
  }

  static void from_json(const json &data,
                        std::atomic<repertory::download_type> &value) {
    value.store(repertory::download_type_from_string(data.get<std::string>()));
  }
};

template <> struct adl_serializer<repertory::database_type> {
  static void to_json(json &data, const repertory::database_type &value) {
    data = repertory::database_type_to_string(value);
  }

  static void from_json(const json &data, repertory::database_type &value) {
    value = repertory::database_type_from_string(data.get<std::string>());
  }
};

template <> struct adl_serializer<repertory::download_type> {
  static void to_json(json &data, const repertory::download_type &value) {
    data = repertory::download_type_to_string(value);
  }

  static void from_json(const json &data, repertory::download_type &value) {
    value = repertory::download_type_from_string(data.get<std::string>());
  }
};
NLOHMANN_JSON_NAMESPACE_END
#endif // REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
/*
Copyright <2018-2023> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
#define REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_
namespace repertory {
// Sentinel "no expiry / forever" timestamp value.
inline constexpr const auto max_time{std::numeric_limits<std::uint64_t>::max()};

// Keys used in the per-item metadata map (api_meta_map).
inline constexpr const std::string META_ACCESSED{"accessed"};
inline constexpr const std::string META_ATTRIBUTES{"attributes"};
inline constexpr const std::string META_BACKUP{"backup"};
inline constexpr const std::string META_CHANGED{"changed"};
inline constexpr const std::string META_CREATION{"creation"};
inline constexpr const std::string META_DIRECTORY{"directory"};
inline constexpr const std::string META_GID{"gid"};
inline constexpr const std::string META_KEY{"key"};
inline constexpr const std::string META_MODE{"mode"};
inline constexpr const std::string META_MODIFIED{"modified"};
inline constexpr const std::string META_OSXFLAGS{"flags"};
inline constexpr const std::string META_PINNED{"pinned"};
inline constexpr const std::string META_SIZE{"size"};
inline constexpr const std::string META_SOURCE{"source"};
inline constexpr const std::string META_UID{"uid"};
inline constexpr const std::string META_WRITTEN{"written"};

// All recognized metadata keys; keep the 16U size in sync with the list.
inline constexpr const std::array<std::string, 16U> META_USED_NAMES = {
    META_ACCESSED, META_ATTRIBUTES, META_BACKUP, META_CHANGED,
    META_CREATION, META_DIRECTORY, META_GID, META_KEY,
    META_MODE, META_MODIFIED, META_OSXFLAGS, META_PINNED,
    META_SIZE, META_SOURCE, META_UID, META_WRITTEN,
};

// Metadata is stored as string key -> string value.
using api_meta_map = std::map<std::string, std::string>;
// Error codes returned throughout the provider / drive layers.
// ERROR_COUNT must remain last — it is the enumerator count.
enum class api_error {
  success = 0,
  access_denied,
  bad_address,
  buffer_overflow,
  buffer_too_small,
  comm_error,
  decryption_error,
  directory_end_of_files,
  directory_exists,
  directory_not_empty,
  directory_not_found,
  download_failed,
  download_incomplete,
  download_stopped,
  empty_ring_buffer_chunk_size,
  empty_ring_buffer_size,
  error,
  file_in_use,
  file_size_mismatch,
  incompatible_version,
  invalid_handle,
  invalid_operation,
  invalid_ring_buffer_multiple,
  invalid_ring_buffer_size,
  invalid_version,
  item_exists,
  item_not_found,
  more_data,
  no_disk_space,
  not_implemented,
  not_supported,
  os_error,
  out_of_memory,
  permission_denied,
  upload_failed,
  xattr_buffer_small,
  xattr_exists,
  xattr_not_found,
  xattr_too_big,
  ERROR_COUNT // number of enumerators; not a real error
};

// String <-> api_error conversion helpers (implemented elsewhere).
[[nodiscard]] auto api_error_from_string(std::string_view str) -> api_error;
[[nodiscard]] auto api_error_to_string(const api_error &error)
    -> const std::string &;

// Strategy used to download file data; unrecognized strings fall back to
// `default_type`.
enum class download_type { direct, fallback, ring_buffer };
[[nodiscard]] auto download_type_from_string(std::string type,
                                             const download_type &default_type)
    -> download_type;
[[nodiscard]] auto download_type_to_string(const download_type &type)
    -> std::string;
// Process exit codes; 0 is success, all failures are negative.
enum class exit_code : std::int32_t {
  success,
  communication_error = -1,
  file_creation_failed = -2,
  incompatible_version = -3,
  invalid_syntax = -4,
  lock_failed = -5,
  mount_active = -6,
  mount_result = -7,
  not_mounted = -8,
  startup_exception = -9,
  failed_to_get_mount_state = -10,
  export_failed = -11,
  import_failed = -12,
  option_not_found = -13,
  invalid_provider_type = -14,
  set_option_not_found = -15,
  pin_failed = -16,
  unpin_failed = -17,
  init_failed = -18,
};

// Subset of HTTP status codes the code compares against.
// NOTE(review): intentionally unscoped (plain enum) — callers may rely on
// implicit conversion to int.
enum http_error_codes : std::int32_t {
  ok = 200,
  multiple_choices = 300,
  not_found = 404,
};

// Result of attempting to acquire the mount/instance lock.
enum class lock_result {
  success,
  locked,
  failure,
};

// Supported storage provider back-ends.
enum class provider_type : std::size_t {
  sia,
  remote,
  s3,
  encrypt,
  unknown,
};

#if defined(_WIN32)
// Per-open-file state used on Windows (WinFSP directory buffering).
struct open_file_data final {
  PVOID directory_buffer{nullptr};
};
#else
// On non-Windows platforms open-file data is a plain int.
using open_file_data = int;
#endif
// Provider-agnostic description of a single remote file.
struct api_file final {
  std::string api_path{};
  std::string api_parent{};
  std::uint64_t accessed_date{};
  std::uint64_t changed_date{};
  std::uint64_t creation_date{};
  std::uint64_t file_size{};
  std::string key{};
  std::uint64_t modified_date{};
  std::string source_path;
};
// Directory listing entry with ad-hoc JSON helpers that use lowercase
// keys ("path", "parent", "size", "directory", "meta").  The `resolved`
// member is intentionally not serialized.
struct directory_item final {
  std::string api_path{};
  std::string api_parent{};
  bool directory{false};
  std::uint64_t size{};
  api_meta_map meta{};
  bool resolved{false};

  // Builds a directory_item from a JSON object.  NOTE(review): uses
  // operator[] rather than at(), so behavior on a missing key depends on
  // nlohmann::json's const-access semantics — confirm callers always
  // supply all five keys.
  [[nodiscard]] static auto from_json(const json &item) -> directory_item {
    directory_item ret{};
    ret.api_path = item["path"].get<std::string>();
    ret.api_parent = item["parent"].get<std::string>();
    ret.directory = item["directory"].get<bool>();
    ret.size = item["size"].get<std::uint64_t>();
    ret.meta = item["meta"].get<api_meta_map>();
    return ret;
  }

  // Inverse of from_json; emits the same lowercase keys.
  [[nodiscard]] auto to_json() const -> json {
    return {
        {"path", api_path}, {"parent", api_parent}, {"size", size},
        {"directory", directory}, {"meta", meta},
    };
  }
};
// Settings for the local "encrypt" provider.
struct encrypt_config final {
  std::string encryption_token{};
  std::string path{};
};

// Minimal file/directory record used by the filesystem layer; `source_path`
// is the local backing file for the remote item at `api_path`.
struct filesystem_item final {
  std::string api_path{};
  std::string api_parent{};
  bool directory{false};
  std::uint64_t size{};
  std::string source_path{};
};
// Connection settings for an HTTP API host (e.g. the Sia daemon).
struct host_config final {
  std::string agent_string;
  std::string api_password;
  std::string api_user;
  std::uint16_t api_port{};
  std::string host_name_or_ip{"localhost"};
  std::string path;
  std::string protocol{"http"};
  std::uint32_t timeout_ms{60000U};

  auto operator==(const host_config &rhs) const noexcept -> bool {
    // Self-comparison short-circuits; otherwise compare member-wise.
    if (this == &rhs) {
      return true;
    }

    return agent_string == rhs.agent_string &&
           api_password == rhs.api_password && api_user == rhs.api_user &&
           api_port == rhs.api_port &&
           host_name_or_ip == rhs.host_name_or_ip && path == rhs.path &&
           protocol == rhs.protocol && timeout_ms == rhs.timeout_ms;
  }

  auto operator!=(const host_config &rhs) const noexcept -> bool {
    return not(*this == rhs);
  }
};
#if defined(__GNUG__)
__attribute__((unused))
#endif
// Serializes host_config into a JSON object with PascalCase keys.
// Note: assigns a whole new object to `j`, discarding any prior contents.
static void
to_json(json &j, const host_config &hc) {
  j = json{{"AgentString", hc.agent_string},
           {"ApiPassword", hc.api_password},
           {"ApiPort", hc.api_port},
           {"ApiUser", hc.api_user},
           {"HostNameOrIp", hc.host_name_or_ip},
           {"Path", hc.path},
           {"Protocol", hc.protocol},
           {"TimeoutMs", hc.timeout_ms}};
}
#if defined(__GNUG__)
__attribute__((unused))
#endif
// Deserializes host_config from a JSON object; every key is required
// (json::at throws on a missing key).
static void
from_json(const json &j, host_config &hc) {
  j.at("AgentString").get_to(hc.agent_string);
  j.at("ApiPassword").get_to(hc.api_password);
  j.at("ApiPort").get_to(hc.api_port);
  // Fix: read "ApiUser" — to_json writes "ApiUser", but this previously
  // read "AuthUser", so a round-tripped config always threw on the
  // missing key and api_user could never be restored.
  j.at("ApiUser").get_to(hc.api_user);
  j.at("HostNameOrIp").get_to(hc.host_name_or_ip);
  j.at("Path").get_to(hc.path);
  j.at("Protocol").get_to(hc.protocol);
  j.at("TimeoutMs").get_to(hc.timeout_ms);
}
// Settings for the S3 provider.
struct s3_config final {
  std::string access_key{};
  std::string bucket{};
  std::string encryption_token{};
  std::string region{"any"};
  std::string secret_key{};
  std::uint32_t timeout_ms{60000U};
  std::string url{};
  bool use_path_style{false};
  bool use_region_in_url{false};
};

// Settings for the Sia provider.
struct sia_config final {
  std::string bucket{};
};

// Common callback / collection aliases used by the provider interfaces.
using api_file_list = std::vector<api_file>;
using api_file_provider_callback = std::function<void(api_file &)>;
using api_item_added_callback = std::function<api_error(bool, api_file &)>;
using directory_item_list = std::vector<directory_item>;
using meta_provider_callback = std::function<void(directory_item &)>;
} // namespace repertory
#endif // REPERTORY_INCLUDE_TYPES_REPERTORY_HPP_

View File

@ -24,25 +24,15 @@
#include "types/repertory.hpp"
namespace repertory {
class app_config;
namespace utils {
namespace repertory::utils {
void calculate_allocation_size(bool directory, std::uint64_t file_size,
UINT64 allocation_size,
std::string &allocation_meta_size);
[[nodiscard]] auto
create_rocksdb(const app_config &cfg, const std::string &name,
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
-> std::unique_ptr<rocksdb::TransactionDB>;
[[nodiscard]] auto create_volume_label(const provider_type &prov)
-> std::string;
create_volume_label(const provider_type &prov) -> std::string;
[[nodiscard]] auto get_attributes_from_meta(const api_meta_map &meta) -> DWORD;
} // namespace utils
} // namespace repertory
} // namespace repertory::utils
#endif // REPERTORY_INCLUDE_UTILS_UTILS_HPP_

File diff suppressed because it is too large Load Diff

View File

@ -30,7 +30,7 @@ namespace repertory {
void client_pool::pool::execute(
std::uint64_t thread_id, const worker_callback &worker,
const worker_complete_callback &worker_complete) {
auto index = thread_id % pool_queues_.size();
const auto index = thread_id % pool_queues_.size();
auto job = std::make_shared<work_item>(worker, worker_complete);
auto &pool_queue = pool_queues_[index];
@ -51,7 +51,7 @@ client_pool::pool::pool(std::uint8_t pool_size) {
for (std::size_t i = 0U; i < pool_queues_.size(); i++) {
pool_threads_.emplace_back([this]() {
auto thread_index = thread_index_++;
const auto thread_index = thread_index_++;
auto &pool_queue = pool_queues_[thread_index];
auto &queue = pool_queue->queue;
@ -74,7 +74,7 @@ client_pool::pool::pool(std::uint8_t pool_size) {
queue_lock.unlock();
try {
auto result = item->work();
const auto result = item->work();
item->work_complete(result);
} catch (const std::exception &e) {
item->work_complete(utils::from_api_error(api_error::error));

View File

@ -36,8 +36,8 @@ void packet::clear() {
}
auto packet::decode(std::string &data) -> packet::error_type {
const auto *str = reinterpret_cast<const char *>(&buffer_.at(decode_offset_));
auto length = strnlen(str, buffer_.size() - decode_offset_);
const auto *str = reinterpret_cast<const char *>(&buffer_[decode_offset_]);
const auto length = strnlen(str, buffer_.size() - decode_offset_);
data = std::string(str, length);
decode_offset_ += (length + 1);
@ -46,7 +46,7 @@ auto packet::decode(std::string &data) -> packet::error_type {
auto packet::decode(std::wstring &data) -> packet::error_type {
std::string utf8_string;
auto ret = decode(utf8_string);
const auto ret = decode(utf8_string);
if (ret == 0) {
data = utils::string::from_utf8(utf8_string);
}
@ -60,7 +60,7 @@ auto packet::decode(void *&ptr) -> packet::error_type {
auto packet::decode(void *buffer, std::size_t size) -> packet::error_type {
if (size != 0U) {
auto read_size =
const auto read_size =
utils::calculate_read_size(buffer_.size(), size, decode_offset_);
if (read_size == size) {
memcpy(buffer, &buffer_[decode_offset_], size);
@ -76,7 +76,7 @@ auto packet::decode(void *buffer, std::size_t size) -> packet::error_type {
}
auto packet::decode(std::int8_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -84,7 +84,7 @@ auto packet::decode(std::int8_t &val) -> packet::error_type {
}
auto packet::decode(std::uint8_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -92,7 +92,7 @@ auto packet::decode(std::uint8_t &val) -> packet::error_type {
}
auto packet::decode(std::int16_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -100,7 +100,7 @@ auto packet::decode(std::int16_t &val) -> packet::error_type {
}
auto packet::decode(std::uint16_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -108,7 +108,7 @@ auto packet::decode(std::uint16_t &val) -> packet::error_type {
}
auto packet::decode(std::int32_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -116,7 +116,7 @@ auto packet::decode(std::int32_t &val) -> packet::error_type {
}
auto packet::decode(std::uint32_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -124,7 +124,7 @@ auto packet::decode(std::uint32_t &val) -> packet::error_type {
}
auto packet::decode(std::int64_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -132,7 +132,7 @@ auto packet::decode(std::int64_t &val) -> packet::error_type {
}
auto packet::decode(std::uint64_t &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val);
}
@ -140,7 +140,7 @@ auto packet::decode(std::uint64_t &val) -> packet::error_type {
}
auto packet::decode(remote::setattr_x &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.acctime);
boost::endian::big_to_native_inplace(val.bkuptime);
@ -159,7 +159,7 @@ auto packet::decode(remote::setattr_x &val) -> packet::error_type {
}
auto packet::decode(remote::stat &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.st_mode);
boost::endian::big_to_native_inplace(val.st_nlink);
@ -179,7 +179,7 @@ auto packet::decode(remote::stat &val) -> packet::error_type {
}
auto packet::decode(remote::statfs &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.f_bavail);
boost::endian::big_to_native_inplace(val.f_bfree);
@ -200,7 +200,7 @@ auto packet::decode(remote::statfs_x &val) -> packet::error_type {
}
auto packet::decode(remote::file_info &val) -> packet::error_type {
auto ret = decode(&val, sizeof(val));
const auto ret = decode(&val, sizeof(val));
if (ret == 0) {
boost::endian::big_to_native_inplace(val.AllocationSize);
boost::endian::big_to_native_inplace(val.ChangeTime);
@ -268,7 +268,7 @@ void packet::encode(const void *buffer, std::size_t size, bool should_reserve) {
}
void packet::encode(std::string_view str) {
auto len = str.size();
const auto len = str.size();
buffer_.reserve(len + 1 + buffer_.size());
encode(str.data(), len, false);
buffer_.emplace_back(0);
@ -401,7 +401,7 @@ void packet::encode_top(const void *buffer, std::size_t size,
}
void packet::encode_top(std::string_view str) {
auto len = str.size();
const auto len = str.size();
buffer_.reserve(len + 1U + buffer_.size());
encode_top(str.data(), len, false);
buffer_.insert(buffer_.begin() + static_cast<std::int32_t>(len), 0);
@ -531,7 +531,7 @@ void packet::encrypt(std::string_view token) {
}
}
void packet::to_buffer(data_buffer &buffer) {
void packet::transfer_into(data_buffer &buffer) {
buffer = std::move(buffer_);
buffer_ = data_buffer();
decode_offset_ = 0;

View File

@ -38,8 +38,18 @@ E_SIMPLE2(packet_client_timeout, error, true,
);
// clang-format on
packet_client::packet_client(remote::remote_config cfg)
: cfg_(std::move(cfg)), unique_id_(utils::create_uuid_string()) {}
packet_client::packet_client(std::string host_name_or_ip,
std::uint8_t max_connections, std::uint16_t port,
std::uint16_t receive_timeout,
std::uint16_t send_timeout,
std::string encryption_token)
: host_name_or_ip_(std::move(host_name_or_ip)),
max_connections_(max_connections == 0U ? 20U : max_connections),
port_(port),
receive_timeout_(receive_timeout),
send_timeout_(send_timeout),
encryption_token_(std::move(encryption_token)),
unique_id_(utils::create_uuid_string()) {}
packet_client::~packet_client() {
allow_connections_ = false;
@ -75,7 +85,7 @@ void packet_client::connect(client &cli) {
cli.socket.set_option(boost::asio::socket_base::linger(false, 0));
packet response;
auto res = read_packet(cli, response);
const auto res = read_packet(cli, response);
if (res != 0) {
throw std::runtime_error(std::to_string(res));
}
@ -85,27 +95,27 @@ void packet_client::connect(client &cli) {
}
auto packet_client::get_client() -> std::shared_ptr<packet_client::client> {
std::shared_ptr<client> ret;
unique_mutex_lock clients_lock(clients_mutex_);
if (not allow_connections_) {
return nullptr;
if (allow_connections_) {
if (clients_.empty()) {
clients_lock.unlock();
ret = std::make_shared<client>(io_context_);
connect(*ret);
} else {
ret = clients_[0U];
utils::collection::remove_element(clients_, ret);
clients_lock.unlock();
}
}
if (clients_.empty()) {
clients_lock.unlock();
auto cli = std::make_shared<client>(io_context_);
connect(*cli);
return cli;
}
auto cli = clients_.at(0U);
utils::collection::remove_element(clients_, cli);
return cli;
return ret;
}
void packet_client::put_client(std::shared_ptr<client> &cli) {
mutex_lock clientsLock(clients_mutex_);
if (clients_.size() < cfg_.max_connections) {
if (clients_.size() < max_connections_) {
clients_.emplace_back(cli);
}
}
@ -116,7 +126,7 @@ auto packet_client::read_packet(client &cli, packet &response)
const auto read_buffer = [&]() {
std::uint32_t offset{};
while (offset < buffer.size()) {
auto bytes_read = boost::asio::read(
const auto bytes_read = boost::asio::read(
cli.socket,
boost::asio::buffer(&buffer[offset], buffer.size() - offset));
if (bytes_read <= 0) {
@ -127,14 +137,14 @@ auto packet_client::read_packet(client &cli, packet &response)
};
read_buffer();
auto size = boost::endian::big_to_native(
const auto size = boost::endian::big_to_native(
*reinterpret_cast<std::uint32_t *>(buffer.data()));
buffer.resize(size);
read_buffer();
response = std::move(buffer);
auto ret = response.decrypt(cfg_.encryption_token);
auto ret = response.decrypt(encryption_token_);
if (ret == 0) {
ret = response.decode(cli.nonce);
}
@ -143,13 +153,10 @@ auto packet_client::read_packet(client &cli, packet &response)
}
void packet_client::resolve() {
if (not resolve_results_.empty()) {
return;
if (resolve_results_.empty()) {
resolve_results_ = tcp::resolver(io_context_)
.resolve({host_name_or_ip_, std::to_string(port_)});
}
resolve_results_ =
tcp::resolver(io_context_)
.resolve(cfg_.host_name_or_ip, std::to_string(cfg_.api_port));
}
auto packet_client::send(std::string_view method, std::uint32_t &service_flags)
@ -177,14 +184,14 @@ auto packet_client::send(std::string_view method, packet &request,
request.encode_top(PACKET_SERVICE_FLAGS);
request.encode_top(std::string{project_get_version()});
static constexpr const std::uint8_t max_attempts{5U};
static const std::uint8_t max_attempts{5U};
for (std::uint8_t i = 1U;
allow_connections_ && not success && (i <= max_attempts); i++) {
auto current_client = get_client();
if (current_client) {
try {
request.encode_top(current_client->nonce);
request.encrypt(cfg_.encryption_token);
request.encrypt(encryption_token_);
timeout request_timeout(
[method, current_client]() {
@ -192,11 +199,11 @@ auto packet_client::send(std::string_view method, packet &request,
"request", std::string{method});
packet_client::close(*current_client);
},
std::chrono::milliseconds(cfg_.send_timeout_ms));
std::chrono::seconds(send_timeout_));
std::uint32_t offset{};
while (offset < request.get_size()) {
auto bytes_written = boost::asio::write(
const auto bytes_written = boost::asio::write(
current_client->socket,
boost::asio::buffer(&request[offset],
request.get_size() - offset));
@ -214,7 +221,7 @@ auto packet_client::send(std::string_view method, packet &request,
"response", std::string{method});
packet_client::close(*current_client);
},
std::chrono::milliseconds(cfg_.recv_timeout_ms));
std::chrono::seconds(receive_timeout_));
ret = read_packet(*current_client, response);
response_timeout.disable();

View File

@ -72,7 +72,7 @@ void packet_server::initialize(const uint16_t &port, uint8_t pool_size) {
server_thread_ = std::make_unique<std::thread>([this, port, pool_size]() {
tcp::acceptor acceptor(io_context_);
try {
auto endpoint = tcp::endpoint(tcp::v4(), port);
const auto endpoint = tcp::endpoint(tcp::v4(), port);
acceptor.open(endpoint.protocol());
acceptor.set_option(socket_base::reuse_address(true));
acceptor.bind(endpoint);
@ -148,7 +148,7 @@ void packet_server::read_packet(std::shared_ptr<connection> conn,
const auto read_buffer = [&]() {
std::uint32_t offset{};
while (offset < conn->buffer.size()) {
auto bytes_read = boost::asio::read(
const auto bytes_read = boost::asio::read(
conn->socket, boost::asio::buffer(&conn->buffer[offset],
conn->buffer.size() - offset));
if (bytes_read <= 0) {
@ -244,7 +244,7 @@ void packet_server::send_response(std::shared_ptr<connection> conn,
response.encode_top(PACKET_SERVICE_FLAGS);
response.encode_top(conn->nonce);
response.encrypt(encryption_token_);
response.to_buffer(conn->buffer);
response.transfer_into(conn->buffer);
boost::asio::async_write(
conn->socket, boost::asio::buffer(conn->buffer),

View File

@ -1,38 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/file_db.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_file_db.hpp"
#include "db/impl/sqlite_file_db.hpp"
namespace repertory {
auto create_file_db(const app_config &cfg) -> std::unique_ptr<i_file_db> {
switch (cfg.get_database_type()) {
case database_type::sqlite:
return std::make_unique<sqlite_file_db>(cfg);
default:
return std::make_unique<rdb_file_db>(cfg);
}
}
} // namespace repertory

View File

@ -23,18 +23,12 @@
#include "app_config.hpp"
#include "db/i_file_mgr_db.hpp"
#include "db/impl/rdb_file_mgr_db.hpp"
#include "db/impl/sqlite_file_mgr_db.hpp"
#include "db/rdb_file_mgr_db.hpp"
#include "db/sqlite_file_mgr_db.hpp"
namespace repertory {
auto create_file_mgr_db(const app_config &cfg)
-> std::unique_ptr<i_file_mgr_db> {
switch (cfg.get_database_type()) {
case database_type::sqlite:
return std::make_unique<sqlite_file_mgr_db>(cfg);
default:
return std::make_unique<rdb_file_mgr_db>(cfg);
}
return std::make_unique<sqlite_file_mgr_db>(cfg);
}
} // namespace repertory

View File

@ -1,390 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/rdb_file_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
#include "utils/utils.hpp"
namespace repertory {
rdb_file_db::rdb_file_db(const app_config &cfg) : cfg_(cfg) {
create_or_open(false);
}
rdb_file_db::~rdb_file_db() { db_.reset(); }
void rdb_file_db::create_or_open(bool clear) {
db_.reset();
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
rocksdb::ColumnFamilyOptions());
families.emplace_back("file", rocksdb::ColumnFamilyOptions());
families.emplace_back("path", rocksdb::ColumnFamilyOptions());
families.emplace_back("source", rocksdb::ColumnFamilyOptions());
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = utils::create_rocksdb(cfg_, "file", families, handles, clear);
std::size_t idx{};
directory_family_ = handles.at(idx++);
file_family_ = handles.at(idx++);
path_family_ = handles.at(idx++);
source_family_ = handles.at(idx++);
}
auto rdb_file_db::add_directory(const std::string &api_path,
const std::string &source_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string existing_source_path;
auto result = get_directory_source_path(api_path, existing_source_path);
if (result != api_error::success &&
result != api_error::directory_not_found) {
return result;
}
return perform_action(
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
if (not existing_source_path.empty()) {
auto res = remove_item(api_path, existing_source_path, txn);
if (not res.ok() && not res.IsNotFound()) {
return res;
}
}
auto res = txn->Put(directory_family_, api_path, source_path);
if (not res.ok()) {
return res;
}
res = txn->Put(path_family_, api_path, source_path);
if (not res.ok()) {
return res;
}
return txn->Put(source_family_, source_path, api_path);
});
}
auto rdb_file_db::add_or_update_file(const i_file_db::file_data &data)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string existing_source_path;
auto result = get_file_source_path(data.api_path, existing_source_path);
if (result != api_error::success && result != api_error::item_not_found) {
return result;
}
return perform_action(
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
if (not existing_source_path.empty()) {
auto res = remove_item(data.api_path, existing_source_path, txn);
if (not res.ok() && not res.IsNotFound()) {
return res;
}
}
json json_data = {
{"file_size", data.file_size},
{"iv", data.iv_list},
{"source_path", data.source_path},
};
auto res = txn->Put(file_family_, data.api_path, json_data.dump());
if (not res.ok()) {
return res;
}
res = txn->Put(path_family_, data.api_path, data.source_path);
if (not res.ok()) {
return res;
}
return txn->Put(source_family_, data.source_path, data.api_path);
});
}
void rdb_file_db::clear() { create_or_open(true); }
auto rdb_file_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator> {
return std::shared_ptr<rocksdb::Iterator>(
db_->NewIterator(rocksdb::ReadOptions{}, family));
}
auto rdb_file_db::count() const -> std::uint64_t {
std::uint64_t ret{};
auto iter = create_iterator(source_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
++ret;
}
return ret;
}
auto rdb_file_db::get_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
});
}
auto rdb_file_db::get_directory_api_path(
const std::string &source_path, std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
if (not res.ok()) {
return res;
}
std::string value;
return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
&value);
});
if (result != api_error::success) {
api_path.clear();
}
return result == api_error::item_not_found ? api_error::directory_not_found
: result;
}
auto rdb_file_db::get_directory_source_path(
const std::string &api_path, std::string &source_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, directory_family_, api_path,
&source_path);
});
return result == api_error::item_not_found ? api_error::directory_not_found
: result;
}
auto rdb_file_db::get_file_api_path(const std::string &source_path,
std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
auto res = db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
&api_path);
if (not res.ok()) {
return res;
}
std::string value;
return db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
});
if (result != api_error::success) {
api_path.clear();
}
return result;
}
auto rdb_file_db::get_file_data(const std::string &api_path,
i_file_db::file_data &data) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
std::string value;
auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
if (not res.ok()) {
return res;
}
auto json_data = json::parse(value);
data.api_path = api_path;
data.file_size = json_data.at("file_size").get<std::uint64_t>();
data.iv_list =
json_data.at("iv")
.get<std::vector<
std::array<unsigned char,
crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
data.source_path = json_data.at("source_path").get<std::string>();
return res;
});
return result;
}
auto rdb_file_db::get_file_source_path(
const std::string &api_path, std::string &source_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
auto result = perform_action(function_name, [&]() -> rocksdb::Status {
std::string value;
auto res = db_->Get(rocksdb::ReadOptions{}, file_family_, api_path, &value);
if (not res.ok()) {
return res;
}
auto json_data = json::parse(value);
source_path = json_data.at("source_path").get<std::string>();
return res;
});
return result;
}
auto rdb_file_db::get_item_list() const -> std::vector<i_file_db::file_info> {
std::vector<i_file_db::file_info> ret{};
{
auto iter = create_iterator(directory_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.emplace_back(i_file_db::file_info{
iter->key().ToString(),
true,
iter->value().ToString(),
});
}
}
{
auto iter = create_iterator(file_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto json_data = json::parse(iter->value().ToString());
ret.emplace_back(i_file_db::file_info{
iter->key().ToString(),
true,
json_data.at("source_path").get<std::string>(),
});
}
}
return ret;
}
auto rdb_file_db::get_source_path(const std::string &api_path,
std::string &source_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, path_family_, api_path,
&source_path);
});
}
auto rdb_file_db::perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action)
-> api_error {
auto res = action();
if (res.ok()) {
return api_error::success;
}
if (not res.IsNotFound()) {
utils::error::raise_error(function_name, res.ToString());
}
return res.IsNotFound() ? api_error::item_not_found : api_error::error;
}
auto rdb_file_db::perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error {
std::unique_ptr<rocksdb::Transaction> txn{
db_->BeginTransaction(rocksdb::WriteOptions{},
rocksdb::TransactionOptions{}),
};
try {
auto res = action(txn.get());
if (res.ok()) {
auto commit_res = txn->Commit();
if (commit_res.ok()) {
return api_error::success;
}
utils::error::raise_error(function_name,
"rocksdb commit failed|" + res.ToString());
return api_error::error;
}
utils::error::raise_error(function_name,
"rocksdb action failed|" + res.ToString());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to handle rocksdb action");
}
auto rollback_res = txn->Rollback();
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
rollback_res.ToString());
return api_error::error;
}
auto rdb_file_db::remove_item(const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
std::string source_path;
auto res = get_source_path(api_path, source_path);
if (res != api_error::success) {
return res;
}
return perform_action(function_name,
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
return remove_item(api_path, source_path, txn);
});
}
auto rdb_file_db::remove_item(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn) -> rocksdb::Status {
auto res = txn->Delete(source_family_, source_path);
if (not res.ok()) {
return res;
}
res = txn->Delete(path_family_, api_path);
if (not res.ok()) {
return res;
}
res = txn->Delete(directory_family_, api_path);
if (not res.ok()) {
return res;
}
return txn->Delete(file_family_, api_path);
}
} // namespace repertory

View File

@ -1,327 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/rdb_file_mgr_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
#include "utils/utils.hpp"
namespace repertory {
rdb_file_mgr_db::rdb_file_mgr_db(const app_config &cfg) : cfg_(cfg) {
create_or_open(false);
}
rdb_file_mgr_db::~rdb_file_mgr_db() { db_.reset(); }
void rdb_file_mgr_db::create_or_open(bool clear) {
db_.reset();
auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
families.emplace_back(rocksdb::kDefaultColumnFamilyName,
rocksdb::ColumnFamilyOptions());
families.emplace_back("upload_active", rocksdb::ColumnFamilyOptions());
families.emplace_back("upload", rocksdb::ColumnFamilyOptions());
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = utils::create_rocksdb(cfg_, "file_mgr", families, handles, clear);
std::size_t idx{};
resume_family_ = handles.at(idx++);
upload_active_family_ = handles.at(idx++);
upload_family_ = handles.at(idx++);
}
auto rdb_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &entry](rocksdb::Transaction *txn) -> rocksdb::Status {
return add_resume(entry, txn);
});
}
auto rdb_file_mgr_db::add_resume(const resume_entry &entry,
rocksdb::Transaction *txn) -> rocksdb::Status {
REPERTORY_USES_FUNCTION_NAME();
auto data = json({
{"chunk_size", entry.chunk_size},
{"read_state", utils::string::from_dynamic_bitset(entry.read_state)},
{"source_path", entry.source_path},
});
return txn->Put(resume_family_, entry.api_path, data.dump());
}
auto rdb_file_mgr_db::add_upload(const upload_entry &entry) -> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &entry](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(upload_family_,
utils::string::zero_pad(std::to_string(++id_), 20U) +
'|' + entry.api_path,
entry.source_path);
});
}
auto rdb_file_mgr_db::add_upload_active(const upload_active_entry &entry)
-> bool {
REPERTORY_USES_FUNCTION_NAME();
return perform_action(
function_name,
[this, &entry](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(upload_active_family_, entry.api_path,
entry.source_path);
});
}
void rdb_file_mgr_db::clear() { create_or_open(true); }
auto rdb_file_mgr_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator> {
return std::shared_ptr<rocksdb::Iterator>(
db_->NewIterator(rocksdb::ReadOptions(), family));
}
auto rdb_file_mgr_db::get_next_upload() const -> std::optional<upload_entry> {
auto iter = create_iterator(upload_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto parts = utils::string::split(iter->key().ToString(), '|', false);
parts.erase(parts.begin());
auto api_path = utils::string::join(parts, '|');
return upload_entry{
api_path,
iter->value().ToString(),
};
}
return std::nullopt;
}
auto rdb_file_mgr_db::get_resume_list() const -> std::vector<resume_entry> {
std::vector<resume_entry> ret;
auto iter = create_iterator(resume_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto data = json::parse(iter->value().ToString());
ret.emplace_back(resume_entry{
iter->key().ToString(),
data.at("chunk_size").get<std::uint64_t>(),
utils::string::to_dynamic_bitset(
data.at("read_state").get<std::string>()),
data.at("source_path").get<std::string>(),
});
}
return ret;
}
auto rdb_file_mgr_db::get_upload(const std::string &api_path) const
-> std::optional<upload_entry> {
auto iter = create_iterator(upload_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto parts = utils::string::split(iter->key().ToString(), '|', false);
parts.erase(parts.begin());
if (api_path != utils::string::join(parts, '|')) {
continue;
}
return upload_entry{
api_path,
iter->value().ToString(),
};
}
return std::nullopt;
}
auto rdb_file_mgr_db::get_upload_active_list() const
-> std::vector<upload_active_entry> {
std::vector<upload_active_entry> ret;
auto iter = create_iterator(upload_active_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.emplace_back(upload_active_entry{
iter->key().ToString(),
iter->value().ToString(),
});
}
return ret;
}
auto rdb_file_mgr_db::perform_action(std::string_view function_name,
std::function<rocksdb::Status()> action)
-> bool {
try {
auto res = action();
if (not res.ok()) {
utils::error::raise_error(function_name, res.ToString());
}
return res.ok();
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex);
}
return false;
}
auto rdb_file_mgr_db::perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action) -> bool {
std::unique_ptr<rocksdb::Transaction> txn{
db_->BeginTransaction(rocksdb::WriteOptions{},
rocksdb::TransactionOptions{}),
};
try {
auto res = action(txn.get());
if (res.ok()) {
auto commit_res = txn->Commit();
if (commit_res.ok()) {
return true;
}
utils::error::raise_error(function_name,
"rocksdb commit failed|" + res.ToString());
return false;
}
utils::error::raise_error(function_name,
"rocksdb action failed|" + res.ToString());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to handle rocksdb action");
}
auto rollback_res = txn->Rollback();
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
rollback_res.ToString());
return false;
}
// Removes the resume entry for `api_path` inside its own transaction;
// returns true when the delete committed successfully.
auto rdb_file_mgr_db::remove_resume(const std::string &api_path) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  return perform_action(
      function_name,
      [this, &api_path](rocksdb::Transaction *txn) -> rocksdb::Status {
        // Delegates to the transactional overload so other operations can
        // compose the same delete into a larger transaction.
        return remove_resume(api_path, txn);
      });
}
// Transactional delete of a resume entry; the status is returned to the
// caller's transaction wrapper (no commit happens here).
auto rdb_file_mgr_db::remove_resume(
    const std::string &api_path, rocksdb::Transaction *txn) -> rocksdb::Status {
  return txn->Delete(resume_family_, api_path);
}
// Deletes the queued upload whose key ends with `api_path`. Upload keys are
// "<id>|<api path>"; a missing entry is treated as success.
auto rdb_file_mgr_db::remove_upload(const std::string &api_path) -> bool {
  REPERTORY_USES_FUNCTION_NAME();

  auto db_iter = create_iterator(upload_family_);
  for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
    auto key_parts =
        utils::string::split(db_iter->key().ToString(), '|', false);
    key_parts.erase(key_parts.begin());

    if (utils::string::join(key_parts, '|') != api_path) {
      continue;
    }

    return perform_action(
        function_name,
        [this, &db_iter](rocksdb::Transaction *txn) -> rocksdb::Status {
          return txn->Delete(upload_family_, db_iter->key());
        });
  }

  return true;
}
// Removes the active-upload marker keyed by `api_path`. RocksDB's Delete on a
// missing key still returns OK, so this succeeds whether or not the entry
// exists.
auto rdb_file_mgr_db::remove_upload_active(const std::string &api_path)
    -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  return perform_action(
      function_name,
      [this, &api_path](rocksdb::Transaction *txn) -> rocksdb::Status {
        return txn->Delete(upload_active_family_, api_path);
      });
}
// Renames a resume entry by re-keying it: read the JSON payload for
// `from_api_path`, then (in a single transaction) delete the old key and
// re-insert the payload under `to_api_path`. A missing or empty entry is
// treated as success (nothing to rename).
auto rdb_file_mgr_db::rename_resume(const std::string &from_api_path,
                                    const std::string &to_api_path) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  bool not_found{false};
  std::string value;
  // NOTE(review): this Get does not pass a column-family handle, so it reads
  // the default family — only correct if resume_family_ is the default
  // handle; confirm against create_or_open().
  auto res = perform_action(
      function_name,
      [this, &from_api_path, &not_found, &value]() -> rocksdb::Status {
        auto result = db_->Get(rocksdb::ReadOptions{}, from_api_path, &value);
        not_found = result.IsNotFound();
        return result;
      });
  // No source entry: nothing to rename, report success.
  if (not_found) {
    return true;
  }
  if (not res) {
    return false;
  }
  if (value.empty()) {
    return true;
  }
  auto data = json::parse(value);
  resume_entry entry{
      to_api_path,
      data.at("chunk_size").get<std::uint64_t>(),
      utils::string::to_dynamic_bitset(
          data.at("read_state").get<std::string>()),
      data.at("source_path").get<std::string>(),
  };
  // Delete + re-add must commit atomically so a crash cannot lose the entry.
  return perform_action(function_name,
                        [this, &entry, &from_api_path](
                            rocksdb::Transaction *txn) -> rocksdb::Status {
                          auto txn_res = remove_resume(from_api_path, txn);
                          if (not txn_res.ok()) {
                            return txn_res;
                          }
                          return add_resume(entry, txn);
                        });
}
} // namespace repertory

View File

@ -1,334 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/sqlite_file_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
namespace {
// Name of the single table backing sqlite_file_db.
const std::string file_table = "file";
// DDL executed at startup. source_path is the primary key and api_path is
// UNIQUE, so a row is addressable by either path; `directory` is a 0/1 flag
// and `iv` stores the JSON-encoded nonce list for encrypted files.
const std::map<std::string, std::string> sql_create_tables = {
    {
        {file_table},
        {"CREATE TABLE IF NOT EXISTS " + file_table +
         "("
         "source_path TEXT PRIMARY KEY ASC, "
         "api_path TEXT UNIQUE NOT NULL, "
         "iv TEXT DEFAULT '' NOT NULL, "
         "directory INTEGER NOT NULL, "
         "size INTEGER DEFAULT 0 NOT NULL"
         ");"},
    },
};
} // namespace
namespace repertory {
// Opens (creating if needed) <data_dir>/db/file.db and applies the DDL in
// sql_create_tables. Throws startup_exception if the db directory cannot be
// created.
sqlite_file_db::sqlite_file_db(const app_config &cfg) {
  auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
  if (not utils::file::directory{db_dir}.create_directory()) {
    // Fixed: format string was missing its "{}" placeholder, so the failing
    // directory path was silently dropped from the exception message.
    throw startup_exception(
        fmt::format("failed to create db directory|{}", db_dir));
  }
  db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"file.db"}),
                                     sql_create_tables);
}
// Explicitly releases the database handle on destruction.
sqlite_file_db::~sqlite_file_db() { db_.reset(); }
// Inserts a directory row (directory flag = 1) mapping api_path to
// source_path. Returns api_error::success on insert; logs and returns
// api_error::error on failure.
auto sqlite_file_db::add_directory(const std::string &api_path,
                                   const std::string &source_path)
    -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  auto insert_res = utils::db::sqlite::db_insert{*db_, file_table}
                        .column_value("api_path", api_path)
                        .column_value("directory", 1)
                        .column_value("source_path", source_path)
                        .go();
  if (not insert_res.ok()) {
    utils::error::raise_api_path_error(
        function_name, api_path, api_error::error,
        fmt::format("failed to add directory|{}", insert_res.get_error_str()));
    return api_error::error;
  }

  return api_error::success;
}
// Inserts or replaces the row for a regular file (directory flag = 0). The
// IV list is serialized to JSON text and the unsigned size is stored as a
// signed SQLite integer.
auto sqlite_file_db::add_or_update_file(const i_file_db::file_data &data)
    -> api_error {
  REPERTORY_USES_FUNCTION_NAME();
  auto result =
      utils::db::sqlite::db_insert{*db_, file_table}
          .or_replace()
          .column_value("api_path", data.api_path)
          .column_value("directory", 0)
          .column_value("iv", json(data.iv_list).dump())
          .column_value("size", static_cast<std::int64_t>(data.file_size))
          .column_value("source_path", data.source_path)
          .go();
  if (result.ok()) {
    return api_error::success;
  }
  utils::error::raise_api_path_error(
      function_name, data.api_path, api_error::error,
      fmt::format("failed to add file|{}", result.get_error_str()));
  return api_error::error;
}
// Deletes every row from the file table, logging (but not propagating) any
// failure.
void sqlite_file_db::clear() {
  REPERTORY_USES_FUNCTION_NAME();
  auto result = utils::db::sqlite::db_delete{*db_, file_table}.go();
  if (not result.ok()) {
    // Consistency: format the numeric error code via fmt directly instead of
    // wrapping it in std::to_string() as before; the message is unchanged.
    utils::error::raise_error(function_name,
                              fmt::format("failed to clear file table|{}",
                                          result.get_error()));
  }
}
// Returns the number of rows in the file table, or 0 when the count query
// yields no row.
auto sqlite_file_db::count() const -> std::uint64_t {
  auto query_res = utils::db::sqlite::db_select{*db_, file_table}
                       .count("api_path", "count")
                       .go();

  std::optional<utils::db::sqlite::db_result::row> row;
  if (not query_res.get_row(row) || not row.has_value()) {
    return 0U;
  }

  return static_cast<std::uint64_t>(
      row->get_column("count").get_value<std::int64_t>());
}
// Resolves a source path to its api path (matches files and directories).
// Returns item_not_found when no row matches.
auto sqlite_file_db::get_api_path(const std::string &source_path,
                                  std::string &api_path) const -> api_error {
  auto query_res = utils::db::sqlite::db_select{*db_, file_table}
                       .column("api_path")
                       .where("source_path")
                       .equals(source_path)
                       .op()
                       .limit(1)
                       .go();

  std::optional<utils::db::sqlite::db_result::row> row;
  if (not query_res.get_row(row) || not row.has_value()) {
    return api_error::item_not_found;
  }

  api_path = row->get_column("api_path").get_value<std::string>();
  return api_error::success;
}
// Resolves a source path to its api path, restricted to directory rows
// (directory = 1). Returns directory_not_found when no row matches.
auto sqlite_file_db::get_directory_api_path(const std::string &source_path,
                                            std::string &api_path) const
    -> api_error {
  auto result = utils::db::sqlite::db_select{*db_, file_table}
                    .column("api_path")
                    .where("source_path")
                    .equals(source_path)
                    .and_()
                    .where("directory")
                    .equals(1)
                    .op()
                    .limit(1)
                    .go();
  std::optional<utils::db::sqlite::db_result::row> row;
  if (result.get_row(row) && row.has_value()) {
    api_path = row->get_column("api_path").get_value<std::string>();
    return api_error::success;
  }
  return api_error::directory_not_found;
}
// Resolves a directory's api path to its source path (directory = 1 rows
// only). Returns directory_not_found when no row matches.
auto sqlite_file_db::get_directory_source_path(const std::string &api_path,
                                               std::string &source_path) const
    -> api_error {
  auto result = utils::db::sqlite::db_select{*db_, file_table}
                    .column("source_path")
                    .where("api_path")
                    .equals(api_path)
                    .and_()
                    .where("directory")
                    .equals(1)
                    .op()
                    .limit(1)
                    .go();
  std::optional<utils::db::sqlite::db_result::row> row;
  if (result.get_row(row) && row.has_value()) {
    source_path = row->get_column("source_path").get_value<std::string>();
    return api_error::success;
  }
  return api_error::directory_not_found;
}
// Resolves a source path to its api path, restricted to regular-file rows
// (directory = 0). Returns item_not_found when no row matches.
auto sqlite_file_db::get_file_api_path(const std::string &source_path,
                                       std::string &api_path) const
    -> api_error {
  auto result = utils::db::sqlite::db_select{*db_, file_table}
                    .column("api_path")
                    .where("source_path")
                    .equals(source_path)
                    .and_()
                    .where("directory")
                    .equals(0)
                    .op()
                    .limit(1)
                    .go();
  std::optional<utils::db::sqlite::db_result::row> row;
  if (result.get_row(row) && row.has_value()) {
    api_path = row->get_column("api_path").get_value<std::string>();
    return api_error::success;
  }
  return api_error::item_not_found;
}
// Loads the full record for a regular file (directory = 0): size, source
// path, and — when present — the JSON-encoded list of per-chunk IVs
// deserialized into fixed-size nonce arrays. Returns item_not_found when no
// matching file row exists.
auto sqlite_file_db::get_file_data(const std::string &api_path,
                                   i_file_db::file_data &data) const
    -> api_error {
  auto result = utils::db::sqlite::db_select{*db_, file_table}
                    .column("iv")
                    .column("size")
                    .column("source_path")
                    .where("api_path")
                    .equals(api_path)
                    .and_()
                    .where("directory")
                    .equals(0)
                    .op()
                    .limit(1)
                    .go();
  std::optional<utils::db::sqlite::db_result::row> row;
  if (result.get_row(row) && row.has_value()) {
    data.api_path = api_path;
    data.file_size = static_cast<std::uint64_t>(
        row->get_column("size").get_value<std::int64_t>());
    data.source_path = row->get_column("source_path").get_value<std::string>();
    // `iv` defaults to '' in the schema; only parse when non-empty so
    // unencrypted entries leave iv_list untouched.
    auto str_data = row->get_column("iv").get_value<std::string>();
    if (not str_data.empty()) {
      data.iv_list =
          json::parse(str_data)
              .get<std::vector<
                  std::array<unsigned char,
                             crypto_aead_xchacha20poly1305_IETF_NPUBBYTES>>>();
    }
    return api_error::success;
  }
  return api_error::item_not_found;
}
// Resolves a regular file's api path to its source path (directory = 0 rows
// only). Returns item_not_found when no row matches.
auto sqlite_file_db::get_file_source_path(const std::string &api_path,
                                          std::string &source_path) const
    -> api_error {
  auto result = utils::db::sqlite::db_select{*db_, file_table}
                    .column("source_path")
                    .where("api_path")
                    .equals(api_path)
                    .and_()
                    .where("directory")
                    .equals(0)
                    .op()
                    .limit(1)
                    .go();
  std::optional<utils::db::sqlite::db_result::row> row;
  if (result.get_row(row) && row.has_value()) {
    source_path = row->get_column("source_path").get_value<std::string>();
    return api_error::success;
  }
  return api_error::item_not_found;
}
// Loads every row as (api_path, is_directory, source_path); rows that fail
// to materialize are skipped.
auto sqlite_file_db::get_item_list() const
    -> std::vector<i_file_db::file_info> {
  std::vector<i_file_db::file_info> items;

  auto query_res = utils::db::sqlite::db_select{*db_, file_table}.go();
  for (; query_res.has_row(); query_res.next_row()) {
    std::optional<utils::db::sqlite::db_result::row> row;
    if (not query_res.get_row(row) || not row.has_value()) {
      continue;
    }

    items.emplace_back(i_file_db::file_info{
        row->get_column("api_path").get_value<std::string>(),
        row->get_column("directory").get_value<std::int64_t>() == 1,
        row->get_column("source_path").get_value<std::string>(),
    });
  }

  return items;
}
// Resolves an api path to its source path (matches files and directories).
// Returns item_not_found when no row matches.
auto sqlite_file_db::get_source_path(const std::string &api_path,
                                     std::string &source_path) const
    -> api_error {
  auto result = utils::db::sqlite::db_select{*db_, file_table}
                    .column("source_path")
                    .where("api_path")
                    .equals(api_path)
                    .op()
                    .limit(1)
                    .go();
  std::optional<utils::db::sqlite::db_result::row> row;
  if (result.get_row(row) && row.has_value()) {
    source_path = row->get_column("source_path").get_value<std::string>();
    return api_error::success;
  }
  return api_error::item_not_found;
}
// Deletes the row (file or directory) whose api_path matches; deleting a
// missing row still reports success from the driver.
auto sqlite_file_db::remove_item(const std::string &api_path) -> api_error {
  auto del_res = utils::db::sqlite::db_delete{*db_, file_table}
                     .where("api_path")
                     .equals(api_path)
                     .go();
  if (del_res.ok()) {
    return api_error::success;
  }
  return api_error::error;
}
} // namespace repertory

View File

@ -22,17 +22,11 @@
#include "db/meta_db.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_meta_db.hpp"
#include "db/impl/sqlite_meta_db.hpp"
#include "db/rdb_meta_db.hpp"
#include "db/sqlite_meta_db.hpp"
namespace repertory {
auto create_meta_db(const app_config &cfg) -> std::unique_ptr<i_meta_db> {
switch (cfg.get_database_type()) {
case database_type::sqlite:
return std::make_unique<sqlite_meta_db>(cfg);
default:
return std::make_unique<rdb_meta_db>(cfg);
}
return std::make_unique<rdb_meta_db>(cfg);
}
} // namespace repertory

View File

@ -0,0 +1,234 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/rdb_file_mgr_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
namespace {
// Opens (creating if missing) the RocksDB database `name` under the config's
// data directory, populating `handles` with one handle per descriptor in
// `families`. When `clear` is set, the on-disk database is removed first.
// Throws startup_exception if the database cannot be opened.
[[nodiscard]] auto
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
               const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
               std::vector<rocksdb::ColumnFamilyHandle *> &handles,
               bool clear) -> std::unique_ptr<rocksdb::DB> {
  REPERTORY_USES_FUNCTION_NAME();
  auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
  // A failed wipe is logged but not fatal; Open() below proceeds regardless.
  if (clear &&
      not repertory::utils::file::directory{path}.remove_recursively()) {
    repertory::utils::error::raise_error(
        function_name, "failed to remove file mgr db|" + path);
  }
  rocksdb::Options options{};
  options.create_if_missing = true;
  options.create_missing_column_families = true;
  options.db_log_dir = cfg.get_log_directory();
  options.keep_log_file_num = 10;
  rocksdb::DB *ptr{};
  auto status = rocksdb::DB::Open(options, path, families, &handles, &ptr);
  if (not status.ok()) {
    repertory::utils::error::raise_error(function_name, status.ToString());
    throw repertory::startup_exception(status.ToString());
  }
  return std::unique_ptr<rocksdb::DB>(ptr);
}
} // namespace
namespace repertory {
// Opens the "mgr" database without clearing any existing data.
rdb_file_mgr_db::rdb_file_mgr_db(const app_config &cfg) : cfg_(cfg) {
  create_or_open(false);
}
// Explicitly releases the database (and its column-family handles).
rdb_file_mgr_db::~rdb_file_mgr_db() { db_.reset(); }
// (Re)creates the database and column-family handles; `clear` wipes the
// on-disk database first.
void rdb_file_mgr_db::create_or_open(bool clear) {
  db_.reset();
  auto families = std::vector<rocksdb::ColumnFamilyDescriptor>();
  // Registration order must match the handle-assignment order below; note
  // that resume entries live in the *default* column family.
  families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                        rocksdb::ColumnFamilyOptions());
  families.emplace_back("upload_active", rocksdb::ColumnFamilyOptions());
  families.emplace_back("upload", rocksdb::ColumnFamilyOptions());
  auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
  db_ = create_rocksdb(cfg_, "mgr", families, handles, clear);
  std::size_t idx{};
  resume_family_ = handles[idx++];
  upload_active_family_ = handles[idx++];
  upload_family_ = handles[idx++];
}
// Persists a resume entry keyed by api path; the payload is a JSON document
// holding chunk size, the serialized read-state bitset, and the source path.
auto rdb_file_mgr_db::add_resume(resume_entry entry) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  return perform_action(function_name, [this, &entry]() -> rocksdb::Status {
    auto data = json({
        {"chunk_size", entry.chunk_size},
        {"read_state", utils::string::from_dynamic_bitset(entry.read_state)},
        {"source_path", entry.source_path},
    });
    return db_->Put(rocksdb::WriteOptions{}, resume_family_, entry.api_path,
                    data.dump());
  });
}
// Queues an upload. The key is a 19-digit zero-padded, monotonically
// increasing id followed by '|' and the api path, so lexicographic iteration
// order equals insertion (FIFO) order.
auto rdb_file_mgr_db::add_upload(upload_entry entry) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  return perform_action(function_name, [this, &entry]() -> rocksdb::Status {
    auto data = json({
        {"date_time", entry.date_time},
        {"source_path", entry.source_path},
    });
    // NOTE(review): id_ is incremented here but not visibly persisted —
    // confirm it is re-seeded on startup so new keys sort after existing ones.
    return db_->Put(rocksdb::WriteOptions{}, upload_family_,
                    utils::string::zero_pad(std::to_string(++id_), 19U) + '|' +
                        entry.api_path,
                    data.dump());
  });
}
// Marks an upload as active. Fixed: the body was empty, so this non-void
// function flowed off its end (undefined behavior). Stores key = api path,
// value = source path, matching how the upload-active column family is read
// back elsewhere — confirm against get_upload_active_list().
auto rdb_file_mgr_db::add_upload_active(upload_active_entry entry) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  return perform_action(function_name, [this, &entry]() -> rocksdb::Status {
    return db_->Put(rocksdb::WriteOptions{}, upload_active_family_,
                    entry.api_path, entry.source_path);
  });
}
// Clears all state by wiping the on-disk database and reopening it.
void rdb_file_mgr_db::clear() { create_or_open(true); }
// Wraps a fresh RocksDB iterator over `family` in a shared_ptr so callers
// need not manage the raw iterator's lifetime.
auto rdb_file_mgr_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
    -> std::shared_ptr<rocksdb::Iterator> {
  return std::shared_ptr<rocksdb::Iterator>(
      db_->NewIterator(rocksdb::ReadOptions(), family));
}
// Returns the oldest queued upload, or std::nullopt when the queue is empty.
// Fixed: the body was empty, so this non-void function flowed off its end
// (undefined behavior). Upload keys are "<zero-padded id>|<api path>"
// (see add_upload), so the first key in iteration order is the next upload;
// the value is the JSON payload written by add_upload.
auto rdb_file_mgr_db::get_next_upload() const -> std::optional<upload_entry> {
  auto iter = create_iterator(upload_family_);
  iter->SeekToFirst();
  if (not iter->Valid()) {
    return std::nullopt;
  }

  // Drop the id prefix to recover the api path.
  auto parts = utils::string::split(iter->key().ToString(), '|', false);
  parts.erase(parts.begin());

  auto data = json::parse(iter->value().ToString());
  return upload_entry{
      utils::string::join(parts, '|'),
      data.at("date_time").get<std::uint64_t>(),
      data.at("source_path").get<std::string>(),
  };
}
// Builds the full list of resumable entries by walking the resume column
// family and decoding each persisted JSON payload (see add_resume).
auto rdb_file_mgr_db::get_resume_list() const -> std::vector<resume_entry> {
  REPERTORY_USES_FUNCTION_NAME();
  std::vector<resume_entry> ret;
  auto iter = create_iterator(resume_family_);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    auto data = json::parse(iter->value().ToString());
    ret.emplace_back(resume_entry{
        iter->key().ToString(),
        data.at("chunk_size").get<std::uint64_t>(),
        utils::string::to_dynamic_bitset(
            data.at("read_state").get<std::string>()),
        data.at("source_path").get<std::string>(),
    });
  }
  return ret;
}
// Looks up the queued upload for `api_path`. Keys are "<id>|<api path>", so
// the id prefix is dropped before comparing; the JSON payload supplies the
// date_time and source_path fields. Returns std::nullopt when not queued.
auto rdb_file_mgr_db::get_upload(const std::string &api_path) const
    -> std::optional<upload_entry> {
  REPERTORY_USES_FUNCTION_NAME();
  auto iter = create_iterator(upload_family_);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    auto parts = utils::string::split(iter->key().ToString(), '|', false);
    parts.erase(parts.begin());
    if (api_path != utils::string::join(parts, '|')) {
      continue;
    }
    auto data = json::parse(iter->value().ToString());
    return upload_entry{
        api_path,
        data.at("date_time").get<std::uint64_t>(),
        data.at("source_path").get<std::string>(),
    };
  }
  return std::nullopt;
}
// Returns every entry in the upload-active column family as raw key/value
// string pairs. Fixed: the body was empty, so this non-void function flowed
// off its end (undefined behavior).
auto rdb_file_mgr_db::get_upload_active_list() const
    -> std::vector<upload_active_entry> {
  std::vector<upload_active_entry> ret;
  auto iter = create_iterator(upload_active_family_);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ret.emplace_back(upload_active_entry{
        iter->key().ToString(),
        iter->value().ToString(),
    });
  }
  return ret;
}
// Runs a single RocksDB operation, logging a non-OK status; returns true
// only when the status is OK. Note there is no try/catch here, so any
// exception thrown by `action` propagates to the caller.
auto rdb_file_mgr_db::perform_action(std::string_view function_name,
                                     std::function<rocksdb::Status()> action)
    -> bool {
  auto res = action();
  if (not res.ok()) {
    utils::error::raise_error(function_name, res.ToString());
  }
  return res.ok();
}
// Removes the resume entry for `api_path`.
// NOTE(review): this Delete passes no column-family handle, so it targets the
// default family — correct only because resume_family_ is assigned the
// default handle in create_or_open().
auto rdb_file_mgr_db::remove_resume(const std::string &api_path) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  return perform_action(function_name, [this, &api_path]() -> rocksdb::Status {
    return db_->Delete(rocksdb::WriteOptions{}, api_path);
  });
}
auto rdb_file_mgr_db::remove_upload(const std::string &api_path) -> bool {}
auto rdb_file_mgr_db::remove_upload_active(const std::string &api_path)
-> bool {}
// Renames a resume entry by re-keying it: read the JSON payload for
// `from_api_path`, delete the old entry, then re-insert under `to_api_path`.
// NOTE(review): a NotFound Get is logged and returns false here, i.e.
// renaming a nonexistent entry is reported as failure — confirm callers
// expect that (later revisions treat not-found/empty as success).
auto rdb_file_mgr_db::rename_resume(const std::string &from_api_path,
                                    const std::string &to_api_path) -> bool {
  REPERTORY_USES_FUNCTION_NAME();
  std::string value;
  // NOTE(review): family-less Get targets the default family; correct only
  // because resume_family_ is the default handle (see create_or_open()).
  auto res = perform_action(
      function_name, [this, &from_api_path, &value]() -> rocksdb::Status {
        return db_->Get(rocksdb::ReadOptions{}, from_api_path, &value);
      });
  if (not res) {
    return false;
  }
  if (value.empty()) {
    return false;
  }
  auto data = json::parse(value);
  resume_entry entry{
      to_api_path,
      data.at("chunk_size").get<std::uint64_t>(),
      utils::string::to_dynamic_bitset(
          data.at("read_state").get<std::string>()),
      data.at("source_path").get<std::string>(),
  };
  // Delete and re-add are two separate operations here (not one transaction);
  // a crash between them can lose the entry.
  if (not remove_resume(from_api_path)) {
    return false;
  }
  return add_resume(entry);
}
} // namespace repertory

View File

@ -19,7 +19,7 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/rdb_meta_db.hpp"
#include "db/rdb_meta_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
@ -27,7 +27,38 @@
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
#include "utils/utils.hpp"
namespace {
[[nodiscard]] auto
create_rocksdb(const repertory::app_config &cfg, const std::string &name,
const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
std::vector<rocksdb::ColumnFamilyHandle *> &handles, bool clear)
-> std::unique_ptr<rocksdb::DB> {
REPERTORY_USES_FUNCTION_NAME();
auto path = repertory::utils::path::combine(cfg.get_data_directory(), {name});
if (clear &&
not repertory::utils::file::directory{path}.remove_recursively()) {
repertory::utils::error::raise_error(function_name,
"failed to remove meta db|" + path);
}
rocksdb::Options options{};
options.create_if_missing = true;
options.create_missing_column_families = true;
options.db_log_dir = cfg.get_log_directory();
options.keep_log_file_num = 10;
rocksdb::DB *ptr{};
auto status = rocksdb::DB::Open(options, path, families, &handles, &ptr);
if (not status.ok()) {
repertory::utils::error::raise_error(function_name, status.ToString());
throw repertory::startup_exception(status.ToString());
}
return std::unique_ptr<rocksdb::DB>(ptr);
}
} // namespace
namespace repertory {
rdb_meta_db::rdb_meta_db(const app_config &cfg) : cfg_(cfg) {
@ -47,13 +78,13 @@ void rdb_meta_db::create_or_open(bool clear) {
families.emplace_back("source", rocksdb::ColumnFamilyOptions());
auto handles = std::vector<rocksdb::ColumnFamilyHandle *>();
db_ = utils::create_rocksdb(cfg_, "provider_meta", families, handles, clear);
db_ = create_rocksdb(cfg_, "provider_meta", families, handles, clear);
std::size_t idx{};
meta_family_ = handles.at(idx++);
pinned_family_ = handles.at(idx++);
size_family_ = handles.at(idx++);
source_family_ = handles.at(idx++);
default_family_ = handles[idx++];
pinned_family_ = handles[idx++];
size_family_ = handles[idx++];
source_family_ = handles[idx++];
}
void rdb_meta_db::clear() { create_or_open(true); }
@ -61,7 +92,7 @@ void rdb_meta_db::clear() { create_or_open(true); }
auto rdb_meta_db::create_iterator(rocksdb::ColumnFamilyHandle *family) const
-> std::shared_ptr<rocksdb::Iterator> {
return std::shared_ptr<rocksdb::Iterator>(
db_->NewIterator(rocksdb::ReadOptions{}, family));
db_->NewIterator(rocksdb::ReadOptions(), family));
}
auto rdb_meta_db::get_api_path(const std::string &source_path,
@ -73,14 +104,14 @@ auto rdb_meta_db::get_api_path(const std::string &source_path,
}
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, source_family_, source_path,
return db_->Get(rocksdb::ReadOptions(), source_family_, source_path,
&api_path);
});
}
auto rdb_meta_db::get_api_path_list() const -> std::vector<std::string> {
std::vector<std::string> ret;
auto iter = create_iterator(meta_family_);
auto iter = create_iterator(default_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ret.push_back(iter->key().ToString());
}
@ -98,7 +129,8 @@ auto rdb_meta_db::get_item_meta_json(const std::string &api_path,
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, meta_family_, api_path, &value);
return db_->Get(rocksdb::ReadOptions(), default_family_, api_path,
&value);
});
if (res != api_error::success) {
return res;
@ -112,7 +144,7 @@ auto rdb_meta_db::get_item_meta_json(const std::string &api_path,
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, pinned_family_, api_path,
return db_->Get(rocksdb::ReadOptions(), pinned_family_, api_path,
&value);
});
if (res != api_error::success) {
@ -126,7 +158,7 @@ auto rdb_meta_db::get_item_meta_json(const std::string &api_path,
{
std::string value;
auto res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, size_family_, api_path, &value);
return db_->Get(rocksdb::ReadOptions(), size_family_, api_path, &value);
});
if (res != api_error::success) {
return res;
@ -167,13 +199,13 @@ auto rdb_meta_db::get_item_meta(const std::string &api_path,
if (key == META_PINNED) {
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, pinned_family_, api_path, &value);
return db_->Get(rocksdb::ReadOptions(), pinned_family_, api_path, &value);
});
}
if (key == META_SIZE) {
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Get(rocksdb::ReadOptions{}, size_family_, api_path, &value);
return db_->Get(rocksdb::ReadOptions(), size_family_, api_path, &value);
});
}
@ -206,7 +238,7 @@ auto rdb_meta_db::get_pinned_files() const -> std::vector<std::string> {
auto rdb_meta_db::get_total_item_count() const -> std::uint64_t {
std::uint64_t ret{};
auto iter = create_iterator(meta_family_);
auto iter = create_iterator(default_family_);
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
++ret;
}
@ -239,84 +271,21 @@ auto rdb_meta_db::perform_action(std::string_view function_name,
return res.IsNotFound() ? api_error::item_not_found : api_error::error;
}
auto rdb_meta_db::perform_action(
std::string_view function_name,
std::function<rocksdb::Status(rocksdb::Transaction *txn)> action)
-> api_error {
std::unique_ptr<rocksdb::Transaction> txn{
db_->BeginTransaction(rocksdb::WriteOptions{},
rocksdb::TransactionOptions{}),
};
try {
auto res = action(txn.get());
if (res.ok()) {
auto commit_res = txn->Commit();
if (commit_res.ok()) {
return api_error::success;
}
utils::error::raise_error(function_name,
"rocksdb commit failed|" + res.ToString());
return api_error::error;
}
utils::error::raise_error(function_name,
"rocksdb action failed|" + res.ToString());
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to handle rocksdb action");
}
auto rollback_res = txn->Rollback();
utils::error::raise_error(function_name, "rocksdb rollback failed|" +
rollback_res.ToString());
return api_error::error;
}
void rdb_meta_db::remove_api_path(const std::string &api_path) {
REPERTORY_USES_FUNCTION_NAME();
std::string source_path;
auto res = get_item_meta(api_path, META_SOURCE, source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, res,
"failed to get source path");
}
[[maybe_unused]] auto res = get_item_meta(api_path, META_SOURCE, source_path);
res = perform_action(function_name,
[this, &api_path, &source_path](
rocksdb::Transaction *txn) -> rocksdb::Status {
return remove_api_path(api_path, source_path, txn);
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, res,
"failed to remove api path");
}
}
auto rdb_meta_db::remove_api_path(const std::string &api_path,
const std::string &source_path,
rocksdb::Transaction *txn)
-> rocksdb::Status {
auto txn_res = txn->Delete(pinned_family_, api_path);
if (not txn_res.ok()) {
return txn_res;
}
txn_res = txn->Delete(size_family_, api_path);
if (not txn_res.ok()) {
return txn_res;
}
if (not source_path.empty()) {
txn_res = txn->Delete(source_family_, source_path);
if (not txn_res.ok()) {
return txn_res;
}
}
return txn->Delete(meta_family_, api_path);
res = perform_action(
function_name, [this, &api_path, &source_path]() -> rocksdb::Status {
db_->Delete(rocksdb::WriteOptions(), pinned_family_, api_path);
db_->Delete(rocksdb::WriteOptions(), size_family_, api_path);
if (not source_path.empty()) {
db_->Delete(rocksdb::WriteOptions(), source_family_, source_path);
}
return db_->Delete(rocksdb::WriteOptions(), default_family_, api_path);
});
}
auto rdb_meta_db::remove_item_meta(const std::string &api_path,
@ -340,27 +309,14 @@ auto rdb_meta_db::remove_item_meta(const std::string &api_path,
auto rdb_meta_db::rename_item_meta(const std::string &from_api_path,
const std::string &to_api_path)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
json json_data;
auto res = get_item_meta_json(from_api_path, json_data);
if (res != api_error::success) {
return res;
}
return perform_action(
function_name, [&](rocksdb::Transaction *txn) -> rocksdb::Status {
auto txn_res = remove_api_path(
from_api_path, json_data[META_SOURCE].get<std::string>(), txn);
if (not txn_res.ok()) {
return txn_res;
}
rocksdb::Status status;
[[maybe_unused]] auto api_res =
update_item_meta(to_api_path, json_data, txn, &status);
return status;
});
remove_api_path(from_api_path);
return update_item_meta(to_api_path, json_data);
}
auto rdb_meta_db::set_item_meta(const std::string &api_path,
@ -369,17 +325,15 @@ auto rdb_meta_db::set_item_meta(const std::string &api_path,
REPERTORY_USES_FUNCTION_NAME();
if (key == META_PINNED) {
return perform_action(function_name,
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(pinned_family_, api_path, value);
});
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Put(rocksdb::WriteOptions(), pinned_family_, api_path, value);
});
}
if (key == META_SIZE) {
return perform_action(function_name,
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
return txn->Put(size_family_, api_path, value);
});
return perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Put(rocksdb::WriteOptions(), size_family_, api_path, value);
});
}
json json_data;
@ -408,9 +362,8 @@ auto rdb_meta_db::set_item_meta(const std::string &api_path,
return update_item_meta(api_path, json_data);
}
auto rdb_meta_db::update_item_meta(const std::string &api_path, json json_data,
rocksdb::Transaction *base_txn,
rocksdb::Status *status) -> api_error {
auto rdb_meta_db::update_item_meta(const std::string &api_path, json json_data)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
try {
@ -440,67 +393,51 @@ auto rdb_meta_db::update_item_meta(const std::string &api_path, json json_data,
json_data[META_SIZE] = std::to_string(size);
json_data[META_SOURCE] = source_path;
auto should_del_source{false};
std::string orig_source_path;
if (not directory) {
std::string orig_source_path;
auto res = get_item_meta(api_path, META_SOURCE, orig_source_path);
if (res != api_error::success && res != api_error::item_not_found) {
return res;
}
should_del_source =
not orig_source_path.empty() && orig_source_path != source_path;
if (not orig_source_path.empty() && orig_source_path != source_path) {
res = perform_action(function_name, [&]() -> rocksdb::Status {
return db_->Delete(rocksdb::WriteOptions(), source_family_,
orig_source_path);
});
if (res != api_error::success && res != api_error::item_not_found) {
return res;
}
}
}
json_data.erase(META_PINNED);
json_data.erase(META_SIZE);
const auto set_status = [&status](rocksdb::Status res) -> rocksdb::Status {
if (status != nullptr) {
*status = res;
}
return res;
};
const auto do_transaction =
[&](rocksdb::Transaction *txn) -> rocksdb::Status {
if (should_del_source) {
auto res = set_status(txn->Delete(source_family_, orig_source_path));
if (not res.ok()) {
return res;
}
}
auto res = set_status(
txn->Put(pinned_family_, api_path, utils::string::from_bool(pinned)));
return perform_action(function_name, [&]() -> rocksdb::Status {
auto res = db_->Put(rocksdb::WriteOptions(), pinned_family_, api_path,
utils::string::from_bool(pinned));
if (not res.ok()) {
return res;
}
res = set_status(txn->Put(size_family_, api_path, std::to_string(size)));
res = db_->Put(rocksdb::WriteOptions(), size_family_, api_path,
std::to_string(size));
if (not res.ok()) {
return res;
}
if (not source_path.empty()) {
res = set_status(txn->Put(source_family_, source_path, api_path));
res = db_->Put(rocksdb::WriteOptions(), source_family_, source_path,
api_path);
if (not res.ok()) {
return res;
}
}
return set_status(txn->Put(meta_family_, api_path, json_data.dump()));
};
if (base_txn == nullptr) {
return perform_action(function_name, do_transaction);
}
auto res = set_status(do_transaction(base_txn));
if (res.ok()) {
return api_error::success;
}
return db_->Put(rocksdb::WriteOptions(), default_family_, api_path,
json_data.dump());
});
} catch (const std::exception &e) {
utils::error::raise_api_path_error(function_name, api_path, e,
"failed to update item meta");

View File

@ -19,10 +19,9 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/sqlite_file_mgr_db.hpp"
#include "db/sqlite_file_mgr_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/config.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
@ -30,7 +29,6 @@
#include "utils/db/sqlite/db_select.hpp"
#include "utils/db/sqlite/db_update.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
@ -58,6 +56,7 @@ const std::map<std::string, std::string> sql_create_tables{
"("
"id INTEGER PRIMARY KEY AUTOINCREMENT, "
"api_path TEXT UNIQUE, "
"date_time INTEGER, "
"source_path TEXT"
");",
},
@ -77,19 +76,14 @@ const std::map<std::string, std::string> sql_create_tables{
namespace repertory {
sqlite_file_mgr_db::sqlite_file_mgr_db(const app_config &cfg) {
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|", db_dir));
}
db_ = utils::db::sqlite::create_db(
utils::path::combine(db_dir, {"file_mgr.db"}), sql_create_tables);
utils::path::combine(cfg.get_data_directory(), {"mgr.db"}),
sql_create_tables);
}
sqlite_file_mgr_db::~sqlite_file_mgr_db() { db_.reset(); }
auto sqlite_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
auto sqlite_file_mgr_db::add_resume(resume_entry entry) -> bool {
return utils::db::sqlite::db_insert{*db_, resume_table}
.or_replace()
.column_value("api_path", entry.api_path)
@ -101,17 +95,17 @@ auto sqlite_file_mgr_db::add_resume(const resume_entry &entry) -> bool {
.ok();
}
auto sqlite_file_mgr_db::add_upload(const upload_entry &entry) -> bool {
auto sqlite_file_mgr_db::add_upload(upload_entry entry) -> bool {
return utils::db::sqlite::db_insert{*db_, upload_table}
.or_replace()
.column_value("api_path", entry.api_path)
.column_value("date_time", static_cast<std::int64_t>(entry.date_time))
.column_value("source_path", entry.source_path)
.go()
.ok();
}
auto sqlite_file_mgr_db::add_upload_active(const upload_active_entry &entry)
-> bool {
auto sqlite_file_mgr_db::add_upload_active(upload_active_entry entry) -> bool {
return utils::db::sqlite::db_insert{*db_, upload_active_table}
.or_replace()
.column_value("api_path", entry.api_path)
@ -158,6 +152,8 @@ auto sqlite_file_mgr_db::get_next_upload() const
return upload_entry{
row->get_column("api_path").get_value<std::string>(),
static_cast<std::uint64_t>(
row->get_column("date_time").get_value<std::int64_t>()),
row->get_column("source_path").get_value<std::string>(),
};
}
@ -206,6 +202,8 @@ auto sqlite_file_mgr_db::get_upload(const std::string &api_path) const
return upload_entry{
row->get_column("api_path").get_value<std::string>(),
static_cast<std::uint64_t>(
row->get_column("date_time").get_value<std::int64_t>()),
row->get_column("source_path").get_value<std::string>(),
};
}

View File

@ -19,16 +19,14 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "db/impl/sqlite_meta_db.hpp"
#include "db/sqlite_meta_db.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/db/sqlite/db_common.hpp"
#include "utils/db/sqlite/db_delete.hpp"
#include "utils/db/sqlite/db_insert.hpp"
#include "utils/db/sqlite/db_select.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
@ -50,14 +48,9 @@ sqlite_meta_db::sqlite_meta_db(const app_config &cfg) {
},
};
auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
if (not utils::file::directory{db_dir}.create_directory()) {
throw startup_exception(
fmt::format("failed to create db directory|", db_dir));
}
db_ = utils::db::sqlite::create_db(utils::path::combine(db_dir, {"meta.db"}),
sql_create_tables);
db_ = utils::db::sqlite::create_db(
utils::path::combine(cfg.get_data_directory(), {"provider_meta.db"}),
sql_create_tables);
}
sqlite_meta_db::~sqlite_meta_db() { db_.reset(); }
@ -279,12 +272,6 @@ void sqlite_meta_db::remove_api_path(const std::string &api_path) {
auto sqlite_meta_db::remove_item_meta(const std::string &api_path,
const std::string &key) -> api_error {
if (key == META_DIRECTORY || key == META_PINNED || key == META_SIZE ||
key == META_SOURCE) {
// TODO log warning for unsupported attributes
return api_error::success;
}
api_meta_map meta{};
auto res = get_item_meta(api_path, meta);
if (res != api_error::success) {

View File

@ -115,7 +115,7 @@ auto directory_iterator::get_directory_item(const std::string &api_path,
auto directory_iterator::get_json(std::size_t offset, json &item) -> int {
if (offset < items_.size()) {
item = json(items_.at(offset));
item = items_[offset].to_json();
return 0;
}

View File

@ -30,17 +30,18 @@
#include "utils/file_utils.hpp"
#include "utils/time.hpp"
#include "utils/utils.hpp"
#include <spdlog/fmt/bundled/base.h>
namespace repertory {
auto eviction::check_minimum_requirements(const std::string &file_path)
-> bool {
REPERTORY_USES_FUNCTION_NAME();
auto file = utils::file::file{file_path};
auto reference_time = file.get_time(config_.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
auto check_file = utils::file::file{file_path};
auto reference_time =
check_file.get_time(config_.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
if (not reference_time.has_value()) {
utils::error::raise_error(function_name, utils::get_last_error_code(),
@ -48,17 +49,18 @@ auto eviction::check_minimum_requirements(const std::string &file_path)
return false;
}
auto delay =
static_cast<std::uint64_t>(config_.get_eviction_delay_mins() * 60U) *
utils::time::NANOS_PER_SECOND;
return (reference_time.value() + delay) <= utils::time::get_time_now();
auto delay = (config_.get_eviction_delay_mins() * 60UL) *
utils::time::NANOS_PER_SECOND;
return ((reference_time.value() + static_cast<std::uint64_t>(delay)) <=
utils::time::get_time_now());
}
auto eviction::get_filtered_cached_files() -> std::deque<std::string> {
auto list =
utils::file::get_directory_files(config_.get_cache_directory(), true);
list.erase(std::remove_if(list.begin(), list.end(),
[this](auto &&path) -> bool {
[this](const std::string &path) -> bool {
return not this->check_minimum_requirements(path);
}),
list.end());
@ -68,38 +70,65 @@ auto eviction::get_filtered_cached_files() -> std::deque<std::string> {
void eviction::service_function() {
REPERTORY_USES_FUNCTION_NAME();
auto cached_files_list = get_filtered_cached_files();
auto was_file_evicted{false};
while (not get_stop_requested() && not cached_files_list.empty()) {
auto file_path = cached_files_list.front();
cached_files_list.pop_front();
auto should_evict = true;
try {
std::string api_path;
auto res = provider_.get_api_path_from_source(file_path, api_path);
if (res != api_error::success) {
continue;
// Handle maximum cache size eviction
auto used_bytes =
utils::file::directory{config_.get_cache_directory()}.size();
if (config_.get_enable_max_cache_size()) {
should_evict = (used_bytes > config_.get_max_cache_size_bytes());
}
if (should_evict) {
// Remove cached source files that don't meet minimum requirements
auto cached_files_list = get_filtered_cached_files();
while (not get_stop_requested() && should_evict &&
not cached_files_list.empty()) {
try {
std::string api_path;
if (provider_.get_api_path_from_source(
cached_files_list.front(), api_path) == api_error::success) {
api_file file{};
filesystem_item fsi{};
if (provider_.get_filesystem_item_and_file(api_path, file, fsi) ==
api_error::success) {
// Only evict files that match expected size
auto opt_size = utils::file::file{cached_files_list.front()}.size();
if (opt_size.has_value()) {
auto file_size{opt_size.value()};
if (file_size == fsi.size) {
// Try to evict file
if (fm_.evict_file(fsi.api_path) &&
config_.get_enable_max_cache_size()) {
// Restrict number of items evicted if maximum cache size is
// enabled
used_bytes -= file_size;
should_evict =
(used_bytes > config_.get_max_cache_size_bytes());
}
}
} else {
utils::error::raise_api_path_error(
function_name, file.api_path, file.source_path,
utils::get_last_error_code(), "failed to get file size");
}
}
}
} catch (const std::exception &ex) {
utils::error::raise_error(function_name, ex,
"failed to process cached file|sp|" +
cached_files_list.front());
}
if (file_mgr_.evict_file(api_path)) {
was_file_evicted = true;
}
} catch (const std::exception &ex) {
utils::error::raise_error(
function_name, ex,
fmt::format("failed to process cached file|sp|{}", file_path));
cached_files_list.pop_front();
}
}
if (get_stop_requested() || was_file_evicted) {
return;
if (not get_stop_requested()) {
unique_mutex_lock lock(get_mutex());
if (not get_stop_requested()) {
get_notify().wait_for(lock, 30s);
}
}
unique_mutex_lock lock(get_mutex());
if (get_stop_requested()) {
return;
}
get_notify().wait_for(lock, 30s);
}
} // namespace repertory

View File

@ -30,7 +30,6 @@
#include "initialize.hpp"
#include "platform/platform.hpp"
#include "utils/collection.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"

View File

@ -612,7 +612,7 @@ void *fuse_drive::init_impl(struct fuse_conn_info *conn) {
eviction_->start();
}
if (config_.get_remote_mount().enable) {
if (config_.get_enable_remote_mount()) {
remote_server_ = std::make_unique<remote_fuse::remote_server>(
config_, *this, get_mount_location());
}
@ -1315,10 +1315,6 @@ auto fuse_drive::truncate_impl(std::string api_path, off_t size) -> api_error {
return res;
}
if (not fm_->get_open_file(handle, true, open_file)) {
return api_error::invalid_handle;
}
res = open_file->resize(static_cast<std::uint64_t>(size));
}

View File

@ -27,7 +27,12 @@
namespace repertory::remote_fuse {
remote_client::remote_client(const app_config &config)
: config_(config), packet_client_(config.get_remote_config()) {}
: config_(config),
packet_client_(
config.get_remote_host_name_or_ip(),
config.get_remote_max_connections(), config.get_remote_port(),
config.get_remote_receive_timeout_secs(),
config.get_remote_send_timeout_secs(), config.get_remote_token()) {}
auto remote_client::fuse_access(const char *path, const std::int32_t &mask)
-> packet::error_type {

View File

@ -47,7 +47,12 @@ E_SIMPLE3(remote_winfsp_client_event, debug, true,
// clang-format on
remote_client::remote_client(const app_config &config)
: config_(config), packet_client_(config.get_remote_config()) {}
: config_(config),
packet_client_(
config.get_remote_host_name_or_ip(),
config.get_remote_max_connections(), config.get_remote_port(),
config.get_remote_receive_timeout_secs(),
config.get_remote_send_timeout_secs(), config.get_remote_token()) {}
auto remote_client::winfsp_can_delete(PVOID file_desc, PWSTR file_name)
-> packet::error_type {

View File

@ -302,27 +302,27 @@ auto remote_winfsp_drive::Overwrite(PVOID /*file_node*/, PVOID file_desc,
BOOLEAN replace_attributes,
UINT64 allocation_size, FileInfo *file_info)
-> NTSTATUS {
remote::file_info info{};
remote::file_info fi{};
auto ret = remote_instance_->winfsp_overwrite(
file_desc, attributes, replace_attributes, allocation_size, &info);
set_file_info(*file_info, info);
file_desc, attributes, replace_attributes, allocation_size, &fi);
set_file_info(*file_info, fi);
return ret;
}
void remote_winfsp_drive::populate_file_info(const json &item,
FSP_FSCTL_FILE_INFO &file_info) {
auto dir_item = item.get<directory_item>();
file_info.FileSize = dir_item.directory ? 0 : dir_item.size;
auto di = directory_item::from_json(item);
file_info.FileSize = di.directory ? 0 : di.size;
file_info.AllocationSize =
utils::divide_with_ceiling(file_info.FileSize, WINFSP_ALLOCATION_UNIT) *
WINFSP_ALLOCATION_UNIT;
file_info.ChangeTime = utils::get_changed_time_from_meta(dir_item.meta);
file_info.CreationTime = utils::get_creation_time_from_meta(dir_item.meta);
file_info.FileAttributes = utils::get_attributes_from_meta(dir_item.meta);
file_info.ChangeTime = utils::get_changed_time_from_meta(di.meta);
file_info.CreationTime = utils::get_creation_time_from_meta(di.meta);
file_info.FileAttributes = utils::get_attributes_from_meta(di.meta);
file_info.HardLinks = 0;
file_info.IndexNumber = 0;
file_info.LastAccessTime = utils::get_accessed_time_from_meta(dir_item.meta);
file_info.LastWriteTime = utils::get_written_time_from_meta(dir_item.meta);
file_info.LastAccessTime = utils::get_accessed_time_from_meta(di.meta);
file_info.LastWriteTime = utils::get_written_time_from_meta(di.meta);
file_info.ReparseTag = 0;
file_info.EaSize = 0;
}

View File

@ -89,8 +89,8 @@ auto winfsp_drive::handle_error(std::string_view function_name,
return ret;
}
auto winfsp_drive::winfsp_service::OnStart(ULONG /*Argc*/, PWSTR * /*Argv*/)
-> NTSTATUS {
auto winfsp_drive::winfsp_service::OnStart(ULONG /*Argc*/,
PWSTR * /*Argv*/) -> NTSTATUS {
REPERTORY_USES_FUNCTION_NAME();
auto mount_location = utils::string::to_lower(
@ -457,10 +457,9 @@ auto winfsp_drive::get_item_meta(const std::string &api_path,
return ret;
}
auto winfsp_drive::get_security_by_name(PWSTR file_name, PUINT32 attributes,
PSECURITY_DESCRIPTOR descriptor,
std::uint64_t *descriptor_size)
-> NTSTATUS {
auto winfsp_drive::get_security_by_name(
PWSTR file_name, PUINT32 attributes, PSECURITY_DESCRIPTOR descriptor,
std::uint64_t *descriptor_size) -> NTSTATUS {
auto api_path =
utils::path::create_api_path(utils::string::to_utf8(file_name));
@ -641,7 +640,7 @@ auto winfsp_drive::Mounted(PVOID host) -> NTSTATUS {
}
auto mount_location = parse_mount_location(file_system_host->MountPoint());
if (config_.get_remote_mount().enable) {
if (config_.get_enable_remote_mount()) {
remote_server_ = std::make_unique<remote_winfsp::remote_server>(
config_, *this, mount_location);
}
@ -721,8 +720,8 @@ auto winfsp_drive::Open(PWSTR file_name, UINT32 create_options,
auto winfsp_drive::Overwrite(PVOID /*file_node*/, PVOID file_desc,
UINT32 attributes, BOOLEAN replace_attributes,
UINT64 /*allocation_size*/, FileInfo *file_info)
-> NTSTATUS {
UINT64 /*allocation_size*/,
FileInfo *file_info) -> NTSTATUS {
REPERTORY_USES_FUNCTION_NAME();
std::string api_path;
@ -828,8 +827,8 @@ void winfsp_drive::populate_file_info(std::uint64_t file_size,
}
auto winfsp_drive::Read(PVOID /*file_node*/, PVOID file_desc, PVOID buffer,
UINT64 offset, ULONG length, PULONG bytes_transferred)
-> NTSTATUS {
UINT64 offset, ULONG length,
PULONG bytes_transferred) -> NTSTATUS {
REPERTORY_USES_FUNCTION_NAME();
*bytes_transferred = 0U;
@ -884,8 +883,8 @@ auto winfsp_drive::Read(PVOID /*file_node*/, PVOID file_desc, PVOID buffer,
auto winfsp_drive::ReadDirectory(PVOID /*file_node*/, PVOID file_desc,
PWSTR /*pattern*/, PWSTR marker, PVOID buffer,
ULONG buffer_length, PULONG bytes_transferred)
-> NTSTATUS {
ULONG buffer_length,
PULONG bytes_transferred) -> NTSTATUS {
REPERTORY_USES_FUNCTION_NAME();
std::string api_path;
@ -1047,8 +1046,8 @@ auto winfsp_drive::Rename(PVOID /*file_node*/, PVOID /*file_desc*/,
auto winfsp_drive::SetBasicInfo(PVOID /*file_node*/, PVOID file_desc,
UINT32 attributes, UINT64 creation_time,
UINT64 last_access_time, UINT64 last_write_time,
UINT64 change_time, FileInfo *file_info)
-> NTSTATUS {
UINT64 change_time,
FileInfo *file_info) -> NTSTATUS {
REPERTORY_USES_FUNCTION_NAME();
std::string api_path;

View File

@ -24,8 +24,7 @@
#include "utils/string.hpp"
namespace repertory {
auto event_level_from_string(std::string level, event_level default_level)
-> event_level {
auto event_level_from_string(std::string level) -> event_level {
level = utils::string::to_lower(level);
if (level == "critical" || level == "event_level::critical") {
return event_level::critical;
@ -51,7 +50,7 @@ auto event_level_from_string(std::string level, event_level default_level)
return event_level::trace;
}
return default_level;
return event_level::info;
}
auto event_level_to_string(event_level level) -> std::string {

View File

@ -1,128 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/cache_size_mgr.hpp"
#include "app_config.hpp"
#include "events/event.hpp"
#include "events/event_system.hpp"
#include "types/startup_exception.hpp"
#include "utils/file_utils.hpp"
namespace repertory {
// clang-format off
E_SIMPLE2(invalid_cache_size, warn, true,
std::uint64_t, cache_size, sz, E_FROM_UINT64,
std::uint64_t, by, by, E_FROM_UINT64
);
E_SIMPLE2(max_cache_size_reached, warn, true,
std::uint64_t, cache_size, sz, E_FROM_UINT64,
std::uint64_t, max_cache_size, max, E_FROM_UINT64
);
// clang-format on
cache_size_mgr cache_size_mgr::instance_{};
// TODO add timeout
auto cache_size_mgr::expand(std::uint64_t size) -> api_error {
if (size == 0U) {
return api_error::success;
}
unique_mutex_lock lock(mtx_);
if (cfg_ == nullptr) {
return api_error::cache_not_initialized;
}
cache_size_ += size;
auto max_cache_size = cfg_->get_max_cache_size_bytes();
auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
while (not stop_requested_ && cache_size_ > max_cache_size &&
cache_dir.count() > 1U) {
event_system::instance().raise<max_cache_size_reached>(cache_size_,
max_cache_size);
notify_.wait(lock);
}
notify_.notify_all();
return api_error::success;
}
void cache_size_mgr::initialize(app_config *cfg) {
if (cfg == nullptr) {
throw startup_exception("app_config must not be null");
}
mutex_lock lock(mtx_);
cfg_ = cfg;
stop_requested_ = false;
auto cache_dir = utils::file::directory{cfg_->get_cache_directory()};
if (not cache_dir.create_directory()) {
throw startup_exception(fmt::format("failed to create cache directory|{}",
cache_dir.get_path()));
}
cache_size_ = cache_dir.size(false);
notify_.notify_all();
}
auto cache_size_mgr::shrink(std::uint64_t size) -> api_error {
mutex_lock lock(mtx_);
if (size == 0U) {
notify_.notify_all();
return api_error::success;
}
if (cache_size_ >= size) {
cache_size_ -= size;
} else {
event_system::instance().raise<invalid_cache_size>(cache_size_, size);
cache_size_ = 0U;
}
notify_.notify_all();
return api_error::success;
}
auto cache_size_mgr::size() const -> std::uint64_t {
mutex_lock lock(mtx_);
return cache_size_;
}
void cache_size_mgr::stop() {
if (stop_requested_) {
return;
}
stop_requested_ = true;
mutex_lock lock(mtx_);
notify_.notify_all();
}
} // namespace repertory

View File

@ -1,63 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/direct_open_file.hpp"
#include "file_manager/open_file_base.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
namespace repertory {
direct_open_file::direct_open_file(std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi, i_provider &provider)
: ring_buffer_base(chunk_size, chunk_timeout, fsi, provider,
min_ring_size, true) {}
direct_open_file::~direct_open_file() {
REPERTORY_USES_FUNCTION_NAME();
close();
}
auto direct_open_file::on_check_start() -> bool {
return (get_file_size() == 0U || has_reader_thread());
}
auto direct_open_file::on_read_chunk(std::size_t chunk, std::size_t read_size,
std::uint64_t read_offset,
data_buffer &data,
std::size_t &bytes_read) -> api_error {
auto &buffer = ring_data_.at(chunk % get_ring_size());
auto begin =
std::next(buffer.begin(), static_cast<std::int64_t>(read_offset));
auto end = std::next(begin, static_cast<std::int64_t>(read_size));
data.insert(data.end(), begin, end);
bytes_read = read_size;
return api_error::success;
}
auto direct_open_file::use_buffer(std::size_t chunk,
std::function<api_error(data_buffer &)> func)
-> api_error {
return func(ring_data_.at(chunk % get_ring_size()));
}
} // namespace repertory

View File

@ -23,8 +23,6 @@
#include "app_config.hpp"
#include "db/file_mgr_db.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/direct_open_file.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file.hpp"
#include "file_manager/open_file_base.hpp"
@ -39,6 +37,7 @@
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/polling.hpp"
#include "utils/time.hpp"
namespace repertory {
file_manager::file_manager(app_config &config, i_provider &provider)
@ -73,13 +72,13 @@ void file_manager::close(std::uint64_t handle) {
closeable_file->remove(handle);
}
auto file_manager::close_all(const std::string &api_path) -> bool {
void file_manager::close_all(const std::string &api_path) {
REPERTORY_USES_FUNCTION_NAME();
unique_recur_mutex_lock file_lock(open_file_mtx_);
auto file_iter = open_file_lookup_.find(api_path);
if (file_iter == open_file_lookup_.end()) {
return false;
return;
}
auto closeable_file = file_iter->second;
@ -88,8 +87,6 @@ auto file_manager::close_all(const std::string &api_path) -> bool {
closeable_file->remove_all();
closeable_file->close();
return closeable_file->get_allocated();
}
void file_manager::close_timed_out_files() {
@ -104,12 +101,12 @@ void file_manager::close_timed_out_files() {
}
return items;
});
for (const auto &closeable_file : closeable_list) {
for (auto &&closeable_file : closeable_list) {
open_file_lookup_.erase(closeable_file->get_api_path());
}
file_lock.unlock();
for (auto &closeable_file : closeable_list) {
for (auto &&closeable_file : closeable_list) {
closeable_file->close();
event_system::instance().raise<item_timeout>(
closeable_file->get_api_path());
@ -142,7 +139,7 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
return false;
}
unique_recur_mutex_lock open_lock(open_file_mtx_);
recur_mutex_lock open_lock(open_file_mtx_);
if (is_processing(api_path)) {
return false;
}
@ -151,18 +148,8 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
return false;
}
filesystem_item fsi{};
auto res = provider_.get_filesystem_item(api_path, false, fsi);
if (res != api_error::success) {
return false;
}
if (fsi.source_path.empty()) {
return false;
}
std::string pinned;
res = provider_.get_item_meta(api_path, META_PINNED, pinned);
auto res = provider_.get_item_meta(api_path, META_PINNED, pinned);
if (res != api_error::success && res != api_error::item_not_found) {
utils::error::raise_api_path_error(std::string{function_name}, api_path,
res, "failed to get pinned status");
@ -173,22 +160,23 @@ auto file_manager::evict_file(const std::string &api_path) -> bool {
return false;
}
std::shared_ptr<i_closeable_open_file> closeable_file;
if (open_file_lookup_.contains(api_path)) {
closeable_file = open_file_lookup_.at(api_path);
std::string source_path{};
res = provider_.get_item_meta(api_path, META_SOURCE, source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(std::string{function_name}, api_path,
res, "failed to get source path");
return false;
}
if (source_path.empty()) {
return false;
}
open_file_lookup_.erase(api_path);
open_lock.unlock();
auto allocated = closeable_file ? closeable_file->get_allocated() : true;
closeable_file.reset();
auto removed = remove_source_and_shrink_cache(api_path, fsi.source_path,
fsi.size, allocated);
auto removed = utils::file::file{source_path}.remove();
if (removed) {
event_system::instance().raise<filesystem_item_evicted>(api_path,
fsi.source_path);
source_path);
}
return removed;
@ -219,7 +207,7 @@ auto file_manager::get_open_file_by_handle(std::uint64_t handle) const
-> std::shared_ptr<i_closeable_open_file> {
auto file_iter =
std::find_if(open_file_lookup_.begin(), open_file_lookup_.end(),
[&handle](auto &&item) -> bool {
[&handle](const auto &item) -> bool {
return item.second->has_handle(handle);
});
return (file_iter == open_file_lookup_.end()) ? nullptr : file_iter->second;
@ -235,7 +223,7 @@ auto file_manager::get_open_file_count(const std::string &api_path) const
auto file_manager::get_open_file(std::uint64_t handle, bool write_supported,
std::shared_ptr<i_open_file> &file) -> bool {
unique_recur_mutex_lock open_lock(open_file_mtx_);
recur_mutex_lock open_lock(open_file_mtx_);
auto file_ptr = get_open_file_by_handle(handle);
if (not file_ptr) {
return false;
@ -244,8 +232,8 @@ auto file_manager::get_open_file(std::uint64_t handle, bool write_supported,
if (write_supported && not file_ptr->is_write_supported()) {
auto writeable_file = std::make_shared<open_file>(
utils::encryption::encrypting_reader::get_data_chunk_size(),
config_.get_enable_download_timeout()
? config_.get_download_timeout_secs()
config_.get_enable_chunk_download_timeout()
? config_.get_chunk_downloader_timeout_secs()
: 0U,
file_ptr->get_filesystem_item(), file_ptr->get_open_data(), provider_,
*this);
@ -268,7 +256,7 @@ auto file_manager::get_open_files() const
std::unordered_map<std::string, std::size_t> ret;
recur_mutex_lock open_lock(open_file_mtx_);
for (const auto &item : open_file_lookup_) {
for (auto &&item : open_file_lookup_) {
ret[item.first] = item.second->get_open_file_count();
}
@ -358,19 +346,12 @@ auto file_manager::is_processing(const std::string &api_path) const -> bool {
return true;
};
unique_recur_mutex_lock open_lock(open_file_mtx_);
recur_mutex_lock open_lock(open_file_mtx_);
auto file_iter = open_file_lookup_.find(api_path);
if (file_iter == open_file_lookup_.end()) {
return false;
}
auto closeable_file = file_iter->second;
open_lock.unlock();
return closeable_file->is_write_supported()
? closeable_file->is_modified() ||
not closeable_file->is_complete()
: false;
return (file_iter == open_file_lookup_.end())
? false
: file_iter->second->is_modified() ||
not file_iter->second->is_complete();
}
auto file_manager::open(const std::string &api_path, bool directory,
@ -384,8 +365,6 @@ auto file_manager::open(
const std::string &api_path, bool directory, const open_file_data &ofd,
std::uint64_t &handle, std::shared_ptr<i_open_file> &file,
std::shared_ptr<i_closeable_open_file> closeable_file) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto create_and_add_handle =
[&](std::shared_ptr<i_closeable_open_file> cur_file) {
handle = get_next_handle();
@ -415,99 +394,21 @@ auto file_manager::open(
}
if (not closeable_file) {
auto buffer_directory{
utils::path::combine(config_.get_data_directory(), {"buffer"}),
};
auto chunk_size{
closeable_file = std::make_shared<open_file>(
utils::encryption::encrypting_reader::get_data_chunk_size(),
};
auto chunk_timeout = config_.get_enable_download_timeout()
? config_.get_download_timeout_secs()
: 0U;
auto ring_buffer_file_size{
static_cast<std::uint64_t>(config_.get_ring_buffer_file_size()) *
1024UL * 1024UL,
};
auto ring_size{ring_buffer_file_size / chunk_size};
const auto get_download_type = [&](download_type type) -> download_type {
if (directory || fsi.size == 0U || is_processing(api_path)) {
return download_type::default_;
}
if (type == download_type::direct) {
return type;
}
if (type == download_type::default_) {
auto free_space =
utils::file::get_free_drive_space(config_.get_cache_directory());
if (fsi.size < free_space) {
return download_type::default_;
}
}
if (not ring_buffer_open_file::can_handle_file(fsi.size, chunk_size,
ring_size)) {
return download_type::direct;
}
if (not utils::file::directory{buffer_directory}.create_directory()) {
utils::error::raise_error(
function_name, utils::get_last_error_code(),
fmt::format("failed to create buffer directory|sp|{}",
buffer_directory));
return download_type::direct;
}
auto free_space = utils::file::get_free_drive_space(buffer_directory);
if (ring_buffer_file_size < free_space) {
return download_type::ring_buffer;
}
return download_type::direct;
};
auto preferred_type = config_.get_preferred_download_type();
auto type = get_download_type(directory ? download_type::default_
: preferred_type == download_type::default_
? download_type::ring_buffer
: preferred_type);
if (not directory) {
event_system::instance().raise<download_type_selected>(
fsi.api_path, fsi.source_path, type);
}
switch (type) {
case repertory::download_type::direct: {
closeable_file = std::make_shared<direct_open_file>(
chunk_size, chunk_timeout, fsi, provider_);
} break;
case repertory::download_type::ring_buffer: {
closeable_file = std::make_shared<ring_buffer_open_file>(
buffer_directory, chunk_size, chunk_timeout, fsi, provider_,
ring_size);
} break;
default: {
closeable_file = std::make_shared<open_file>(chunk_size, chunk_timeout,
fsi, provider_, *this);
} break;
}
config_.get_enable_chunk_download_timeout()
? config_.get_chunk_downloader_timeout_secs()
: 0U,
fsi, provider_, *this);
}
open_file_lookup_[api_path] = closeable_file;
create_and_add_handle(closeable_file);
return api_error::success;
}
void file_manager::queue_upload(const i_open_file &file) {
queue_upload(file.get_api_path(), file.get_source_path(), false);
return queue_upload(file.get_api_path(), file.get_source_path(), false);
}
void file_manager::queue_upload(const std::string &api_path,
@ -516,18 +417,19 @@ void file_manager::queue_upload(const std::string &api_path,
return;
}
std::unique_ptr<mutex_lock> upload_lock;
std::unique_ptr<mutex_lock> lock;
if (not no_lock) {
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
lock = std::make_unique<mutex_lock>(upload_mtx_);
}
remove_upload(api_path, true);
if (mgr_db_->add_upload(i_file_mgr_db::upload_entry{
api_path,
utils::time::get_time_now(),
source_path,
})) {
remove_resume(api_path, source_path, true);
remove_resume(api_path, source_path);
event_system::instance().raise<file_upload_queued>(api_path, source_path);
} else {
event_system::instance().raise<file_upload_failed>(
@ -542,90 +444,38 @@ void file_manager::queue_upload(const std::string &api_path,
auto file_manager::remove_file(const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
recur_mutex_lock open_lock(open_file_mtx_);
filesystem_item fsi{};
auto res = provider_.get_filesystem_item(api_path, false, fsi);
if (res != api_error::success) {
return res;
}
auto allocated = close_all(api_path);
unique_mutex_lock upload_lock(upload_mtx_);
remove_upload(api_path, true);
remove_resume(api_path, fsi.source_path, true);
upload_notify_.notify_all();
upload_lock.unlock();
recur_mutex_lock open_lock(open_file_mtx_);
close_all(api_path);
res = provider_.remove_file(api_path);
if (res != api_error::success) {
return res;
}
remove_source_and_shrink_cache(api_path, fsi.source_path, fsi.size,
allocated);
if (not utils::file::file{fsi.source_path}.remove()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
utils::get_last_error_code(), "failed to delete source");
}
return api_error::success;
}
void file_manager::remove_resume(const std::string &api_path,
const std::string &source_path) {
remove_resume(api_path, source_path, false);
}
void file_manager::remove_resume(const std::string &api_path,
const std::string &source_path, bool no_lock) {
if (provider_.is_read_only()) {
if (not mgr_db_->remove_resume(api_path)) {
return;
}
std::unique_ptr<mutex_lock> upload_lock;
if (not no_lock) {
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
}
if (mgr_db_->remove_resume(api_path)) {
event_system::instance().raise<download_resume_removed>(api_path,
source_path);
}
if (not no_lock) {
upload_notify_.notify_all();
}
}
auto file_manager::remove_source_and_shrink_cache(
const std::string &api_path, const std::string &source_path,
std::uint64_t file_size, bool allocated) -> bool {
REPERTORY_USES_FUNCTION_NAME();
auto file = utils::file::file{source_path};
auto source_size = file.exists() ? file.size().value_or(0U) : 0U;
if (not file.remove()) {
utils::error::raise_api_path_error(function_name, api_path, source_path,
utils::get_last_error_code(),
"failed to delete source");
return false;
}
if (not allocated || source_size == 0U) {
auto res = cache_size_mgr::instance().shrink(0U);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, source_path,
res, "failed to shrink cache");
}
return true;
}
auto res = cache_size_mgr::instance().shrink(file_size);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, source_path,
res, "failed to shrink cache");
}
return true;
event_system::instance().raise<download_resume_removed>(api_path,
source_path);
}
void file_manager::remove_upload(const std::string &api_path) {
@ -639,9 +489,9 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock) {
return;
}
std::unique_ptr<mutex_lock> upload_lock;
std::unique_ptr<mutex_lock> lock;
if (not no_lock) {
upload_lock = std::make_unique<mutex_lock>(upload_mtx_);
lock = std::make_unique<mutex_lock>(upload_mtx_);
}
if (not mgr_db_->remove_upload(api_path)) {
@ -651,9 +501,9 @@ void file_manager::remove_upload(const std::string &api_path, bool no_lock) {
auto removed = mgr_db_->remove_upload_active(api_path);
if (not removed) {
utils::error::raise_api_path_error(function_name, api_path,
api_error::error,
"failed to remove active upload");
utils::error::raise_api_path_error(
function_name, api_path, api_error::error,
"failed to remove from upload_active table");
}
if (upload_lookup_.find(api_path) != upload_lookup_.end()) {
@ -880,12 +730,12 @@ void file_manager::start() {
continue;
}
auto closeable_file =
std::make_shared<open_file>(entry.chunk_size,
config_.get_enable_download_timeout()
? config_.get_download_timeout_secs()
: 0U,
fsi, provider_, entry.read_state, *this);
auto closeable_file = std::make_shared<open_file>(
entry.chunk_size,
config_.get_enable_chunk_download_timeout()
? config_.get_chunk_downloader_timeout_secs()
: 0U,
fsi, provider_, entry.read_state, *this);
open_file_lookup_[entry.api_path] = closeable_file;
event_system::instance().raise<download_restored>(fsi.api_path,
fsi.source_path);
@ -907,6 +757,7 @@ void file_manager::stop() {
stop_requested_ = true;
polling::instance().remove_callback("db_cleanup");
polling::instance().remove_callback("timed_out_close");
unique_mutex_lock upload_lock(upload_mtx_);
@ -920,7 +771,7 @@ void file_manager::stop() {
open_file_lookup_.clear();
upload_lock.lock();
for (auto &item : upload_lookup_) {
for (auto &&item : upload_lookup_) {
item.second->stop();
}
upload_notify_.notify_all();
@ -966,10 +817,10 @@ void file_manager::swap_renamed_items(std::string from_api_path,
auto file_iter = open_file_lookup_.find(from_api_path);
if (file_iter != open_file_lookup_.end()) {
auto closeable_file = std::move(open_file_lookup_[from_api_path]);
auto ptr = std::move(open_file_lookup_[from_api_path]);
open_file_lookup_.erase(from_api_path);
closeable_file->set_api_path(to_api_path);
open_file_lookup_[to_api_path] = std::move(closeable_file);
ptr->set_api_path(to_api_path);
open_file_lookup_[to_api_path] = std::move(ptr);
}
if (directory) {

View File

@ -21,17 +21,18 @@
*/
#include "file_manager/open_file.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/events.hpp"
#include "file_manager/file_manager.hpp"
#include "file_manager/i_upload_manager.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/time.hpp"
#include "utils/utils.hpp"
namespace repertory {
open_file::open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
@ -60,246 +61,75 @@ open_file::open_file(std::uint64_t chunk_size, std::uint8_t chunk_timeout,
i_provider &provider,
std::optional<boost::dynamic_bitset<>> read_state,
i_upload_manager &mgr)
: open_file_base(chunk_size, chunk_timeout, fsi, open_data, provider,
false),
: open_file_base(chunk_size, chunk_timeout, fsi, open_data, provider),
mgr_(mgr) {
REPERTORY_USES_FUNCTION_NAME();
if (fsi_.directory && read_state.has_value()) {
throw startup_exception("cannot resume a directory|" + fsi.api_path);
}
if (fsi.directory) {
if (read_state.has_value()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
fmt::format("cannot resume a directory|sp|", fsi.api_path));
if (not fsi.directory) {
nf_ = utils::file::file::open_or_create_file(fsi.source_path,
provider_.is_read_only());
set_api_error(*nf_ ? api_error::success : api_error::os_error);
if (get_api_error() == api_error::success) {
if (read_state.has_value()) {
read_state_ = read_state.value();
set_modified();
} else if (fsi_.size > 0U) {
read_state_.resize(static_cast<std::size_t>(utils::divide_with_ceiling(
fsi_.size, chunk_size)),
false);
auto file_size = nf_->size();
if (provider_.is_read_only() || file_size == fsi.size) {
read_state_.set(0U, read_state_.size(), true);
} else if (not nf_->truncate(fsi.size)) {
set_api_error(api_error::os_error);
}
}
if (get_api_error() != api_error::success && *nf_) {
nf_->close();
}
}
return;
}
nf_ = utils::file::file::open_or_create_file(fsi.source_path,
get_provider().is_read_only());
set_api_error(*nf_ ? api_error::success : api_error::os_error);
if (get_api_error() != api_error::success) {
return;
}
if (read_state.has_value()) {
read_state_ = read_state.value();
set_modified();
allocated = true;
return;
}
if (fsi.size == 0U) {
return;
}
read_state_.resize(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size)),
false);
auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, fsi.api_path, fsi.source_path,
utils::get_last_error_code(), "failed to get file size");
set_api_error(api_error::os_error);
return;
}
if (get_provider().is_read_only() || file_size.value() == fsi.size) {
read_state_.set(0U, read_state_.size(), true);
allocated = true;
}
if (get_api_error() != api_error::success && *nf_) {
nf_->close();
}
}
open_file::~open_file() { close(); }
auto open_file::adjust_cache_size(std::uint64_t file_size,
bool shrink) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (file_size == get_file_size()) {
return api_error::success;
}
if (file_size > get_file_size()) {
auto size = file_size - get_file_size();
auto res = shrink ? cache_size_mgr::instance().shrink(size)
: cache_size_mgr::instance().expand(size);
if (res == api_error::success) {
return res;
}
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(), res,
fmt::format("failed to {} cache|size|{}",
(shrink ? "shrink" : "expand"), size));
return set_api_error(res);
}
auto size = get_file_size() - file_size;
auto res = shrink ? cache_size_mgr::instance().expand(size)
: cache_size_mgr::instance().shrink(size);
if (res == api_error::success) {
return res;
}
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(), res,
fmt::format("failed to {} cache|size|{}", (shrink ? "expand" : "shrink"),
size));
return set_api_error(res);
}
auto open_file::check_start() -> api_error {
REPERTORY_USES_FUNCTION_NAME();
unique_recur_mutex_lock file_lock(get_mutex());
if (allocated) {
return api_error::success;
}
auto file_size = nf_->size();
if (not file_size.has_value()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(),
utils::get_last_error_code(), "failed to get file size");
return set_api_error(api_error::os_error);
}
if (file_size.value() == get_file_size()) {
allocated = true;
return api_error::success;
}
file_lock.unlock();
auto res = adjust_cache_size(file_size.value(), true);
if (res != api_error::success) {
return res;
}
file_lock.lock();
if (not nf_->truncate(get_file_size())) {
utils::error::raise_api_path_error(
function_name, get_api_path(), get_source_path(),
utils::get_last_error_code(),
fmt::format("failed to truncate file|size|{}", get_file_size()));
return set_api_error(res);
}
allocated = true;
return api_error::success;
}
auto open_file::close() -> bool {
REPERTORY_USES_FUNCTION_NAME();
if (is_directory() || stop_requested_) {
return false;
}
stop_requested_ = true;
notify_io();
if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}
if (not open_file_base::close()) {
return false;
}
auto read_state = get_read_state();
auto err = get_api_error();
if (err == api_error::success || err == api_error::download_incomplete ||
err == api_error::download_stopped) {
if (is_modified() && not read_state.all()) {
set_api_error(api_error::download_incomplete);
} else if (not is_modified() && (get_file_size() > 0U) &&
not read_state.all()) {
set_api_error(api_error::download_stopped);
}
err = get_api_error();
}
nf_->close();
if (is_modified()) {
if (err == api_error::success) {
mgr_.queue_upload(*this);
return true;
}
if (err == api_error::download_incomplete) {
mgr_.store_resume(*this);
return true;
}
}
if (err != api_error::success || read_state.all()) {
mgr_.remove_resume(get_api_path(), get_source_path());
}
if (err == api_error::success) {
return true;
}
file_manager::remove_source_and_shrink_cache(
get_api_path(), get_source_path(), get_file_size(), allocated);
auto parent = utils::path::get_parent_path(get_source_path());
set_source_path(utils::path::combine(parent, {utils::create_uuid_string()}));
auto res = get_provider().set_item_meta(get_api_path(), META_SOURCE,
get_source_path());
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
get_source_path(), res,
"failed to set new source path");
}
return true;
}
void open_file::download_chunk(std::size_t chunk, bool skip_active,
bool should_reset) {
if (should_reset) {
reset_timeout();
}
unique_recur_mutex_lock rw_lock(rw_mtx_);
auto read_state = get_read_state();
if ((get_api_error() == api_error::success) && (chunk < read_state.size()) &&
not read_state[chunk]) {
if (get_active_downloads().find(chunk) != get_active_downloads().end()) {
if (skip_active) {
return;
unique_recur_mutex_lock download_lock(file_mtx_);
if ((get_api_error() == api_error::success) && (chunk < read_state_.size()) &&
not read_state_[chunk]) {
if (active_downloads_.find(chunk) != active_downloads_.end()) {
if (not skip_active) {
auto active_download = active_downloads_.at(chunk);
download_lock.unlock();
active_download->wait();
}
auto active_download = get_active_downloads().at(chunk);
rw_lock.unlock();
active_download->wait();
return;
}
auto data_offset = chunk * get_chunk_size();
auto data_size = (chunk == read_state.size() - 1U) ? get_last_chunk_size()
: get_chunk_size();
if (get_active_downloads().empty() && (read_state.count() == 0U)) {
event_system::instance().raise<download_begin>(get_api_path(),
get_source_path());
auto data_offset = chunk * chunk_size_;
auto data_size =
(chunk == read_state_.size() - 1U) ? last_chunk_size_ : chunk_size_;
if (active_downloads_.empty() && (read_state_.count() == 0U)) {
event_system::instance().raise<download_begin>(fsi_.api_path,
fsi_.source_path);
}
event_system::instance().raise<download_chunk_begin>(
fsi_.api_path, fsi_.source_path, chunk, read_state_.size(),
read_state_.count());
get_active_downloads()[chunk] = std::make_shared<download>();
rw_lock.unlock();
active_downloads_[chunk] = std::make_shared<download>();
download_lock.unlock();
if (should_reset) {
reset_timeout();
@ -308,28 +138,28 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
std::async(std::launch::async, [this, chunk, data_size, data_offset,
should_reset]() {
const auto notify_complete = [this, chunk, should_reset]() {
auto state = get_read_state();
unique_recur_mutex_lock lock(rw_mtx_);
auto active_download = get_active_downloads().at(chunk);
get_active_downloads().erase(chunk);
unique_recur_mutex_lock file_lock(file_mtx_);
auto active_download = active_downloads_.at(chunk);
active_downloads_.erase(chunk);
event_system::instance().raise<download_chunk_end>(
fsi_.api_path, fsi_.source_path, chunk, read_state_.size(),
read_state_.count(), get_api_error());
if (get_api_error() == api_error::success) {
auto progress = (static_cast<double>(state.count()) /
static_cast<double>(state.size())) *
100.0;
auto progress = (static_cast<double>(read_state_.count()) /
static_cast<double>(read_state_.size()) * 100.0);
event_system::instance().raise<download_progress>(
get_api_path(), get_source_path(), progress);
if (state.all() && not notified_) {
fsi_.api_path, fsi_.source_path, progress);
if (read_state_.all() && not notified_) {
notified_ = true;
event_system::instance().raise<download_end>(
get_api_path(), get_source_path(), get_api_error());
fsi_.api_path, fsi_.source_path, get_api_error());
}
} else if (not notified_) {
notified_ = true;
event_system::instance().raise<download_end>(
get_api_path(), get_source_path(), get_api_error());
fsi_.api_path, fsi_.source_path, get_api_error());
}
lock.unlock();
file_lock.unlock();
active_download->notify(get_api_error());
@ -338,9 +168,9 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
}
};
data_buffer buffer;
auto res = get_provider().read_file_bytes(
get_api_path(), data_size, data_offset, buffer, stop_requested_);
data_buffer data;
auto res = provider_.read_file_bytes(get_api_path(), data_size,
data_offset, data, stop_requested_);
if (res != api_error::success) {
set_api_error(res);
notify_complete();
@ -353,7 +183,7 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
res = do_io([&]() -> api_error {
std::size_t bytes_written{};
if (not nf_->write(buffer, data_offset, &bytes_written)) {
if (not nf_->write(data, data_offset, &bytes_written)) {
return api_error::os_error;
}
@ -368,50 +198,48 @@ void open_file::download_chunk(std::size_t chunk, bool skip_active,
return;
}
set_read_state(chunk);
unique_recur_mutex_lock file_lock(file_mtx_);
read_state_.set(chunk);
file_lock.unlock();
notify_complete();
}).wait();
}
}
void open_file::download_range(std::size_t begin_chunk, std::size_t end_chunk,
void open_file::download_range(std::size_t start_chunk, std::size_t end_chunk,
bool should_reset) {
for (std::size_t chunk = begin_chunk;
(get_api_error() == api_error::success) && (chunk <= end_chunk);
++chunk) {
for (std::size_t chunk = start_chunk; chunk <= end_chunk; ++chunk) {
download_chunk(chunk, false, should_reset);
if (get_api_error() != api_error::success) {
return;
}
}
}
auto open_file::get_allocated() const -> bool {
recur_mutex_lock file_lock(get_mutex());
return allocated;
}
auto open_file::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(get_mutex());
recur_mutex_lock file_lock(file_mtx_);
return read_state_;
}
auto open_file::get_read_state(std::size_t chunk) const -> bool {
return get_read_state()[chunk];
recur_mutex_lock file_lock(file_mtx_);
return read_state_[chunk];
}
auto open_file::is_complete() const -> bool { return get_read_state().all(); }
auto open_file::is_complete() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return read_state_.all();
}
auto open_file::native_operation(
i_open_file::native_operation_callback callback) -> api_error {
unique_recur_mutex_lock file_lock(file_mtx_);
if (stop_requested_) {
return set_api_error(api_error::download_stopped);
return api_error::download_stopped;
}
file_lock.unlock();
auto res = check_start();
if (res != api_error::success) {
return res;
}
unique_recur_mutex_lock rw_lock(rw_mtx_);
return do_io([&]() -> api_error { return callback(nf_->get_handle()); });
}
@ -420,48 +248,38 @@ auto open_file::native_operation(
i_open_file::native_operation_callback callback) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
if (fsi_.directory) {
return api_error::invalid_operation;
}
unique_recur_mutex_lock file_lock(file_mtx_);
if (stop_requested_) {
return set_api_error(api_error::download_stopped);
}
auto res = check_start();
if (res != api_error::success) {
return res;
}
res = adjust_cache_size(new_file_size, false);
if (res != api_error::success) {
return res;
return api_error::download_stopped;
}
file_lock.unlock();
auto is_empty_file = new_file_size == 0U;
auto last_chunk = is_empty_file
? std::size_t(0U)
: static_cast<std::size_t>(utils::divide_with_ceiling(
new_file_size, get_chunk_size())) -
new_file_size, chunk_size_)) -
1U;
unique_recur_mutex_lock rw_lock(rw_mtx_);
auto read_state = get_read_state();
if (not is_empty_file && (last_chunk < read_state.size())) {
rw_lock.unlock();
update_reader(0U);
file_lock.lock();
if (not is_empty_file && (last_chunk < read_state_.size())) {
file_lock.unlock();
update_background_reader(0U);
download_chunk(last_chunk, false, true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
rw_lock.lock();
file_lock.lock();
}
read_state = get_read_state();
auto original_file_size = get_file_size();
res = do_io([&]() -> api_error { return callback(nf_->get_handle()); });
auto res = do_io([&]() -> api_error { return callback(nf_->get_handle()); });
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
utils::get_last_error_code(),
@ -470,73 +288,59 @@ auto open_file::native_operation(
}
{
auto file_size = nf_->size();
if (not file_size.has_value()) {
auto file_size = nf_->size().value_or(0U);
if (file_size != new_file_size) {
utils::error::raise_api_path_error(
function_name, get_api_path(), api_error::file_size_mismatch,
fmt::format("failed to get file size|error|{}",
utils::get_last_error_code()));
return set_api_error(api_error::error);
}
if (file_size.value() != new_file_size) {
utils::error::raise_api_path_error(
function_name, get_api_path(), api_error::file_size_mismatch,
fmt::format("file size mismatch|expected|{}|actual|{}", new_file_size,
file_size.value()));
"allocated file size mismatch|expected|" +
std::to_string(new_file_size) + "|actual|" +
std::to_string(file_size));
return set_api_error(api_error::error);
}
}
if (is_empty_file || (read_state.size() != (last_chunk + 1U))) {
auto old_size = read_state.size();
read_state.resize(is_empty_file ? 0U : last_chunk + 1U);
if (is_empty_file || (read_state_.size() != (last_chunk + 1U))) {
auto old_size = read_state_.size();
read_state_.resize(is_empty_file ? 0U : last_chunk + 1U);
if (not is_empty_file) {
for (std::size_t chunk = old_size; chunk <= last_chunk; ++chunk) {
read_state.set(chunk);
read_state_.set(chunk);
}
}
set_read_state(read_state);
set_last_chunk_size(static_cast<std::size_t>(
new_file_size <= get_chunk_size() ? new_file_size
: (new_file_size % get_chunk_size()) == 0U
? get_chunk_size()
: new_file_size % get_chunk_size()));
last_chunk_size_ = static_cast<std::size_t>(
new_file_size <= chunk_size_ ? new_file_size
: (new_file_size % chunk_size_) == 0U ? chunk_size_
: new_file_size % chunk_size_);
}
if (original_file_size == new_file_size) {
return res;
}
set_modified();
if (original_file_size != new_file_size) {
set_modified();
set_file_size(new_file_size);
auto now = std::to_string(utils::time::get_time_now());
res = get_provider().set_item_meta(
get_api_path(), {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_SIZE, std::to_string(new_file_size)},
{META_WRITTEN, now},
});
if (res == api_error::success) {
return res;
fsi_.size = new_file_size;
auto now = std::to_string(utils::time::get_time_now());
res = provider_.set_item_meta(
fsi_.api_path, {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_SIZE, std::to_string(new_file_size)},
{META_WRITTEN, now},
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
return set_api_error(res);
}
}
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");
return set_api_error(res);
return res;
}
auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
data_buffer &data) -> api_error {
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}
if (stop_requested_) {
return set_api_error(api_error::download_stopped);
if (fsi_.directory) {
return api_error::invalid_operation;
}
read_size =
@ -545,17 +349,12 @@ auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
return api_error::success;
}
auto res = check_start();
if (res != api_error::success) {
return res;
}
const auto read_from_source = [this, &data, &read_offset,
&read_size]() -> api_error {
return do_io([this, &data, &read_offset, &read_size]() -> api_error {
if (get_provider().is_read_only()) {
return get_provider().read_file_bytes(
get_api_path(), read_size, read_offset, data, stop_requested_);
if (provider_.is_read_only()) {
return provider_.read_file_bytes(fsi_.api_path, read_size, read_offset,
data, stop_requested_);
}
data.resize(read_size);
@ -566,48 +365,49 @@ auto open_file::read(std::size_t read_size, std::uint64_t read_offset,
});
};
if (get_read_state().all()) {
unique_recur_mutex_lock file_lock(file_mtx_);
if (read_state_.all()) {
reset_timeout();
return read_from_source();
}
file_lock.unlock();
auto begin_chunk = static_cast<std::size_t>(read_offset / get_chunk_size());
auto start_chunk = static_cast<std::size_t>(read_offset / chunk_size_);
auto end_chunk =
static_cast<std::size_t>((read_size + read_offset) / get_chunk_size());
static_cast<std::size_t>((read_size + read_offset) / chunk_size_);
update_reader(begin_chunk);
update_background_reader(start_chunk);
download_range(begin_chunk, end_chunk, true);
download_range(start_chunk, end_chunk, true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
unique_recur_mutex_lock rw_lock(rw_mtx_);
file_lock.lock();
return get_api_error() == api_error::success ? read_from_source()
: get_api_error();
}
void open_file::remove(std::uint64_t handle) {
recur_mutex_lock file_lock(file_mtx_);
open_file_base::remove(handle);
recur_mutex_lock rw_lock(rw_mtx_);
if (is_modified() && get_read_state().all() &&
if (modified_ && read_state_.all() &&
(get_api_error() == api_error::success)) {
mgr_.queue_upload(*this);
open_file_base::set_modified(false);
modified_ = false;
}
if (is_removed() && (get_open_file_count() == 0U)) {
open_file_base::set_removed(false);
if (removed_ && (get_open_file_count() == 0U)) {
removed_ = false;
}
}
void open_file::remove_all() {
recur_mutex_lock file_lock(file_mtx_);
open_file_base::remove_all();
recur_mutex_lock rw_lock(rw_mtx_);
open_file_base::set_modified(false);
open_file_base::set_removed(true);
modified_ = false;
removed_ = true;
mgr_.remove_upload(get_api_path());
@ -615,12 +415,8 @@ void open_file::remove_all() {
}
auto open_file::resize(std::uint64_t new_file_size) -> api_error {
if (is_directory()) {
return set_api_error(api_error::invalid_operation);
}
if (new_file_size == get_file_size()) {
return api_error::success;
if (fsi_.directory) {
return api_error::invalid_operation;
}
return native_operation(
@ -630,62 +426,123 @@ auto open_file::resize(std::uint64_t new_file_size) -> api_error {
});
}
auto open_file::close() -> bool {
REPERTORY_USES_FUNCTION_NAME();
if (fsi_.directory || stop_requested_) {
return false;
}
stop_requested_ = true;
unique_mutex_lock reader_lock(io_thread_mtx_);
io_thread_notify_.notify_all();
reader_lock.unlock();
if (reader_thread_) {
reader_thread_->join();
reader_thread_.reset();
}
if (not open_file_base::close()) {
return false;
}
auto err = get_api_error();
if (err == api_error::success || err == api_error::download_incomplete ||
err == api_error::download_stopped) {
if (modified_ && not read_state_.all()) {
set_api_error(api_error::download_incomplete);
} else if (not modified_ && (fsi_.size > 0U) && not read_state_.all()) {
set_api_error(api_error::download_stopped);
}
err = get_api_error();
}
nf_->close();
if (modified_) {
if (err == api_error::success) {
mgr_.queue_upload(*this);
return true;
}
if (err == api_error::download_incomplete) {
mgr_.store_resume(*this);
return true;
}
}
if (err == api_error::success) {
return true;
}
mgr_.remove_resume(get_api_path(), get_source_path());
if (not utils::file::file(fsi_.source_path).remove()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), fsi_.source_path,
utils::get_last_error_code(), "failed to delete file");
}
auto parent = utils::path::get_parent_path(fsi_.source_path);
fsi_.source_path =
utils::path::combine(parent, {utils::create_uuid_string()});
auto res =
provider_.set_item_meta(fsi_.api_path, META_SOURCE, fsi_.source_path);
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(),
fsi_.source_path, res,
"failed to set file meta");
}
return true;
}
void open_file::set_modified() {
if (not is_modified()) {
open_file_base::set_modified(true);
if (not modified_) {
modified_ = true;
mgr_.store_resume(*this);
}
if (not is_removed()) {
open_file_base::set_removed(true);
if (not removed_) {
removed_ = true;
mgr_.remove_upload(get_api_path());
}
}
void open_file::set_read_state(std::size_t chunk) {
recur_mutex_lock file_lock(get_mutex());
read_state_.set(chunk);
}
void open_file::update_background_reader(std::size_t read_chunk) {
recur_mutex_lock reader_lock(file_mtx_);
read_chunk_ = read_chunk;
void open_file::set_read_state(boost::dynamic_bitset<> read_state) {
recur_mutex_lock file_lock(get_mutex());
read_state_ = std::move(read_state);
}
if (not reader_thread_ && not stop_requested_) {
reader_thread_ = std::make_unique<std::thread>([this]() {
std::size_t next_chunk{};
while (not stop_requested_) {
unique_recur_mutex_lock file_lock(file_mtx_);
if ((fsi_.size == 0U) || read_state_.all()) {
file_lock.unlock();
void open_file::update_reader(std::size_t chunk) {
recur_mutex_lock rw_lock(rw_mtx_);
read_chunk_ = chunk;
unique_mutex_lock io_lock(io_thread_mtx_);
if (not stop_requested_ && io_thread_queue_.empty()) {
io_thread_notify_.wait(io_lock);
}
io_thread_notify_.notify_all();
io_lock.unlock();
} else {
do {
next_chunk = read_chunk_ =
((read_chunk_ + 1U) >= read_state_.size()) ? 0U
: read_chunk_ + 1U;
} while ((next_chunk != 0U) && (active_downloads_.find(next_chunk) !=
active_downloads_.end()));
if (reader_thread_ || stop_requested_) {
return;
file_lock.unlock();
download_chunk(next_chunk, true, false);
}
}
});
}
reader_thread_ = std::make_unique<std::thread>([this]() {
unique_recur_mutex_lock lock(rw_mtx_);
auto next_chunk{read_chunk_};
auto read_chunk{read_chunk_};
lock.unlock();
while (not stop_requested_) {
lock.lock();
auto read_state = get_read_state();
if ((get_file_size() == 0U) || read_state.all()) {
lock.unlock();
wait_for_io(stop_requested_);
continue;
}
if (read_chunk != read_chunk_) {
next_chunk = read_chunk = read_chunk_;
}
next_chunk = next_chunk + 1U >= read_state.size() ? 0U : next_chunk + 1U;
lock.unlock();
download_chunk(next_chunk, true, false);
}
});
}
auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
@ -694,44 +551,41 @@ auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
bytes_written = 0U;
if (is_directory() || get_provider().is_read_only()) {
return set_api_error(api_error::invalid_operation);
if (fsi_.directory || provider_.is_read_only()) {
return api_error::invalid_operation;
}
if (data.empty()) {
return api_error::success;
}
unique_recur_mutex_lock write_lock(file_mtx_);
if (stop_requested_) {
return set_api_error(api_error::download_stopped);
return api_error::download_stopped;
}
write_lock.unlock();
auto res = check_start();
if (res != api_error::success) {
return res;
}
auto begin_chunk = static_cast<std::size_t>(write_offset / get_chunk_size());
auto start_chunk = static_cast<std::size_t>(write_offset / chunk_size_);
auto end_chunk =
static_cast<std::size_t>((write_offset + data.size()) / get_chunk_size());
static_cast<std::size_t>((write_offset + data.size()) / chunk_size_);
update_reader(begin_chunk);
update_background_reader(start_chunk);
download_range(begin_chunk, std::min(get_read_state().size() - 1U, end_chunk),
download_range(start_chunk, std::min(read_state_.size() - 1U, end_chunk),
true);
if (get_api_error() != api_error::success) {
return get_api_error();
}
unique_recur_mutex_lock rw_lock(rw_mtx_);
if ((write_offset + data.size()) > get_file_size()) {
res = resize(write_offset + data.size());
write_lock.lock();
if ((write_offset + data.size()) > fsi_.size) {
auto res = resize(write_offset + data.size());
if (res != api_error::success) {
return res;
}
}
res = do_io([&]() -> api_error {
auto res = do_io([&]() -> api_error {
if (not nf_->write(data, write_offset, &bytes_written)) {
return api_error::os_error;
}
@ -744,11 +598,11 @@ auto open_file::write(std::uint64_t write_offset, const data_buffer &data,
}
auto now = std::to_string(utils::time::get_time_now());
res = get_provider().set_item_meta(get_api_path(), {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_WRITTEN, now},
});
res = provider_.set_item_meta(fsi_.api_path, {
{META_CHANGED, now},
{META_MODIFIED, now},
{META_WRITTEN, now},
});
if (res != api_error::success) {
utils::error::raise_api_path_error(function_name, get_api_path(), res,
"failed to set file meta");

View File

@ -35,15 +35,13 @@ void open_file_base::download::notify(const api_error &err) {
}
auto open_file_base::download::wait() -> api_error {
if (complete_) {
return error_;
}
unique_mutex_lock lock(mtx_);
if (not complete_) {
notify_.wait(lock);
unique_mutex_lock lock(mtx_);
if (not complete_) {
notify_.wait(lock);
}
notify_.notify_all();
}
notify_.notify_all();
return error_;
}
@ -67,14 +65,12 @@ auto open_file_base::io_item::get_result() -> api_error {
open_file_base::open_file_base(std::uint64_t chunk_size,
std::uint8_t chunk_timeout, filesystem_item fsi,
i_provider &provider, bool disable_io)
: open_file_base(chunk_size, chunk_timeout, fsi, {}, provider, disable_io) {
}
i_provider &provider)
: open_file_base(chunk_size, chunk_timeout, fsi, {}, provider) {}
open_file_base::open_file_base(
std::uint64_t chunk_size, std::uint8_t chunk_timeout, filesystem_item fsi,
std::map<std::uint64_t, open_file_data> open_data, i_provider &provider,
bool disable_io)
std::map<std::uint64_t, open_file_data> open_data, i_provider &provider)
: chunk_size_(chunk_size),
chunk_timeout_(chunk_timeout),
fsi_(std::move(fsi)),
@ -84,7 +80,7 @@ open_file_base::open_file_base(
: fsi.size % chunk_size)),
open_data_(std::move(open_data)),
provider_(provider) {
if (not fsi.directory && not disable_io) {
if (not fsi.directory) {
io_thread_ = std::make_unique<std::thread>([this] { file_io_thread(); });
}
}
@ -119,7 +115,7 @@ auto open_file_base::can_close() const -> bool {
return true;
}
if (is_complete()) {
if (is_download_complete()) {
return true;
}
@ -127,30 +123,12 @@ auto open_file_base::can_close() const -> bool {
return true;
}
std::chrono::system_clock::time_point last_access{last_access_};
auto duration = std::chrono::duration_cast<std::chrono::seconds>(
const std::chrono::system_clock::time_point last_access = last_access_;
const auto duration = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - last_access);
return (duration.count() >= chunk_timeout_);
}
auto open_file_base::close() -> bool {
unique_mutex_lock io_lock(io_thread_mtx_);
if (io_stop_requested_ || not io_thread_) {
io_thread_notify_.notify_all();
io_lock.unlock();
return false;
}
io_stop_requested_ = true;
io_thread_notify_.notify_all();
io_lock.unlock();
io_thread_->join();
io_thread_.reset();
return true;
}
auto open_file_base::do_io(std::function<api_error()> action) -> api_error {
unique_mutex_lock io_lock(io_thread_mtx_);
auto item = std::make_shared<io_item>(action);
@ -209,36 +187,6 @@ auto open_file_base::get_file_size() const -> std::uint64_t {
return fsi_.size;
}
[[nodiscard]] auto open_file_base::get_last_chunk_size() const -> std::size_t {
recur_mutex_lock file_lock(file_mtx_);
return last_chunk_size_;
}
void open_file_base::set_file_size(std::uint64_t size) {
recur_mutex_lock file_lock(file_mtx_);
fsi_.size = size;
}
void open_file_base::set_last_chunk_size(std::size_t size) {
recur_mutex_lock file_lock(file_mtx_);
last_chunk_size_ = size;
}
void open_file_base::set_modified(bool modified) {
recur_mutex_lock file_lock(file_mtx_);
modified_ = modified;
}
void open_file_base::set_removed(bool removed) {
recur_mutex_lock file_lock(file_mtx_);
removed_ = removed;
}
void open_file_base::set_source_path(std::string source_path) {
recur_mutex_lock file_lock(file_mtx_);
fsi_.source_path = std::move(source_path);
}
auto open_file_base::get_filesystem_item() const -> filesystem_item {
recur_mutex_lock file_lock(file_mtx_);
return fsi_;
@ -246,9 +194,8 @@ auto open_file_base::get_filesystem_item() const -> filesystem_item {
auto open_file_base::get_handles() const -> std::vector<std::uint64_t> {
recur_mutex_lock file_lock(file_mtx_);
std::vector<std::uint64_t> ret;
for (const auto &item : open_data_) {
for (auto &&item : open_data_) {
ret.emplace_back(item.first);
}
@ -283,31 +230,11 @@ auto open_file_base::get_open_file_count() const -> std::size_t {
return open_data_.size();
}
auto open_file_base::get_source_path() const -> std::string {
recur_mutex_lock file_lock(file_mtx_);
return fsi_.source_path;
}
auto open_file_base::has_handle(std::uint64_t handle) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return open_data_.find(handle) != open_data_.end();
}
auto open_file_base::is_modified() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return modified_;
}
auto open_file_base::is_removed() const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return removed_;
}
void open_file_base::notify_io() {
mutex_lock io_lock(io_thread_mtx_);
io_thread_notify_.notify_all();
}
void open_file_base::remove(std::uint64_t handle) {
recur_mutex_lock file_lock(file_mtx_);
if (open_data_.find(handle) == open_data_.end()) {
@ -334,7 +261,7 @@ void open_file_base::remove_all() {
auto open_data = open_data_;
open_data_.clear();
for (const auto &data : open_data) {
for (auto &&data : open_data) {
event_system::instance().raise<filesystem_item_handle_closed>(
fsi_.api_path, data.first, fsi_.source_path, fsi_.directory, modified_);
}
@ -349,15 +276,15 @@ void open_file_base::reset_timeout() {
auto open_file_base::set_api_error(const api_error &err) -> api_error {
mutex_lock error_lock(error_mtx_);
if (error_ == err) {
return error_;
if (error_ != err) {
return ((error_ = (error_ == api_error::success ||
error_ == api_error::download_incomplete ||
error_ == api_error::download_stopped
? err
: error_)));
}
return ((error_ = (error_ == api_error::success ||
error_ == api_error::download_incomplete ||
error_ == api_error::download_stopped
? err
: error_)));
return error_;
}
void open_file_base::set_api_path(const std::string &api_path) {
@ -366,12 +293,24 @@ void open_file_base::set_api_path(const std::string &api_path) {
fsi_.api_parent = utils::path::get_parent_api_path(api_path);
}
void open_file_base::wait_for_io(stop_type &stop_requested) {
auto open_file_base::close() -> bool {
unique_mutex_lock io_lock(io_thread_mtx_);
if (not stop_requested && io_thread_queue_.empty()) {
io_thread_notify_.wait(io_lock);
if (not fsi_.directory && not io_stop_requested_) {
io_stop_requested_ = true;
io_thread_notify_.notify_all();
io_lock.unlock();
if (io_thread_) {
io_thread_->join();
io_thread_.reset();
return true;
}
return false;
}
io_thread_notify_.notify_all();
io_lock.unlock();
return false;
}
} // namespace repertory

View File

@ -1,367 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "file_manager/ring_buffer_base.hpp"
#include "events/event_system.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file_base.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
// Constructs the sliding-window (ring buffer) download state on top of
// open_file_base. `ring_size` is the number of chunk slots in the window;
// `read_state_` tracks, per slot, whether the chunk is downloaded.
//
// When `disable_io` is set, no reader thread will run: the window is shrunk
// to the actual chunk count and every slot starts "not downloaded" only when
// the file is non-empty. Otherwise the ring size is validated and the window
// is anchored at `ring_begin_` (default 0).
ring_buffer_base::ring_buffer_base(std::uint64_t chunk_size,
                                   std::uint8_t chunk_timeout,
                                   filesystem_item fsi, i_provider &provider,
                                   std::size_t ring_size, bool disable_io)
    : open_file_base(chunk_size, chunk_timeout, fsi, provider, disable_io),
      read_state_(ring_size),
      // Ceiling division: a trailing partial chunk still counts as a chunk.
      total_chunks_(static_cast<std::size_t>(
          utils::divide_with_ceiling(fsi.size, chunk_size))) {
  if (disable_io) {
    if (fsi.size > 0U) {
      // Window never needs more slots than there are chunks.
      read_state_.resize(std::min(total_chunks_, read_state_.size()));
      ring_end_ =
          std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
      read_state_.set(0U, read_state_.size(), false);
    }
  } else {
    if (ring_size < min_ring_size) {
      throw std::runtime_error("ring size must be greater than or equal to 5");
    }
    // NOTE(review): this branch evaluates `total_chunks_ - 1U` without a
    // `fsi.size > 0U` guard; for an empty file total_chunks_ is 0 and the
    // subtraction wraps. Presumably callers only construct I/O-enabled ring
    // buffers for non-empty files — TODO confirm.
    ring_end_ = std::min(total_chunks_ - 1U, ring_begin_ + ring_size - 1U);
    read_state_.set(0U, ring_size, false);
  }
}
// Lazily starts the background reader thread. `on_check_start()` returning
// true means the subclass considers the buffer already started, in which case
// nothing is done. Otherwise a `download_begin` event is raised and the
// reader thread is spawned. Any exception (e.g. thread creation failure) is
// reported via the error-utils path and mapped to api_error::error.
auto ring_buffer_base::check_start() -> api_error {
  REPERTORY_USES_FUNCTION_NAME();

  try {
    if (on_check_start()) {
      return api_error::success;
    }

    event_system::instance().raise<download_begin>(get_api_path(),
                                                   get_source_path());

    reader_thread_ =
        std::make_unique<std::thread>([this]() { reader_thread(); });
    return api_error::success;
  } catch (const std::exception &ex) {
    utils::error::raise_api_path_error(function_name, get_api_path(),
                                       get_source_path(), ex,
                                       "failed to start");
    return api_error::error;
  }
}
// Shuts the ring buffer down. Ordering matters:
//   1. set the stop flag so in-flight reads/downloads bail out,
//   2. wake anyone blocked on the chunk condition variable,
//   3. close the base (stops its io thread),
//   4. join and destroy the reader thread.
// Returns the base-class close result.
auto ring_buffer_base::close() -> bool {
  stop_requested_ = true;

  unique_mutex_lock chunk_lock(chunk_mtx_);
  chunk_notify_.notify_all();
  chunk_lock.unlock();

  auto res = open_file_base::close();

  if (reader_thread_) {
    reader_thread_->join();
    reader_thread_.reset();
  }

  return res;
}
// Downloads a single chunk into its ring slot, coordinating with concurrent
// callers via `chunk_mtx_`/`chunk_notify_`.
//
// Outcomes while holding the lock:
//   - chunk outside the current window -> invalid_ring_buffer_position;
//   - chunk already being downloaded  -> wait on it (or skip if
//     `skip_active`, used by the background reader);
//   - chunk already downloaded        -> success.
// Otherwise this caller becomes the downloader: it registers an active
// download, releases the lock around the provider read, then re-acquires it
// to publish the result. All exit paths notify waiters before unlocking.
auto ring_buffer_base::download_chunk(std::size_t chunk,
                                      bool skip_active) -> api_error {
  unique_mutex_lock chunk_lock(chunk_mtx_);

  // Wake waiters, then release the lock (always in this order).
  const auto unlock_and_notify = [this, &chunk_lock]() {
    chunk_notify_.notify_all();
    chunk_lock.unlock();
  };

  const auto unlock_and_return =
      [&unlock_and_notify](api_error res) -> api_error {
    unlock_and_notify();
    return res;
  };

  if (chunk < ring_begin_ || chunk > ring_end_) {
    return unlock_and_return(api_error::invalid_ring_buffer_position);
  }

  if (get_active_downloads().find(chunk) != get_active_downloads().end()) {
    if (skip_active) {
      return unlock_and_return(api_error::success);
    }

    // Another thread is downloading this chunk; wait for its result.
    auto active_download = get_active_downloads().at(chunk);
    unlock_and_notify();
    return active_download->wait();
  }

  if (read_state_[chunk % read_state_.size()]) {
    return unlock_and_return(api_error::success);
  }

  auto active_download{std::make_shared<download>()};
  get_active_downloads()[chunk] = active_download;

  return use_buffer(chunk, [&](data_buffer &buffer) -> api_error {
    auto data_offset{chunk * get_chunk_size()};
    // The final chunk may be shorter than the nominal chunk size.
    auto data_size{
        chunk == (total_chunks_ - 1U) ? get_last_chunk_size()
                                      : get_chunk_size(),
    };
    // Drop the lock for the (potentially slow) provider read.
    unlock_and_notify();

    auto result{
        get_provider().read_file_bytes(get_api_path(), data_size, data_offset,
                                       buffer, stop_requested_),
    };

    chunk_lock.lock();
    // The window may have moved while the lock was released; if the chunk
    // slid out, its slot may already be reused — discard the result.
    if (chunk < ring_begin_ || chunk > ring_end_) {
      result = api_error::invalid_ring_buffer_position;
    }

    if (result == api_error::success) {
      result = on_chunk_downloaded(chunk, buffer);
      if (result == api_error::success) {
        read_state_[chunk % read_state_.size()] = true;
        auto progress = (static_cast<double>(chunk + 1U) /
                         static_cast<double>(total_chunks_)) *
                        100.0;
        event_system::instance().raise<download_progress>(
            get_api_path(), get_source_path(), progress);
      }
    }

    get_active_downloads().erase(chunk);
    unlock_and_notify();

    // Release any threads blocked in active_download->wait().
    active_download->notify(result);
    return result;
  });
}
// Moves the ring position forward by `count` chunks; clamping and window
// maintenance happen in update_position().
void ring_buffer_base::forward(std::size_t count) {
  update_position(count, true);
}
// Returns a snapshot (copy) of the per-slot downloaded bitmap, taken under
// the file mutex.
auto ring_buffer_base::get_read_state() const -> boost::dynamic_bitset<> {
  recur_mutex_lock state_lock(get_mutex());
  auto snapshot = read_state_;
  return snapshot;
}
// Returns whether the ring slot mapped to `chunk` currently holds downloaded
// data. Checked under the file mutex.
auto ring_buffer_base::get_read_state(std::size_t chunk) const -> bool {
  recur_mutex_lock state_lock(get_mutex());
  const auto slot = chunk % read_state_.size();
  return read_state_[slot];
}
// Sequentially reads `read_size` bytes starting at `read_offset`, appending
// to `data`. Walks chunk by chunk, sliding the ring window (forward/reverse)
// so the target chunk is inside it, downloading it if necessary, then letting
// the subclass copy bytes out via on_read_chunk(). Serialized by `read_mtx_`.
//
// Returns download_stopped if a stop was requested mid-read; otherwise the
// first failing chunk's error, or success.
auto ring_buffer_base::read(std::size_t read_size, std::uint64_t read_offset,
                            data_buffer &data) -> api_error {
  if (is_directory()) {
    return api_error::invalid_operation;
  }

  reset_timeout();

  // Clamp the request to the bytes actually available past read_offset.
  read_size =
      utils::calculate_read_size(get_file_size(), read_size, read_offset);
  if (read_size == 0U) {
    return api_error::success;
  }

  auto begin_chunk{static_cast<std::size_t>(read_offset / get_chunk_size())};
  // From here on read_offset is relative to the current chunk.
  read_offset = read_offset - (begin_chunk * get_chunk_size());

  unique_mutex_lock read_lock(read_mtx_);

  auto res = check_start();
  if (res != api_error::success) {
    return res;
  }

  for (std::size_t chunk = begin_chunk;
       not stop_requested_ && (res == api_error::success) && (read_size > 0U);
       ++chunk) {
    reset_timeout();

    // Slide the window so `chunk` becomes the current position.
    if (chunk > ring_pos_) {
      forward(chunk - ring_pos_);
    } else if (chunk < ring_pos_) {
      reverse(ring_pos_ - chunk);
    }

    res = download_chunk(chunk, false);
    if (res != api_error::success) {
      if (res == api_error::invalid_ring_buffer_position) {
        read_lock.unlock();

        // The window moved underneath us; restart the whole read.
        // NOTE(review): this recursion is unbounded — under sustained window
        // contention it could recurse indefinitely.
        // TODO limit retry
        return read(read_size, read_offset, data);
      }

      return res;
    }

    reset_timeout();

    std::size_t bytes_read{};
    res = on_read_chunk(
        chunk,
        std::min(static_cast<std::size_t>(get_chunk_size() - read_offset),
                 read_size),
        read_offset, data, bytes_read);
    if (res != api_error::success) {
      return res;
    }
    reset_timeout();

    read_size -= bytes_read;
    // Only the first chunk uses a non-zero intra-chunk offset.
    read_offset = 0U;
  }

  return stop_requested_ ? api_error::download_stopped : res;
}
// Background prefetcher: cycles through the window downloading any chunk not
// yet marked downloaded (skip_active=true so it never blocks behind a reader
// already downloading the same chunk). When the whole window is downloaded it
// sleeps on `chunk_notify_` until the window moves. On exit it raises
// download_end with download_stopped.
void ring_buffer_base::reader_thread() {
  unique_mutex_lock chunk_lock(chunk_mtx_);
  auto next_chunk{ring_pos_};
  chunk_notify_.notify_all();
  chunk_lock.unlock();

  while (not stop_requested_) {
    chunk_lock.lock();
    // Advance cyclically through [ring_begin_, ring_end_].
    next_chunk = next_chunk + 1U > ring_end_ ? ring_begin_ : next_chunk + 1U;

    const auto check_and_wait = [this, &chunk_lock, &next_chunk]() {
      if (stop_requested_) {
        chunk_notify_.notify_all();
        chunk_lock.unlock();
        return;
      }

      // NOTE(review): get_read_state() takes the file mutex while this
      // lambda holds chunk_mtx_ — verify no code path takes these two locks
      // in the opposite order (deadlock risk).
      if (get_read_state().all()) {
        chunk_notify_.wait(chunk_lock);
        // Window may have moved while waiting; resync to current position.
        next_chunk = ring_pos_;
      }

      chunk_notify_.notify_all();
      chunk_lock.unlock();
    };

    if (read_state_[next_chunk % read_state_.size()]) {
      check_and_wait();
      continue;
    }

    chunk_notify_.notify_all();
    chunk_lock.unlock();

    download_chunk(next_chunk, true);
  }

  event_system::instance().raise<download_end>(
      get_api_path(), get_source_path(), api_error::download_stopped);
}
// Moves the ring position backward by `count` chunks; clamping and window
// maintenance happen in update_position().
void ring_buffer_base::reverse(std::size_t count) {
  update_position(count, false);
}
// Repositions the window so it starts at `first_chunk` and the current
// position is `current_chunk`, marking every slot downloaded.
//
// Throws std::runtime_error when `first_chunk` is out of range or
// `current_chunk` lies beyond the resulting window end. Both checks are
// performed BEFORE any member is mutated (the original updated
// ring_begin_/ring_end_ before validating current_chunk, so a failed
// validation left the window half-updated); valid calls behave identically.
void ring_buffer_base::set(std::size_t first_chunk, std::size_t current_chunk) {
  mutex_lock chunk_lock(chunk_mtx_);
  if (first_chunk >= total_chunks_) {
    chunk_notify_.notify_all();
    throw std::runtime_error("first chunk must be less than total chunks");
  }

  // Compute the prospective window end without touching members yet.
  auto next_end =
      std::min(total_chunks_ - 1U, first_chunk + read_state_.size() - 1U);
  if (current_chunk > next_end) {
    chunk_notify_.notify_all();
    throw std::runtime_error(
        "current chunk must be less than or equal to last chunk");
  }

  ring_begin_ = first_chunk;
  ring_end_ = next_end;
  ring_pos_ = current_chunk;

  read_state_.set(0U, read_state_.size(), true);
  chunk_notify_.notify_all();
}
// Forwards the api-path change to the base class while holding the chunk
// mutex, then wakes any threads waiting on chunk state.
void ring_buffer_base::set_api_path(const std::string &api_path) {
  mutex_lock guard(chunk_mtx_);

  open_file_base::set_api_path(api_path);

  chunk_notify_.notify_all();
}
// Moves the current position by `count` chunks (forward or backward),
// sliding the window and invalidating slots that fall out of it.
//
// All arithmetic is on unsigned std::size_t: `x += -count` relies on modular
// wrap-around to perform subtraction — intentional, do not "fix".
void ring_buffer_base::update_position(std::size_t count, bool is_forward) {
  mutex_lock chunk_lock(chunk_mtx_);

  // Clamp the move so the position stays within [0, total_chunks_ - 1].
  if (is_forward) {
    if ((ring_pos_ + count) > (total_chunks_ - 1U)) {
      count = (total_chunks_ - 1U) - ring_pos_;
    }
  } else {
    count = std::min(ring_pos_, count);
  }

  if (is_forward ? (ring_pos_ + count) <= ring_end_
                 : (ring_pos_ - count) >= ring_begin_) {
    // Target still inside the window: just move the position.
    ring_pos_ += is_forward ? count : -count;
  } else {
    // `delta` = how far the window itself must slide.
    auto delta = is_forward ? count - (ring_end_ - ring_pos_)
                            : count - (ring_pos_ - ring_begin_);

    if (delta >= read_state_.size()) {
      // Window jumps entirely past its old range: nothing survives.
      read_state_.set(0U, read_state_.size(), false);
      ring_pos_ += is_forward ? count : -count;
      ring_begin_ += is_forward ? delta : -delta;
    } else {
      // Partial slide: only the slots leaving the window are invalidated.
      for (std::size_t idx = 0U; idx < delta; ++idx) {
        if (is_forward) {
          read_state_[(ring_begin_ + idx) % read_state_.size()] = false;
        } else {
          read_state_[(ring_end_ - idx) % read_state_.size()] = false;
        }
      }
      ring_begin_ += is_forward ? delta : -delta;
      ring_pos_ += is_forward ? count : -count;
    }

    ring_end_ =
        std::min(total_chunks_ - 1U, ring_begin_ + read_state_.size() - 1U);
  }

  chunk_notify_.notify_all();
}
} // namespace repertory

View File

@ -21,30 +21,73 @@
*/
#include "file_manager/ring_buffer_open_file.hpp"
#include "app_config.hpp"
#include "file_manager/events.hpp"
#include "file_manager/open_file_base.hpp"
#include "platform/platform.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
#include "utils/encrypting_reader.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/utils.hpp"
namespace repertory {
ring_buffer_open_file::ring_buffer_open_file(std::string buffer_directory,
std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi,
i_provider &provider)
: ring_buffer_open_file(std::move(buffer_directory), chunk_size,
chunk_timeout, std::move(fsi), provider,
(1024ULL * 1024ULL * 1024ULL) / chunk_size) {}
ring_buffer_open_file::ring_buffer_open_file(std::string buffer_directory,
std::uint64_t chunk_size,
std::uint8_t chunk_timeout,
filesystem_item fsi,
i_provider &provider,
std::size_t ring_size)
: ring_buffer_base(chunk_size, chunk_timeout, fsi, provider, ring_size,
false),
source_path_(utils::path::combine(buffer_directory,
{
utils::create_uuid_string(),
})) {
if (not can_handle_file(fsi.size, chunk_size, ring_size)) {
: open_file_base(chunk_size, chunk_timeout, fsi, provider),
ring_state_(ring_size),
total_chunks_(static_cast<std::size_t>(
utils::divide_with_ceiling(fsi.size, chunk_size_))) {
if ((ring_size % 2U) != 0U) {
throw std::runtime_error("ring size must be a multiple of 2");
}
if (ring_size < 4U) {
throw std::runtime_error("ring size must be greater than or equal to 4");
}
if (fsi.size < (ring_state_.size() * chunk_size)) {
throw std::runtime_error("file size is less than ring buffer size");
}
last_chunk_ = ring_state_.size() - 1U;
ring_state_.set(0U, ring_state_.size(), true);
buffer_directory = utils::path::absolute(buffer_directory);
if (not utils::file::directory(buffer_directory).create_directory()) {
throw std::runtime_error("failed to create buffer directory|path|" +
buffer_directory + "|err|" +
std::to_string(utils::get_last_error_code()));
}
fsi_.source_path =
utils::path::combine(buffer_directory, {utils::create_uuid_string()});
nf_ = utils::file::file::open_or_create_file(fsi_.source_path);
if (not*nf_) {
throw std::runtime_error("failed to create buffer file|err|" +
std::to_string(utils::get_last_error_code()));
}
if (not nf_->truncate(ring_state_.size() * chunk_size)) {
nf_->close();
throw std::runtime_error("failed to resize buffer file|err|" +
std::to_string(utils::get_last_error_code()));
}
}
ring_buffer_open_file::~ring_buffer_open_file() {
@ -52,24 +95,107 @@ ring_buffer_open_file::~ring_buffer_open_file() {
close();
if (not nf_) {
return;
}
nf_->close();
nf_.reset();
if (not utils::file::file(source_path_).remove()) {
if (not utils::file::file(fsi_.source_path).remove()) {
utils::error::raise_api_path_error(
function_name, get_api_path(), source_path_,
function_name, fsi_.api_path, fsi_.source_path,
utils::get_last_error_code(), "failed to delete file");
}
}
auto ring_buffer_open_file::can_handle_file(std::uint64_t file_size,
std::size_t chunk_size,
std::size_t ring_size) -> bool {
return file_size >= (static_cast<std::uint64_t>(ring_size) * chunk_size);
auto ring_buffer_open_file::download_chunk(std::size_t chunk) -> api_error {
unique_mutex_lock chunk_lock(chunk_mtx_);
if (active_downloads_.find(chunk) != active_downloads_.end()) {
auto active_download = active_downloads_.at(chunk);
chunk_notify_.notify_all();
chunk_lock.unlock();
return active_download->wait();
}
if (ring_state_[chunk % ring_state_.size()]) {
auto active_download = std::make_shared<download>();
active_downloads_[chunk] = active_download;
ring_state_[chunk % ring_state_.size()] = false;
chunk_notify_.notify_all();
chunk_lock.unlock();
data_buffer buffer((chunk == (total_chunks_ - 1U)) ? last_chunk_size_
: chunk_size_);
stop_type stop_requested = !!ring_state_[chunk % ring_state_.size()];
auto res =
provider_.read_file_bytes(fsi_.api_path, buffer.size(),
chunk * chunk_size_, buffer, stop_requested);
if (res == api_error::success) {
res = do_io([&]() -> api_error {
std::size_t bytes_written{};
if (not nf_->write(buffer, (chunk % ring_state_.size()) * chunk_size_,
&bytes_written)) {
return api_error::os_error;
}
return api_error::success;
});
}
active_download->notify(res);
chunk_lock.lock();
active_downloads_.erase(chunk);
chunk_notify_.notify_all();
return res;
}
chunk_notify_.notify_all();
chunk_lock.unlock();
return api_error::success;
}
void ring_buffer_open_file::forward(std::size_t count) {
mutex_lock chunk_lock(chunk_mtx_);
if ((current_chunk_ + count) > (total_chunks_ - 1U)) {
count = (total_chunks_ - 1U) - current_chunk_;
}
if ((current_chunk_ + count) <= last_chunk_) {
current_chunk_ += count;
} else {
const auto added = count - (last_chunk_ - current_chunk_);
if (added >= ring_state_.size()) {
ring_state_.set(0U, ring_state_.size(), true);
current_chunk_ += count;
first_chunk_ += added;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
} else {
for (std::size_t idx = 0U; idx < added; ++idx) {
ring_state_[(first_chunk_ + idx) % ring_state_.size()] = true;
}
first_chunk_ += added;
current_chunk_ += count;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
}
}
chunk_notify_.notify_all();
}
auto ring_buffer_open_file::get_read_state() const -> boost::dynamic_bitset<> {
recur_mutex_lock file_lock(file_mtx_);
auto read_state = ring_state_;
return read_state.flip();
}
auto ring_buffer_open_file::get_read_state(std::size_t chunk) const -> bool {
recur_mutex_lock file_lock(file_mtx_);
return not ring_state_[chunk % ring_state_.size()];
}
auto ring_buffer_open_file::is_download_complete() const -> bool {
return false;
}
auto ring_buffer_open_file::native_operation(
@ -77,75 +203,121 @@ auto ring_buffer_open_file::native_operation(
return do_io([&]() -> api_error { return callback(nf_->get_handle()); });
}
auto ring_buffer_open_file::on_check_start() -> bool {
REPERTORY_USES_FUNCTION_NAME();
if (nf_) {
return true;
void ring_buffer_open_file::reverse(std::size_t count) {
mutex_lock chunk_lock(chunk_mtx_);
if (current_chunk_ < count) {
count = current_chunk_;
}
auto buffer_directory{utils::path::get_parent_path(source_path_)};
if (not utils::file::directory(buffer_directory).create_directory()) {
throw std::runtime_error(
fmt::format("failed to create buffer directory|path|{}|err|{}",
buffer_directory, utils::get_last_error_code()));
if ((current_chunk_ - count) >= first_chunk_) {
current_chunk_ -= count;
} else {
const auto removed = count - (current_chunk_ - first_chunk_);
if (removed >= ring_state_.size()) {
ring_state_.set(0U, ring_state_.size(), true);
current_chunk_ -= count;
first_chunk_ = current_chunk_;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
} else {
for (std::size_t idx = 0U; idx < removed; ++idx) {
ring_state_[(last_chunk_ - idx) % ring_state_.size()] = true;
}
first_chunk_ -= removed;
current_chunk_ -= count;
last_chunk_ =
std::min(total_chunks_ - 1U, first_chunk_ + ring_state_.size() - 1U);
}
}
nf_ = utils::file::file::open_or_create_file(source_path_);
if (not nf_ || not *nf_) {
throw std::runtime_error(fmt::format("failed to create buffer file|err|{}",
utils::get_last_error_code()));
}
if (not nf_->truncate(get_ring_size() * get_chunk_size())) {
nf_->close();
nf_.reset();
throw std::runtime_error(fmt::format("failed to resize buffer file|err|{}",
utils::get_last_error_code()));
}
return false;
chunk_notify_.notify_all();
}
auto ring_buffer_open_file::on_chunk_downloaded(
std::size_t chunk, const data_buffer &buffer) -> api_error {
return do_io([&]() -> api_error {
std::size_t bytes_written{};
if (nf_->write(buffer, (chunk % get_ring_size()) * get_chunk_size(),
&bytes_written)) {
return api_error::success;
auto ring_buffer_open_file::read(std::size_t read_size,
std::uint64_t read_offset, data_buffer &data)
-> api_error {
if (fsi_.directory) {
return api_error::invalid_operation;
}
reset_timeout();
read_size = utils::calculate_read_size(fsi_.size, read_size, read_offset);
if (read_size == 0U) {
return api_error::success;
}
const auto start_chunk_index =
static_cast<std::size_t>(read_offset / chunk_size_);
read_offset = read_offset - (start_chunk_index * chunk_size_);
data_buffer buffer(chunk_size_);
auto res = api_error::success;
for (std::size_t chunk = start_chunk_index;
(res == api_error::success) && (read_size > 0U); ++chunk) {
if (chunk > current_chunk_) {
forward(chunk - current_chunk_);
} else if (chunk < current_chunk_) {
reverse(current_chunk_ - chunk);
}
return api_error::os_error;
});
}
reset_timeout();
res = download_chunk(chunk);
if (res == api_error::success) {
const auto to_read = std::min(
static_cast<std::size_t>(chunk_size_ - read_offset), read_size);
res = do_io([this, &buffer, &chunk, &data, read_offset,
&to_read]() -> api_error {
std::size_t bytes_read{};
auto ret =
nf_->read(buffer, ((chunk % ring_state_.size()) * chunk_size_),
&bytes_read)
? api_error::success
: api_error::os_error;
if (ret == api_error::success) {
data.insert(data.end(),
buffer.begin() + static_cast<std::int64_t>(read_offset),
buffer.begin() +
static_cast<std::int64_t>(read_offset + to_read));
reset_timeout();
}
auto ring_buffer_open_file::on_read_chunk(
std::size_t chunk, std::size_t read_size, std::uint64_t read_offset,
data_buffer &data, std::size_t &bytes_read) -> api_error {
data_buffer buffer(read_size);
auto res = do_io([&]() -> api_error {
return nf_->read(
buffer,
(((chunk % get_ring_size()) * get_chunk_size()) + read_offset),
&bytes_read)
? api_error::success
: api_error::os_error;
});
if (res != api_error::success) {
return res;
return ret;
});
read_offset = 0U;
read_size -= to_read;
}
}
data.insert(data.end(), buffer.begin(), buffer.end());
return api_error::success;
return res;
}
auto ring_buffer_open_file::use_buffer(
std::size_t /* chunk */,
std::function<api_error(data_buffer &)> func) -> api_error {
data_buffer buffer;
return func(buffer);
void ring_buffer_open_file::set(std::size_t first_chunk,
std::size_t current_chunk) {
mutex_lock chunk_lock(chunk_mtx_);
if (first_chunk >= total_chunks_) {
chunk_notify_.notify_all();
throw std::runtime_error("first chunk must be less than total chunks");
}
first_chunk_ = first_chunk;
last_chunk_ = first_chunk_ + ring_state_.size() - 1U;
if (current_chunk > last_chunk_) {
chunk_notify_.notify_all();
throw std::runtime_error(
"current chunk must be less than or equal to last chunk");
}
current_chunk_ = current_chunk;
ring_state_.set(0U, ring_state_.size(), false);
chunk_notify_.notify_all();
}
void ring_buffer_open_file::set_api_path(const std::string &api_path) {
mutex_lock chunk_lock(chunk_mtx_);
open_file_base::set_api_path(api_path);
chunk_notify_.notify_all();
}
} // namespace repertory

View File

@ -53,8 +53,7 @@ void upload::upload_thread() {
error_ =
provider_.upload_file(fsi_.api_path, fsi_.source_path, stop_requested_);
if (error_ == api_error::success &&
not utils::file::reset_modified_time(fsi_.source_path)) {
if (not utils::file::reset_modified_time(fsi_.source_path)) {
utils::error::raise_api_path_error(
function_name, fsi_.api_path, fsi_.source_path,
utils::get_last_error_code(), "failed to reset modified time");

View File

@ -28,8 +28,9 @@
#endif // defined(PROJECT_ENABLE_OPENSSL)
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
#include <cstdlib>
#include <filesystem>
#include <pthread.h>
#include <stdlib.h>
#endif // defined(PROJECT_REQUIRE_ALPINE) && !defined (PROJECT_IS_MINGW)
#if defined(PROJECT_ENABLE_LIBSODIUM)
@ -43,7 +44,6 @@
#include "spdlog/spdlog.h"
#include "initialize.hpp"
#if defined(PROJECT_REQUIRE_ALPINE) && !defined(PROJECT_IS_MINGW)
#include "utils/path.hpp"
#endif // defined(PROJECT_REQUIRE_ALPINE) && !defined (PROJECT_IS_MINGW)

View File

@ -24,8 +24,6 @@
#include "platform/unix_platform.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "providers/i_provider.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"

View File

@ -23,14 +23,12 @@
#include "platform/win32_platform.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "providers/i_provider.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
auto lock_data::get_mount_state(const provider_type & /*pt*/,
json &mount_state) -> bool {
auto lock_data::get_mount_state(const provider_type & /*pt*/, json &mount_state)
-> bool {
const auto ret = get_mount_state(mount_state);
if (ret) {
const auto mount_id =

View File

@ -25,10 +25,8 @@
#include "db/meta_db.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/i_file_manager.hpp"
#include "platform/platform.hpp"
#include "utils/error_utils.hpp"
#include "utils/file_utils.hpp"
#include "utils/path.hpp"
#include "utils/polling.hpp"
@ -51,8 +49,8 @@ void base_provider::add_all_items(const stop_type &stop_requested) {
}
auto base_provider::create_api_file(std::string path, std::string key,
std::uint64_t size, std::uint64_t file_time)
-> api_file {
std::uint64_t size,
std::uint64_t file_time) -> api_file {
api_file file{};
file.api_path = utils::path::create_api_path(path);
file.api_parent = utils::path::get_parent_api_path(file.api_path);
@ -84,8 +82,8 @@ auto base_provider::create_api_file(std::string path, std::uint64_t size,
}
auto base_provider::create_directory_clone_source_meta(
const std::string &source_api_path, const std::string &api_path)
-> api_error {
const std::string &source_api_path,
const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
bool exists{};
@ -182,8 +180,8 @@ auto base_provider::create_directory(const std::string &api_path,
return set_item_meta(api_path, meta);
}
auto base_provider::create_file(const std::string &api_path, api_meta_map &meta)
-> api_error {
auto base_provider::create_file(const std::string &api_path,
api_meta_map &meta) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
bool exists{};
@ -240,9 +238,8 @@ auto base_provider::create_file(const std::string &api_path, api_meta_map &meta)
return api_error::error;
}
auto base_provider::get_api_path_from_source(const std::string &source_path,
std::string &api_path) const
-> api_error {
auto base_provider::get_api_path_from_source(
const std::string &source_path, std::string &api_path) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
if (source_path.empty()) {
@ -255,9 +252,8 @@ auto base_provider::get_api_path_from_source(const std::string &source_path,
return db3_->get_api_path(source_path, api_path);
}
auto base_provider::get_directory_items(const std::string &api_path,
directory_item_list &list) const
-> api_error {
auto base_provider::get_directory_items(
const std::string &api_path, directory_item_list &list) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
bool exists{};
@ -321,10 +317,9 @@ auto base_provider::get_file_size(const std::string &api_path,
return api_error::success;
}
auto base_provider::get_filesystem_item(const std::string &api_path,
bool directory,
filesystem_item &fsi) const
-> api_error {
auto base_provider::get_filesystem_item(
const std::string &api_path, bool directory,
filesystem_item &fsi) const -> api_error {
bool exists{};
auto res = is_directory(api_path, exists);
if (res != api_error::success) {
@ -357,10 +352,9 @@ auto base_provider::get_filesystem_item(const std::string &api_path,
return api_error::success;
}
auto base_provider::get_filesystem_item_and_file(const std::string &api_path,
api_file &file,
filesystem_item &fsi) const
-> api_error {
auto base_provider::get_filesystem_item_and_file(
const std::string &api_path, api_file &file,
filesystem_item &fsi) const -> api_error {
auto res = get_file(api_path, file);
if (res != api_error::success) {
return res;
@ -457,7 +451,7 @@ void base_provider::process_removed_files(std::deque<removed_item> removed_list,
REPERTORY_USES_FUNCTION_NAME();
auto orphaned_directory =
utils::path::combine(get_config().get_data_directory(), {"orphaned"});
utils::path::combine(config_.get_data_directory(), {"orphaned"});
for (const auto &item : removed_list) {
if (stop_requested) {
return;
@ -671,10 +665,8 @@ void base_provider::remove_unmatched_source_files(
return;
}
const auto &cfg = get_config();
auto source_list =
utils::file::directory{cfg.get_cache_directory()}.get_files();
utils::file::directory{config_.get_cache_directory()}.get_files();
for (const auto &source_file : source_list) {
if (stop_requested) {
return;
@ -687,15 +679,15 @@ void base_provider::remove_unmatched_source_files(
}
auto reference_time =
source_file->get_time(cfg.get_eviction_uses_accessed_time()
source_file->get_time(config_.get_eviction_uses_accessed_time()
? utils::file::time_type::accessed
: utils::file::time_type::modified);
if (not reference_time.has_value()) {
continue;
}
auto delay =
(cfg.get_eviction_delay_mins() * 60UL) * utils::time::NANOS_PER_SECOND;
auto delay = (config_.get_eviction_delay_mins() * 60UL) *
utils::time::NANOS_PER_SECOND;
if ((reference_time.value() + static_cast<std::uint64_t>(delay)) >=
utils::time::get_time_now()) {
continue;
@ -739,19 +731,17 @@ auto base_provider::start(api_item_added_callback api_item_added,
auto online{false};
auto unmount_requested{false};
{
const auto &cfg = get_config();
repertory::event_consumer consumer(
"unmount_requested",
[&unmount_requested](const event &) { unmount_requested = true; });
for (std::uint16_t idx = 0U; not online && not unmount_requested &&
(idx < cfg.get_online_check_retry_secs());
(idx < config_.get_online_check_retry_secs());
++idx) {
online = is_online();
if (not online) {
event_system::instance().raise<provider_offline>(
cfg.get_host_config().host_name_or_ip,
cfg.get_host_config().api_port);
config_.get_host_config().host_name_or_ip,
config_.get_host_config().api_port);
std::this_thread::sleep_for(1s);
}
}
@ -761,8 +751,6 @@ auto base_provider::start(api_item_added_callback api_item_added,
return false;
}
cache_size_mgr::instance().initialize(&config_);
polling::instance().set_callback({
"check_deleted",
polling::frequency::low,
@ -773,7 +761,6 @@ auto base_provider::start(api_item_added_callback api_item_added,
}
void base_provider::stop() {
cache_size_mgr::instance().stop();
polling::instance().remove_callback("check_deleted");
db3_.reset();
}

View File

@ -23,8 +23,6 @@
#include "app_config.hpp"
#include "comm/i_http_comm.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/i_file_manager.hpp"
#include "types/repertory.hpp"
#include "types/s3.hpp"
@ -41,7 +39,9 @@
namespace repertory {
s3_provider::s3_provider(app_config &config, i_http_comm &comm)
: base_provider(config, comm) {}
: base_provider(config, comm) {
get_comm().enable_s3_path_style(config.get_s3_config().use_path_style);
}
auto s3_provider::add_if_not_found(
api_file &file, const std::string &object_name) const -> api_error {
@ -85,7 +85,7 @@ auto s3_provider::create_directory_impl(const std::string &api_path,
api_meta_map &meta) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
stop_type stop_requested{false};
@ -138,8 +138,7 @@ auto s3_provider::create_file_extra(const std::string &api_path,
api_meta_map &meta) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto &cfg = get_s3_config();
if (not cfg.encryption_token.empty()) {
if (not get_config().get_s3_config().encryption_token.empty()) {
std::string encrypted_file_path;
auto res = get_item_meta(utils::path::get_parent_api_path(api_path),
META_KEY, encrypted_file_path);
@ -151,7 +150,7 @@ auto s3_provider::create_file_extra(const std::string &api_path,
data_buffer result;
utils::encryption::encrypt_data(
cfg.encryption_token,
get_config().get_s3_config().encryption_token,
*(utils::string::split(api_path, '/', false).end() - 1U), result);
meta[META_KEY] = utils::path::create_api_path(
@ -170,8 +169,7 @@ auto s3_provider::create_path_directories(
return api_error::success;
}
const auto &cfg = get_s3_config();
auto encryption_token = cfg.encryption_token;
auto encryption_token = get_config().get_s3_config().encryption_token;
auto is_encrypted = not encryption_token.empty();
auto path_parts = utils::string::split(api_path, '/', false);
@ -181,6 +179,8 @@ auto s3_provider::create_path_directories(
return api_error::error;
}
auto cfg = get_config().get_s3_config();
std::string cur_key{'/'};
std::string cur_path{'/'};
for (std::size_t idx = 0U; idx < path_parts.size(); ++idx) {
@ -242,9 +242,9 @@ auto s3_provider::create_path_directories(
auto s3_provider::decrypt_object_name(std::string &object_name) const
-> api_error {
auto parts = utils::string::split(object_name, '/', false);
for (auto &part : parts) {
for (auto &&part : parts) {
if (not utils::encryption::decrypt_file_name(
get_s3_config().encryption_token, part)) {
get_config().get_s3_config().encryption_token, part)) {
return api_error::decryption_error;
}
}
@ -258,7 +258,7 @@ auto s3_provider::get_directory_item_count(const std::string &api_path) const
REPERTORY_USES_FUNCTION_NAME();
try {
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
@ -334,7 +334,7 @@ auto s3_provider::get_directory_items_impl(
const std::string &api_path, directory_item_list &list) const -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
auto ret = api_error::success;
@ -439,14 +439,14 @@ auto s3_provider::get_directory_items_impl(
auto node_list =
doc.select_nodes("/ListBucketResult/CommonPrefixes/Prefix");
for (const auto &node : node_list) {
for (auto &&node : node_list) {
add_directory_item(
true, node.node().text().as_string(), 0U,
[](const directory_item &) -> std::uint64_t { return 0U; });
}
node_list = doc.select_nodes("/ListBucketResult/Contents");
for (const auto &node : node_list) {
for (auto &&node : node_list) {
auto child_object_name = utils::path::create_api_path(
node.node().select_node("Key").node().text().as_string());
if (child_object_name == utils::path::create_api_path(prefix)) {
@ -551,7 +551,7 @@ auto s3_provider::get_file_list(api_file_list &list,
}
auto node_list = doc.select_nodes("/ListBucketResult/Contents");
for (const auto &node : node_list) {
for (auto &&node : node_list) {
auto object_name =
std::string{node.node().select_node("Key").node().text().as_string()};
auto api_path{object_name};
@ -559,7 +559,8 @@ auto s3_provider::get_file_list(api_file_list &list,
continue;
}
auto is_encrypted = not get_s3_config().encryption_token.empty();
auto is_encrypted =
not get_config().get_s3_config().encryption_token.empty();
if (is_encrypted) {
auto err = decrypt_object_name(api_path);
if (err != api_error::success) {
@ -609,7 +610,7 @@ auto s3_provider::get_object_info(
REPERTORY_USES_FUNCTION_NAME();
try {
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
is_encrypted = not cfg.encryption_token.empty();
std::string key;
@ -660,7 +661,7 @@ auto s3_provider::get_object_list(
std::optional<std::string> token) const -> bool {
curl::requests::http_get get{};
get.allow_timeout = true;
get.aws_service = "aws:amz:" + get_s3_config().region + ":s3";
get.aws_service = "aws:amz:" + get_config().get_s3_config().region + ":s3";
get.path = '/';
get.query["list-type"] = "2";
if (delimiter.has_value() && not delimiter.value().empty()) {
@ -752,7 +753,7 @@ auto s3_provider::read_file_bytes(const std::string &api_path, std::size_t size,
REPERTORY_USES_FUNCTION_NAME();
try {
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
if (is_encrypted) {
@ -858,7 +859,7 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
-> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
@ -900,7 +901,7 @@ auto s3_provider::remove_directory_impl(const std::string &api_path)
auto s3_provider::remove_file_impl(const std::string &api_path) -> api_error {
REPERTORY_USES_FUNCTION_NAME();
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;
@ -948,8 +949,6 @@ auto s3_provider::rename_file(const std::string & /* from_api_path */,
auto s3_provider::start(api_item_added_callback api_item_added,
i_file_manager *mgr) -> bool {
event_system::instance().raise<service_started>("s3_provider");
s3_config_ = get_config().get_s3_config();
get_comm().enable_s3_path_style(s3_config_.use_path_style);
return base_provider::start(api_item_added, mgr);
}
@ -973,7 +972,7 @@ auto s3_provider::upload_file_impl(const std::string &api_path,
file_size = opt_size.value();
}
const auto &cfg = get_s3_config();
auto cfg = get_config().get_s3_config();
auto is_encrypted = not cfg.encryption_token.empty();
std::string key;

View File

@ -37,7 +37,8 @@
#include "utils/utils.hpp"
namespace {
[[nodiscard]] auto get_bucket(const repertory::sia_config &cfg) -> std::string {
[[nodiscard]] auto get_bucket(repertory::sia_config cfg) -> std::string {
repertory::utils::string::trim(cfg.bucket);
if (cfg.bucket.empty()) {
return "default";
}
@ -67,7 +68,7 @@ auto sia_provider::create_directory_impl(const std::string &api_path,
curl::requests::http_put_file put_file{};
put_file.allow_timeout = true;
put_file.path = "/api/worker/objects" + api_path + "/";
put_file.query["bucket"] = get_bucket(get_sia_config());
put_file.query["bucket"] = get_bucket(get_config().get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -99,7 +100,7 @@ auto sia_provider::get_directory_item_count(const std::string &api_path) const
std::uint64_t item_count{};
if (object_list.contains("entries")) {
for (const auto &entry : object_list.at("entries")) {
for (auto &&entry : object_list.at("entries")) {
try {
auto name = entry.at("name").get<std::string>();
auto entry_api_path = utils::path::create_api_path(name);
@ -136,7 +137,7 @@ auto sia_provider::get_directory_items_impl(const std::string &api_path,
}
if (object_list.contains("entries")) {
for (const auto &entry : object_list.at("entries")) {
for (auto &&entry : object_list.at("entries")) {
try {
auto name = entry.at("name").get<std::string>();
auto entry_api_path = utils::path::create_api_path(name);
@ -227,7 +228,7 @@ auto sia_provider::get_file_list(api_file_list &list,
}
if (object_list.contains("entries")) {
for (const auto &entry : object_list.at("entries")) {
for (auto &&entry : object_list.at("entries")) {
auto name = entry.at("name").get<std::string>();
auto entry_api_path = utils::path::create_api_path(name);
@ -288,7 +289,7 @@ auto sia_provider::get_object_info(const std::string &api_path,
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/objects" + api_path;
get.query["bucket"] = get_bucket(get_sia_config());
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.response_handler = [&object_info](const data_buffer &data,
long response_code) {
@ -329,7 +330,7 @@ auto sia_provider::get_object_list(const std::string &api_path,
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/objects" + api_path + "/";
get.query["bucket"] = get_bucket(get_sia_config());
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.response_handler = [&object_list](const data_buffer &data,
long response_code) {
@ -363,7 +364,7 @@ auto sia_provider::get_total_drive_space() const -> std::uint64_t {
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/autopilot/config";
get.query["bucket"] = get_bucket(get_sia_config());
get.query["bucket"] = get_bucket(get_config().get_sia_config());
json config_data{};
get.response_handler = [&config_data](const data_buffer &data,
@ -464,7 +465,7 @@ auto sia_provider::is_online() const -> bool {
curl::requests::http_get get{};
get.allow_timeout = true;
get.path = "/api/bus/consensus/state";
get.query["bucket"] = get_bucket(get_sia_config());
get.query["bucket"] = get_bucket(get_config().get_sia_config());
json state_data{};
get.response_handler = [&state_data](const data_buffer &data,
@ -505,7 +506,7 @@ auto sia_provider::read_file_bytes(const std::string &api_path,
curl::requests::http_get get{};
get.path = "/api/worker/objects" + api_path;
get.query["bucket"] = get_bucket(get_sia_config());
get.query["bucket"] = get_bucket(get_config().get_sia_config());
get.range = {{
offset,
offset + size - 1U,
@ -560,7 +561,7 @@ auto sia_provider::remove_directory_impl(const std::string &api_path)
curl::requests::http_delete del{};
del.allow_timeout = true;
del.path = "/api/bus/objects" + api_path + "/";
del.query["bucket"] = get_bucket(get_sia_config());
del.query["bucket"] = get_bucket(get_config().get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -586,7 +587,7 @@ auto sia_provider::remove_file_impl(const std::string &api_path) -> api_error {
curl::requests::http_delete del{};
del.allow_timeout = true;
del.path = "/api/bus/objects" + api_path;
del.query["bucket"] = get_bucket(get_sia_config());
del.query["bucket"] = get_bucket(get_config().get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -618,7 +619,7 @@ auto sia_provider::rename_file(const std::string &from_api_path,
{"mode", "single"},
});
post.path = "/api/bus/objects/rename";
post.query["bucket"] = get_bucket(get_sia_config());
post.query["bucket"] = get_bucket(get_config().get_sia_config());
long response_code{};
stop_type stop_requested{};
@ -643,7 +644,6 @@ auto sia_provider::rename_file(const std::string &from_api_path,
auto sia_provider::start(api_item_added_callback api_item_added,
i_file_manager *mgr) -> bool {
event_system::instance().raise<service_started>("sia_provider");
sia_config_ = get_config().get_sia_config();
return base_provider::start(api_item_added, mgr);
}
@ -660,7 +660,7 @@ auto sia_provider::upload_file_impl(const std::string &api_path,
curl::requests::http_put_file put_file{};
put_file.path = "/api/worker/objects" + api_path;
put_file.query["bucket"] = get_bucket(get_sia_config());
put_file.query["bucket"] = get_bucket(get_config().get_sia_config());
put_file.source_path = source_path;
long response_code{};

View File

@ -22,14 +22,10 @@
#include "rpc/server/full_server.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/i_file_manager.hpp"
#include "providers/i_provider.hpp"
#include "types/repertory.hpp"
#include "types/rpc.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
@ -40,20 +36,25 @@ full_server::full_server(app_config &config, i_provider &provider,
void full_server::handle_get_directory_items(const httplib::Request &req,
httplib::Response &res) {
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
res.set_content(json({
{"items", fm_.get_directory_items(api_path)},
})
.dump(),
"application/json");
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
const auto list = fm_.get_directory_items(api_path);
json items = {{"items", std::vector<json>()}};
for (const auto &item : list) {
items["items"].emplace_back(item.to_json());
}
res.set_content(items.dump(), "application/json");
res.status = 200;
}
void full_server::handle_get_drive_information(const httplib::Request & /*req*/,
httplib::Response &res) {
auto dir_size =
utils::file::directory(get_config().get_cache_directory()).size();
res.set_content(
json({
{"cache_space_used", cache_size_mgr::instance().size()},
{"cache_space_used", dir_size},
{"drive_space_total", provider_.get_total_drive_space()},
{"drive_space_used", provider_.get_used_drive_space()},
{"item_count", provider_.get_total_item_count()},
@ -65,9 +66,9 @@ void full_server::handle_get_drive_information(const httplib::Request & /*req*/,
void full_server::handle_get_open_files(const httplib::Request & /*req*/,
httplib::Response &res) {
auto list = fm_.get_open_files();
const auto list = fm_.get_open_files();
json open_files;
json open_files = {{"items", std::vector<json>()}};
for (const auto &kv : list) {
open_files["items"].emplace_back(json({
{"path", kv.first},
@ -80,10 +81,7 @@ void full_server::handle_get_open_files(const httplib::Request & /*req*/,
void full_server::handle_get_pinned_files(const httplib::Request & /*req*/,
httplib::Response &res) {
res.set_content(json({
{"items", provider_.get_pinned_files()},
})
.dump(),
res.set_content(json({{"items", provider_.get_pinned_files()}}).dump(),
"application/json");
res.status = 200;
}
@ -92,10 +90,11 @@ void full_server::handle_get_pinned_status(const httplib::Request &req,
httplib::Response &res) {
REPERTORY_USES_FUNCTION_NAME();
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
std::string pinned;
auto result = provider_.get_item_meta(api_path, META_PINNED, pinned);
const auto result = provider_.get_item_meta(api_path, META_PINNED, pinned);
if (result != api_error::success) {
utils::error::raise_api_path_error(function_name, api_path, result,
"failed to get pinned status");
@ -104,10 +103,8 @@ void full_server::handle_get_pinned_status(const httplib::Request &req,
}
res.set_content(
json({
{"pinned",
pinned.empty() ? false : utils::string::to_bool(pinned)},
})
json(
{{"pinned", pinned.empty() ? false : utils::string::to_bool(pinned)}})
.dump(),
"application/json");
res.status = 200;
@ -117,7 +114,8 @@ void full_server::handle_pin_file(const httplib::Request &req,
httplib::Response &res) {
REPERTORY_USES_FUNCTION_NAME();
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
bool exists{};
auto result = provider_.is_file(api_path, exists);
@ -145,7 +143,8 @@ void full_server::handle_unpin_file(const httplib::Request &req,
httplib::Response &res) {
REPERTORY_USES_FUNCTION_NAME();
auto api_path = utils::path::create_api_path(req.get_param_value("api_path"));
const auto api_path =
utils::path::create_api_path(req.get_param_value("api_path"));
bool exists{};
auto result = provider_.is_file(api_path, exists);

View File

@ -22,8 +22,6 @@
#include "rpc/server/server.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "utils/base64.hpp"
#include "utils/error_utils.hpp"

View File

@ -25,42 +25,18 @@
#include "utils/string.hpp"
namespace repertory {
// Parses a database backend name into a database_type. Matching is
// case-insensitive and ignores surrounding whitespace; any name other
// than "rocksdb" or "sqlite" yields the supplied default_type.
auto database_type_from_string(std::string type,
                               database_type default_type) -> database_type {
  auto normalized = utils::string::to_lower(utils::string::trim(type));
  if (normalized == "sqlite") {
    return database_type::sqlite;
  }

  return normalized == "rocksdb" ? database_type::rocksdb : default_type;
}
// Maps a database_type to its canonical lowercase name. Any value that
// is not sqlite (including unexpected enum values) is reported as
// "rocksdb", the default backend.
auto database_type_to_string(const database_type &type) -> std::string {
  return type == database_type::sqlite ? "sqlite" : "rocksdb";
}
auto download_type_from_string(std::string type,
download_type default_type) -> download_type {
const download_type &default_type)
-> download_type {
type = utils::string::to_lower(utils::string::trim(type));
if (type == "default") {
return download_type::default_;
}
if (type == "direct") {
return download_type::direct;
}
if (type == "fallback") {
return download_type::fallback;
}
if (type == "ring_buffer") {
return download_type::ring_buffer;
}
@ -70,14 +46,14 @@ auto download_type_from_string(std::string type,
auto download_type_to_string(const download_type &type) -> std::string {
switch (type) {
case download_type::default_:
return "default";
case download_type::direct:
return "direct";
case download_type::fallback:
return "fallback";
case download_type::ring_buffer:
return "ring_buffer";
default:
return "default";
return "fallback";
}
}
@ -87,7 +63,6 @@ static const std::unordered_map<api_error, std::string> LOOKUP = {
{api_error::bad_address, "bad_address"},
{api_error::buffer_overflow, "buffer_overflow"},
{api_error::buffer_too_small, "buffer_too_small"},
{api_error::cache_not_initialized, "cache_not_initialized"},
{api_error::comm_error, "comm_error"},
{api_error::decryption_error, "decryption_error"},
{api_error::directory_end_of_files, "directory_end_of_files"},
@ -106,7 +81,6 @@ static const std::unordered_map<api_error, std::string> LOOKUP = {
{api_error::invalid_handle, "invalid_handle"},
{api_error::invalid_operation, "invalid_operation"},
{api_error::invalid_ring_buffer_multiple, "invalid_ring_buffer_multiple"},
{api_error::invalid_ring_buffer_position, "invalid_ring_buffer_position"},
{api_error::invalid_ring_buffer_size, "invalid_ring_buffer_size"},
{api_error::invalid_version, "invalid_version"},
{api_error::item_exists, "item_exists"},

View File

@ -45,15 +45,15 @@ void get_api_authentication_data(std::string &user, std::string &password,
if (success) {
if (user.empty() && password.empty()) {
password = data[JSON_API_AUTH].get<std::string>();
user = data[JSON_API_USER].get<std::string>();
password = data["ApiAuth"].get<std::string>();
user = data["ApiUser"].get<std::string>();
}
port = data[JSON_API_PORT].get<std::uint16_t>();
port = data["ApiPort"].get<std::uint16_t>();
}
}
[[nodiscard]] auto get_provider_type_from_args(std::vector<const char *> args)
-> provider_type {
[[nodiscard]] auto
get_provider_type_from_args(std::vector<const char *> args) -> provider_type {
if (has_option(args, options::s3_option)) {
return provider_type::s3;
}
@ -67,8 +67,8 @@ void get_api_authentication_data(std::string &user, std::string &password,
return provider_type::sia;
}
auto has_option(std::vector<const char *> args, const std::string &option_name)
-> bool {
auto has_option(std::vector<const char *> args,
const std::string &option_name) -> bool {
return std::find_if(args.begin(), args.end(),
[&option_name](const auto &value) -> bool {
return option_name == value;
@ -80,8 +80,8 @@ auto has_option(std::vector<const char *> args, const option &opt) -> bool {
}
auto parse_option(std::vector<const char *> args,
const std::string &option_name, std::uint8_t count)
-> std::vector<std::string> {
const std::string &option_name,
std::uint8_t count) -> std::vector<std::string> {
std::vector<std::string> ret;
auto found{false};
for (std::size_t i = 0U; not found && (i < args.size()); i++) {
@ -119,10 +119,9 @@ auto parse_string_option(std::vector<const char *> args, const option &opt,
return ret;
}
auto parse_drive_options(std::vector<const char *> args,
[[maybe_unused]] provider_type &prov,
[[maybe_unused]] std::string &data_directory)
-> std::vector<std::string> {
auto parse_drive_options(
std::vector<const char *> args, [[maybe_unused]] provider_type &prov,
[[maybe_unused]] std::string &data_directory) -> std::vector<std::string> {
// Strip out options from command line
const auto &option_list = options::option_list;
std::vector<std::string> drive_args;

View File

@ -22,8 +22,6 @@
#include "utils/polling.hpp"
#include "app_config.hpp"
#include "events/event_system.hpp"
#include "events/events.hpp"
#include "utils/tasks.hpp"
namespace repertory {

View File

@ -22,7 +22,6 @@
#include "utils/tasks.hpp"
#include "app_config.hpp"
#include "utils/error_utils.hpp"
namespace repertory {
tasks tasks::instance_;

View File

@ -52,8 +52,6 @@ auto from_api_error(const api_error &err) -> int {
return -EEXIST;
case api_error::file_in_use:
return -EBUSY;
case api_error::invalid_handle:
return -EBADF;
case api_error::invalid_operation:
return -EINVAL;
case api_error::item_not_found:

View File

@ -22,11 +22,7 @@
#include "utils/utils.hpp"
#include "app_config.hpp"
#include "types/startup_exception.hpp"
#include "utils/common.hpp"
#include "utils/error_utils.hpp"
#include "utils/file.hpp"
#include "utils/path.hpp"
#include "utils/string.hpp"
namespace repertory::utils {
@ -48,42 +44,6 @@ void calculate_allocation_size(bool directory, std::uint64_t file_size,
allocation_meta_size = std::to_string(allocation_size);
}
// Opens (creating if necessary) a RocksDB TransactionDB named `name`
// under "<data_dir>/db", registering the requested column families.
//
// cfg      - provides the data and log directories.
// name     - subdirectory name of this database under the db root.
// families - column family descriptors to open/create.
// handles  - receives the opened column family handles (out param).
// clear    - when true, the existing database directory is removed first.
//
// Returns the owning pointer to the opened database.
// Throws startup_exception if the db root cannot be created or the
// database fails to open; a failed `clear` is logged but not fatal.
auto create_rocksdb(
    const app_config &cfg, const std::string &name,
    const std::vector<rocksdb::ColumnFamilyDescriptor> &families,
    std::vector<rocksdb::ColumnFamilyHandle *> &handles,
    bool clear) -> std::unique_ptr<rocksdb::TransactionDB> {
  REPERTORY_USES_FUNCTION_NAME();

  auto db_dir = utils::path::combine(cfg.get_data_directory(), {"db"});
  if (not utils::file::directory{db_dir}.create_directory()) {
    // BUGFIX: format string was missing its placeholder, so the failing
    // path was silently dropped from the exception message.
    throw startup_exception(
        fmt::format("failed to create db directory|{}", db_dir));
  }

  auto path = utils::path::combine(db_dir, {name});
  if (clear && not utils::file::directory{path}.remove_recursively()) {
    // Best-effort: log and continue; RocksDB may still open the leftovers.
    utils::error::raise_error(function_name,
                              "failed to remove " + name + " db|" + path);
  }

  rocksdb::Options options{};
  options.create_if_missing = true;
  options.create_missing_column_families = true;
  options.db_log_dir = cfg.get_log_directory();
  options.keep_log_file_num = 10;

  rocksdb::TransactionDB *ptr{};
  auto status = rocksdb::TransactionDB::Open(
      options, rocksdb::TransactionDBOptions{}, path, families, &handles, &ptr);
  if (not status.ok()) {
    throw startup_exception(fmt::format("failed to open rocksdb|path{}|error{}",
                                        path, status.ToString()));
  }

  return std::unique_ptr<rocksdb::TransactionDB>(ptr);
}
// Builds the volume label for a mounted drive from the provider's
// name, e.g. provider_type::s3 -> "repertory_s3".
auto create_volume_label(const provider_type &prov) -> std::string {
  return std::string{"repertory_"} + app_config::get_provider_name(prov);
}

View File

@ -71,10 +71,11 @@ mount(std::vector<const char *> args, std::string data_directory,
if (generate_config) {
app_config config(prov, data_directory);
if (prov == provider_type::remote) {
auto cfg = config.get_remote_config();
cfg.host_name_or_ip = remote_host;
cfg.api_port = remote_port;
config.set_remote_config(cfg);
config.set_enable_remote_mount(false);
config.set_is_remote_mount(true);
config.set_remote_host_name_or_ip(remote_host);
config.set_remote_port(remote_port);
config.save();
} else if (prov == provider_type::sia &&
config.get_sia_config().bucket.empty()) {
config.set_value_by_name("SiaConfig.Bucket", unique_id);
@ -127,12 +128,12 @@ mount(std::vector<const char *> args, std::string data_directory,
if (prov == provider_type::remote) {
std::uint16_t port{0U};
if (utils::get_next_available_port(config.get_api_port(), port)) {
auto cfg = config.get_remote_config();
cfg.host_name_or_ip = remote_host;
cfg.api_port = remote_port;
config.set_remote_config(cfg);
config.set_remote_host_name_or_ip(remote_host);
config.set_remote_port(remote_port);
config.set_api_port(port);
config.set_is_remote_mount(true);
config.set_enable_remote_mount(false);
config.save();
try {
remote_drive drive(
config,
@ -160,6 +161,8 @@ mount(std::vector<const char *> args, std::string data_directory,
config.set_value_by_name("SiaConfig.Bucket", unique_id);
}
config.set_is_remote_mount(false);
try {
auto provider = create_provider(prov, config);
repertory_drive drive(config, lock, *provider);

View File

@ -1,70 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef REPERTORY_TEST_INCLUDE_FIXTURES_FILE_DB_FIXTURE_HPP
#define REPERTORY_TEST_INCLUDE_FIXTURES_FILE_DB_FIXTURE_HPP
#include "test_common.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_file_db.hpp"
#include "db/impl/sqlite_file_db.hpp"
#include "events/consumers/console_consumer.hpp"
#include "events/event_system.hpp"
namespace repertory {
// Typed-test fixture that runs a file_db implementation (instantiated for
// both the RocksDB and SQLite backends via file_db_types below) against a
// fresh app_config rooted in a unique per-instantiation output directory.
template <typename db_t> class file_db_test : public ::testing::Test {
protected:
  // app_config backing the db under test; created in SetUpTestCase.
  static std::unique_ptr<app_config> config;
  // Keeps a console event consumer alive for the lifetime of the suite.
  static console_consumer console_;
  // The database instance under test, constructed from `config`.
  static std::unique_ptr<db_t> file_db;

protected:
  static void SetUpTestCase() {
    // Monotonic counter so each suite instantiation gets its own directory.
    static std::uint64_t idx{};

    event_system::instance().start();

    auto cfg_directory = utils::path::combine(test::get_test_output_dir(),
                                              {
                                                  "file_db_test",
                                                  std::to_string(++idx),
                                              });
    config = std::make_unique<app_config>(provider_type::s3, cfg_directory);
    file_db = std::make_unique<db_t>(*config);
  }

  static void TearDownTestCase() {
    // Teardown in reverse order of construction: db before its config,
    // then stop event processing last.
    file_db.reset();
    config.reset();
    event_system::instance().stop();
  }
};

// Backend implementations exercised by the typed test suite.
using file_db_types = ::testing::Types<rdb_file_db, sqlite_file_db>;

// Out-of-class definitions of the fixture's static members.
template <typename db_t> std::unique_ptr<app_config> file_db_test<db_t>::config;
template <typename db_t> console_consumer file_db_test<db_t>::console_;
template <typename db_t> std::unique_ptr<db_t> file_db_test<db_t>::file_db;
} // namespace repertory
#endif // REPERTORY_TEST_INCLUDE_FIXTURES_FILE_DB_FIXTURE_HPP

View File

@ -25,8 +25,8 @@
#include "test_common.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_file_mgr_db.hpp"
#include "db/impl/sqlite_file_mgr_db.hpp"
#include "db/rdb_file_mgr_db.hpp"
#include "db/sqlite_file_mgr_db.hpp"
#include "events/consumers/console_consumer.hpp"
#include "events/event_system.hpp"
@ -58,7 +58,7 @@ protected:
}
};
using file_mgr_db_types = ::testing::Types<rdb_file_mgr_db, sqlite_file_mgr_db>;
using file_mgr_db_types = ::testing::Types<sqlite_file_mgr_db, rdb_file_mgr_db>;
template <typename db_t>
std::unique_ptr<app_config> file_mgr_db_test<db_t>::config;

View File

@ -109,11 +109,8 @@ protected:
config->set_enable_drive_events(true);
config->set_event_level(event_level::trace);
config->set_s3_config(src_cfg.get_s3_config());
auto r_cfg = config->get_remote_mount();
r_cfg.enable = true;
r_cfg.api_port = 30000U;
config->set_remote_mount(r_cfg);
config->set_enable_remote_mount(true);
config->set_remote_port(30000U);
}
drive_args = std::vector<std::string>({
@ -155,11 +152,8 @@ protected:
config->set_event_level(event_level::trace);
config->set_host_config(src_cfg.get_host_config());
config->set_sia_config(src_cfg.get_sia_config());
auto r_cfg = config->get_remote_mount();
r_cfg.enable = true;
r_cfg.api_port = 30000U;
config->set_remote_mount(r_cfg);
config->set_enable_remote_mount(true);
config->set_remote_port(30000U);
}
drive_args = std::vector<std::string>({

View File

@ -25,8 +25,8 @@
#include "test_common.hpp"
#include "app_config.hpp"
#include "db/impl/rdb_meta_db.hpp"
#include "db/impl/sqlite_meta_db.hpp"
#include "db/rdb_meta_db.hpp"
#include "db/sqlite_meta_db.hpp"
#include "events/consumers/console_consumer.hpp"
#include "events/event_system.hpp"

View File

@ -99,11 +99,8 @@ protected:
config->set_enable_drive_events(true);
config->set_event_level(event_level::trace);
config->set_s3_config(src_cfg.get_s3_config());
auto r_cfg = config->get_remote_mount();
r_cfg.enable = true;
r_cfg.api_port = 30000U;
config->set_remote_mount(r_cfg);
config->set_enable_remote_mount(true);
config->set_remote_port(30000U);
}
drive_args = std::vector<std::string>({
@ -141,11 +138,8 @@ protected:
config->set_event_level(event_level::trace);
config->set_host_config(src_cfg.get_host_config());
config->set_sia_config(src_cfg.get_sia_config());
auto r_cfg = config->get_remote_mount();
r_cfg.enable = true;
r_cfg.api_port = 30000U;
config->set_remote_mount(r_cfg);
config->set_enable_remote_mount(true);
config->set_remote_port(30000U);
}
drive_args = std::vector<std::string>({

View File

@ -29,13 +29,6 @@
namespace repertory {
class mock_open_file : public virtual i_closeable_open_file {
public:
MOCK_METHOD(void, add, (std::uint64_t handle, open_file_data ofd),
(override));
MOCK_METHOD(bool, can_close, (), (const, override));
MOCK_METHOD(bool, close, (), (override));
MOCK_METHOD(std::string, get_api_path, (), (const, override));
MOCK_METHOD(std::size_t, get_chunk_size, (), (const, override));
@ -54,30 +47,14 @@ public:
MOCK_METHOD(boost::dynamic_bitset<>, get_read_state, (), (const, override));
MOCK_METHOD(bool, get_allocated, (), (const, override));
MOCK_METHOD(std::vector<std::uint64_t>, get_handles, (), (const, override));
MOCK_METHOD((std::map<std::uint64_t, open_file_data> &), get_open_data, (),
(override));
MOCK_METHOD((const std::map<std::uint64_t, open_file_data> &), get_open_data,
(), (const, override));
MOCK_METHOD(bool, get_read_state, (std::size_t chunk), (const, override));
MOCK_METHOD(std::string, get_source_path, (), (const, override));
MOCK_METHOD(bool, has_handle, (std::uint64_t handle), (const, override));
MOCK_METHOD(bool, is_complete, (), (const, override));
MOCK_METHOD(bool, is_directory, (), (const, override));
MOCK_METHOD(bool, is_modified, (), (const, override));
MOCK_METHOD(bool, is_write_supported, (), (const, override));
MOCK_METHOD(api_error, native_operation, (native_operation_callback callback),
(override));
@ -90,10 +67,6 @@ public:
data_buffer &data),
(override));
MOCK_METHOD(void, remove, (std::uint64_t handle), (override));
MOCK_METHOD(void, remove_all, (), (override));
MOCK_METHOD(api_error, resize, (std::uint64_t new_file_size), (override));
MOCK_METHOD(void, set_api_path, (const std::string &api_path), (override));
@ -102,6 +75,31 @@ public:
(std::uint64_t write_offset, const data_buffer &data,
std::size_t &bytes_written),
(override));
MOCK_METHOD(void, add, (std::uint64_t handle, open_file_data ofd),
(override));
MOCK_METHOD(bool, can_close, (), (const, override));
MOCK_METHOD(bool, close, (), (override));
MOCK_METHOD(std::vector<std::uint64_t>, get_handles, (), (const, override));
MOCK_METHOD((std::map<std::uint64_t, open_file_data> &), get_open_data, (),
(override));
MOCK_METHOD((const std::map<std::uint64_t, open_file_data> &), get_open_data,
(), (const, override));
MOCK_METHOD(bool, is_complete, (), (const, override));
MOCK_METHOD(bool, is_modified, (), (const, override));
MOCK_METHOD(bool, is_write_supported, (), (const, override));
MOCK_METHOD(void, remove, (std::uint64_t handle), (override));
MOCK_METHOD(void, remove_all, (), (override));
};
} // namespace repertory

View File

@ -1,68 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "test_common.hpp"
#include "types/repertory.hpp"
namespace repertory {
TEST(atomic, atomic_primitive) {
  // Round-trip a primitive through the atomic wrapper using both the
  // assignment operator and store(), reading back via cast and load().
  constexpr std::uint16_t first_value{5U};
  constexpr std::uint16_t second_value{6U};

  atomic<std::uint16_t> value;

  value = first_value;
  EXPECT_EQ(first_value, static_cast<std::uint16_t>(value));
  EXPECT_EQ(first_value, value.load());

  value.store(second_value);
  EXPECT_EQ(second_value, static_cast<std::uint16_t>(value));
  EXPECT_EQ(second_value, value.load());
}
TEST(atomic, atomic_primitive_equality) {
  // Two independently constructed wrappers holding the same value compare
  // equal to themselves, to each other, and to the raw primitive.
  atomic<std::uint16_t> lhs{5U};
  atomic<std::uint16_t> rhs{5U};

  // reflexive
  EXPECT_EQ(lhs, lhs);
  EXPECT_EQ(rhs, rhs);

  // wrapper-to-wrapper
  EXPECT_EQ(lhs, rhs);

  // wrapper-to-primitive
  EXPECT_EQ(static_cast<std::uint16_t>(lhs), 5U);
  EXPECT_EQ(static_cast<std::uint16_t>(rhs), 5U);
}
TEST(atomic, atomic_primitive_inequality) {
  // Wrappers holding different values compare unequal, and each differs
  // from the other's underlying primitive.
  atomic<std::uint16_t> lhs{5U};
  atomic<std::uint16_t> rhs{6U};

  EXPECT_NE(lhs, rhs);
  EXPECT_NE(static_cast<std::uint16_t>(lhs), 6U);
  EXPECT_NE(static_cast<std::uint16_t>(rhs), 5U);
}
TEST(atomic, atomic_struct) {
  // A non-primitive payload must survive a round-trip through the wrapper.
  const encrypt_config source_cfg{
      .encryption_token = "token",
      .path = "path",
  };

  atomic<encrypt_config> value{source_cfg};

  const auto result = static_cast<encrypt_config>(value);
  EXPECT_STREQ("token", result.encryption_token.c_str());
  EXPECT_STREQ("path", result.path.c_str());
}
} // namespace repertory

View File

@ -1,5 +1,5 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -30,34 +30,176 @@
namespace repertory {
class config_test : public ::testing::Test {
public:
console_consumer cs;
static console_consumer cs;
static std::atomic<std::uint64_t> idx;
std::string s3_directory{
utils::path::combine(test::get_test_output_dir(), {"config_test", "s3"})};
std::string s3_directory;
std::string sia_directory;
std::string sia_directory{utils::path::combine(test::get_test_output_dir(),
{"config_test", "sia"})};
void SetUp() override {
s3_directory = utils::path::combine(test::get_test_output_dir(),
{
"config_test",
"s3",
std::to_string(++idx),
});
sia_directory = utils::path::combine(test::get_test_output_dir(),
{
"config_test",
"sia",
std::to_string(++idx),
});
event_system::instance().start();
ASSERT_TRUE(
utils::file::directory(
utils::path::combine(test::get_test_output_dir(), {"config_test"}))
.remove_recursively());
}
void TearDown() override { event_system::instance().stop(); }
void TearDown() override {
ASSERT_TRUE(
utils::file::directory(
utils::path::combine(test::get_test_output_dir(), {"config_test"}))
.remove_recursively());
event_system::instance().stop();
}
};
std::atomic<std::uint64_t> config_test::idx{0U};
// Expected serialized JSON for a freshly created Sia provider configuration.
// Keys appear in the same (alphabetical) order as the dump(2) output compared
// in sia_default_settings; the EnableMountManager key only exists in Windows
// builds.  Keep in sync with the app_config defaults.
const auto DEFAULT_SIA_CONFIG = "{\n"
    " \"ApiAuth\": \"\",\n"
    " \"ApiPort\": 10000,\n"
    " \"ApiUser\": \"repertory\",\n"
    " \"ChunkDownloaderTimeoutSeconds\": 30,\n"
    " \"EnableChunkDownloaderTimeout\": true,\n"
    " \"EnableCommDurationEvents\": false,\n"
    " \"EnableDriveEvents\": false,\n"
    " \"EnableMaxCacheSize\": false,\n"
#if defined(_WIN32)
    " \"EnableMountManager\": false,\n"
#endif
    " \"EventLevel\": \"info\",\n"
    " \"EvictionDelayMinutes\": 10,\n"
    " \"EvictionUsesAccessedTime\": false,\n"
    " \"HighFreqIntervalSeconds\": 30,\n"
    " \"HostConfig\": {\n"
    " \"AgentString\": \"Sia-Agent\",\n"
    " \"ApiPassword\": \"\",\n"
    " \"ApiPort\": 9980,\n"
    " \"HostNameOrIp\": \"localhost\",\n"
    " \"TimeoutMs\": 60000\n"
    " },\n"
    " \"LowFreqIntervalSeconds\": 3600,\n"
    " \"MaxCacheSizeBytes\": 21474836480,\n"
    " \"MaxUploadCount\": 5,\n"
    " \"MedFreqIntervalSeconds\": 120,\n"
    " \"OnlineCheckRetrySeconds\": 60,\n"
    " \"OrphanedFileRetentionDays\": 15,\n"
    " \"PreferredDownloadType\": \"fallback\",\n"
    " \"ReadAheadCount\": 4,\n"
    " \"RemoteMount\": {\n"
    " \"EnableRemoteMount\": false,\n"
    " \"IsRemoteMount\": false,\n"
    " \"RemoteClientPoolSize\": 10,\n"
    " \"RemoteHostNameOrIp\": \"\",\n"
    " \"RemoteMaxConnections\": 20,\n"
    " \"RemotePort\": 20000,\n"
    " \"RemoteReceiveTimeoutSeconds\": 120,\n"
    " \"RemoteSendTimeoutSeconds\": 30,\n"
    " \"RemoteToken\": \"\"\n"
    " },\n"
    " \"RetryReadCount\": 6,\n"
    " \"RingBufferFileSize\": 512,\n"
    " \"SiaConfig\": {\n"
    " \"Bucket\": \"\"\n"
    " },\n"
    " \"TaskWaitMillis\": 100,\n"
    // Version tracks the build's config schema revision.
    " \"Version\": " +
    std::to_string(REPERTORY_CONFIG_VERSION) +
    "\n"
    "}";
// Expected serialized JSON for a freshly created S3 provider configuration.
// Differs from the Sia default by provider section (S3Config vs. HostConfig/
// SiaConfig) and by the ApiPort/RemotePort offsets.  Keep in sync with the
// app_config defaults.
const auto DEFAULT_S3_CONFIG = "{\n"
    " \"ApiAuth\": \"\",\n"
    " \"ApiPort\": 10100,\n"
    " \"ApiUser\": \"repertory\",\n"
    " \"ChunkDownloaderTimeoutSeconds\": 30,\n"
    " \"EnableChunkDownloaderTimeout\": true,\n"
    " \"EnableCommDurationEvents\": false,\n"
    " \"EnableDriveEvents\": false,\n"
    " \"EnableMaxCacheSize\": false,\n"
#if defined(_WIN32)
    " \"EnableMountManager\": false,\n"
#endif
    " \"EventLevel\": \"info\",\n"
    " \"EvictionDelayMinutes\": 10,\n"
    " \"EvictionUsesAccessedTime\": false,\n"
    " \"HighFreqIntervalSeconds\": 30,\n"
    " \"LowFreqIntervalSeconds\": 3600,\n"
    " \"MaxCacheSizeBytes\": 21474836480,\n"
    " \"MaxUploadCount\": 5,\n"
    " \"MedFreqIntervalSeconds\": 120,\n"
    " \"OnlineCheckRetrySeconds\": 60,\n"
    " \"OrphanedFileRetentionDays\": 15,\n"
    " \"PreferredDownloadType\": \"fallback\",\n"
    " \"ReadAheadCount\": 4,\n"
    " \"RemoteMount\": {\n"
    " \"EnableRemoteMount\": false,\n"
    " \"IsRemoteMount\": false,\n"
    " \"RemoteClientPoolSize\": 10,\n"
    " \"RemoteHostNameOrIp\": \"\",\n"
    " \"RemoteMaxConnections\": 20,\n"
    " \"RemotePort\": 20100,\n"
    " \"RemoteReceiveTimeoutSeconds\": 120,\n"
    " \"RemoteSendTimeoutSeconds\": 30,\n"
    " \"RemoteToken\": \"\"\n"
    " },\n"
    " \"RetryReadCount\": 6,\n"
    " \"RingBufferFileSize\": 512,\n"
    " \"S3Config\": {\n"
    " \"AccessKey\": \"\",\n"
    " \"Bucket\": \"\",\n"
    " \"EncryptionToken\": \"\",\n"
    " \"Region\": \"any\",\n"
    " \"SecretKey\": \"\",\n"
    " \"TimeoutMs\": 60000,\n"
    " \"URL\": \"\",\n"
    " \"UsePathStyle\": false,\n"
    " \"UseRegionInURL\": false\n"
    " },\n"
    " \"TaskWaitMillis\": 100,\n"
    // Version tracks the build's config schema revision.
    " \"Version\": " +
    std::to_string(REPERTORY_CONFIG_VERSION) +
    "\n"
    "}";
// Creating a Sia config twice against the same directory must yield the
// default config.json both times (the second pass exercises reload of the
// persisted file) and must create the cache/ and logs/ subdirectories.
TEST_F(config_test, sia_default_settings) {
  const auto config_file = utils::path::combine(sia_directory, {"config.json"});
  for (int i = 0; i < 2; i++) {
    app_config config(provider_type::sia, sia_directory);
    // Clear token/auth/password so the serialized output matches the static
    // DEFAULT_SIA_CONFIG string (which has them empty).
    config.set_remote_token("");
    config.set_api_auth("");
    // set_value_by_name echoes the stored value; empty password -> empty echo.
    EXPECT_TRUE(config.set_value_by_name("HostConfig.ApiPassword", "").empty());
    json data;
    EXPECT_TRUE(utils::file::read_json_file(config_file, data));
    // Exact-string comparison against the expected default serialization.
    EXPECT_STREQ(DEFAULT_SIA_CONFIG.c_str(), data.dump(2).c_str());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(sia_directory, {"cache"}))
            .exists());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(sia_directory, {"logs"}))
            .exists());
  }
}
// S3 counterpart of sia_default_settings: two construction passes must both
// serialize to DEFAULT_S3_CONFIG and create the cache/ and logs/ directories.
TEST_F(config_test, s3_default_settings) {
  const auto config_file = utils::path::combine(s3_directory, {"config.json"});
  for (int i = 0; i < 2; i++) {
    app_config config(provider_type::s3, s3_directory);
    // Clear token/auth so the serialized output matches the static default.
    config.set_remote_token("");
    config.set_api_auth("");
    json data;
    EXPECT_TRUE(utils::file::read_json_file(config_file, data));
    EXPECT_STREQ(DEFAULT_S3_CONFIG.c_str(), data.dump(2).c_str());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(s3_directory, {"cache"}))
            .exists());
    EXPECT_TRUE(
        utils::file::directory(utils::path::combine(s3_directory, {"logs"}))
            .exists());
  }
}
TEST_F(config_test, api_path) {
std::string original_value;
@ -110,31 +252,45 @@ TEST_F(config_test, api_user) {
}
}
TEST_F(config_test, download_timeout_secs) {
TEST_F(config_test, chunk_downloader_timeout_secs) {
std::uint8_t original_value{};
{
app_config config(provider_type::sia, sia_directory);
original_value = config.get_download_timeout_secs();
config.set_download_timeout_secs(original_value + 5);
EXPECT_EQ(original_value + 5, config.get_download_timeout_secs());
original_value = config.get_chunk_downloader_timeout_secs();
config.set_chunk_downloader_timeout_secs(original_value + 5);
EXPECT_EQ(original_value + 5, config.get_chunk_downloader_timeout_secs());
}
{
app_config config(provider_type::sia, sia_directory);
EXPECT_EQ(original_value + 5, config.get_download_timeout_secs());
EXPECT_EQ(original_value + 5, config.get_chunk_downloader_timeout_secs());
}
}
TEST_F(config_test, enable_download_timeout) {
TEST_F(config_test, enable_chunk_download_timeout) {
bool original_value{};
{
app_config config(provider_type::sia, sia_directory);
original_value = config.get_enable_download_timeout();
config.set_enable_download_timeout(not original_value);
EXPECT_EQ(not original_value, config.get_enable_download_timeout());
original_value = config.get_enable_chunk_download_timeout();
config.set_enable_chunk_downloader_timeout(not original_value);
EXPECT_EQ(not original_value, config.get_enable_chunk_download_timeout());
}
{
app_config config(provider_type::sia, sia_directory);
EXPECT_EQ(not original_value, config.get_enable_download_timeout());
EXPECT_EQ(not original_value, config.get_enable_chunk_download_timeout());
}
}
TEST_F(config_test, enable_comm_duration_events) {
  // Flip the flag, then construct a fresh config against the same directory
  // to verify the new value was persisted and reloaded.
  bool flipped{};
  {
    app_config config(provider_type::sia, sia_directory);
    flipped = not config.get_enable_comm_duration_events();
    config.set_enable_comm_duration_events(flipped);
    EXPECT_EQ(flipped, config.get_enable_comm_duration_events());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(flipped, config.get_enable_comm_duration_events());
  }
}
@ -152,6 +308,19 @@ TEST_F(config_test, enable_drive_events) {
}
}
TEST_F(config_test, enable_max_cache_size) {
  // Flip the flag, then reload the config to prove the change persisted.
  bool flipped{};
  {
    app_config config(provider_type::sia, sia_directory);
    flipped = not config.get_enable_max_cache_size();
    config.set_enable_max_cache_size(flipped);
    EXPECT_EQ(flipped, config.get_enable_max_cache_size());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(flipped, config.get_enable_max_cache_size());
  }
}
#if defined(_WIN32)
TEST_F(config_test, enable_mount_manager) {
bool original_value;
@ -333,6 +502,20 @@ TEST_F(config_test, orphaned_file_retention_days_maximum_value) {
}
}
TEST_F(config_test, read_ahead_count) {
  // Bump the count by an arbitrary delta and confirm it survives a reload.
  std::uint8_t initial{};
  {
    app_config config(provider_type::sia, sia_directory);
    initial = config.get_read_ahead_count();
    config.set_read_ahead_count(initial + 5);
    EXPECT_EQ(initial + 5, config.get_read_ahead_count());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(initial + 5, config.get_read_ahead_count());
  }
}
TEST_F(config_test, get_cache_directory) {
{
app_config config(provider_type::sia, sia_directory);
@ -469,170 +652,167 @@ TEST_F(config_test, get_version) {
}
}
// TEST_F(config_test, enable_remote_mount) {
// bool original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_enable_remote_mount();
// config.set_enable_remote_mount(not original_value);
// EXPECT_EQ(not original_value, config.get_enable_remote_mount());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(not original_value, config.get_enable_remote_mount());
// }
// }
TEST_F(config_test, enable_remote_mount) {
  // Flip the remote-mount-server flag, then reload the config to prove the
  // change persisted.
  bool flipped{};
  {
    app_config config(provider_type::sia, sia_directory);
    flipped = not config.get_enable_remote_mount();
    config.set_enable_remote_mount(flipped);
    EXPECT_EQ(flipped, config.get_enable_remote_mount());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(flipped, config.get_enable_remote_mount());
  }
}
// TEST_F(config_test, is_remote_mount) {
// bool original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_is_remote_mount();
// config.set_is_remote_mount(not original_value);
// EXPECT_EQ(not original_value, config.get_is_remote_mount());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(not original_value, config.get_is_remote_mount());
// }
// }
// Round-trip: toggling the is-remote-mount (client-side) flag must persist
// across a config reload from the same directory.
TEST_F(config_test, is_remote_mount) {
  bool original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_is_remote_mount();
    config.set_is_remote_mount(not original_value);
    EXPECT_EQ(not original_value, config.get_is_remote_mount());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(not original_value, config.get_is_remote_mount());
  }
}
// TEST_F(config_test, enable_remote_mount_fails_if_remote_mount_is_true) {
// app_config config(provider_type::sia, sia_directory);
// config.set_is_remote_mount(true);
// config.set_enable_remote_mount(true);
// EXPECT_FALSE(config.get_enable_remote_mount());
// EXPECT_TRUE(config.get_is_remote_mount());
// }
// The two remote-mount roles are mutually exclusive: enabling the remote
// mount server must be rejected while the config is already marked as a
// remote (client-side) mount.
TEST_F(config_test, enable_remote_mount_fails_if_remote_mount_is_true) {
  app_config config(provider_type::sia, sia_directory);
  config.set_is_remote_mount(true);
  config.set_enable_remote_mount(true);
  EXPECT_FALSE(config.get_enable_remote_mount());
  EXPECT_TRUE(config.get_is_remote_mount());
}
// TEST_F(config_test, set_is_remote_mount_fails_if_enable_remote_mount_is_true)
// {
// app_config config(provider_type::sia, sia_directory);
// config.set_enable_remote_mount(true);
// config.set_is_remote_mount(true);
// EXPECT_FALSE(config.get_is_remote_mount());
// EXPECT_TRUE(config.get_enable_remote_mount());
// }
// Inverse of the previous test: marking the config as a remote (client-side)
// mount must be rejected while the remote mount server is enabled.
TEST_F(config_test, set_is_remote_mount_fails_if_enable_remote_mount_is_true) {
  app_config config(provider_type::sia, sia_directory);
  config.set_enable_remote_mount(true);
  config.set_is_remote_mount(true);
  EXPECT_FALSE(config.get_is_remote_mount());
  EXPECT_TRUE(config.get_enable_remote_mount());
}
// TEST_F(config_test, remote_host_name_or_ip) {
// {
// app_config config(provider_type::sia, sia_directory);
// config.set_remote_host_name_or_ip("my.host.name");
// EXPECT_STREQ("my.host.name",
// config.get_remote_host_name_or_ip().c_str());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_STREQ("my.host.name",
// config.get_remote_host_name_or_ip().c_str());
// }
// }
// Round-trip: the remote host name must persist across a config reload.
TEST_F(config_test, remote_host_name_or_ip) {
  {
    app_config config(provider_type::sia, sia_directory);
    config.set_remote_host_name_or_ip("my.host.name");
    EXPECT_STREQ("my.host.name", config.get_remote_host_name_or_ip().c_str());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_STREQ("my.host.name", config.get_remote_host_name_or_ip().c_str());
  }
}
// TEST_F(config_test, remote_api_port) {
// std::uint16_t original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_remote_api_port();
// config.set_remote_api_port(original_value + 5);
// EXPECT_EQ(original_value + 5, config.get_remote_api_port());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(original_value + 5, config.get_remote_api_port());
// }
// }
TEST_F(config_test, remote_port) {
  // Bump the port by an arbitrary delta and confirm it survives a reload.
  std::uint16_t initial{};
  {
    app_config config(provider_type::sia, sia_directory);
    initial = config.get_remote_port();
    config.set_remote_port(initial + 5);
    EXPECT_EQ(initial + 5, config.get_remote_port());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(initial + 5, config.get_remote_port());
  }
}
// TEST_F(config_test, remote_receive_timeout_secs) {
// std::uint16_t original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_remote_receive_timeout_secs();
// config.set_remote_receive_timeout_secs(original_value + 5);
// EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
// }
// }
// Round-trip: the remote receive timeout must persist across a config reload.
TEST_F(config_test, remote_receive_timeout_secs) {
  std::uint16_t original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_remote_receive_timeout_secs();
    config.set_remote_receive_timeout_secs(original_value + 5);
    EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(original_value + 5, config.get_remote_receive_timeout_secs());
  }
}
// TEST_F(config_test, remote_send_timeout_secs) {
// std::uint16_t original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_remote_send_timeout_secs();
// config.set_remote_send_timeout_secs(original_value + 5);
// EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
// }
// }
// Round-trip: the remote send timeout must persist across a config reload.
TEST_F(config_test, remote_send_timeout_secs) {
  std::uint16_t original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_remote_send_timeout_secs();
    config.set_remote_send_timeout_secs(original_value + 5);
    EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(original_value + 5, config.get_remote_send_timeout_secs());
  }
}
// TEST_F(config_test, remote_encryption_token) {
// {
// app_config config(provider_type::sia, sia_directory);
// config.set_remote_encryption_token("myToken");
// EXPECT_STREQ("myToken", config.get_remote_encryption_token().c_str());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_STREQ("myToken", config.get_remote_encryption_token().c_str());
// }
// }
//
// TEST_F(config_test, remote_client_pool_size) {
// std::uint8_t original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_remote_client_pool_size();
// config.set_remote_client_pool_size(original_value + 5);
// EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
// }
// }
//
// TEST_F(config_test, remote_client_pool_size_minimum_value) {
// {
// app_config config(provider_type::sia, sia_directory);
// config.set_remote_client_pool_size(0);
// EXPECT_EQ(5, config.get_remote_client_pool_size());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(5, config.get_remote_client_pool_size());
// }
// }
// Round-trip: the remote token must persist across a config reload.
TEST_F(config_test, remote_token) {
  {
    app_config config(provider_type::sia, sia_directory);
    config.set_remote_token("myToken");
    EXPECT_STREQ("myToken", config.get_remote_token().c_str());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_STREQ("myToken", config.get_remote_token().c_str());
  }
}
// TEST_F(config_test, remote_max_connections) {
// std::uint8_t original_value{};
// {
// app_config config(provider_type::sia, sia_directory);
// original_value = config.get_remote_max_connections();
// config.set_remote_max_connections(original_value + 5);
// EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
// }
// }
// Round-trip: the remote client pool size must persist across a reload.
TEST_F(config_test, remote_client_pool_size) {
  std::uint8_t original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_remote_client_pool_size();
    config.set_remote_client_pool_size(original_value + 5);
    EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(original_value + 5, config.get_remote_client_pool_size());
  }
}
// TEST_F(config_test, remote_max_connections_minimum_value) {
// {
// app_config config(provider_type::sia, sia_directory);
// config.set_remote_max_connections(0);
// EXPECT_EQ(1, config.get_remote_max_connections());
// }
// {
// app_config config(provider_type::sia, sia_directory);
// EXPECT_EQ(1, config.get_remote_max_connections());
// }
// }
// Setting the pool size to 0 must be clamped up to the minimum of 5, both
// immediately and after the config is reloaded from disk.
TEST_F(config_test, remote_client_pool_size_minimum_value) {
  {
    app_config config(provider_type::sia, sia_directory);
    config.set_remote_client_pool_size(0);
    EXPECT_EQ(5, config.get_remote_client_pool_size());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(5, config.get_remote_client_pool_size());
  }
}
// Round-trip: the remote max-connections value must persist across a reload.
TEST_F(config_test, remote_max_connections) {
  std::uint8_t original_value{};
  {
    app_config config(provider_type::sia, sia_directory);
    original_value = config.get_remote_max_connections();
    config.set_remote_max_connections(original_value + 5);
    EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
  }
  {
    // Fresh instance re-reads the persisted value.
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(original_value + 5, config.get_remote_max_connections());
  }
}
// Setting max connections to 0 must be clamped up to the minimum of 1, both
// immediately and after the config is reloaded from disk.
TEST_F(config_test, remote_max_connections_minimum_value) {
  {
    app_config config(provider_type::sia, sia_directory);
    config.set_remote_max_connections(0);
    EXPECT_EQ(1, config.get_remote_max_connections());
  }
  {
    app_config config(provider_type::sia, sia_directory);
    EXPECT_EQ(1, config.get_remote_max_connections());
  }
}
TEST_F(config_test, retry_read_count) {
std::uint16_t original_value{};
@ -677,18 +857,4 @@ TEST_F(config_test, task_wait_ms_minimum_value) {
EXPECT_EQ(50U, config.get_task_wait_ms());
}
}
TEST_F(config_test, can_set_database_type) {
  // Switch between the supported database backends (including switching back)
  // and confirm each change is reflected immediately by the getter.
  app_config config(provider_type::sia, sia_directory);
  for (auto &&db_type : {database_type::rocksdb, database_type::sqlite,
                         database_type::rocksdb}) {
    config.set_database_type(db_type);
    EXPECT_EQ(db_type, config.get_database_type());
  }
}
} // namespace repertory

View File

@ -1,292 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "test_common.hpp"
#include "file_manager/direct_open_file.hpp"
#include "mocks/mock_provider.hpp"
namespace {
constexpr const std::size_t test_chunk_size{1024U};
} // namespace
namespace repertory {
// Fixture for direct_open_file tests: provides a mocked provider to serve
// file data and runs the event system around each test.
class direct_open_file_test : public ::testing::Test {
public:
  // Consumes emitted events for the duration of the test run.
  console_consumer con_consumer;
  // Provider mock wired into each direct_open_file instance under test.
  mock_provider provider;

protected:
  void SetUp() override { event_system::instance().start(); }
  void TearDown() override { event_system::instance().stop(); }
};
// Streams a 32-chunk random file through direct_open_file front-to-back into
// a destination file, then compares SHA-256 hashes of source and destination
// to prove the bytes round-tripped intact.
TEST_F(direct_open_file_test, read_full_file) {
  auto &source_file = test::create_random_file(test_chunk_size * 32U);
  auto dest_path = test::generate_test_file_name("direct_open_file");
  EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
  filesystem_item fsi;
  fsi.api_path = "/test.txt";
  fsi.directory = false;
  fsi.size = test_chunk_size * 32U;
  // Serve provider reads straight from the random source file; the mutex
  // serializes access to source_file (reads may arrive concurrently -- e.g.
  // from read-ahead; TODO confirm against direct_open_file internals).
  std::mutex read_mtx;
  EXPECT_CALL(provider, read_file_bytes)
      .WillRepeatedly([&read_mtx, &source_file](
                          const std::string & /* api_path */, std::size_t size,
                          std::uint64_t offset, data_buffer &data,
                          stop_type &stop_requested) -> api_error {
        mutex_lock lock(read_mtx);
        EXPECT_FALSE(stop_requested);
        std::size_t bytes_read{};
        data.resize(size);
        auto ret = source_file.read(data, offset, &bytes_read)
                       ? api_error::success
                       : api_error::os_error;
        // Every request is expected to be satisfied in full.
        EXPECT_EQ(bytes_read, data.size());
        return ret;
      });
  {
    direct_open_file file(test_chunk_size, 30U, fsi, provider);
    auto dest_file = utils::file::file::open_or_create_file(dest_path);
    EXPECT_TRUE(dest_file);
    // Copy chunk 0, 1, 2, ... until all bytes have been read.
    auto to_read{fsi.size};
    std::size_t chunk{0U};
    while (to_read > 0U) {
      data_buffer data{};
      EXPECT_EQ(api_error::success,
                file.read(test_chunk_size, chunk * test_chunk_size, data));
      std::size_t bytes_written{};
      EXPECT_TRUE(
          dest_file->write(data, chunk * test_chunk_size, &bytes_written));
      ++chunk;
      to_read -= data.size();
    }
    dest_file->close();
    source_file.close();
    // Hash comparison proves byte-for-byte equality of the two files.
    auto hash1 = utils::file::file(source_file.get_path()).sha256();
    auto hash2 = utils::file::file(dest_path).sha256();
    EXPECT_TRUE(hash1.has_value());
    EXPECT_TRUE(hash2.has_value());
    if (hash1.has_value() && hash2.has_value()) {
      EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
    }
  }
}
// Same as read_full_file but reads chunks back-to-front, exercising the
// non-sequential access path; verified via SHA-256 comparison.
TEST_F(direct_open_file_test, read_full_file_in_reverse) {
  auto &source_file = test::create_random_file(test_chunk_size * 32U);
  auto dest_path = test::generate_test_file_name("direct_open_file");
  EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
  filesystem_item fsi;
  fsi.api_path = "/test.txt";
  fsi.directory = false;
  fsi.size = test_chunk_size * 32U;
  // Serve provider reads straight from the random source file (see
  // read_full_file for the locking rationale).
  std::mutex read_mtx;
  EXPECT_CALL(provider, read_file_bytes)
      .WillRepeatedly([&read_mtx, &source_file](
                          const std::string & /* api_path */, std::size_t size,
                          std::uint64_t offset, data_buffer &data,
                          stop_type &stop_requested) -> api_error {
        mutex_lock lock(read_mtx);
        EXPECT_FALSE(stop_requested);
        std::size_t bytes_read{};
        data.resize(size);
        auto ret = source_file.read(data, offset, &bytes_read)
                       ? api_error::success
                       : api_error::os_error;
        EXPECT_EQ(bytes_read, data.size());
        return ret;
      });
  {
    direct_open_file file(test_chunk_size, 30U, fsi, provider);
    auto dest_file = utils::file::file::open_or_create_file(dest_path);
    EXPECT_TRUE(dest_file);
    // Copy chunks last-to-first.  NOTE(review): on the final iteration
    // (chunk == 0) the decrement wraps the unsigned index; harmless because
    // to_read reaches zero and the loop exits, assuming full-chunk reads.
    auto to_read{fsi.size};
    std::size_t chunk{file.get_total_chunks() - 1U};
    while (to_read > 0U) {
      data_buffer data{};
      EXPECT_EQ(api_error::success,
                file.read(test_chunk_size, chunk * test_chunk_size, data));
      std::size_t bytes_written{};
      EXPECT_TRUE(
          dest_file->write(data, chunk * test_chunk_size, &bytes_written));
      --chunk;
      to_read -= data.size();
    }
    dest_file->close();
    source_file.close();
    // Hash comparison proves byte-for-byte equality of the two files.
    auto hash1 = utils::file::file(source_file.get_path()).sha256();
    auto hash2 = utils::file::file(dest_path).sha256();
    EXPECT_TRUE(hash1.has_value());
    EXPECT_TRUE(hash2.has_value());
    if (hash1.has_value() && hash2.has_value()) {
      EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
    }
  }
}
// Reads the whole file in tiny 3-byte requests that straddle chunk
// boundaries, verifying sub-chunk reads assemble correctly (SHA-256 check).
TEST_F(direct_open_file_test, read_full_file_in_partial_chunks) {
  auto &source_file = test::create_random_file(test_chunk_size * 32U);
  auto dest_path = test::generate_test_file_name("test");
  EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
  filesystem_item fsi;
  fsi.directory = false;
  fsi.api_path = "/test.txt";
  fsi.size = test_chunk_size * 32U;
  // Serve provider reads straight from the random source file (see
  // read_full_file for the locking rationale).
  std::mutex read_mtx;
  EXPECT_CALL(provider, read_file_bytes)
      .WillRepeatedly([&read_mtx, &source_file](
                          const std::string & /* api_path */, std::size_t size,
                          std::uint64_t offset, data_buffer &data,
                          stop_type &stop_requested) -> api_error {
        mutex_lock lock(read_mtx);
        EXPECT_FALSE(stop_requested);
        std::size_t bytes_read{};
        data.resize(size);
        auto ret = source_file.read(data, offset, &bytes_read)
                       ? api_error::success
                       : api_error::os_error;
        EXPECT_EQ(bytes_read, data.size());
        return ret;
      });
  {
    direct_open_file file(test_chunk_size, 30U, fsi, provider);
    auto dest_file = utils::file::file::open_or_create_file(dest_path);
    EXPECT_TRUE(dest_file);
    // 3-byte reads deliberately do not divide the chunk size evenly.
    auto total_read{std::uint64_t(0U)};
    while (total_read < fsi.size) {
      data_buffer data{};
      EXPECT_EQ(api_error::success, file.read(3U, total_read, data));
      std::size_t bytes_written{};
      EXPECT_TRUE(dest_file->write(data.data(), data.size(), total_read,
                                   &bytes_written));
      total_read += data.size();
    }
    dest_file->close();
    source_file.close();
    // Hash comparison proves byte-for-byte equality of the two files.
    auto hash1 = utils::file::file(source_file.get_path()).sha256();
    auto hash2 = utils::file::file(dest_path).sha256();
    EXPECT_TRUE(hash1.has_value());
    EXPECT_TRUE(hash2.has_value());
    if (hash1.has_value() && hash2.has_value()) {
      EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
    }
  }
}
// Reads the whole file in 3-byte requests from the end toward the start,
// combining sub-chunk and non-sequential access; verified via SHA-256.
TEST_F(direct_open_file_test, read_full_file_in_partial_chunks_in_reverse) {
  auto &source_file = test::create_random_file(test_chunk_size * 32U);
  auto dest_path = test::generate_test_file_name("direct_open_file");
  EXPECT_CALL(provider, is_read_only()).WillRepeatedly(Return(false));
  filesystem_item fsi;
  fsi.directory = false;
  fsi.api_path = "/test.txt";
  fsi.size = test_chunk_size * 32U;
  // Serve provider reads straight from the random source file (see
  // read_full_file for the locking rationale).
  std::mutex read_mtx;
  EXPECT_CALL(provider, read_file_bytes)
      .WillRepeatedly([&read_mtx, &source_file](
                          const std::string & /* api_path */, std::size_t size,
                          std::uint64_t offset, data_buffer &data,
                          stop_type &stop_requested) -> api_error {
        mutex_lock lock(read_mtx);
        EXPECT_FALSE(stop_requested);
        std::size_t bytes_read{};
        data.resize(size);
        auto ret = source_file.read(data, offset, &bytes_read)
                       ? api_error::success
                       : api_error::os_error;
        EXPECT_EQ(bytes_read, data.size());
        return ret;
      });
  {
    direct_open_file file(test_chunk_size, 30U, fsi, provider);
    auto dest_file = utils::file::file::open_or_create_file(dest_path);
    EXPECT_TRUE(dest_file);
    std::uint64_t total_read{0U};
    auto read_size{3U};
    while (total_read < fsi.size) {
      // Walk backward; the final (short) request is clamped to offset 0 and
      // whatever remainder is left.
      auto offset = fsi.size - total_read - read_size;
      auto remain = fsi.size - total_read;
      data_buffer data{};
      EXPECT_EQ(api_error::success,
                file.read(static_cast<std::size_t>(
                              std::min(remain, std::uint64_t(read_size))),
                          (remain >= read_size) ? offset : 0U, data));
      std::size_t bytes_written{};
      EXPECT_TRUE(dest_file->write(data, (remain >= read_size) ? offset : 0U,
                                   &bytes_written));
      total_read += data.size();
    }
    dest_file->close();
    source_file.close();
    // Hash comparison proves byte-for-byte equality of the two files.
    auto hash1 = utils::file::file(source_file.get_path()).sha256();
    auto hash2 = utils::file::file(dest_path).sha256();
    EXPECT_TRUE(hash1.has_value());
    EXPECT_TRUE(hash2.has_value());
    if (hash1.has_value() && hash2.has_value()) {
      EXPECT_STREQ(hash1.value().c_str(), hash2.value().c_str());
    }
  }
}
} // namespace repertory

View File

@ -1,332 +0,0 @@
/*
Copyright <2018-2024> <scott.e.graves@protonmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "fixtures/file_db_fixture.hpp"
namespace repertory {
// Instantiate the typed suite once per file_db backend in file_db_types.
// TYPED_TEST_SUITE replaces the deprecated TYPED_TEST_CASE macro, which has
// been removed from current GoogleTest releases.
TYPED_TEST_SUITE(file_db_test, file_db_types);
// A directory entry becomes visible in the item list once added and the
// database is empty again after it is removed.
TYPED_TEST(file_db_test, can_add_and_remove_directory) {
  auto &db = *this->file_db;
  db.clear();

  EXPECT_EQ(api_error::success, db.add_directory("/", "c:\\test"));
  EXPECT_EQ(1U, db.get_item_list().size());

  EXPECT_EQ(api_error::success, db.remove_item("/"));
  EXPECT_EQ(0U, db.get_item_list().size());
}
// A file entry can be inserted and later removed, with get_item_list()
// confirming the count at each step.
TYPED_TEST(file_db_test, can_add_and_remove_file) {
  auto &db = *this->file_db;
  db.clear();

  EXPECT_EQ(api_error::success,
            db.add_or_update_file({"/file", 0U, {}, "c:\\test\\file.txt"}));
  EXPECT_EQ(1U, db.get_item_list().size());

  EXPECT_EQ(api_error::success, db.remove_item("/file"));
  EXPECT_EQ(0U, db.get_item_list().size());
}
// Reverse lookup: a directory's source path resolves back to its API path.
TYPED_TEST(file_db_test, can_get_api_path_for_directory) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success, db.add_directory("/", "c:\\test"));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_api_path("c:\\test", result));
  EXPECT_STREQ("/", result.c_str());
}
// Reverse lookup: a file's source path resolves back to its API path.
TYPED_TEST(file_db_test, can_get_api_path_for_file) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success,
            db.add_or_update_file({"/file", 0U, {}, "c:\\test\\file.txt"}));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_api_path("c:\\test\\file.txt", result));
  EXPECT_STREQ("/file", result.c_str());
}
// Looking up an unknown source path fails with item_not_found and leaves the
// output untouched.
TYPED_TEST(file_db_test,
           item_not_found_is_returned_for_non_existing_source_path) {
  auto &db = *this->file_db;
  db.clear();

  std::string result;
  EXPECT_EQ(api_error::item_not_found, db.get_api_path("c:\\test", result));
  EXPECT_TRUE(result.empty());
}
// The directory-specific lookup resolves a directory source path.
TYPED_TEST(file_db_test, can_get_directory_api_path) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success, db.add_directory("/", "c:\\test"));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_directory_api_path("c:\\test", result));
  EXPECT_STREQ("/", result.c_str());
}
// The directory-specific lookup reports directory_not_found for an unknown
// source path and leaves the output untouched.
TYPED_TEST(
    file_db_test,
    directory_not_found_is_returned_for_non_existing_directory_source_path) {
  auto &db = *this->file_db;
  db.clear();

  std::string result;
  EXPECT_EQ(api_error::directory_not_found,
            db.get_directory_api_path("c:\\test", result));
  EXPECT_TRUE(result.empty());
}
// The file-specific lookup resolves a file source path.
TYPED_TEST(file_db_test, can_get_file_api_path) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success,
            db.add_or_update_file({"/file", 0U, {}, "c:\\test\\file.txt"}));

  std::string result;
  EXPECT_EQ(api_error::success,
            db.get_file_api_path("c:\\test\\file.txt", result));
  EXPECT_STREQ("/file", result.c_str());
}
// The file-specific lookup reports item_not_found for an unknown source path.
TYPED_TEST(file_db_test,
           item_not_found_is_returned_for_non_existing_file_source_path) {
  auto &db = *this->file_db;
  db.clear();

  std::string result;
  EXPECT_EQ(api_error::item_not_found,
            db.get_file_api_path("c:\\test", result));
  EXPECT_TRUE(result.empty());
}
// Forward lookup: a directory's API path resolves to its source path.
TYPED_TEST(file_db_test, can_get_directory_source_path) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success, db.add_directory("/", "c:\\test"));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_directory_source_path("/", result));
  EXPECT_STREQ("c:\\test", result.c_str());
}
// Forward lookup of an unknown directory API path fails with
// directory_not_found and leaves the output untouched.
TYPED_TEST(
    file_db_test,
    directory_not_found_is_returned_for_non_existing_directory_api_path) {
  auto &db = *this->file_db;
  db.clear();

  std::string result;
  EXPECT_EQ(api_error::directory_not_found,
            db.get_directory_source_path("/", result));
  EXPECT_TRUE(result.empty());
}
// Forward lookup: a file's API path resolves to its source path.
TYPED_TEST(file_db_test, can_get_file_source_path) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success,
            db.add_or_update_file({"/file", 0U, {}, "c:\\test\\file.txt"}));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_file_source_path("/file", result));
  EXPECT_STREQ("c:\\test\\file.txt", result.c_str());
}
// Forward lookup of an unknown file API path fails with item_not_found.
TYPED_TEST(file_db_test,
           item_not_found_is_returned_for_non_existing_file_api_path) {
  auto &db = *this->file_db;
  db.clear();

  std::string result;
  EXPECT_EQ(api_error::item_not_found,
            db.get_file_source_path("/file.txt", result));
  EXPECT_TRUE(result.empty());
}
// get_file_data returns the complete stored record: path, size, IV list and
// source path.
TYPED_TEST(file_db_test, can_get_file_data) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 1U, {{}, {}}, "c:\\test\\file.txt"}));

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::success, db.get_file_data("/file", data));
  EXPECT_STREQ("/file", data.api_path.c_str());
  EXPECT_EQ(1U, data.file_size);
  EXPECT_EQ(2U, data.iv_list.size());
  EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());
}
// get_file_data on an unknown API path fails with item_not_found.
TYPED_TEST(file_db_test,
           item_not_found_is_returned_for_non_existing_file_data_api_path) {
  auto &db = *this->file_db;
  db.clear();

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::item_not_found, db.get_file_data("/file", data));
}
// Re-adding an existing file with a different IV list updates the record in
// place (count stays 1) and all other fields are preserved.
TYPED_TEST(file_db_test, can_update_existing_file_iv) {
  auto &db = *this->file_db;
  db.clear();

  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 1U, {{}, {}}, "c:\\test\\file.txt"}));
  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 1U, {{}, {}, {}}, "c:\\test\\file.txt"}));

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::success, db.get_file_data("/file", data));
  EXPECT_STREQ("/file", data.api_path.c_str());
  EXPECT_EQ(1U, data.file_size);
  EXPECT_EQ(3U, data.iv_list.size());
  EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());
  EXPECT_EQ(1U, db.count());
}
// Re-adding an existing file with a different size updates the record in
// place (count stays 1) and all other fields are preserved.
TYPED_TEST(file_db_test, can_update_existing_file_size) {
  auto &db = *this->file_db;
  db.clear();

  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 1U, {{}, {}}, "c:\\test\\file.txt"}));
  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 2U, {{}, {}}, "c:\\test\\file.txt"}));

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::success, db.get_file_data("/file", data));
  EXPECT_STREQ("/file", data.api_path.c_str());
  EXPECT_EQ(2U, data.file_size);
  EXPECT_EQ(2U, data.iv_list.size());
  EXPECT_STREQ("c:\\test\\file.txt", data.source_path.c_str());
  EXPECT_EQ(1U, db.count());
}
// Re-adding an existing file with a different source path updates the record
// in place (count stays 1) and all other fields are preserved.
TYPED_TEST(file_db_test, can_update_existing_file_source_path) {
  auto &db = *this->file_db;
  db.clear();

  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 1U, {{}, {}}, "c:\\test\\file.txt"}));
  EXPECT_EQ(
      api_error::success,
      db.add_or_update_file({"/file", 1U, {{}, {}}, "c:\\test\\file2.txt"}));

  i_file_db::file_data data{};
  EXPECT_EQ(api_error::success, db.get_file_data("/file", data));
  EXPECT_STREQ("/file", data.api_path.c_str());
  EXPECT_EQ(1U, data.file_size);
  EXPECT_EQ(2U, data.iv_list.size());
  EXPECT_STREQ("c:\\test\\file2.txt", data.source_path.c_str());
  EXPECT_EQ(1U, db.count());
}
// The generic source-path lookup also works for directories.
TYPED_TEST(file_db_test, can_get_source_path_for_directory) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success, db.add_directory("/", "c:\\test"));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_source_path("/", result));
  EXPECT_STREQ("c:\\test", result.c_str());
}
// The generic source-path lookup also works for files.
TYPED_TEST(file_db_test, can_get_source_path_for_file) {
  auto &db = *this->file_db;
  db.clear();
  EXPECT_EQ(api_error::success,
            db.add_or_update_file({"/file", 0U, {}, "c:\\test\\file.txt"}));

  std::string result;
  EXPECT_EQ(api_error::success, db.get_source_path("/file", result));
  EXPECT_STREQ("c:\\test\\file.txt", result.c_str());
}
// The generic source-path lookup reports item_not_found for an unknown API
// path and leaves the output untouched.
TYPED_TEST(file_db_test, item_not_found_is_returned_for_non_existing_api_path) {
  auto &db = *this->file_db;
  db.clear();

  std::string result;
  EXPECT_EQ(api_error::item_not_found, db.get_source_path("/file", result));
  EXPECT_TRUE(result.empty());
}
} // namespace repertory

View File

@ -22,7 +22,6 @@
#include "test_common.hpp"
#include "app_config.hpp"
#include "file_manager/cache_size_mgr.hpp"
#include "file_manager/events.hpp"
#include "file_manager/file_manager.hpp"
#include "file_manager/i_open_file.hpp"
@ -52,7 +51,7 @@ auto file_manager::open(std::shared_ptr<i_closeable_open_file> of,
class file_manager_test : public ::testing::Test {
public:
console_consumer con_consumer;
console_consumer c;
std::unique_ptr<app_config> cfg;
mock_provider mp;
static std::atomic<std::size_t> inst;
@ -67,9 +66,7 @@ protected:
{"file_manager_test" + std::to_string(++inst)});
cfg = std::make_unique<app_config>(provider_type::sia, file_manager_dir);
cfg->set_enable_download_timeout(false);
cache_size_mgr::instance().initialize(cfg.get());
cfg->set_enable_chunk_downloader_timeout(false);
}
void TearDown() override { event_system::instance().stop(); }
@ -107,7 +104,7 @@ TEST_F(file_manager_test, can_start_and_stop) {
}
TEST_F(file_manager_test, can_create_and_close_file) {
cfg->set_enable_download_timeout(true);
cfg->set_enable_chunk_downloader_timeout(true);
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
@ -216,7 +213,7 @@ TEST_F(file_manager_test, can_create_and_close_file) {
}
TEST_F(file_manager_test, can_open_and_close_file) {
cfg->set_enable_download_timeout(true);
cfg->set_enable_chunk_downloader_timeout(true);
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
@ -431,6 +428,16 @@ TEST_F(file_manager_test,
return api_error::success;
});
std::uint64_t handle{};
std::shared_ptr<i_open_file> open_file;
#if defined(_WIN32)
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
false, {}, handle, open_file));
#else
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
false, O_RDWR, handle, open_file));
#endif
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([&file](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
@ -455,17 +462,6 @@ TEST_F(file_manager_test,
return api_error::download_stopped;
});
std::uint64_t handle{};
std::shared_ptr<i_open_file> open_file;
#if defined(_WIN32)
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
false, {}, handle, open_file));
#else
EXPECT_EQ(api_error::success, mgr.open("/test_write_partial_download.txt",
false, O_RDWR, handle, open_file));
#endif
EXPECT_CALL(mp, set_item_meta("/test_write_partial_download.txt", _))
.WillOnce(
[](const std::string &, const api_meta_map &meta2) -> api_error {
@ -476,10 +472,6 @@ TEST_F(file_manager_test,
});
EXPECT_CALL(mp, upload_file).Times(0u);
if (not open_file->is_write_supported()) {
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
}
std::size_t bytes_written{};
data_buffer data = {0, 1, 2};
EXPECT_EQ(api_error::success, open_file->write(0u, data, bytes_written));
@ -538,7 +530,7 @@ TEST_F(file_manager_test,
}
TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
cfg->set_enable_download_timeout(true);
cfg->set_enable_chunk_downloader_timeout(true);
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
@ -565,6 +557,7 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
EXPECT_STREQ(source_path.c_str(),
evt2.get_source().get<std::string>().c_str());
});
event_capture capture({"download_end"});
auto now = utils::time::get_time_now();
auto meta = create_meta_attributes(
@ -588,6 +581,16 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
return api_error::success;
});
std::uint64_t handle{};
std::shared_ptr<i_open_file> open_file;
#if defined(_WIN32)
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
{}, handle, open_file));
#else
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
O_RDWR, handle, open_file));
#endif
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([&file](const std::string & /* api_path */,
std::size_t size, std::uint64_t offset,
@ -600,17 +603,6 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
EXPECT_EQ(bytes_read, data.size());
return ret;
});
std::uint64_t handle{};
std::shared_ptr<i_open_file> open_file;
#if defined(_WIN32)
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
{}, handle, open_file));
#else
EXPECT_EQ(api_error::success, mgr.open("/test_write_full_download.txt", false,
O_RDWR, handle, open_file));
#endif
EXPECT_CALL(mp, set_item_meta("/test_write_full_download.txt", _))
.WillOnce(
[](const std::string &, const api_meta_map &meta2) -> api_error {
@ -619,33 +611,25 @@ TEST_F(file_manager_test, upload_occurs_after_write_if_fully_downloaded) {
EXPECT_NO_THROW(EXPECT_FALSE(meta2.at(META_WRITTEN).empty()));
return api_error::success;
});
if (not open_file->is_write_supported()) {
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
}
event_capture capture({
"item_timeout",
"file_upload_queued",
"file_upload_completed",
});
EXPECT_CALL(mp, upload_file("/test_write_full_download.txt", source_path, _))
.WillOnce(Return(api_error::success));
std::size_t bytes_written{};
data_buffer data = {0, 1, 2};
EXPECT_EQ(api_error::success, open_file->write(0u, data, bytes_written));
EXPECT_EQ(std::size_t(3u), bytes_written);
while (not open_file->is_complete()) {
std::this_thread::sleep_for(10ms);
}
open_file.reset();
capture.wait_for_empty();
EXPECT_CALL(mp, upload_file("/test_write_full_download.txt", source_path, _))
.WillOnce(Return(api_error::success));
event_capture ec2({
"item_timeout",
"file_upload_queued",
"file_upload_completed",
});
mgr.close(handle);
capture.wait_for_empty();
ec2.wait_for_empty();
EXPECT_EQ(std::size_t(0U), mgr.get_open_file_count());
EXPECT_EQ(std::size_t(0U), mgr.get_open_handle_count());
@ -710,14 +694,9 @@ TEST_F(file_manager_test, can_evict_file) {
.WillRepeatedly(Return(api_error::success));
EXPECT_CALL(mp, upload_file(_, _, _)).WillOnce(Return(api_error::success));
if (not open_file->is_write_supported()) {
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
}
data_buffer data{{0, 1, 1}};
std::size_t bytes_written{};
auto res = open_file->write(0U, data, bytes_written);
EXPECT_EQ(api_error::success, res);
EXPECT_EQ(api_error::success, open_file->write(0U, data, bytes_written));
auto opt_size = utils::file::file{source_path}.size();
EXPECT_TRUE(opt_size.has_value());
@ -730,6 +709,15 @@ TEST_F(file_manager_test, can_evict_file) {
EXPECT_TRUE(utils::retry_action(
[&mgr]() -> bool { return not mgr.is_processing("/test_evict.txt"); }));
EXPECT_CALL(mp, get_item_meta(_, META_SOURCE, _))
.WillOnce([&source_path](const std::string &api_path,
const std::string &key,
std::string &value) -> api_error {
EXPECT_STREQ("/test_evict.txt", api_path.c_str());
EXPECT_STREQ(META_SOURCE.c_str(), key.c_str());
value = source_path;
return api_error::success;
});
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
.WillOnce([](const std::string &api_path, const std::string &key,
std::string &value) -> api_error {
@ -748,17 +736,6 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_pinned) {
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
file_manager mgr(*cfg, mp);
EXPECT_CALL(mp, get_filesystem_item)
.WillRepeatedly([](const std::string &api_path, bool directory,
filesystem_item &fsi) -> api_error {
fsi.api_path = api_path;
fsi.api_parent = utils::path::get_parent_api_path(api_path);
fsi.directory = directory;
fsi.size = 2U;
fsi.source_path = "/test/test_open.src";
return api_error::success;
});
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
.WillOnce([](const std::string &api_path, const std::string &key,
std::string &value) -> api_error {
@ -817,17 +794,28 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_open) {
mgr.close(handle);
}
TEST_F(file_manager_test, evict_file_fails_if_unable_to_get_filesystem_item) {
TEST_F(file_manager_test,
evict_file_fails_if_unable_to_get_source_path_from_item_meta) {
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
file_manager mgr(*cfg, mp);
EXPECT_CALL(mp, get_filesystem_item)
.WillRepeatedly([](const std::string & /* api_path */,
bool /* directory */,
filesystem_item & /* fsi */) -> api_error {
EXPECT_CALL(mp, get_item_meta(_, META_SOURCE, _))
.WillOnce([](const std::string &api_path, const std::string &key,
std::string & /*value*/) -> api_error {
EXPECT_STREQ("/test_open.txt", api_path.c_str());
EXPECT_STREQ(META_SOURCE.c_str(), key.c_str());
return api_error::error;
});
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
.WillOnce([](const std::string &api_path, const std::string &key,
std::string &value) -> api_error {
EXPECT_STREQ("/test_open.txt", api_path.c_str());
EXPECT_STREQ(META_PINNED.c_str(), key.c_str());
value = "0";
return api_error::success;
});
EXPECT_FALSE(mgr.evict_file("/test_open.txt"));
}
@ -835,13 +823,20 @@ TEST_F(file_manager_test, evict_file_fails_if_source_path_is_empty) {
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
file_manager mgr(*cfg, mp);
EXPECT_CALL(mp, get_filesystem_item)
.WillRepeatedly([](const std::string &api_path, bool directory,
filesystem_item &fsi) -> api_error {
fsi.api_path = api_path;
fsi.api_parent = utils::path::get_parent_api_path(api_path);
fsi.directory = directory;
fsi.size = 20U;
EXPECT_CALL(mp, get_item_meta(_, META_SOURCE, _))
.WillOnce([](const std::string &api_path, const std::string &key,
std::string &value) -> api_error {
EXPECT_STREQ("/test_open.txt", api_path.c_str());
EXPECT_STREQ(META_SOURCE.c_str(), key.c_str());
value = "";
return api_error::success;
});
EXPECT_CALL(mp, get_item_meta(_, META_PINNED, _))
.WillOnce([](const std::string &api_path, const std::string &key,
std::string &value) -> api_error {
EXPECT_STREQ("/test_open.txt", api_path.c_str());
EXPECT_STREQ(META_PINNED.c_str(), key.c_str());
value = "0";
return api_error::success;
});
@ -909,10 +904,6 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_uploading) {
return api_error::success;
});
if (not open_file->is_write_supported()) {
EXPECT_TRUE(mgr.get_open_file(handle, true, open_file));
}
data_buffer data{{0, 1, 1}};
std::size_t bytes_written{};
EXPECT_EQ(api_error::success, open_file->write(0U, data, bytes_written));
@ -956,7 +947,6 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_modified) {
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));
file_manager mgr(*cfg, mp);
EXPECT_CALL(mp, get_filesystem_item)
.WillOnce([](const std::string &api_path, bool directory,
filesystem_item &fsi) -> api_error {
@ -971,12 +961,11 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_modified) {
});
auto file = std::make_shared<mock_open_file>();
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
EXPECT_CALL(*file, add).WillOnce(Return());
EXPECT_CALL(*file, get_api_path).WillRepeatedly(Return("/test_evict.txt"));
EXPECT_CALL(*file, get_source_path).WillRepeatedly(Return("/test_evict.src"));
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
EXPECT_CALL(*file, is_modified).WillRepeatedly(Return(true));
EXPECT_CALL(*file, is_write_supported).WillRepeatedly(Return(true));
EXPECT_CALL(*file, is_modified).Times(2).WillRepeatedly(Return(true));
std::uint64_t handle{};
std::shared_ptr<i_open_file> open_file;
@ -999,21 +988,20 @@ TEST_F(file_manager_test, evict_file_fails_if_file_is_not_complete) {
filesystem_item &fsi) -> api_error {
EXPECT_STREQ("/test_evict.txt", api_path.c_str());
EXPECT_FALSE(directory);
fsi.api_parent = utils::path::get_parent_api_path(api_path);
fsi.api_path = api_path;
fsi.api_parent = utils::path::get_parent_api_path(api_path);
fsi.directory = directory;
fsi.size = 1U;
return api_error::success;
});
auto file = std::make_shared<mock_open_file>();
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
EXPECT_CALL(*file, add).WillOnce(Return());
EXPECT_CALL(*file, get_api_path).WillRepeatedly(Return("/test_evict.txt"));
EXPECT_CALL(*file, get_source_path).WillRepeatedly(Return("/test_evict.src"));
EXPECT_CALL(*file, is_complete).WillRepeatedly(Return(false));
EXPECT_CALL(*file, is_directory).WillOnce(Return(false));
EXPECT_CALL(*file, is_modified).WillRepeatedly(Return(false));
EXPECT_CALL(*file, is_write_supported).WillRepeatedly(Return(true));
EXPECT_CALL(*file, is_modified).Times(2).WillRepeatedly(Return(false));
EXPECT_CALL(*file, is_complete).Times(2).WillRepeatedly(Return(false));
EXPECT_CALL(mp, set_item_meta("/test_evict.txt", META_SOURCE, _))
.WillOnce(Return(api_error::success));
@ -1428,8 +1416,8 @@ TEST_F(file_manager_test, can_queue_and_remove_upload) {
}
TEST_F(file_manager_test, file_is_closed_after_download_timeout) {
cfg->set_enable_download_timeout(true);
cfg->set_download_timeout_secs(3U);
cfg->set_enable_chunk_downloader_timeout(true);
cfg->set_chunk_downloader_timeout_secs(3U);
polling::instance().start(cfg.get());
@ -1469,26 +1457,6 @@ TEST_F(file_manager_test, file_is_closed_after_download_timeout) {
event_capture capture({"item_timeout"});
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([](const std::string & /* api_path */, std::size_t size,
std::uint64_t offset, data_buffer &data,
stop_type &stop_requested) -> api_error {
if (stop_requested) {
return api_error::download_stopped;
}
if (offset == 0U) {
data.resize(size);
return api_error::success;
}
while (not stop_requested) {
std::this_thread::sleep_for(100ms);
}
return api_error::download_stopped;
});
std::uint64_t handle{};
std::shared_ptr<i_open_file> open_file;
#if defined(_WIN32)
@ -1499,15 +1467,33 @@ TEST_F(file_manager_test, file_is_closed_after_download_timeout) {
O_RDWR, handle, open_file));
#endif
EXPECT_CALL(mp, read_file_bytes)
.WillRepeatedly([](const std::string & /* api_path */,
std::size_t /*size*/, std::uint64_t offset,
data_buffer & /*data*/,
stop_type &stop_requested) -> api_error {
if (stop_requested) {
return api_error::download_stopped;
}
if (offset == 0U) {
return api_error::success;
}
while (not stop_requested) {
std::this_thread::sleep_for(100ms);
}
return api_error::download_stopped;
});
data_buffer data{};
EXPECT_EQ(api_error::success, open_file->read(1U, 0U, data));
mgr.close(handle);
if (open_file->is_write_supported()) {
EXPECT_CALL(mp, set_item_meta("/test_download_timeout.txt", META_SOURCE, _))
.WillOnce(Return(api_error::success));
}
EXPECT_CALL(mp, set_item_meta("/test_download_timeout.txt", META_SOURCE, _))
.WillOnce(Return(api_error::success));
EXPECT_EQ(std::size_t(1U), mgr.get_open_file_count());
capture.wait_for_empty();
@ -1558,7 +1544,7 @@ TEST_F(file_manager_test, remove_file_fails_if_provider_remove_file_fails) {
TEST_F(file_manager_test,
resize_greater_than_chunk_size_sets_new_chunks_to_read) {
cfg->set_enable_download_timeout(true);
cfg->set_enable_chunk_downloader_timeout(true);
EXPECT_CALL(mp, is_read_only()).WillRepeatedly(Return(false));

View File

@ -119,6 +119,7 @@ TYPED_TEST(file_mgr_db_test, can_add_get_and_remove_upload) {
this->file_mgr_db->clear();
EXPECT_TRUE(this->file_mgr_db->add_upload({
"/test0",
2ULL,
"/src/test0",
}));
@ -135,11 +136,13 @@ TYPED_TEST(file_mgr_db_test, uploads_are_correctly_ordered) {
this->file_mgr_db->clear();
EXPECT_TRUE(this->file_mgr_db->add_upload({
"/test08",
utils::time::get_time_now(),
"/src/test0",
}));
EXPECT_TRUE(this->file_mgr_db->add_upload({
"/test07",
utils::time::get_time_now(),
"/src/test1",
}));

Some files were not shown because too many files have changed in this diff Show More