### Issues

* #1 [bug] Unable to mount S3 due to 'item_not_found' exception
* #2 Require bucket name for S3 mounts
* #3 [bug] File size is not being updated in S3 mount
* #4 Upgrade to libfuse-3.x.x
* #5 Switch to renterd for Sia support
* #6 Switch to cpp-httplib to further reduce dependencies
* #7 Remove global_data and calculate used disk space per provider
* #8 Switch to libcurl for S3 mount support

### Changes from v1.x.x

* Added read-only encrypt provider
  * Pass-through mount point that transparently encrypts source data using `XChaCha20-Poly1305`
* Added S3 encryption support via `XChaCha20-Poly1305` (see the sketch after these notes)
* Added replay protection to remote mounts
* Added support for base64 writes in remote FUSE
* Created statically linked Linux binaries for `amd64` and `aarch64` using `musl-libc`
* Removed legacy Sia renter support
* Removed Skynet support
* Fixed multiple remote mount WinFSP API issues on *NIX servers
* Implemented chunked reads and writes
  * Writes for non-cached files are performed in chunks of 8 MiB
* Removed `repertory-ui` support
* Removed `FreeBSD` support
* Switched to `libsodium` over `CryptoPP`
* Switched to `XChaCha20-Poly1305` for remote mounts
* Updated `GoogleTest` to v1.14.0
* Updated `JSON for Modern C++` to v3.11.2
* Updated `OpenSSL` to v1.1.1w
* Updated `RocksDB` to v8.5.3
* Updated `WinFSP` to 2023
* Updated `boost` to v1.78.0
* Updated `cURL` to v8.3.0
* Updated `zlib` to v1.3
* Use `upload_manager` for all providers
  * Adds a delay to uploads to prevent excessive API calls
  * Supports re-upload after mount restart for incomplete uploads
  * NOTE: Uploads for all providers are full file (no resume support)
  * Multipart upload support is planned for S3

Reviewed-on: #9
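The encryption items above reference libsodium's `XChaCha20-Poly1305` AEAD. Below is a minimal sketch of that primitive, assuming the PyNaCl bindings for libsodium; repertory itself is C++, and its chunking, nonce handling, and on-disk format are not reproduced here.

```python
# Minimal XChaCha20-Poly1305 sketch via libsodium's PyNaCl bindings.
# This illustrates only the AEAD primitive named in the notes above;
# repertory's actual implementation and file format are not shown.
import nacl.bindings
import nacl.utils

key = nacl.utils.random(32)    # 256-bit key
nonce = nacl.utils.random(24)  # 192-bit nonce (XChaCha20's extended nonce)

plaintext = b'example chunk of source data'
ciphertext = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_encrypt(
    plaintext, None, nonce, key)

# Decryption verifies the authentication tag before returning the plaintext.
assert nacl.bindings.crypto_aead_xchacha20poly1305_ietf_decrypt(
    ciphertext, None, nonce, key) == plaintext
```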
`archive.py` · 55 lines · 1.7 KiB · Python
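The attached helper script builds the release archives deterministically: sources are sorted, timestamps are fixed, and text line endings are normalized, so repeated runs over the same inputs produce byte-identical output.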
```python
import io
import os.path
import sys
import tarfile
import time
import zipfile


def read_file(path, use_crlf):
    """Read a file, normalizing line endings for text files.

    Files containing NUL bytes are treated as binary and left untouched.
    """
    with open(path, 'rb') as file:
        data = file.read()

    if b'\0' not in data:
        data = data.replace(b'\r', b'')
        if use_crlf:
            data = data.replace(b'\n', b'\r\n')

    return data


def write_zip(target, arcprefix, timestamp, sources):
    """Write a zip archive with fixed timestamps and permissions."""
    with zipfile.ZipFile(target, 'w') as archive:
        for source in sorted(sources):
            data = read_file(source, use_crlf=True)
            path = os.path.join(arcprefix, source)
            info = zipfile.ZipInfo(path)
            info.date_time = time.localtime(timestamp)
            info.compress_type = zipfile.ZIP_DEFLATED
            info.external_attr = 0o644 << 16  # rw-r--r-- on Unix
            archive.writestr(info, data)


def write_tar(target, arcprefix, timestamp, sources, compression):
    """Write a tar archive with fixed timestamps."""
    with tarfile.open(target, 'w:' + compression) as archive:
        for source in sorted(sources):
            data = read_file(source, use_crlf=False)
            path = os.path.join(arcprefix, source)
            info = tarfile.TarInfo(path)
            info.size = len(data)
            info.mtime = timestamp
            archive.addfile(info, io.BytesIO(data))


if len(sys.argv) < 5:
    raise RuntimeError('Usage: python archive.py <target> <archive prefix> <timestamp> <source files>')

target, arcprefix = sys.argv[1:3]
timestamp = int(sys.argv[3])
sources = sys.argv[4:]

# tarfile._Stream._init_write_gz always writes the current time to the gzip
# header, so patch time.time to return the fixed timestamp instead.
time.time = lambda: timestamp

if target.endswith('.zip'):
    write_zip(target, arcprefix, timestamp, sources)
elif target.endswith('.tar.gz') or target.endswith('.tar.bz2'):
    compression = os.path.splitext(target)[1][1:]
    write_tar(target, arcprefix, timestamp, sources, compression=compression)
else:
    raise NotImplementedError('File type not supported: ' + target)
```
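Since the point of the fixed timestamp is reproducibility, a quick sanity check is to build the same archive twice and compare digests. A minimal sketch, assuming the script is saved as `archive.py` and that `README.md` and `LICENSE` (hypothetical inputs) exist in the working directory:

```python
import hashlib
import subprocess


def build_digest(out):
    # README.md and LICENSE are hypothetical inputs; any files will do.
    subprocess.run(
        ['python', 'archive.py', out, 'demo-1.0', '1700000000',
         'README.md', 'LICENSE'],
        check=True)
    with open(out, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()


# Same inputs and timestamp should yield byte-identical archives.
first = build_digest('demo.tar.gz')
second = build_digest('demo.tar.gz')
assert first == second
```

Note that the same output name is used for both runs: for `.tar.gz` targets, gzip embeds the archive's base name in its header, so archives written under different names would differ even with identical contents.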