grouping
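Rework getBucketFiles grouping: parse filename metadata once per base stem (rep defaults to the first file seen), drop the per-file metadata backfill, rename normalize_* to norm_*, and add a tripleId (product_build_platform_arch). Retention still keeps the newest three bases per product+platform+arch in nightly, now sorting by the best available artifact (tar ?? dmg ?? setup). Emission aggregates kept bases into triples, groups triples by release (build) newest-first, orders them darwin/windows/linux then aarch64/x86-64, and emits each triple as tar, then _setup.exe, then .dmg, each followed by its sidecars.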
src/api.js (198)
@@ -21,7 +21,7 @@ const s3 = new S3Client({
   },
 });
 
-/* ---------------- helpers ---------------- */
+/* ---------- helpers ---------- */
 
 const toListed = (folderKey) => (o) => {
   const d = o.LastModified ?? new Date(0);
@@ -68,47 +68,44 @@ const isTar = (name) => name.endsWith(TAR_EXT);
 const isDmg = (name) => name.endsWith(".dmg");
 const isSetup = (name) => name.endsWith("_setup.exe");
 
-// --- normalization + parsing (operate on baseStem!)
-const normalize_platform = (p) => {
+// normalize platform/arch and parse FROM baseStem so arch is clean
+const norm_platform = (p) => {
   const v = (p || "").toLowerCase();
   if (v === "macos") return "darwin";
   return v;
 };
 
-const normalize_arch = (a) => {
+const norm_arch = (a) => {
   const v = (a || "").toLowerCase();
   if (v === "arm64" || v === "aarch64") return "aarch64";
   if (v === "x86_64" || v === "x86-64") return "x86-64";
   return v;
 };
 
-// parts[0]=product, parts[1]=version, parts[2]=build, parts[3]=platform, parts[4]=arch
+// parts: 0=product, 1=version, 2=build, 3=platform, 4=arch
 const parseName = (fname) => {
   const stem = baseStem(fname);
   const parts = stem.split("_");
   const product = parts[0] ?? "";
   const version = parts[1] ?? "";
   const build = parts[2] ?? "";
-  const platform = normalize_platform(parts[3] ?? "");
-  const arch = normalize_arch(parts[4] ?? "");
-  const groupId = `${product}_${platform}_${arch}`; // retention groups
-  const releaseId = `${product}_${build}`; // build grouping (emit)
-  return { parts, product, version, build, platform, arch, groupId, releaseId };
+  const platform = norm_platform(parts[3] ?? "");
+  const arch = norm_arch(parts[4] ?? "");
+  const groupId = `${product}_${platform}_${arch}`; // retention
+  const releaseId = `${product}_${build}`; // build grouping
+  const tripleId = `${product}_${build}_${platform}_${arch}`; // emit bucket
+  return {
+    product,
+    version,
+    build,
+    platform,
+    arch,
+    groupId,
+    releaseId,
+    tripleId,
+  };
 };
 
-// platform/arch sort preference within a build
-const platformRank = (platform) =>
-  platform === "darwin"
-    ? 0
-    : platform === "windows"
-      ? 1
-      : platform === "linux"
-        ? 2
-        : 3;
-
-const archRank = (arch) => (arch === "aarch64" ? 0 : arch === "x86-64" ? 1 : 2);
-
-/* ---------------- public API ---------------- */
+/* ---------- public API ---------- */
 
 const cleanOldItems = async () => {
   const keys = Array.from(oldKeys);
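For orientation, a sketch of what the reworked parseName returns. The filename below is invented, and baseStem is assumed to strip the artifact suffix (e.g. ".tar.gz") before splitting on "_":

// parseName("acme_1.4.0_20240512_macos_arm64.tar.gz")   // hypothetical name
// => {
//      product: "acme", version: "1.4.0", build: "20240512",
//      platform: "darwin",   // "macos" via norm_platform
//      arch: "aarch64",      // "arm64" via norm_arch
//      groupId: "acme_darwin_aarch64",            // retention
//      releaseId: "acme_20240512",                // build grouping
//      tripleId: "acme_20240512_darwin_aarch64",  // emit bucket
//    }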
@@ -153,66 +150,62 @@ const getBucketFiles = async (folderName) => {
   const contents = (data.Contents ?? []).filter((it) => it.Key !== folderKey);
   const listed = contents.map(toListed(folderKey)).sort(byNewest);
 
-  // Index by base stem; aggregate tar/dmg/setup under the same platform+arch for a build
+  // quick lookups
   const byKey = Object.fromEntries(listed.map((i) => [i.key, i]));
-  const byBase = new Map(); // base -> { rep, tar, dmg, setup, sidecars:Set<string>, platform, arch, releaseId }
+
+  // First, group files by *base stem* to detect what each stem represents
+  const byBase = new Map(); // base -> { rep, tar, dmg, setup, platform, arch, releaseId }
   for (const it of listed) {
     const base = baseStem(it.name);
     let g = byBase.get(base);
     if (!g) {
+      const meta = parseName(it.name);
       g = {
-        rep: null,
+        rep: it,
         tar: null,
         dmg: null,
         setup: null,
         sidecars: new Set(),
-        platform: "",
-        arch: "",
-        releaseId: "",
+        platform: meta.platform,
+        arch: meta.arch,
+        releaseId: meta.releaseId,
+        tripleId: meta.tripleId,
       };
       byBase.set(base, g);
     }
 
-    const meta = parseName(it.name); // <-- now from baseStem(), so arch/platform are clean
-    if (isTar(it.name)) {
-      g.tar = it;
-      g.rep = g.rep ?? it;
-    } else if (isDmg(it.name)) {
-      g.dmg = it;
-      g.rep = g.rep ?? it;
-    } else if (isSetup(it.name)) {
-      g.setup = it;
-      g.rep = g.rep ?? it;
-    } else if (it.name.endsWith(".sha256") || it.name.endsWith(".sig")) {
+    if (isTar(it.name)) g.tar = it;
+    else if (isDmg(it.name)) g.dmg = it;
+    else if (isSetup(it.name)) g.setup = it;
+    else if (it.name.endsWith(".sha256") || it.name.endsWith(".sig")) {
       g.sidecars.add(it.key);
     }
-
-    // ensure metadata captured (first non-sidecar wins, but we can fill missing)
-    if (!g.platform && meta.platform) g.platform = meta.platform;
-    if (!g.arch && meta.arch) g.arch = meta.arch;
-    if (!g.releaseId && meta.releaseId) g.releaseId = meta.releaseId;
+    // rep remains whatever we first saw; sort values come from actual artifacts anyway
   }
 
-  const groups = Array.from(byBase.values()).filter((g) => g.rep);
+  const baseGroups = Array.from(byBase.values()).filter(
+    (g) => g.tar || g.dmg || g.setup,
+  );
 
-  // ---------- Retention: newest 3 per product+platform+arch (nightly) ----------
+  // ---------- Retention: newest 3 per (product+platform+arch) in nightly ----------
   const keepBaseSet = new Set();
   if (folder === "nightly") {
-    const buckets = new Map(); // groupId -> [groups]
-    for (const g of groups) {
-      const meta = parseName(g.rep.name);
-      const gid = meta.groupId;
-      if (!buckets.has(gid)) buckets.set(gid, []);
-      buckets.get(gid).push(g);
+    const buckets = new Map(); // groupId -> [bases...]
+    for (const g of baseGroups) {
+      // use the best available artifact for sort
+      const rep = g.tar ?? g.dmg ?? g.setup ?? g.rep;
+      const { groupId } = parseName(rep.name);
+      if (!buckets.has(groupId)) buckets.set(groupId, []);
+      buckets.get(groupId).push(g);
     }
     for (const [, arr] of buckets) {
-      arr.sort((a, b) => (b.rep?.sort ?? 0) - (a.rep?.sort ?? 0));
+      arr.sort((a, b) => {
+        const sa = (a.tar ?? a.dmg ?? a.setup ?? a.rep)?.sort ?? 0;
+        const sb = (b.tar ?? b.dmg ?? b.setup ?? b.rep)?.sort ?? 0;
+        return sb - sa;
+      });
       arr.forEach((g, idx) => {
-        const base = baseStem(g.rep.name);
+        const rep = g.tar ?? g.dmg ?? g.setup ?? g.rep;
+        const base = baseStem(rep.name);
         if (idx < 3) {
           keepBaseSet.add(base);
         } else {
           // mark artifacts in this base for cleanup
           if (g.tar)
             [g.tar.key, ...sidecarsFor(g.tar.key)].forEach((k) =>
               oldKeys.add(k),
@@ -229,61 +222,92 @@ const getBucketFiles = async (folderName) => {
       });
     }
   } else {
-    for (const g of groups) {
-      keepBaseSet.add(baseStem(g.rep.name));
+    for (const g of baseGroups) {
+      const rep = g.tar ?? g.dmg ?? g.setup ?? g.rep;
+      keepBaseSet.add(baseStem(rep.name));
     }
   }
 
-  // ---------- Emit: group by release (build) -> platform -> arch ----------
-  const kept = groups.filter((g) => keepBaseSet.has(baseStem(g.rep.name)));
-
-  const releases = new Map(); // releaseId -> { repSort:number, items:[g...] }
-  for (const g of kept) {
-    const rid = g.releaseId || parseName(g.rep.name).releaseId;
-    if (!releases.has(rid)) {
-      releases.set(rid, { repSort: g.rep.sort, items: [] });
+  // ---------- Aggregate by (product, build, platform, arch) => "triple" ----------
+  const triples = new Map(); // tripleId -> { repSort, releaseId, platform, arch, tar?, setup?, dmg? }
+  for (const g of baseGroups) {
+    const rep = g.tar ?? g.dmg ?? g.setup ?? g.rep;
+    const { tripleId, releaseId, platform, arch } = parseName(rep.name);
+
+    // skip bases not kept by retention
+    if (!keepBaseSet.has(baseStem(rep.name))) continue;
+
+    let t = triples.get(tripleId);
+    if (!t) {
+      t = {
+        repSort: rep.sort,
+        releaseId,
+        platform,
+        arch,
+        tar: null,
+        setup: null,
+        dmg: null,
+      };
+      triples.set(tripleId, t);
     }
-    const bucket = releases.get(rid);
-    bucket.repSort = Math.max(bucket.repSort, g.rep.sort);
-    bucket.items.push(g);
+    t.repSort = Math.max(t.repSort, rep.sort);
+    if (g.tar) t.tar = g.tar;
+    if (g.setup) t.setup = g.setup;
+    if (g.dmg) t.dmg = g.dmg;
   }
 
+  // ---------- Group triples by release (build), newest-first ----------
+  const releases = new Map(); // releaseId -> { repSort, items:[triple] }
+  for (const t of triples.values()) {
+    let r = releases.get(t.releaseId);
+    if (!r) {
+      r = { repSort: t.repSort, items: [] };
+      releases.set(t.releaseId, r);
+    }
+    r.repSort = Math.max(r.repSort, t.repSort);
+    r.items.push(t);
+  }
   const releaseList = Array.from(releases.entries()).sort(
     (a, b) => b[1].repSort - a[1].repSort,
   );
 
+  // Sort triples inside a release if you want consistent platform/arch ordering (optional)
+  const platformRank = (p) =>
+    p === "darwin" ? 0 : p === "windows" ? 1 : p === "linux" ? 2 : 3;
+  const archRank = (a) => (a === "aarch64" ? 0 : a === "x86-64" ? 1 : 2);
+
+  // ---------- Emit in three GROUPS per triple: (tar-group) then (setup-group) then (dmg-group) ----------
   const out = [];
   for (const [, bucket] of releaseList) {
     // group windows _setup together with the same platform block by sorting on platform+arch
     bucket.items.sort((a, b) => {
       const pr = platformRank(a.platform) - platformRank(b.platform);
       if (pr !== 0) return pr;
       const ar = archRank(a.arch) - archRank(b.arch);
       if (ar !== 0) return ar;
-      return (b.rep?.sort ?? 0) - (a.rep?.sort ?? 0); // tie-breaker: newest first
+      return b.repSort - a.repSort; // tie-breaker newest-first
     });
 
-    for (const g of bucket.items) {
-      // tar + sidecars
-      if (g.tar) {
-        out.push(g.tar);
-        for (const sk of sidecarsFor(g.tar.key)) {
+    for (const t of bucket.items) {
+      // 1) tar.gz group
+      if (t.tar) {
+        out.push(t.tar);
+        for (const sk of sidecarsFor(t.tar.key)) {
           const it = byKey[sk];
           if (it) out.push(it);
         }
       }
-      // dmg + sidecars
-      if (g.dmg) {
-        out.push(g.dmg);
-        for (const sk of sidecarsFor(g.dmg.key)) {
+      // 2) _setup.exe group
+      if (t.setup) {
+        out.push(t.setup);
+        for (const sk of sidecarsFor(t.setup.key)) {
           const it = byKey[sk];
           if (it) out.push(it);
         }
       }
-      // setup + sidecars (now guaranteed to live in the same platform block)
-      if (g.setup) {
-        out.push(g.setup);
-        for (const sk of sidecarsFor(g.setup.key)) {
+      // 3) .dmg group
+      if (t.dmg) {
+        out.push(t.dmg);
+        for (const sk of sidecarsFor(t.dmg.key)) {
           const it = byKey[sk];
           if (it) out.push(it);
         }
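For a sense of the new emit order, a sketch with invented names: one release ("acme_20240512") that kept a darwin/aarch64 triple and a windows/x86-64 triple would come out of the loop above as

// darwin sorts before windows (platformRank), aarch64 before x86-64 (archRank);
// within each triple: tar group, then setup group, then dmg group,
// each artifact followed by its sidecars (the .sha256 key shape is assumed;
// sidecarsFor is not shown in this diff):
//
//   acme_1.4.0_20240512_macos_arm64.tar.gz
//   acme_1.4.0_20240512_macos_arm64.tar.gz.sha256
//   acme_1.4.0_20240512_macos_arm64.dmg
//   acme_1.4.0_20240512_macos_arm64.dmg.sha256
//   acme_1.4.0_20240512_windows_x86-64.tar.gz
//   acme_1.4.0_20240512_windows_x86-64.tar.gz.sha256
//   acme_1.4.0_20240512_windows_x86-64_setup.exe
//   acme_1.4.0_20240512_windows_x86-64_setup.exe.sha256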