import fs from "fs";
import path from "path";
import crypto from "crypto";
import mime from "mime";
import { WebsiteApi } from "guichet-sdk-ts";
import {
  S3Client,
  ListObjectsV2Command,
  DeleteObjectsCommand,
  DeleteObjectsCommandOutput,
  HeadObjectCommand,
} from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";
import { PromisePool } from "@supercharge/promise-pool";
import { openApiConf } from "./auth.js";
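
// Name of the object-metadata field in which we record each uploaded file's
// md5 checksum (see `uploadFile` and `needsUpdate` below).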
const MD5METAFIELD = "dfl-md5sum";

// Walks through the local directory at path `dir` and, for each file it
// contains, returns:
// - `localPath`: its path on the local filesystem (includes `dir`). On Windows,
//   this path will typically use `\` as separator.
// - `s3Path`: the equivalent path as we would store it in an S3 bucket, using
//   '/' as separator. This path includes `s3Prefix` as a prefix when one is
//   provided. If `s3Prefix` is null or empty, `s3Path` is relative to the root
//   (of the form "a/b/c", not "/a/b/c").
async function getLocalFiles(dir: string, s3Prefix: string | null): Promise<{ localPath: string, s3Path: string }[]> {
  const entries = await fs.promises.readdir(dir, { withFileTypes: true });
  const files = await Promise.all(entries.map(entry => {
    const localPath = path.join(dir, entry.name);
    const s3Path = s3Prefix ? s3Prefix + "/" + entry.name : entry.name;
    if (entry.isDirectory()) {
      return getLocalFiles(localPath, s3Path);
    } else {
      return Promise.resolve([{ localPath, s3Path }]);
    }
  }));
  return files.flat();
}
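
// For example (hypothetical layout), on a POSIX system a local tree containing
// `site/index.html` and `site/img/logo.png` makes `getLocalFiles("site", null)`
// resolve to:
//   [ { localPath: "site/index.html",   s3Path: "index.html" },
//     { localPath: "site/img/logo.png", s3Path: "img/logo.png" } ]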
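
// Computes the md5 checksum of a local file, streaming its contents so that
// large files are not held in memory all at once.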
async function getFileMd5(file: string): Promise<string> {
  const hash = crypto.createHash('md5');
  for await (const chunk of fs.createReadStream(file)) {
    hash.update(chunk as Buffer);
  }
  return hash.digest('hex');
}
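
// Lists all objects stored in `Bucket`, following continuation tokens so that
// buckets larger than one ListObjectsV2 page (1000 keys by default) are fully
// enumerated. Returns a map from object key to the size reported by the server.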
async function getBucketFiles(client: S3Client, Bucket: string):
  Promise<Map<string, { size: number | null }>>
{
  const files = new Map<string, { size: number | null }>();
  let done = false;
  let cmd = new ListObjectsV2Command({ Bucket });
  while (!done) {
    const resp = await client.send(cmd);
    if (resp.$metadata.httpStatusCode != 200) {
      // TODO: better error handling?
      console.error(resp);
      process.exit(1);
    }

    if (resp.Contents) {
      for (const item of resp.Contents) {
        if (item.Key) {
          files.set(item.Key, { size: item.Size ?? null });
        }
      }
    }

    if (resp.NextContinuationToken) {
      cmd = new ListObjectsV2Command({
        Bucket,
        ContinuationToken: resp.NextContinuationToken,
      });
    } else {
      done = true;
    }
  }
  return files;
}
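
// Uploads `Body` to the bucket under `Key`. The Content-Type is derived from
// the file extension, and the md5 checksum is recorded in the object metadata.
// `Upload` from @aws-sdk/lib-storage transparently switches to a multipart
// upload for large bodies.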
async function uploadFile(client: S3Client, Bucket: string, Key: string, Body: any, md5: string) {
  // use `path.posix` because `Key` is a path in a bucket that uses `/` as separator.
  let ContentType = mime.getType(path.posix.extname(Key)) ?? undefined;

  // add charset=utf-8 by default on text files (TODO: allow the user to override this)
  if (ContentType && ContentType.startsWith("text/")) {
    ContentType = ContentType + "; charset=utf-8";
  }

  // store the md5 checksum in the object metadata; it will be used to skip
  // subsequent uploads if the file has not changed.
  const Metadata = { [MD5METAFIELD]: md5 };

  const params = { Bucket, Key, Body, ContentType, Metadata };
  const parallelUpload = new Upload({ client, params });

  parallelUpload.on("httpUploadProgress", progress => {
    process.stdout.moveCursor(0, -1);
    process.stdout.clearLine(1);
    process.stdout.write(`Sending ${progress.Key}`);
    if (!(progress.loaded == progress.total && progress.part == 1)) {
      process.stdout.write(` (${progress.loaded}/${progress.total})`);
    }
    process.stdout.write("\n");
  });
  await parallelUpload.done();
}
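
// Deletes the given object keys from the bucket in a single batch request.
// Note: a DeleteObjects request accepts at most 1000 keys, so a larger set
// would need to be chunked.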
async function deleteFiles(client: S3Client, Bucket: string, files: string[]): Promise<DeleteObjectsCommandOutput | null> {
  if (files.length == 0) {
    return null;
  }
  return await client.send(new DeleteObjectsCommand({
    Bucket,
    Delete: {
      Objects: files.map(f => ({ Key: f })),
    },
  }));
}

// Checks whether a remote file needs to be updated with its local copy.
//
// We first check whether the file sizes differ; if they match, we compare the
// md5 checksum we previously stored in the object metadata (if it exists) with
// the local file's md5 checksum.
async function needsUpdate(
  client: S3Client,
  localFile: string,
  localMd5: string,
  Bucket: string,
  Key: string,
  remoteSize: number | null,
): Promise<boolean> {
  if (remoteSize) {
    const localSize = (await fs.promises.stat(localFile)).size;
    if (
      localSize == 0 /* stat can return 0 in case of error */
      || localSize != remoteSize
    ) {
      return true;
    }
  }

  // fetch metadata for the object and see if we previously stored its md5
  const resp = await client.send(new HeadObjectCommand({ Bucket, Key }));
  if (resp.$metadata.httpStatusCode != 200) {
    // TODO: better error handling?
    throw resp;
  }
  const remoteMd5 = resp.Metadata ? resp.Metadata[MD5METAFIELD] : null;

  if (!remoteMd5) {
    return true;
  }

  // we have a remote md5, compare it with the local one
  return (localMd5 != remoteMd5);
}
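
// Deploys the contents of `localFolder` to the website served at `vhost`:
// remote files with no local counterpart are deleted from the bucket, then
// new or modified files are uploaded.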
export async function deploy(vhost: string, localFolder: string) {
  const conf = await openApiConf();

  // Get paths of the local files to deploy
  const localFiles = await getLocalFiles(localFolder, "").catch(err => {
    if (err.code == "ENOENT") {
      console.error(`Error: directory '${localFolder}' does not exist`);
    } else {
      console.error(err);
    }
    process.exit(1);
  });

  // Get website info from guichet (bucket name and keys)
  const api = new WebsiteApi(conf);
  const vhostInfo = await api.getWebsite({ vhost }).catch(err => {
    if (err.response.status == 404) {
      console.error(`Error: website '${vhost}' does not exist`);
    } else {
      console.error(err);
    }
    process.exit(1);
  });

  // List the files currently stored in the bucket
  // @FIXME this info could be returned by the guichet API
  const s3client = new S3Client({
    endpoint: "https://garage.deuxfleurs.fr",
    region: "garage",
    forcePathStyle: true,
    credentials: {
      accessKeyId: vhostInfo.accessKeyId!,
      secretAccessKey: vhostInfo.secretAccessKey!,
    },
  });
  const Bucket = vhostInfo.vhost!.name!;
  const remoteFiles = await getBucketFiles(s3client, Bucket);

  // Delete files that are present in the bucket but not locally.
  // Do this before sending the new files to avoid hitting the size quota
  // unnecessarily.
  const resp = await deleteFiles(
    s3client,
    Bucket,
    [...remoteFiles]
      .filter(([name, _]) => !localFiles.find(({ s3Path }) => s3Path == name))
      .map(([name, _]) => name)
  );
  if (resp && resp.$metadata.httpStatusCode != 200) {
    // TODO: better error handling?
    console.error(resp);
    process.exit(1);
  }

  // Uploads a local file unless the remote copy is the same
  async function processFile(localPath: string, s3Path: string) {
    const localMd5 = await getFileMd5(localPath);
    const remoteFile = remoteFiles.get(s3Path);
    if (
      !remoteFile ||
      await needsUpdate(s3client, localPath, localMd5, Bucket, s3Path, remoteFile.size)
    ) {
      // `await` the upload so that failures propagate and the promise pool's
      // concurrency limit actually bounds the number of in-flight uploads
      await uploadFile(s3client, Bucket, s3Path, fs.createReadStream(localPath), localMd5);
    }
  }

  // Control concurrency while uploading
  await PromisePool
    .for(localFiles)
    .withConcurrency(6)
    .process(({ localPath, s3Path }) => processFile(localPath, s3Path));
}
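
// A minimal usage sketch (hypothetical module path and arguments):
//
//   import { deploy } from "./deploy.js";
//   await deploy("example.org", "./public");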