@atomist/sdm

Atomist Software Delivery Machine SDK

"use strict"; /* * Copyright © 2020 Atomist, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ Object.defineProperty(exports, "__esModule", { value: true }); exports.getCacheConfig = exports.getCachePath = exports.S3GoalCacheArchiveStore = void 0; const retry_1 = require("@atomist/automation-client/lib/util/retry"); const AWS = require("aws-sdk"); const fs = require("fs-extra"); /** * Goal archive store that stores the compressed archives in a AWS * S3 bucket. All failures are caught and logged. If * retrieval fails, the error is rethrown so the cache-miss listeners * will be invoked. */ class S3GoalCacheArchiveStore { async store(gi, classifier, archivePath) { const file = fs.createReadStream(archivePath); return this.awsS3(gi, classifier, async (storage, bucket, cachePath) => storage.putObject({ Bucket: bucket, Key: cachePath, Body: file }).promise(), "store"); } async delete(gi, classifier) { await this.awsS3(gi, classifier, async (storage, bucket, cachePath) => storage.deleteObject({ Bucket: bucket, Key: cachePath }).promise(), "delete"); } async retrieve(gi, classifier, targetArchivePath) { await this.awsS3(gi, classifier, async (storage, bucket, cachePath) => { return new Promise((resolve, reject) => { storage .getObject({ Bucket: bucket, Key: cachePath }) .createReadStream() .on("error", reject) .pipe(fs.createWriteStream(targetArchivePath)) .on("error", reject) .on("close", () => resolve(targetArchivePath)); }); }, "retrieve"); } async awsS3(gi, classifier, op, verb) { const cacheConfig = getCacheConfig(gi); const cachePath = getCachePath(cacheConfig, classifier); const storage = new AWS.S3({ region: cacheConfig.region }); const objectUri = `s3://${cacheConfig.bucket}/${cachePath}`; const gerund = verb.replace(/e$/, "ing"); try { gi.progressLog.write(`${gerund} cache archive ${objectUri}`); await retry_1.doWithRetry(() => op(storage, cacheConfig.bucket, cachePath), `${verb} cache archive`); gi.progressLog.write(`${verb}d cache archive ${objectUri}`); return objectUri; } catch (e) { e.message = `Failed to ${verb} cache archive ${objectUri}: ${e.message}`; gi.progressLog.write(e.message); if (verb === "retrieve") { throw e; } } return undefined; } } exports.S3GoalCacheArchiveStore = S3GoalCacheArchiveStore; /** Construct object path for cache configuration and classifier. */ function getCachePath(cacheConfig, classifier = "default") { return [cacheConfig.path, classifier, "cache.tar.gz"].join("/"); } exports.getCachePath = getCachePath; /** * Retrieve cache configuration and populate with default values. */ function getCacheConfig(gi) { const cacheConfig = gi.configuration.sdm.cache || {}; cacheConfig.bucket = cacheConfig.bucket || `sdm-${gi.context.workspaceId}-${gi.configuration.name}-goal-cache` .toLowerCase() .replace(/[^-a-z0-9]*/g, "") .replace(/--+/g, "-"); cacheConfig.path = cacheConfig.path || "goal-cache"; return cacheConfig; } exports.getCacheConfig = getCacheConfig; //# sourceMappingURL=cache.js.map