@salesforce/source-deploy-retrieve
JavaScript library to run Salesforce metadata deploys and retrieves
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.JsToXml = exports.ZipWriter = exports.StandardWriter = exports.ComponentWriter = exports.ComponentConverter = exports.stream2buffer = exports.pipeline = void 0;
/*
* Copyright (c) 2020, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
const node_path_1 = require("node:path");
const node_stream_1 = require("node:stream");
const node_util_1 = require("node:util");
const core_1 = require("@salesforce/core");
const jszip_1 = __importDefault(require("jszip"));
const graceful_fs_1 = require("graceful-fs");
const fast_xml_parser_1 = require("fast-xml-parser");
const core_2 = require("@salesforce/core");
const constants_1 = require("../common/constants");
const fileSystemHandler_1 = require("../utils/fileSystemHandler");
const types_1 = require("../client/types");
const resolve_1 = require("../resolve");
const metadataTransformerFactory_1 = require("./transformers/metadataTransformerFactory");
const convertContext_1 = require("./convertContext/convertContext");
const messages = new core_1.Messages('@salesforce/source-deploy-retrieve', 'sdr', new Map([["md_request_fail", "Metadata API request failed: %s"], ["error_convert_invalid_format", "Invalid conversion format '%s'"], ["error_could_not_infer_type", "%s: Could not infer a metadata type"], ["error_unexpected_child_type", "Unexpected child metadata [%s] found for parent type [%s]"], ["noParent", "Could not find parent type for %s (%s)"], ["error_expected_source_files", "%s: Expected source files for type '%s'"], ["error_failed_convert", "Component conversion failed: %s"], ["error_merge_metadata_target_unsupported", "Merge convert for metadata target format currently unsupported"], ["error_missing_adapter", "Missing adapter '%s' for metadata type '%s'"], ["error_missing_transformer", "Missing transformer '%s' for metadata type '%s'"], ["error_missing_type_definition", "Missing metadata type definition in registry for id '%s'."], ["error_missing_child_type_definition", "Type %s does not have a child type definition %s."], ["noChildTypes", "No child types found in registry for %s (reading %s at %s)"], ["error_no_metadata_xml_ignore", "Metadata xml file %s is forceignored but is required for %s."], ["noSourceIgnore", "%s metadata types require source files, but %s is forceignored."], ["noSourceIgnore.actions", "- Metadata types with content are composed of two files: a content file (i.e. MyApexClass.cls) and a -meta.xml file (i.e. MyApexClass.cls-meta.xml). You must include both files in your .forceignore file. Or try appending \u201C\\*\u201D to your existing .forceignore entry.\n\nSee <https://developer.salesforce.com/docs/atlas.en-us.sfdx_dev.meta/sfdx_dev/sfdx_dev_exclude_source.htm> for examples"], ["error_path_not_found", "%s: File or folder not found"], ["noContentFound", "SourceComponent %s (metadata type = %s) is missing its content file."], ["noContentFound.actions", ["Ensure the content file exists in the expected location.", "If the content file is in your .forceignore file, ensure the meta-xml file is also ignored to completely exclude it."]], ["error_parsing_xml", "SourceComponent %s (metadata type = %s) does not have an associated metadata xml to parse"], ["error_expected_file_path", "%s: path is to a directory, expected a file"], ["error_expected_directory_path", "%s: path is to a file, expected a directory"], ["error_directory_not_found_or_not_directory", "%s: path is not a directory"], ["error_no_directory_stream", "%s doesn't support readable streams on directories."], ["error_no_source_to_deploy", "No source-backed components present in the package."], ["error_no_components_to_retrieve", "No components in the package to retrieve."], ["error_static_resource_expected_archive_type", "A StaticResource directory must have a content type of application/zip or application/jar - found %s for %s."], ["error_static_resource_missing_resource_file", "A StaticResource must have an associated .resource file, missing %s.resource-meta.xml"], ["error_no_job_id", "The %s operation is missing a job ID. Initialize an operation with an ID, or start a new job."], ["missingApiVersion", "Could not determine an API version to use for the generated manifest. Tried looking for sourceApiVersion in sfdx-project.json, apiVersion from config vars, and the highest apiVersion from the APEX REST endpoint.\nUsing API version 58.0 as a last resort."], ["invalid_xml_parsing", "error parsing %s due to:\\n message: %s\\n line: %s\\n code: %s"], ["zipBufferError", "Zip buffer was not created during conversion"], ["undefinedComponentSet", "Unable to construct a componentSet. Check the logs for more information."], ["replacementsFileNotRead", "The file \"%s\" specified in the \"replacements\" property of sfdx-project.json could not be read."], ["unsupportedBundleType", "Unsupported Bundle Type: %s"], ["filePathGeneratorNoTypeSupport", "Type not supported for filepath generation: %s"], ["missingFolderType", "The registry has %s marked as inFolder but it does not have a folderType"], ["tooManyFiles", "Multiple files found for path: %s."], ["cantGetName", "Unable to calculate fullName from path: %s (%s)"], ["missingMetaFileSuffix", "The metadata registry is configured incorrectly for %s. Expected a metaFileSuffix."], ["uniqueIdElementNotInRegistry", "No uniqueIdElement found in registry for %s (reading %s at %s)."], ["uniqueIdElementNotInChild", "The uniqueIdElement %s was not found in the child (reading %s at %s)."], ["suggest_type_header", "A metadata type lookup for \"%s\" found the following close matches:"], ["suggest_type_did_you_mean", "-- Did you mean \".%s%s\" instead for the \"%s\" metadata type?"], ["suggest_type_more_suggestions", "Additional suggestions:\nConfirm the file name, extension, and directory names are correct. Validate against the registry at:\n<https://github.com/forcedotcom/source-deploy-retrieve/blob/main/src/registry/metadataRegistry.json>\n\nIf the type is not listed in the registry, check that it has Metadata API support via the Metadata Coverage Report:\n<https://developer.salesforce.com/docs/metadata-coverage>\n\nIf the type is available via Metadata API but not in the registry\n\n- Open an issue <https://github.com/forcedotcom/cli/issues>\n- Add the type via PR. Instructions: <https://github.com/forcedotcom/source-deploy-retrieve/blob/main/contributing/metadata.md>"], ["type_name_suggestions", "Confirm the metadata type name is correct. Validate against the registry at:\n<https://github.com/forcedotcom/source-deploy-retrieve/blob/main/src/registry/metadataRegistry.json>\n\nIf the type is not listed in the registry, check that it has Metadata API support via the Metadata Coverage Report:\n<https://developer.salesforce.com/docs/metadata-coverage>\n\nIf the type is available via Metadata API but not in the registry\n\n- Open an issue <https://github.com/forcedotcom/cli/issues>\n- Add the type via PR. Instructions: <https://github.com/forcedotcom/source-deploy-retrieve/blob/main/contributing/metadata.md>"]]));
exports.pipeline = (0, node_util_1.promisify)(node_stream_1.pipeline);
const stream2buffer = async (stream) => new Promise((resolve, reject) => {
// collect every chunk the stream emits, then concatenate them into one Buffer
const buf = Array();
stream.on('data', (chunk) => buf.push(chunk));
stream.on('end', () => resolve(Buffer.concat(buf)));
stream.on('error', (err) => reject(`error converting stream - ${err}`));
});
exports.stream2buffer = stream2buffer;
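/*
 * Usage sketch (illustrative, not part of the module): collect any Readable
 * into a single Buffer. `Readable.from` is Node's built-in helper; the sample
 * input data is made up.
 *
 *   const { Readable } = require('node:stream');
 *   const buf = await stream2buffer(Readable.from([Buffer.from('hello, '), Buffer.from('world')]));
 *   console.log(buf.toString()); // "hello, world"
 */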
class ComponentConverter extends node_stream_1.Transform {
targetFormat;
mergeSet;
defaultDirectory;
context = new convertContext_1.ConvertContext();
transformerFactory;
constructor(targetFormat, registry, mergeSet, defaultDirectory) {
super({ objectMode: true });
this.targetFormat = targetFormat;
this.mergeSet = mergeSet;
this.defaultDirectory = defaultDirectory;
this.transformerFactory = new metadataTransformerFactory_1.MetadataTransformerFactory(registry, this.context);
}
async _transform(chunk, encoding, callback) {
let err;
const writeInfos = [];
// Only transform components not marked for delete.
if (!chunk.isMarkedForDelete()) {
try {
const converts = [];
const transformer = this.transformerFactory.getTransformer(chunk);
transformer.defaultDirectory = this.defaultDirectory;
const mergeWith = this.mergeSet?.getSourceComponents(chunk);
switch (this.targetFormat) {
case 'source':
if (mergeWith) {
for (const mergeComponent of mergeWith) {
converts.push(transformer.toSourceFormat({ component: chunk, mergeWith: mergeComponent, mergeSet: this.mergeSet }));
}
}
if (converts.length === 0) {
converts.push(transformer.toSourceFormat({ component: chunk, mergeSet: this.mergeSet }));
}
break;
case 'metadata':
converts.push(transformer.toMetadataFormat(chunk));
break;
default:
throw new core_1.SfError(messages.getMessage('error_convert_invalid_format', [this.targetFormat]), 'LibraryError');
}
// could maybe improve all this with lazy async collections...
(await Promise.all(converts)).forEach((infos) => writeInfos.push(...infos));
}
catch (e) {
err = e;
}
}
callback(err, { component: chunk, writeInfos });
}
/**
 * Called at the end once all components have passed through the pipeline. Finalizers
 * take care of any additional work to be done at this stage, e.g. recomposing child components.
 */
async _flush(callback) {
let err;
try {
for await (const finalizerResult of this.context.executeFinalizers(this.defaultDirectory)) {
finalizerResult.forEach((result) => this.push(result));
}
}
catch (e) {
err = e;
}
callback(err);
}
}
exports.ComponentConverter = ComponentConverter;
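/*
 * Pipeline sketch (assumptions flagged): `componentSource` is a hypothetical
 * object-mode Readable emitting SourceComponents, and `registry` is supplied by
 * the caller. The converter turns each component into { component, writeInfos }
 * chunks for a ComponentWriter downstream (ZipWriter is defined below).
 *
 *   const converter = new ComponentConverter('metadata', registry);
 *   const writer = new ZipWriter(); // no rootDestination => zips in memory
 *   await exports.pipeline(componentSource, converter, writer);
 *   const zipBuffer = writer.buffer; // Buffer holding the generated zip
 */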
class ComponentWriter extends node_stream_1.Writable {
rootDestination;
logger;
constructor(rootDestination) {
super({ objectMode: true });
this.rootDestination = rootDestination;
this.logger = core_2.Logger.childFromRoot(this.constructor.name);
}
}
exports.ComponentWriter = ComponentWriter;
class StandardWriter extends ComponentWriter {
/** filepaths that converted files were written to */
converted = [];
deleted = [];
forceignore;
constructor(rootDestination) {
super(rootDestination);
this.forceignore = resolve_1.ForceIgnore.findAndCreate(rootDestination);
}
async _write(chunk, encoding, callback) {
let err;
if (chunk.writeInfos.length !== 0) {
try {
const toResolve = new Set();
// it is a reasonable expectation that when a conversion call exits, the files of
// every component have been written to the destination. This await ensures the microtask
// queue is empty when that call exits, and overall less memory is consumed.
await Promise.all(chunk.writeInfos
.map(makeWriteInfoAbsolute(this.rootDestination))
.filter(existsOrDoesntMatchIgnored(this.forceignore))
.map((info) => {
if (info.shouldDelete) {
this.deleted.push({
filePath: info.output,
state: types_1.ComponentStatus.Deleted,
type: info.type,
fullName: info.fullName,
});
return graceful_fs_1.promises.rm(info.output, { force: true, recursive: true });
}
// if there are children, resolve each file; otherwise just pick one of the files to resolve.
// "resolve" means "make these show up in the FileResponses"
if (toResolve.size === 0 ||
chunk.component.type.children !== undefined ||
// make each decomposed label show up in the fileResponses
chunk.component.type.strategies?.transformer === 'decomposedLabels') {
// This is a workaround for a server side ListViews bug where
// duplicate components are sent. W-9614275
if (toResolve.has(info.output)) {
this.logger.debug(`Ignoring duplicate metadata for: ${info.output}`);
return;
}
toResolve.add(info.output);
}
(0, fileSystemHandler_1.ensureFileExists)(info.output);
return (0, exports.pipeline)(info.source, (0, graceful_fs_1.createWriteStream)(info.output));
}));
this.converted.push(...toResolve);
}
catch (e) {
err = e;
}
}
callback(err);
}
}
exports.StandardWriter = StandardWriter;
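/*
 * Sketch of writing conversion results to disk; the destination directory and
 * the `componentSource`/`converter` streams are hypothetical. Once the pipeline
 * drains, `converted` lists every file path written and `deleted` the
 * components that were removed.
 *
 *   const writer = new StandardWriter(require('node:path').join(process.cwd(), 'force-app'));
 *   await exports.pipeline(componentSource, converter, writer);
 *   console.log(writer.converted, writer.deleted);
 */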
class ZipWriter extends ComponentWriter {
/**
* Count of files (not directories) added to the zip file.
*/
fileCount = 0;
zip = (0, jszip_1.default)();
zipBuffer;
constructor(rootDestination) {
super(rootDestination);
const destination = rootDestination ? `for: ${rootDestination}` : 'in memory';
this.logger.debug(`generating zip ${destination}`);
}
get buffer() {
return this.zipBuffer;
}
async _write(chunk, encoding, callback) {
let err;
try {
await Promise.all(chunk.writeInfos.filter(isWriteInfoWithSource).map(async (writeInfo) => {
// we don't want to prematurely zip folder types when their children might not yet be in the zip,
// so those files are left open as ReadableStreams until the zip finalizes
if (Boolean(chunk.component.type.folderType) || Boolean(chunk.component.type.folderContentType)) {
return this.addToZip(writeInfo.source, writeInfo.output);
}
// everything else can be zipped immediately to reduce the number of open files (Windows has a low limit!) and to help performance
const streamAsBuffer = await (0, exports.stream2buffer)(writeInfo.source);
return this.addToZip(streamAsBuffer, writeInfo.output);
}));
}
catch (e) {
err = e;
}
callback(err);
}
async _final(callback) {
let err;
try {
this.zipBuffer = await this.zip.generateAsync({
type: 'nodebuffer',
compression: 'DEFLATE',
compressionOptions: { level: 3 },
});
this.logger.debug('Generated zip complete');
}
catch (e) {
err = e;
}
callback(err);
}
addToZip(contents, path) {
// Ensure only posix paths are added to zip files
const posixPath = path.replace(/\\/g, '/');
this.zip.file(posixPath, contents);
this.fileCount++;
}
}
exports.ZipWriter = ZipWriter;
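/*
 * Example of the posix-path guarantee in addToZip above: Windows separators are
 * normalized so zip entries stay portable across platforms.
 *
 *   const writer = new ZipWriter();
 *   writer.addToZip(Buffer.from('<xml/>'), 'classes\\MyClass.cls-meta.xml');
 *   // stored in the zip as 'classes/MyClass.cls-meta.xml'; fileCount is now 1
 */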
/**
 * Convenience wrapper that serializes a JS object to XML content. Implemented as a stream
 * so it can be used as a valid source for ComponentWriters in the conversion pipeline,
 * even though it doesn't benefit from streaming in the typical way.
 */
class JsToXml extends node_stream_1.Readable {
xmlObject;
constructor(xmlObject) {
super();
this.xmlObject = xmlObject;
}
_read() {
const builder = new fast_xml_parser_1.XMLBuilder({
format: true,
indentBy: '    ',
ignoreAttributes: false,
cdataPropName: '__cdata',
commentPropName: constants_1.XML_COMMENT_PROP_NAME,
});
const builtXml = String(builder.build(this.xmlObject));
const xmlContent = correctComments(constants_1.XML_DECL.concat(handleSpecialEntities(builtXml)));
this.push(xmlContent);
this.push(null);
}
}
exports.JsToXml = JsToXml;
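/*
 * Usage sketch (the object shape is illustrative): because JsToXml is a
 * Readable, it can serve as a WriteInfo source or be collected directly.
 *
 *   const xml = await stream2buffer(new JsToXml({ ApexClass: { status: 'Active' } }));
 *   // xml.toString() => the XML declaration followed by '<ApexClass>...<status>Active</status>...</ApexClass>'
 */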
/** xmlBuilder likes to add a newline and indent before/after the comment (hypothesis: it uses `<` as a hint to newline/indent) */
const correctComments = (xml) => xml.includes('<!--') ? xml.replace(/\s+<!--(.*?)-->\s+/g, '<!--$1-->') : xml;
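/*
 * Example: without this fix the builder emits the comment on its own indented
 * line; the regex collapses that surrounding whitespace.
 *
 *   correctComments('<a>\n    <!--note-->\n</a>'); // => '<a><!--note--></a>'
 */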
/**
 * Use this function to handle special HTML entities.
 * XMLBuilder will otherwise escape the `&`, e.g. replacing `&#160;` with `&amp;#160;`.
 * This is a separate function to allow for future handling of other special entities.
 *
 * See https://github.com/NaturalIntelligence/fast-xml-parser/blob/fa5a7339a5ae2ca4aea8a256179b82464dbf510e/docs/v4/5.Entities.md
 * The parser can call addEntities to support more, but the Builder does not have that option.
 * You also can't use Builder.tagValueProcessor to apply this function
 * because the escaping of `&` happens AFTER that is called.
 */
const handleSpecialEntities = (xml) => xml.replaceAll('&amp;#160;', '&#160;');
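/*
 * Example: the builder escapes the ampersand of an already-encoded entity;
 * this helper restores the original entity.
 *
 *   handleSpecialEntities('<label>a&amp;#160;b</label>'); // => '<label>a&#160;b</label>'
 */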
/** discriminate between the shouldDelete and the regular WriteInfo */
const isWriteInfoWithSource = (writeInfo) => writeInfo.source !== undefined;
const makeWriteInfoAbsolute = (rootDestination = '') => (writeInfo) => ({
...writeInfo,
output: (0, node_path_1.isAbsolute)(writeInfo.output) ? writeInfo.output : (0, node_path_1.join)(rootDestination, writeInfo.output),
});
const existsOrDoesntMatchIgnored = (forceignore) => (writeInfo) => (0, graceful_fs_1.existsSync)(writeInfo.output) || forceignore.accepts(writeInfo.output);
//# sourceMappingURL=streams.js.map