@unito/integration-debugger
The Unito Integration Debugger
// For the Integration Debugger, the CrawlerDriver is "the thing that crawls an integration".
//
// For example, when a developer presses the "[n]ext" command in the UI, the CrawlerDriver is asked
// to execute the next "discovered step" of the integration.
//
// Under the hood, the CrawlerDriver communicates with a low-level Crawler to perform its duties:
//
// 1. The CrawlerDriver asks the Crawler to execute a step.
// 2. The Crawler executes the step and returns the executed step AND discovered steps.
// 3. At that point, the CrawlerDriver may:
// 3.1. Perform additional validation on the step.
// 3.2. Ask the Crawler to execute none/some/all of the discovered steps.
// 3.3. Add additional steps to execute.
//
// What the CrawlerDriver decides to do at step 3 is defined by the "step checks".
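//
// A minimal usage sketch (hedged: the values and the exact shape of a step are hypothetical,
// the real step shape is defined by the low-level Crawler module; Operation is the exported
// Crawler.Operation):
//
//   const driver = await createWithDummyCrawler(async step => ({ status: 200, headers: {}, payload: {} }));
//   driver.startFrom({ operation: Operation.GetCollection, path: '/', errors: [] });
//   let executedStep;
//   while ((executedStep = await driver.next())) {
//     console.log(executedStep.operation, executedStep.path, executedStep.errors);
//   }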
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Instance = exports.Operation = void 0;
exports.createWithProxyCrawler = createWithProxyCrawler;
exports.createWithDirectCrawler = createWithDirectCrawler;
exports.createWithDummyCrawler = createWithDummyCrawler;
const crypto_1 = __importDefault(require("crypto"));
const IntegrationsPlatform = __importStar(require("@unito/integrations-platform-client"));
const urls_1 = require("../resources/urls");
const headers_1 = __importDefault(require("../resources/headers"));
const stepChecks_1 = __importDefault(require("./stepChecks"));
const Crawler = __importStar(require("./crawler"));
const Generator = __importStar(require("./generator"));
exports.Operation = Crawler.Operation;
class Instance {
crawler;
generator;
stepChecks;
visitedSteps;
options;
/**
* Depending on the context (createWithProxyCrawler, createWithDirectCrawler, ...), we
* produce HTTP headers for the step, stored in "headersIn".
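*
* A minimal sketch of such a callback (mirroring createWithDirectCrawler below; the
* credential and secrets payloads are hypothetical):
* @example
* step => {
*   step.headersIn = {
*     [headers_1.default.CREDENTIALS]: Buffer.from(JSON.stringify(credentialPayload)).toString('base64'),
*     [headers_1.default.SECRETS]: Buffer.from(JSON.stringify(secretsPayload)).toString('base64'),
*     [headers_1.default.CORRELATION_ID]: crypto_1.default.randomUUID(),
*   };
* };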
*/
prepareHeaders;
constructor(crawler, generator, prepareHeaders, options = {
readOnly: false,
[exports.Operation.GetCollection]: { itemsPerPage: Number.MAX_SAFE_INTEGER, followNextPage: true },
}) {
this.crawler = crawler;
this.generator = generator;
this.prepareHeaders = prepareHeaders;
this.visitedSteps = new Set();
this.stepChecks = Object.fromEntries(Object.entries(stepChecks_1.default).filter(([, check]) => check.activatedByDefault));
// Guard against partial options objects: make sure the GetCollection options exist before reading them.
options[exports.Operation.GetCollection] ??= { itemsPerPage: Number.MAX_SAFE_INTEGER, followNextPage: true };
if ((options[exports.Operation.GetCollection].itemsPerPage ?? -1) < 0) {
options[exports.Operation.GetCollection].itemsPerPage = Number.MAX_SAFE_INTEGER;
}
options[exports.Operation.GetCollection].followNextPage ??= true;
this.options = options;
}
startFrom(step) {
// A deep copy of the step is performed so we don't have to worry about mutating the caller's object.
const stepCopy = JSON.parse(JSON.stringify(step));
this.prepareHeaders(stepCopy);
this.crawler.remove(stepCopy);
this.crawler.prepend(stepCopy);
}
remaining() {
return this.crawler.remaining();
}
getRelationSchema(schemaPath) {
return this.crawler.getRelationSchema(schemaPath);
}
getFieldSchemas(schemaPath) {
return this.crawler.getFieldSchemas(schemaPath);
}
set stepCheckKeys(keys) {
this.stepChecks = Object.fromEntries(Object.entries(stepChecks_1.default).filter(([key]) => keys === undefined || keys.includes(key)));
}
async next() {
// Crawl the next step.
const stepResult = await this.crawler.next();
// No more steps to crawl.
if (!stepResult) {
return;
}
// Keep track of the crawled step.
const { step } = stepResult;
this.visitedSteps.add(Crawler.buildStepUniqueId(step.operation, step.path));
// Prepare the headers of the discovered steps.
for (const step of stepResult.discoveredSteps) {
this.prepareHeaders(step);
}
// Add the discovered steps first, so that steps such as a GetItem that should follow a CreateItem are queued before a subsequent DeleteItem.
this.addDiscoveredSteps(stepResult);
// Perform checks on the step.
for (const [checkKey, stepCheck] of Object.entries(this.stepChecks)) {
// The step may have errors, but some checks are able to (and want to) validate steps in error.
const canValidate = stepResult.step.errors.length === 0 || stepCheck.validateOnError;
// Validate the step.
if (canValidate && stepCheck.validate) {
stepCheck.validate(stepResult.step, this);
}
const context = stepResult.step.context;
// After validation, a step must be error-free before additional steps are prepared; there is no "prepareOnError".
// If the executed step has a context (i.e. it was generated by a check), only prepare additional
// steps if the current stepCheck opts in via "prepareOnPreparedSteps".
if ((!context?.name || stepCheck.prepareOnPreparedSteps) && stepResult.step.errors.length === 0) {
// A deep copy of the step result is performed so checks don't need to worry about mutating it.
const stepResultCopy = JSON.parse(JSON.stringify(stepResult));
// Then, prepare the headers on the copy before the stepCheck prepares the step.
this.prepareHeaders(stepResultCopy.step);
// Prepare the steps.
const preparedSteps = await stepCheck.prepare(stepResultCopy, this);
// Flag and then append the prepared steps.
for (const preparedStep of preparedSteps) {
preparedStep.context = { name: checkKey };
this.crawler.append(preparedStep);
}
}
}
// Before returning the step result, blur any sensitive data.
return this.sanitizeStep(stepResult.step);
}
/**
* When we discover a step, we always crawl it to perform basic checks (defined in @unito/integration-api).
* This could be considered a check like the others, but the crawling of an integration is too core
* to the debugger to be a simple check that can be toggled on/off. If it could be toggled off,
* the behavior could surprise the developer (e.g. "why did the crawl stop here?").
*
* Configuration options can be used to change the default behavior and reduce the quantity of steps crawled.
* @see {@link CrawlerDriverOptions}
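*
* A hypothetical options object (shape taken from the constructor defaults above):
* @example
* {
*   readOnly: true,
*   [Operation.GetCollection]: { itemsPerPage: 5, followNextPage: false },
* }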
*/
addDiscoveredSteps(stepResult) {
// For GetCollection steps, we add the discovered GetItem steps and GetCollection (next page) steps according to CrawlerDriverOptions.
if (stepResult.step.operation === exports.Operation.GetCollection) {
const getCollectionOptions = this.options[exports.Operation.GetCollection];
const getItemSteps = stepResult.discoveredSteps.filter(step => step.operation === exports.Operation.GetItem);
for (const step of getItemSteps.slice(0, getCollectionOptions.itemsPerPage)) {
if (this.canCrawl(step)) {
this.crawler.append(step);
}
}
if (this.options[exports.Operation.GetCollection].followNextPage) {
const getCollectionSteps = stepResult.discoveredSteps.filter(step => step.operation === exports.Operation.GetCollection);
for (const step of getCollectionSteps) {
if (this.canCrawl(step)) {
this.crawler.append(step);
}
}
}
}
// In other cases, we simply add everything.
else {
for (const discoveredStep of stepResult.discoveredSteps) {
if (this.canCrawl(discoveredStep)) {
this.crawler.append(discoveredStep);
}
}
}
}
/**
* Determines if a discovered step can be crawled
*
* A step with a request schema cannot be crawled automatically without a check (see checks/requestSchema).
*
* @param step
* @returns true if a discovered step can be crawled
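*
* For example, a discovered step (hypothetical) like { operation: Operation.GetItem, path: '/items/1' }
* is only crawlable if it has no requestSchema, its buildStepUniqueId(operation, path) has not been
* visited yet, and no remaining step in the crawler shares that unique id.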
*/
canCrawl(step) {
const uniqueId = Crawler.buildStepUniqueId(step.operation, step.path);
return (!step.requestSchema &&
!this.visitedSteps.has(uniqueId) &&
this.crawler
.remaining()
.findIndex(remainingStep => Crawler.buildStepUniqueId(remainingStep.operation, remainingStep.path) === uniqueId) === -1);
}
/**
* Returns a sanitized version of the step, where sensitive data is blurred.
* @param step
* @returns sanitized step
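*
* Only the SECRETS and CREDENTIALS headers (in both headersIn and headersOut) are blurred.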
*/
sanitizeStep(step) {
const sanitizedStep = JSON.parse(JSON.stringify(step));
if (sanitizedStep.headersIn) {
if (sanitizedStep.headersIn[headers_1.default.SECRETS]?.length) {
sanitizedStep.headersIn[headers_1.default.SECRETS] = '**********';
}
if (sanitizedStep.headersIn[headers_1.default.CREDENTIALS]?.length) {
sanitizedStep.headersIn[headers_1.default.CREDENTIALS] = '**********';
}
}
if (sanitizedStep.headersOut) {
if (sanitizedStep.headersOut[headers_1.default.SECRETS]?.length) {
sanitizedStep.headersOut[headers_1.default.SECRETS] = '**********';
}
if (sanitizedStep.headersOut[headers_1.default.CREDENTIALS]?.length) {
sanitizedStep.headersOut[headers_1.default.CREDENTIALS] = '**********';
}
}
return sanitizedStep;
}
}
exports.Instance = Instance;
async function createWithProxyCrawler(credentialId, options) {
const crawler = await Crawler.create({
proxyCall: async (step) => {
const credentialIdHeader = credentialId.toString();
const xUnitoCorrelationId = crypto_1.default.randomUUID();
const path = step.path ?? '/';
let response;
if (options?.readOnly && !Crawler.ReadOperations.includes(step.operation)) {
return {
status: 500,
headers: {},
payload: {
code: '500',
message: 'This operation is not allowed in "read-only" mode',
},
};
}
try {
switch (step.operation) {
case Crawler.Operation.CreateItem:
response = await IntegrationsPlatform.postProxyGraph(credentialIdHeader, path, step.payloadIn ? step.payloadIn : undefined, { xUnitoCorrelationId });
break;
case Crawler.Operation.UpdateItem:
response = await IntegrationsPlatform.patchProxyGraph(credentialIdHeader, path, step.payloadIn ? step.payloadIn : undefined, { xUnitoCorrelationId });
break;
case Crawler.Operation.DeleteItem:
response = await IntegrationsPlatform.deleteProxyGraph(credentialIdHeader, path, {
xUnitoCorrelationId,
});
break;
case Crawler.Operation.SubscribeWebhook:
case Crawler.Operation.UnsubscribeWebhook:
response = await IntegrationsPlatform.updateWebhookSubscription(credentialIdHeader, (step.payloadIn ?? {}), {
xUnitoCorrelationId,
});
break;
case Crawler.Operation.GetCredentialAccount:
response = await IntegrationsPlatform.getProxyMe(credentialIdHeader, {
xUnitoCorrelationId,
});
break;
case Crawler.Operation.GetBlob:
{
const rawResponse = await IntegrationsPlatform.fetchProxyGraph(credentialIdHeader, path, {
xUnitoCorrelationId,
accept: 'application/octet-stream',
});
response = {
status: rawResponse.status,
headers: rawResponse.headers,
data: rawResponse.status === 200 ? await extractGetBlobStatistics(rawResponse) : await rawResponse.json(),
};
}
break;
default:
response = await IntegrationsPlatform.getProxyGraph(credentialIdHeader, path, {
xUnitoCorrelationId,
});
break;
}
}
catch (err) {
if (err instanceof IntegrationsPlatform.HttpError) {
response = {
status: err.status,
data: err.data,
};
}
else {
response = {
status: 500,
data: { message: err instanceof Error ? err.message : 'Unknown error' },
};
}
}
return {
status: response.status,
headers: response.headers ? Object.fromEntries(response.headers.entries()) : {},
payload: response.data ?? {},
};
},
timeout: options?.timeout,
});
const generator = await Generator.create(crawler);
return new Instance(crawler, generator, () => {
/* NOOP */
}, options);
}
async function createWithDirectCrawler(integrationUrl, graphUrl, credentialAccountUrl, webhookParsingUrl, webhookSubscriptionsUrl, webhookAcknowledgeRelativeUrl, credentialPayload, secretsPayload, options) {
const prepareHeaders = (step) => {
step.headersIn = {
[headers_1.default.CREDENTIALS]: Buffer.from(JSON.stringify(credentialPayload)).toString('base64'),
[headers_1.default.SECRETS]: Buffer.from(JSON.stringify(secretsPayload)).toString('base64'),
[headers_1.default.CORRELATION_ID]: crypto_1.default.randomUUID(),
//[Headers.OPERATION_DEADLINE]: We do not set it here because it is set by the Crawler before executing the step
Accept: step.operation === exports.Operation.GetBlob ? 'application/octet-stream' : 'application/json',
'Content-Type': 'application/json',
};
};
const crawler = await Crawler.create({
proxyCall: async (step) => {
const absoluteGraphUrl = (0, urls_1.toSafeUrl)(integrationUrl, step.path ?? graphUrl);
const absoluteCredentialAccountUrl = (0, urls_1.toSafeUrl)(integrationUrl, step.path ?? credentialAccountUrl);
const absoluteWebhookSubscriptionUrl = (0, urls_1.toSafeUrl)(integrationUrl, webhookSubscriptionsUrl ?? '');
let response = undefined;
let responseHeaders = {};
let responsePayload = {};
let responseStatus = 200;
if (options?.readOnly && !Crawler.ReadOperations.includes(step.operation)) {
return {
status: 500,
headers: {},
payload: {
code: '500',
message: 'This operation is not allowed in "read-only" mode',
},
};
}
try {
switch (step.operation) {
case Crawler.Operation.CreateItem:
response = await fetch(absoluteGraphUrl, {
method: 'POST',
headers: step.headersIn,
body: step.payloadIn ? JSON.stringify(step.payloadIn) : undefined,
});
break;
case Crawler.Operation.UpdateItem:
response = await fetch(absoluteGraphUrl, {
// Do not lowercase the PATCH method below because it won't be
// normalized back to PATCH by fetch.
// See: https://fetch.spec.whatwg.org/#concept-method-normalize
method: 'PATCH',
headers: step.headersIn,
body: step.payloadIn ? JSON.stringify(step.payloadIn) : undefined,
});
break;
case Crawler.Operation.DeleteItem:
response = await fetch(absoluteGraphUrl, {
// Do not lowercase the DELETE method below because it won't be
// normalized back to DELETE by fetch.
// See: https://fetch.spec.whatwg.org/#concept-method-normalize
method: 'DELETE',
headers: step.headersIn,
});
break;
case Crawler.Operation.SubscribeWebhook:
case Crawler.Operation.UnsubscribeWebhook:
response = await fetch(absoluteWebhookSubscriptionUrl, {
method: 'PUT',
headers: step.headersIn,
body: step.payloadIn ? JSON.stringify(step.payloadIn) : undefined,
});
break;
case Crawler.Operation.GetCredentialAccount:
response = await fetch(absoluteCredentialAccountUrl, {
method: 'GET',
headers: step.headersIn,
});
break;
case Crawler.Operation.GetBlob:
case Crawler.Operation.GetItem:
case Crawler.Operation.GetCollection:
response = await fetch(absoluteGraphUrl, {
method: 'GET',
headers: step.headersIn,
});
break;
default:
throw new Error(`Operation ${step.operation} is not supported yet`);
}
if (response) {
responseStatus = response.status;
responseHeaders = Object.fromEntries(response.headers.entries());
try {
if (step.operation === exports.Operation.GetBlob && response.status === 200) {
responsePayload = await extractGetBlobStatistics(response);
}
else {
responsePayload = await response.json();
}
}
catch {
/* TODO: the response body was empty or not valid JSON; keep the default empty payload for now. */
}
}
}
catch (err) {
const error = err;
// Errors thrown by fetch (e.g. network failures) do not carry a status/data; fall back to a 500.
responseStatus = error.status ?? 500;
responsePayload = error.data ?? { message: error instanceof Error ? error.message : 'Unknown error' };
}
return {
status: responseStatus,
headers: responseHeaders,
payload: responsePayload,
};
},
timeout: options?.timeout,
});
const generator = await Generator.create(crawler);
return new Instance(crawler, generator, prepareHeaders, options);
}
async function createWithDummyCrawler(proxyCall, generatorOptions, options) {
const crawler = await Crawler.create({ proxyCall });
const generator = await Generator.create(crawler, generatorOptions);
return new Instance(crawler, generator, () => {
/* NOOP */
}, options);
}
async function extractGetBlobStatistics(response) {
const startTime = process.hrtime.bigint();
let numberOfChunks = 0;
let maxChunkSize;
let minChunkSize;
let totalSizeInByte = 0;
const reader = response.body?.getReader();
let isDone = false;
try {
while (reader && !isDone) {
const chunk = await reader.read();
isDone = chunk.done;
if (chunk.value) {
++numberOfChunks;
totalSizeInByte += chunk.value.length;
maxChunkSize = Math.max(maxChunkSize ?? 0, chunk.value.length);
minChunkSize = Math.min(minChunkSize ?? Number.MAX_SAFE_INTEGER, chunk.value.length);
}
}
}
finally {
reader?.releaseLock();
}
return {
blob: {
status: response.status,
totalSizeInByte,
numberOfChunks,
maxChunkSize,
minChunkSize,
averageChunkSize: numberOfChunks > 0 ? totalSizeInByte / numberOfChunks : 0,
durationInMillis: Number(process.hrtime.bigint() - startTime) / 1000000,
},
};
}