@temporalio/worker
Temporal.io SDK Worker sub-package
962 lines (961 loc) • 72.2 kB
JavaScript
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
__setModuleDefault(result, mod);
return result;
};
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Worker = exports.NativeWorker = exports.defaultPayloadConverter = void 0;
exports.parseWorkflowCode = parseWorkflowCode;
const node_crypto_1 = __importDefault(require("node:crypto"));
const promises_1 = __importDefault(require("node:fs/promises"));
const path = __importStar(require("node:path"));
const vm = __importStar(require("node:vm"));
const node_events_1 = require("node:events");
const node_timers_1 = require("node:timers");
const rxjs_1 = require("rxjs");
const operators_1 = require("rxjs/operators");
const common_1 = require("@temporalio/common");
Object.defineProperty(exports, "defaultPayloadConverter", { enumerable: true, get: function () { return common_1.defaultPayloadConverter; } });
const internal_non_workflow_1 = require("@temporalio/common/lib/internal-non-workflow");
const proto_utils_1 = require("@temporalio/common/lib/proto-utils");
const time_1 = require("@temporalio/common/lib/time");
const logger_1 = require("@temporalio/common/lib/logger");
const type_helpers_1 = require("@temporalio/common/lib/type-helpers");
const logs_1 = require("@temporalio/workflow/lib/logs");
const core_bridge_1 = require("@temporalio/core-bridge");
const proto_1 = require("@temporalio/proto");
const activity_1 = require("./activity");
const connection_1 = require("./connection");
const pkg_1 = __importDefault(require("./pkg"));
const replay_1 = require("./replay");
const runtime_1 = require("./runtime");
const rxutils_1 = require("./rxutils");
const utils_1 = require("./utils");
const worker_options_1 = require("./worker-options");
const workflow_codec_runner_1 = require("./workflow-codec-runner");
const bundler_1 = require("./workflow/bundler");
const reusable_vm_1 = require("./workflow/reusable-vm");
const threaded_vm_1 = require("./workflow/threaded-vm");
const vm_1 = require("./workflow/vm");
const errors_1 = require("./errors");
function addBuildIdIfMissing(options, bundleCode) {
const bid = options.buildId; // eslint-disable-line deprecation/deprecation
if (bid != null) {
return options;
}
const suffix = bundleCode ? `+${node_crypto_1.default.createHash('sha256').update(bundleCode).digest('hex')}` : '';
return { ...options, buildId: `${pkg_1.default.name}@${pkg_1.default.version}${suffix}` };
}
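// Note (derived from the code above, version shown is illustrative): when no explicit buildId is
// given, the default takes the form `@temporalio/worker@<version>+<sha256 hex of the bundle code>`;
// if there is no bundle code, the `+<hash>` suffix is omitted.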
class NativeWorker {
runtime;
nativeWorker;
type = 'worker';
pollWorkflowActivation;
pollActivityTask;
completeWorkflowActivation;
completeActivityTask;
recordActivityHeartbeat;
initiateShutdown;
static async create(runtime, connection, options) {
const nativeWorker = await runtime.registerWorker((0, connection_1.extractNativeClient)(connection), (0, worker_options_1.toNativeWorkerOptions)(options));
return new NativeWorker(runtime, nativeWorker);
}
static async createReplay(runtime, options) {
const [worker, historyPusher] = await runtime.createReplayWorker((0, worker_options_1.toNativeWorkerOptions)(options));
return {
worker: new NativeWorker(runtime, worker),
historyPusher,
};
}
constructor(runtime, nativeWorker) {
this.runtime = runtime;
this.nativeWorker = nativeWorker;
this.pollWorkflowActivation = core_bridge_1.native.workerPollWorkflowActivation.bind(undefined, nativeWorker);
this.pollActivityTask = core_bridge_1.native.workerPollActivityTask.bind(undefined, nativeWorker);
this.completeWorkflowActivation = core_bridge_1.native.workerCompleteWorkflowActivation.bind(undefined, nativeWorker);
this.completeActivityTask = core_bridge_1.native.workerCompleteActivityTask.bind(undefined, nativeWorker);
this.recordActivityHeartbeat = core_bridge_1.native.workerRecordActivityHeartbeat.bind(undefined, nativeWorker);
this.initiateShutdown = core_bridge_1.native.workerInitiateShutdown.bind(undefined, nativeWorker);
}
flushCoreLogs() {
this.runtime.flushLogs();
}
async finalizeShutdown() {
await this.runtime.deregisterWorker(this.nativeWorker);
}
}
exports.NativeWorker = NativeWorker;
function formatTaskToken(taskToken) {
return Buffer.from(taskToken).toString('base64');
}
/**
* The Temporal Worker connects to Temporal Server and runs Workflows and Activities.
*/
class Worker {
runtime;
nativeWorker;
workflowCreator;
options;
logger;
metricMeter;
connection;
isReplayWorker;
activityHeartbeatSubject = new rxjs_1.Subject();
stateSubject = new rxjs_1.BehaviorSubject('INITIALIZED');
// Pushing an error to this subject causes the Worker to initiate a graceful shutdown, after
// which the Worker will be in FAILED state and the `run` promise will throw the first error
// published on this subject.
unexpectedErrorSubject = new rxjs_1.Subject();
// Pushing an error to this subject will cause the worker to IMMEDIATELY fall into FAILED state.
//
// The `run` promise will throw the first error reported on either this subject or the
// `unexpectedErrorSubject` subject. That is, suppose that an "unexpected error" comes in,
// which triggers graceful shutdown of the Worker, and then, while attempting to gracefully shut
// down the Worker, we get some "instant terminate error". The Worker's `run` promise will throw
// the _initial error_ rather than the "instant terminate error" that came later. This is done to
// avoid masking the original error with a subsequent one that is likely to be less relevant.
// Both errors will still be reported to the logger.
instantTerminateErrorSubject = new rxjs_1.Subject();
workflowPollerStateSubject = new rxjs_1.BehaviorSubject('POLLING');
activityPollerStateSubject = new rxjs_1.BehaviorSubject('POLLING');
/**
* Whether or not this worker has an outstanding workflow poll request
*/
hasOutstandingWorkflowPoll = false;
/**
* Whether or not this worker has an outstanding activity poll request
*/
hasOutstandingActivityPoll = false;
numInFlightActivationsSubject = new rxjs_1.BehaviorSubject(0);
numInFlightActivitiesSubject = new rxjs_1.BehaviorSubject(0);
numInFlightNonLocalActivitiesSubject = new rxjs_1.BehaviorSubject(0);
numInFlightLocalActivitiesSubject = new rxjs_1.BehaviorSubject(0);
numCachedWorkflowsSubject = new rxjs_1.BehaviorSubject(0);
numHeartbeatingActivitiesSubject = new rxjs_1.BehaviorSubject(0);
evictionsEmitter = new node_events_1.EventEmitter();
static nativeWorkerCtor = NativeWorker;
// Used to add uniqueness to replay worker task queue names
static replayWorkerCount = 0;
static SELF_INDUCED_SHUTDOWN_EVICTION = {
message: 'Shutting down',
reason: replay_1.EvictionReason.FATAL,
};
workflowCodecRunner;
/**
* Create a new Worker.
* This method initiates a connection to the server and will throw (asynchronously) on connection failure.
*/
static async create(options) {
const runtime = runtime_1.Runtime.instance();
const logger = logger_1.LoggerWithComposedMetadata.compose(runtime.logger, {
sdkComponent: common_1.SdkComponent.worker,
taskQueue: options.taskQueue ?? 'default',
});
const metricMeter = runtime.metricMeter.withTags({
namespace: options.namespace ?? 'default',
taskQueue: options.taskQueue ?? 'default',
});
const nativeWorkerCtor = this.nativeWorkerCtor;
const compiledOptions = (0, worker_options_1.compileWorkerOptions)(options, logger, metricMeter);
logger.debug('Creating worker', {
options: {
...compiledOptions,
...(compiledOptions.workflowBundle && (0, worker_options_1.isCodeBundleOption)(compiledOptions.workflowBundle)
? {
// Avoid dumping workflow bundle code to the console
workflowBundle: {
code: `<string of length ${compiledOptions.workflowBundle.code.length}>`,
},
}
: {}),
},
});
const bundle = await this.getOrCreateBundle(compiledOptions, logger);
let workflowCreator = undefined;
if (bundle) {
workflowCreator = await this.createWorkflowCreator(bundle, compiledOptions, logger);
}
// If no connection was provided, create a new one without a CREATOR reference,
// so that it can be automatically closed when this Worker shuts down.
const connection = options.connection ?? (await connection_1.InternalNativeConnection.connect());
let nativeWorker;
const compiledOptionsWithBuildId = addBuildIdIfMissing(compiledOptions, bundle?.code);
try {
nativeWorker = await nativeWorkerCtor.create(runtime, connection, compiledOptionsWithBuildId);
}
catch (err) {
// We just created this connection, close it
if (!options.connection) {
await connection.close();
}
throw err;
}
(0, connection_1.extractReferenceHolders)(connection).add(nativeWorker);
return new this(runtime, nativeWorker, workflowCreator, compiledOptionsWithBuildId, logger, metricMeter, connection);
}
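// Usage sketch (not part of this module's source; the task queue name, workflows path and `greet`
// activity are illustrative):
//
//   const { Worker, NativeConnection } = require('@temporalio/worker');
//
//   async function main() {
//     const connection = await NativeConnection.connect({ address: 'localhost:7233' });
//     const worker = await Worker.create({
//       connection,
//       namespace: 'default',
//       taskQueue: 'example',
//       workflowsPath: require.resolve('./workflows'),
//       activities: { async greet(name) { return `Hello, ${name}!`; } },
//     });
//     await worker.run();
//   }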
static async createWorkflowCreator(workflowBundle, compiledOptions, logger) {
const registeredActivityNames = new Set(compiledOptions.activities.keys());
// This isn't required for VS Code, only for Chrome DevTools, which doesn't support debugging worker threads.
// We also rely on this in debug-replayer where we inject a global variable to be read from workflow context.
if (compiledOptions.debugMode) {
if (compiledOptions.reuseV8Context) {
return await reusable_vm_1.ReusableVMWorkflowCreator.create(workflowBundle, compiledOptions.isolateExecutionTimeoutMs, registeredActivityNames);
}
return await vm_1.VMWorkflowCreator.create(workflowBundle, compiledOptions.isolateExecutionTimeoutMs, registeredActivityNames);
}
else {
return await threaded_vm_1.ThreadedVMWorkflowCreator.create({
workflowBundle,
threadPoolSize: compiledOptions.workflowThreadPoolSize,
isolateExecutionTimeoutMs: compiledOptions.isolateExecutionTimeoutMs,
reuseV8Context: compiledOptions.reuseV8Context ?? true,
registeredActivityNames,
logger,
});
}
}
/**
* Create a replay Worker, and run the provided history against it. Will resolve as soon as
* the history has finished being replayed, or if the workflow produces a nondeterminism error.
*
* @param workflowId If provided, use this as the workflow id during replay. Histories do not
* contain a workflow id, so it must be provided separately if your workflow depends on it.
* @throws {@link DeterminismViolationError} if the workflow code is not compatible with the history.
* @throws {@link ReplayError} on any other replay related error.
*/
static async runReplayHistory(options, history, workflowId) {
const validated = this.validateHistory(history);
const result = await this.runReplayHistories(options, [
{ history: validated, workflowId: workflowId ?? 'fake' },
]).next();
if (result.done)
throw new common_1.IllegalStateError('Expected at least one replay result');
if (result.value.error)
throw result.value.error;
}
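// Usage sketch (assumes a history exported as JSON, e.g. via the Temporal CLI; the file path is
// illustrative):
//
//   const fs = require('node:fs/promises');
//   const history = JSON.parse(await fs.readFile('./history.json', 'utf8'));
//   await Worker.runReplayHistory({ workflowsPath: require.resolve('./workflows') }, history);
//   // Resolves if replay succeeds; throws DeterminismViolationError on nondeterminism.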
/**
* Create a replay Worker, running all histories provided by the passed-in iterable.
*
* Returns an async iterable of results for each history replayed.
*/
static async *runReplayHistories(options, histories) {
const [worker, pusher] = await this.constructReplayWorker(options);
const rt = worker.runtime;
const evictions = (0, node_events_1.on)(worker.evictionsEmitter, 'eviction');
const runPromise = worker.run().then(() => {
throw new errors_1.ShutdownError('Worker was shutdown');
});
void runPromise.catch(() => {
// ignore to avoid unhandled rejections
});
let innerError = undefined;
try {
try {
for await (const { history, workflowId } of histories) {
const validated = this.validateHistory(history);
await rt.pushHistory(pusher, workflowId, validated);
const next = await Promise.race([evictions.next(), runPromise]);
if (next.done) {
break; // This shouldn't happen, handle just in case
}
const [{ runId, evictJob }] = next.value;
const error = (0, replay_1.evictionReasonToReplayError)(evictJob);
// We replay one workflow at a time so the workflow ID comes from the histories iterable.
yield {
workflowId,
runId,
error,
};
}
}
catch (err) {
innerError = err;
}
}
finally {
try {
rt.closeHistoryStream(pusher);
worker.shutdown();
}
catch {
// ignore in case worker was already shutdown
}
try {
await runPromise;
}
catch (err) {
/* eslint-disable no-unsafe-finally */
if (err instanceof errors_1.ShutdownError) {
if (innerError !== undefined)
throw innerError;
return;
}
else if (innerError === undefined) {
throw err;
}
else {
throw new errors_1.CombinedWorkerRunError('Worker run failed with inner error', {
cause: {
workerError: err,
innerError,
},
});
}
/* eslint-enable no-unsafe-finally */
}
}
}
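// Usage sketch for replaying several histories (the `histories` iterable is illustrative; each
// entry must be `{ history, workflowId }`, as consumed by the loop above):
//
//   for await (const result of Worker.runReplayHistories(
//     { workflowsPath: require.resolve('./workflows') },
//     histories,
//   )) {
//     if (result.error) console.error(result.workflowId, result.runId, result.error);
//   }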
static validateHistory(history) {
if (typeof history !== 'object' || history == null) {
throw new TypeError(`Expected a non-null history object, got ${typeof history}`);
}
const { eventId } = history.events[0];
// in a "valid" history, eventId would be Long
if (typeof eventId === 'string') {
return (0, proto_utils_1.historyFromJSON)(history);
}
else {
return history;
}
}
static async constructReplayWorker(options) {
const nativeWorkerCtor = this.nativeWorkerCtor;
const fixedUpOptions = {
taskQueue: (options.replayName ?? 'fake_replay_queue') + '-' + this.replayWorkerCount,
debugMode: true,
...options,
};
this.replayWorkerCount++;
const runtime = runtime_1.Runtime.instance();
const logger = logger_1.LoggerWithComposedMetadata.compose(runtime.logger, {
sdkComponent: 'worker',
taskQueue: fixedUpOptions.taskQueue,
});
const metricMeter = runtime.metricMeter.withTags({
namespace: 'default',
taskQueue: fixedUpOptions.taskQueue,
});
const compiledOptions = (0, worker_options_1.compileWorkerOptions)(fixedUpOptions, logger, metricMeter);
const bundle = await this.getOrCreateBundle(compiledOptions, logger);
if (!bundle) {
throw new TypeError('ReplayWorkerOptions must contain workflowsPath or workflowBundle');
}
const workflowCreator = await this.createWorkflowCreator(bundle, compiledOptions, logger);
const replayHandle = await nativeWorkerCtor.createReplay(runtime, addBuildIdIfMissing(compiledOptions, bundle.code));
return [
new this(runtime, replayHandle.worker, workflowCreator, compiledOptions, logger, metricMeter, undefined, true),
replayHandle.historyPusher,
];
}
static async getOrCreateBundle(compiledOptions, logger) {
if (compiledOptions.workflowBundle) {
if (compiledOptions.workflowsPath) {
logger.warn('Ignoring WorkerOptions.workflowsPath because WorkerOptions.workflowBundle is set');
}
if (compiledOptions.bundlerOptions) {
logger.warn('Ignoring WorkerOptions.bundlerOptions because WorkerOptions.workflowBundle is set');
}
const modules = new Set(compiledOptions.interceptors.workflowModules);
// Warn if user tries to customize the default set of workflow interceptor modules
if (modules &&
new Set([...modules, ...bundler_1.defaultWorkflowInterceptorModules]).size !== bundler_1.defaultWorkflowInterceptorModules.length) {
logger.warn('Ignoring WorkerOptions.interceptors.workflowModules because WorkerOptions.workflowBundle is set.\n' +
'To use workflow interceptors with a workflowBundle, pass them in the call to bundleWorkflowCode.');
}
if ((0, worker_options_1.isCodeBundleOption)(compiledOptions.workflowBundle)) {
return parseWorkflowCode(compiledOptions.workflowBundle.code);
}
else if ((0, worker_options_1.isPathBundleOption)(compiledOptions.workflowBundle)) {
const code = await promises_1.default.readFile(compiledOptions.workflowBundle.codePath, 'utf8');
return parseWorkflowCode(code, compiledOptions.workflowBundle.codePath);
}
else {
throw new TypeError('Invalid WorkflowOptions.workflowBundle');
}
}
else if (compiledOptions.workflowsPath) {
const bundler = new bundler_1.WorkflowCodeBundler({
logger,
workflowsPath: compiledOptions.workflowsPath,
workflowInterceptorModules: compiledOptions.interceptors.workflowModules,
failureConverterPath: compiledOptions.dataConverter?.failureConverterPath,
payloadConverterPath: compiledOptions.dataConverter?.payloadConverterPath,
ignoreModules: compiledOptions.bundlerOptions?.ignoreModules,
webpackConfigHook: compiledOptions.bundlerOptions?.webpackConfigHook,
});
const bundle = await bundler.createBundle();
return parseWorkflowCode(bundle.code);
}
else {
return undefined;
}
}
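// Usage sketch for the prebundled path handled above (bundleWorkflowCode is exported by this
// package; the output file name is illustrative):
//
//   const fs = require('node:fs/promises');
//   const { bundleWorkflowCode } = require('@temporalio/worker');
//   const { code } = await bundleWorkflowCode({ workflowsPath: require.resolve('./workflows') });
//   await fs.writeFile('./workflow-bundle.js', code);
//   // Later, at Worker creation time:
//   //   workflowBundle: { codePath: './workflow-bundle.js' }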
/**
* Create a new Worker from nativeWorker.
*/
constructor(runtime, nativeWorker,
/**
* Optional WorkflowCreator - if not provided, Worker will not poll on Workflows
*/
workflowCreator, options,
/** Logger bound to 'sdkComponent: worker' */
logger, metricMeter, connection, isReplayWorker = false) {
this.runtime = runtime;
this.nativeWorker = nativeWorker;
this.workflowCreator = workflowCreator;
this.options = options;
this.logger = logger;
this.metricMeter = metricMeter;
this.connection = connection;
this.isReplayWorker = isReplayWorker;
this.workflowCodecRunner = new workflow_codec_runner_1.WorkflowCodecRunner(options.loadedDataConverter.payloadCodecs);
}
/**
* An Observable which emits each time the number of in flight activations changes
*/
get numInFlightActivations$() {
return this.numInFlightActivationsSubject;
}
/**
* An Observable which emits each time the number of in flight Activity tasks changes
*/
get numInFlightActivities$() {
return this.numInFlightActivitiesSubject;
}
/**
* An Observable which emits each time the number of cached workflows changes
*/
get numRunningWorkflowInstances$() {
return this.numCachedWorkflowsSubject;
}
/**
* Get the poll state of this worker
*/
getState() {
// Setters and getters require the same visibility, so we add this public getter function
return this.stateSubject.getValue();
}
/**
* Get a status overview of this Worker
*/
getStatus() {
return {
runState: this.state,
numHeartbeatingActivities: this.numHeartbeatingActivitiesSubject.value,
workflowPollerState: this.workflowPollerStateSubject.value,
activityPollerState: this.activityPollerStateSubject.value,
hasOutstandingWorkflowPoll: this.hasOutstandingWorkflowPoll,
hasOutstandingActivityPoll: this.hasOutstandingActivityPoll,
numCachedWorkflows: this.numCachedWorkflowsSubject.value,
numInFlightWorkflowActivations: this.numInFlightActivationsSubject.value,
numInFlightActivities: this.numInFlightActivitiesSubject.value,
numInFlightNonLocalActivities: this.numInFlightNonLocalActivitiesSubject.value,
numInFlightLocalActivities: this.numInFlightLocalActivitiesSubject.value,
};
}
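// Usage sketch: periodically logging a status overview while the Worker runs (the interval value
// is arbitrary):
//
//   const interval = setInterval(() => console.log(worker.getStatus()), 10000);
//   await worker.run();
//   clearInterval(interval);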
get state() {
return this.stateSubject.getValue();
}
set state(state) {
this.logger.info('Worker state changed', { state });
this.stateSubject.next(state);
}
/**
* Start shutting down the Worker. The Worker stops polling for new tasks and sends
* {@link https://typescript.temporal.io/api/namespaces/activity#cancellation | cancellation}
* (via a {@link CancelledFailure} with `message` set to `'WORKER_SHUTDOWN'`) to running Activities.
* Note: if the Activity accepts cancellation (i.e. re-throws or allows the `CancelledFailure`
* to be thrown out of the Activity function), the Activity Task will be marked as failed, not
* cancelled. It's helpful for the Activity Task to be marked failed during shutdown because the
* Server will retry the Activity sooner (than if the Server had to wait for the Activity Task
* to time out).
*
* When called, immediately transitions {@link state} to `'STOPPING'` and asks Core to shut down.
* Once Core has confirmed that it's shutting down, the Worker enters `'DRAINING'` state. It will
* stay in that state until both task pollers receive a `ShutdownError`, at which point we'll
* transition to `DRAINED` state. Once all currently running Activities and Workflow Tasks have
* completed, the Worker transitions to `'STOPPED'`.
*/
shutdown() {
if (this.state !== 'RUNNING') {
throw new common_1.IllegalStateError(`Not running. Current state: ${this.state}`);
}
this.state = 'STOPPING';
try {
this.nativeWorker.initiateShutdown();
this.state = 'DRAINING';
}
catch (error) {
// This is totally unexpected and indicates there's something horribly wrong with the Worker
// state. An attempt to shut down gracefully would very likely hang, so just terminate immediately.
this.logger.error('Failed to initiate shutdown', { error });
this.instantTerminateErrorSubject.error(error);
}
}
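// Usage sketch: wiring graceful shutdown to a process signal (the signal choice is illustrative);
// `run()` resolves once the Worker reaches 'STOPPED':
//
//   process.once('SIGINT', () => worker.shutdown());
//   await worker.run();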
/**
* An observable that completes when {@link state} becomes `'DRAINED'` or throws if {@link state} transitions to
* `'STOPPING'` and remains that way for {@link this.options.shutdownForceTimeMs}.
*/
forceShutdown$() {
if (this.options.shutdownForceTimeMs == null) {
return rxjs_1.EMPTY;
}
return (0, rxjs_1.race)(this.stateSubject.pipe((0, operators_1.filter)((state) => state === 'STOPPING'), (0, operators_1.delay)(this.options.shutdownForceTimeMs), (0, operators_1.tap)({
next: () => {
// Inject the error into the instantTerminateError subject so that we don't mask
// any error that might have caused the Worker to shut down in the first place.
this.logger.debug('Shutdown force time expired, terminating worker');
this.instantTerminateErrorSubject.error(new errors_1.GracefulShutdownPeriodExpiredError('Timed out while waiting for worker to shutdown gracefully'));
},
})), this.stateSubject.pipe((0, operators_1.filter)((state) => state === 'DRAINED'), (0, operators_1.first)())).pipe((0, operators_1.ignoreElements)());
}
/**
* An observable which repeatedly polls for new tasks unless worker becomes suspended.
* The observable stops emitting once core is shutting down.
*/
pollLoop$(pollFn) {
return (0, rxjs_1.from)((async function* () {
for (;;) {
try {
yield await pollFn();
}
catch (err) {
if (err instanceof errors_1.ShutdownError) {
break;
}
throw err;
}
}
})());
}
/**
* Process Activity tasks
*/
activityOperator() {
return (0, rxjs_1.pipe)((0, rxutils_1.closeableGroupBy)(({ base64TaskToken }) => base64TaskToken), (0, operators_1.mergeMap)((group$) => {
return group$.pipe((0, rxutils_1.mergeMapWithState)(async (activity, { task, base64TaskToken, protobufEncodedTask }) => {
const { taskToken, variant } = task;
if (!variant) {
throw new TypeError('Got an activity task without a "variant" attribute');
}
// We either want to return an activity result (for failures) or pass on the activity for running at a later stage
// If cancel is requested we ignore the result of this function
// We don't run the activity directly in this operator because we need to return the activity in the state
// so it can be cancelled if requested
let output;
switch (variant) {
case 'start': {
let info = undefined;
try {
if (activity !== undefined) {
throw new common_1.IllegalStateError(`Got start event for an already running activity: ${base64TaskToken}`);
}
info = await extractActivityInfo(task, this.options.loadedDataConverter, this.options.namespace, this.options.taskQueue);
const { activityType } = info;
// Use the corresponding activity if it exists; otherwise, fall back to the default activity function (if one exists)
const fn = this.options.activities.get(activityType) ?? this.options.activities.get('default');
if (typeof fn !== 'function') {
throw common_1.ApplicationFailure.create({
type: 'NotFoundError',
message: `Activity function ${activityType} is not registered on this Worker, available activities: ${JSON.stringify([...this.options.activities.keys()])}`,
nonRetryable: false,
});
}
let args;
try {
args = await (0, internal_non_workflow_1.decodeArrayFromPayloads)(this.options.loadedDataConverter, task.start?.input);
}
catch (err) {
throw common_1.ApplicationFailure.fromError(err, {
message: `Failed to parse activity args for activity ${activityType}: ${(0, type_helpers_1.errorMessage)(err)}`,
nonRetryable: false,
});
}
const headers = task.start?.headerFields ?? {};
const input = {
args,
headers,
};
this.logger.trace('Starting activity', (0, activity_1.activityLogAttributes)(info));
activity = new activity_1.Activity(info, fn, this.options.loadedDataConverter, (details) => this.activityHeartbeatSubject.next({
type: 'heartbeat',
info: info,
taskToken,
base64TaskToken,
details,
onError() {
activity?.cancel('HEARTBEAT_DETAILS_CONVERSION_FAILED'); // activity must be defined
},
}), this.logger, this.metricMeter, this.options.interceptors.activity);
output = { type: 'run', activity, input };
break;
}
catch (e) {
const error = (0, common_1.ensureApplicationFailure)(e);
this.logger.error(`Error while processing ActivityTask.start: ${(0, type_helpers_1.errorMessage)(error)}`, {
...(info ? (0, activity_1.activityLogAttributes)(info) : {}),
error: e,
task: JSON.stringify(task.toJSON()),
taskEncoded: Buffer.from(protobufEncodedTask).toString('base64'),
});
output = {
type: 'result',
result: {
failed: {
failure: await (0, internal_non_workflow_1.encodeErrorToFailure)(this.options.loadedDataConverter, error),
},
},
};
break;
}
}
case 'cancel': {
output = { type: 'ignore' };
if (activity === undefined) {
this.logger.trace('Tried to cancel a non-existing activity', {
taskToken: base64TaskToken,
});
break;
}
// NOTE: activity will not be considered cancelled until it confirms cancellation (by throwing a CancelledFailure)
this.logger.trace('Cancelling activity', (0, activity_1.activityLogAttributes)(activity.info));
const reason = task.cancel?.reason;
if (reason === undefined || reason === null) {
// Special case of Lang side cancellation during shutdown (see `activity.shutdown.evict` above)
activity.cancel('WORKER_SHUTDOWN');
}
else {
activity.cancel(proto_1.coresdk.activity_task.ActivityCancelReason[reason]);
}
break;
}
}
return { state: activity, output: { taskToken, output } };
}, undefined // initial value
), (0, operators_1.mergeMap)(async ({ output, taskToken }) => {
if (output.type === 'ignore') {
return undefined;
}
if (output.type === 'result') {
return { taskToken, result: output.result };
}
const { base64TaskToken } = output.activity.info;
this.activityHeartbeatSubject.next({
type: 'create',
base64TaskToken,
});
let result;
const numInFlightBreakdownSubject = output.activity.info.isLocal
? this.numInFlightLocalActivitiesSubject
: this.numInFlightNonLocalActivitiesSubject;
this.numInFlightActivitiesSubject.next(this.numInFlightActivitiesSubject.value + 1);
numInFlightBreakdownSubject.next(numInFlightBreakdownSubject.value + 1);
try {
result = await output.activity.run(output.input);
}
finally {
numInFlightBreakdownSubject.next(numInFlightBreakdownSubject.value - 1);
this.numInFlightActivitiesSubject.next(this.numInFlightActivitiesSubject.value - 1);
}
const status = result.failed ? 'failed' : result.completed ? 'completed' : 'cancelled';
if (status === 'failed') {
// Make sure to flush the last heartbeat
this.logger.trace('Activity failed, waiting for heartbeats to be flushed', {
...(0, activity_1.activityLogAttributes)(output.activity.info),
status,
});
await new Promise((resolve) => {
this.activityHeartbeatSubject.next({
type: 'completion',
flushRequired: true,
base64TaskToken,
callback: resolve,
});
});
}
else {
// Notify the Activity heartbeat state mapper that the Activity has completed
this.activityHeartbeatSubject.next({
type: 'completion',
flushRequired: false,
base64TaskToken,
callback: () => undefined,
});
}
this.logger.trace('Activity resolved', {
...(0, activity_1.activityLogAttributes)(output.activity.info),
status,
});
return { taskToken, result };
}), (0, operators_1.filter)((result) => result !== undefined), (0, operators_1.map)((rest) => proto_1.coresdk.ActivityTaskCompletion.encodeDelimited(rest).finish()), (0, operators_1.tap)({
next: () => {
group$.close();
},
}));
}));
}
/**
* Process activations from the same workflow execution to an observable of completions.
*
* Injects a synthetic eviction activation when the worker transitions to no longer polling.
*/
handleWorkflowActivations(activations$) {
const syntheticEvictionActivations$ = this.workflowPollerStateSubject.pipe(
// Core has indicated that it will not return any more poll results; evict all cached WFs.
(0, operators_1.filter)((state) => state !== 'POLLING'), (0, operators_1.first)(), (0, operators_1.map)(() => ({
activation: proto_1.coresdk.workflow_activation.WorkflowActivation.create({
runId: activations$.key,
jobs: [{ removeFromCache: Worker.SELF_INDUCED_SHUTDOWN_EVICTION }],
}),
synthetic: true,
})), (0, operators_1.takeUntil)(activations$.pipe((0, operators_1.last)(undefined, null))));
const activations$$ = activations$.pipe((0, operators_1.map)((activation) => ({ activation, synthetic: false })));
return (0, rxjs_1.merge)(activations$$, syntheticEvictionActivations$).pipe((0, operators_1.tap)(() => {
this.numInFlightActivationsSubject.next(this.numInFlightActivationsSubject.value + 1);
}), (0, rxutils_1.mergeMapWithState)(this.handleActivation.bind(this), undefined), (0, operators_1.tap)(({ close }) => {
this.numInFlightActivationsSubject.next(this.numInFlightActivationsSubject.value - 1);
if (close) {
activations$.close();
this.numCachedWorkflowsSubject.next(this.numCachedWorkflowsSubject.value - 1);
}
}), (0, operators_1.takeWhile)(({ close }) => !close, true /* inclusive */), (0, operators_1.map)(({ completion }) => completion), (0, operators_1.filter)((result) => result !== undefined));
}
/**
* Process a single activation to a completion.
*/
async handleActivation(workflow, { activation, synthetic }) {
try {
const removeFromCacheIx = activation.jobs.findIndex(({ removeFromCache }) => removeFromCache);
const close = removeFromCacheIx !== -1;
const jobs = activation.jobs;
if (close) {
const asEvictJob = jobs.splice(removeFromCacheIx, 1)[0].removeFromCache;
if (asEvictJob) {
this.evictionsEmitter.emit('eviction', {
runId: activation.runId,
evictJob: asEvictJob,
});
}
}
activation.jobs = jobs;
if (jobs.length === 0) {
this.logger.trace('Disposing workflow', workflow ? workflow.logAttributes : { runId: activation.runId });
await workflow?.workflow.dispose();
if (!close) {
throw new common_1.IllegalStateError('Got a Workflow activation with no jobs');
}
const completion = synthetic
? undefined
: proto_1.coresdk.workflow_completion.WorkflowActivationCompletion.encodeDelimited({
runId: activation.runId,
successful: {},
}).finish();
return { state: undefined, output: { close, completion } };
}
const decodedActivation = await this.workflowCodecRunner.decodeActivation(activation);
if (workflow === undefined) {
const initWorkflowDetails = decodedActivation.jobs[0]?.initializeWorkflow;
if (initWorkflowDetails == null)
throw new common_1.IllegalStateError('Received workflow activation for an untracked workflow with no init workflow job');
workflow = await this.createWorkflow(decodedActivation, initWorkflowDetails);
}
let isFatalError = false;
try {
const unencodedCompletion = await workflow.workflow.activate(decodedActivation);
const completion = await this.workflowCodecRunner.encodeCompletion(unencodedCompletion);
return { state: workflow, output: { close, completion } };
}
catch (err) {
if (err instanceof errors_1.UnexpectedError) {
isFatalError = true;
}
throw err;
}
finally {
// A fatal error means we cannot call into this workflow again, unfortunately.
if (!isFatalError) {
// When processing workflows through runReplayHistories, Core may still send non-replay
// activations on the very last Workflow Task in some cases. Though Core is technically correct
// here, the fact that sinks marked with callDuringReplay = false may get called on a replay
// worker is definitely surprising behavior. For that reason, we extend the isReplaying flag in
// this case to also include anything running in a replay worker.
const isReplaying = activation.isReplaying || this.isReplayWorker;
const calls = await workflow.workflow.getAndResetSinkCalls();
await this.processSinkCalls(calls, isReplaying, workflow.logAttributes);
}
this.logger.trace('Completed activation', workflow.logAttributes);
}
}
catch (error) {
let logMessage = 'Failed to process Workflow Activation';
if (error instanceof errors_1.UnexpectedError) {
// Something went wrong in the workflow; we'll do our best to shut the Worker
// down gracefully, but then we'll need to terminate the Worker ASAP.
logMessage = 'An unexpected error occurred while processing Workflow Activation. Initiating Worker shutdown.';
this.unexpectedErrorSubject.error(error);
}
this.logger.error(logMessage, {
runId: activation.runId,
...workflow?.logAttributes,
error,
workflowExists: workflow !== undefined,
});
const completion = proto_1.coresdk.workflow_completion.WorkflowActivationCompletion.encodeDelimited({
runId: activation.runId,
failed: {
failure: await (0, internal_non_workflow_1.encodeErrorToFailure)(this.options.loadedDataConverter, error),
},
}).finish();
// We do not dispose of the Workflow yet, wait to be evicted from Core.
// This is done to simplify the Workflow lifecycle so Core is the sole driver.
return { state: undefined, output: { close: true, completion } };
}
}
async createWorkflow(activation, initWorkflowJob) {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const workflowCreator = this.workflowCreator;
if (!(initWorkflowJob.workflowId != null &&
initWorkflowJob.workflowType != null &&
initWorkflowJob.randomnessSeed != null &&
initWorkflowJob.firstExecutionRunId != null &&
initWorkflowJob.attempt != null &&
initWorkflowJob.startTime != null)) {
throw new TypeError(`Malformed InitializeWorkflow activation: ${JSON.stringify(initWorkflowJob)}`);
}
if (activation.timestamp == null) {
throw new TypeError('Got activation with no timestamp, cannot create a new Workflow instance');
}
const { workflowId, randomnessSeed, workflowType, parentWorkflowInfo, rootWorkflow, workflowExecutionTimeout, workflowRunTimeout, workflowTaskTimeout, continuedFromExecutionRunId, firstExecutionRunId, retryPolicy, attempt, cronSchedule, workflowExecutionExpirationTime, cronScheduleToScheduleInterval, priority, } = initWorkflowJob;
// Note that we can't do payload conversion here, as there's no guarantee that converted payloads would be safe to
// transfer through the V8 message port. Those will therefore be set in the Activator's initializeWorkflow job handler.
const workflowInfo = {
workflowId,
runId: activation.runId,
workflowType,
searchAttributes: {},
typedSearchAttributes: new common_1.TypedSearchAttributes(),
parent: (0, utils_1.convertToParentWorkflowType)(parentWorkflowInfo),
root: (0, utils_1.convertToRootWorkflowType)(rootWorkflow),
taskQueue: this.options.taskQueue,
namespace: this.options.namespace,
firstExecutionRunId,
continuedFromExecutionRunId: continuedFromExecutionRunId || undefined,
startTime: (0, time_1.tsToDate)(initWorkflowJob.startTime),
runStartTime: (0, time_1.tsToDate)(activation.timestamp),
executionTimeoutMs: (0, time_1.optionalTsToMs)(workflowExecutionTimeout),
executionExpirationTime: (0, time_1.optionalTsToDate)(workflowExecutionExpirationTime),
runTimeoutMs: (0, time_1.optionalTsToMs)(workflowRunTimeout),
taskTimeoutMs: (0, time_1.requiredTsToMs)(workflowTaskTimeout, 'workflowTaskTimeout'),
retryPolicy: (0, common_1.decompileRetryPolicy)(retryPolicy),
attempt,
cronSchedule: cronSchedule || undefined,
// 0 is the default, and not a valid value, since crons are at least a minute apart
cronScheduleToScheduleInterval: (0, time_1.optionalTsToMs)(cronScheduleToScheduleInterval) || undefined,
historyLength: activation.historyLength,
// Exact truncation for multi-petabyte histories
// A zero value means that it was not set by the server
historySize: activation.historySizeBytes.toNumber(),
continueAsNewSuggested: activation.continueAsNewSuggested,
currentBuildId: activation.deploymentVersionForCurrentTask?.buildId ?? '',
currentDeploymentVersion: (0, utils_1.convertDeploymentVersion)(activation.deploymentVersionForCurrentTask),
unsafe: {
now: () => Date.now(), // re-set in initRuntime
isReplaying: activation.isReplaying,
},
priority: (0, common_1.decodePriority)(priority),
};
const logAttributes = (0, logs_1.workflowLogAttributes)(workflowInfo);
this.logger.trace('Creating workflow', logAttributes);
const workflow = await workflowCreator.createWorkflow({
info: workflowInfo,
randomnessSeed: randomnessSeed.toBytes(),
now: (0, time_1.tsToMs)(activation.timestamp),
showStackTraceSources: this.options.showStackTraceSources,
});
this.numCachedWorkflowsSubject.next(this.numCachedWorkflowsSubject.value + 1);
return { workflow, logAttributes };
}
/**
* Process extracted external calls from Workflow post activation.
*
* Each SinkCall is translated into an injected sink function call.
*
* This function does not throw; it logs if a sink is missing
* or a sink function invocation fails.
*/
async processSinkCalls(externalCalls, isReplaying, logAttributes) {
const { sinks } = this.options;
const filteredCalls = externalCalls
// Fix deprecated usage of the 'defaultWorkerLogger' sink
.map((call) => (call.ifaceName === 'defaultWorkerLogger' ? { ...call, ifaceName: '__temporal_logger' } : call))
// Map sink call to the corresponding sink function definition
.map((call) => ({ call, sink: sinks?.[call.ifaceName]?.[call.fnName] }))
// Reject calls to undefined sink definitions
.filter(({ call: { ifaceName, fnName }, sink }) => {
if (sink !== undefined)
return true;
this.logger.error('Workflow referenced an unregistered external sink', {
...logAttributes,
ifaceName,
fnName,
});
return false;
})
// If appropriate, reject calls to sink functions not configured with `callDuringReplay = true`
.filter(({ sink }) => sink?.callDuringReplay || !isReplaying);
// Make a wrapper function, to make things easier afterward
await Promise.all(filteredCalls.map(async ({ call, sink }) => {
try {
await sink?.fn(call.workflowInfo, ...call.args);
}
catch (error) {
this.logger.error('External sink function threw an error', {
...logAttributes,
ifaceName: call.ifaceName,
fnName: call.fnName,
error,
workflowInfo: call.workflowInfo,
});
}
}));
}
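// Usage sketch of registering a sink that the code above will invoke (the `analytics` sink and
// `trackEvent` function are illustrative):
//
//   const worker = await Worker.create({
//     // ...other options
//     sinks: {
//       analytics: {
//         trackEvent: {
//           fn(workflowInfo, eventName) { console.log(workflowInfo.workflowId, eventName); },
//           callDuringReplay: false,
//         },
//       },
//     },
//   });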
/**
* Listen on heartbeats emitted from activities and send them to core.
* Errors from core responses are translated to cancellation requests and fed back via the activityFeedbackSubject.
*/
activityHeartbeat$() {
function process(state, heartbeat) {
return {
state: { ...state, processing: true, pending: undefined },
output: { type: 'send', heartbeat },
};
}
function storePending(state, heartbeat) {
return { state: { ...state, pending: heartbeat }, output: null };
}
function complete(callback) {
return {
state: {
pending: undefined,