// @langchain/langgraph: PregelRunner (compiled CommonJS output)
Object.defineProperty(exports, "__esModule", { value: true });
exports.PregelRunner = void 0;
const types_js_1 = require("./types.cjs");
const index_js_1 = require("./utils/index.cjs");
const constants_js_1 = require("../constants.cjs");
const errors_js_1 = require("../errors.cjs");
const retry_js_1 = require("./retry.cjs");
const PROMISE_ADDED_SYMBOL = Symbol.for("promiseAdded");
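/**
 * Creates a minimal "promise barrier": `wait` is a promise that resolves with
 * PROMISE_ADDED_SYMBOL the next time `next()` is called, and is immediately re-armed so it can be
 * awaited again. `_executeTasksWithRetry` races `barrier.wait` against the in-flight task promises
 * so that a PUSH task scheduled mid-superstep (via `call`, which invokes `barrier.next()`) wakes
 * the scheduling loop right away instead of waiting for an existing task to settle.
 *
 * Illustrative sketch (not part of the exported API; `someTaskPromise` is a placeholder):
 *
 *   const barrier = createPromiseBarrier();
 *   const winner = await Promise.race([someTaskPromise, barrier.wait]);
 *   if (winner === PROMISE_ADDED_SYMBOL) {
 *     // something called barrier.next(); re-scan the executing task map before racing again
 *   }
 */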
function createPromiseBarrier() {
const barrier = {
next: () => void 0,
wait: Promise.resolve(PROMISE_ADDED_SYMBOL),
};
function waitHandler(resolve) {
barrier.next = () => {
barrier.wait = new Promise(waitHandler);
resolve(PROMISE_ADDED_SYMBOL);
};
}
barrier.wait = new Promise(waitHandler);
return barrier;
}
/**
* Responsible for handling task execution on each tick of the {@link PregelLoop}.
*/
class PregelRunner {
/**
* Construct a new PregelRunner, which executes tasks from the provided PregelLoop.
* @param loop - The PregelLoop that produces tasks for this runner to execute.
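     * @param nodeFinished - Optional callback invoked with the name of each node that finishes
     *   successfully (skipped for tasks tagged with TAG_HIDDEN).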
*/
constructor({ loop, nodeFinished, }) {
Object.defineProperty(this, "nodeFinished", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "loop", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
this.loop = loop;
this.nodeFinished = nodeFinished;
}
/**
* Execute tasks from the current step of the PregelLoop.
*
* Note: this method does NOT call {@link PregelLoop}#tick. That must be handled externally.
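     *
     * Illustrative driver sketch (assumed shapes; PregelLoop#tick's exact signature is not part of
     * this file):
     *
     *   const runner = new PregelRunner({ loop });
     *   // tickOptions below is a hypothetical placeholder; see PregelLoop for the real options
     *   while (await loop.tick(tickOptions)) {
     *     await runner.tick({ timeout, retryPolicy, signal, onStepWrite });
     *   }
     *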
* @param options - Options for the execution.
*/
async tick(options = {}) {
const { timeout, retryPolicy, onStepWrite, maxConcurrency } = options;
const nodeErrors = new Set();
let graphBubbleUp;
const exceptionSignalController = new AbortController();
// Start task execution
const pendingTasks = Object.values(this.loop.tasks).filter((t) => t.writes.length === 0);
const currentSignals = this._initializeAbortSignals({
exceptionSignalController,
timeout,
signal: options.signal,
});
const taskStream = this._executeTasksWithRetry(pendingTasks, {
signals: currentSignals,
retryPolicy,
maxConcurrency,
});
for await (const { task, error, signalAborted } of taskStream) {
this._commit(task, error);
if ((0, errors_js_1.isGraphInterrupt)(error)) {
graphBubbleUp = error;
}
else if ((0, errors_js_1.isGraphBubbleUp)(error) && !(0, errors_js_1.isGraphInterrupt)(graphBubbleUp)) {
graphBubbleUp = error;
}
else if (error && (nodeErrors.size === 0 || !signalAborted)) {
/*
* The goal here is to capture the exception that causes the graph to terminate early. In
* theory it's possible for multiple nodes to throw, so this also handles the edge case of
* capturing concurrent exceptions thrown before the node saw an abort. This is checked via
* the signalAborted flag, which records the state of the abort signal at the time the node
* execution finished.
*
* There is a case however where one node throws some error causing us to trigger an abort,
* which then causes other concurrently executing nodes to throw their own AbortErrors. In
* this case we don't care about reporting the abort errors thrown by the other nodes,
* because they don't tell the user anything about what caused the graph execution to
* terminate early, so we ignore them (and any other errors that occur after the node sees
* an abort signal).
*/
exceptionSignalController.abort();
nodeErrors.add(error);
}
}
onStepWrite?.(this.loop.step, Object.values(this.loop.tasks)
.map((task) => task.writes)
.flat());
if (nodeErrors.size === 1) {
throw Array.from(nodeErrors)[0];
}
else if (nodeErrors.size > 1) {
throw new AggregateError(Array.from(nodeErrors), `Multiple errors occurred during superstep ${this.loop.step}. See the "errors" field of this exception for more details.`);
}
if ((0, errors_js_1.isGraphInterrupt)(graphBubbleUp)) {
throw graphBubbleUp;
}
if ((0, errors_js_1.isGraphBubbleUp)(graphBubbleUp) && this.loop.isNested) {
throw graphBubbleUp;
}
}
/**
* Initializes the current AbortSignals for the PregelRunner, handling the various ways that
* AbortSignals must be chained together so that the PregelLoop can be interrupted if necessary
* while still allowing nodes to gracefully exit.
*
* This method must only be called once per PregelRunner#tick. It has the side effect of updating
* the PregelLoop#config with the new AbortSignals so they may be propagated correctly to future
* ticks and subgraph calls.
*
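     * Conceptually (a sketch assuming combineAbortSignals behaves like AbortSignal.any, i.e. the
     * combined signal aborts as soon as any input signal aborts):
     *
     *   const user = new AbortController();    // plays the role of externalAbortSignal
     *   const errors = new AbortController();  // plays the role of errorAbortSignal
     *   const composed = combineAbortSignals(user.signal, AbortSignal.timeout(5000), errors.signal);
     *   errors.abort(); // composed.aborted === true, cancelling every node racing on it
     *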
* @param options - Options for the initialization.
* @returns The current abort signals.
* @internal
*/
_initializeAbortSignals({ exceptionSignalController, timeout, signal, }) {
const previousSignals = this.loop.config.configurable?.[constants_js_1.CONFIG_KEY_ABORT_SIGNALS] ?? {};
// This is true when a node calls a subgraph and, rather than forwarding its own AbortSignal,
// it creates a new AbortSignal and passes that along instead.
const subgraphCalledWithSignalCreatedByNode = signal &&
previousSignals.composedAbortSignal &&
signal !== previousSignals.composedAbortSignal;
const externalAbortSignal = subgraphCalledWithSignalCreatedByNode
? // Chain the signals here to make sure that the subgraph receives the external abort signal in
// addition to the signal created by the node.
(0, index_js_1.combineAbortSignals)(previousSignals.externalAbortSignal, signal)
: // Otherwise, just keep using the external abort signal, or initialize it if it hasn't been
// assigned yet
previousSignals.externalAbortSignal ?? signal;
const errorAbortSignal = previousSignals.errorAbortSignal
? // Chaining here rather than always using a fresh one handles the case where a subgraph is
// called in a parallel branch to some other node in the parent graph.
(0, index_js_1.combineAbortSignals)(previousSignals.errorAbortSignal, exceptionSignalController.signal)
: exceptionSignalController.signal;
const timeoutAbortSignal = timeout
? AbortSignal.timeout(timeout)
: undefined;
        const composedAbortSignal = (0, index_js_1.combineAbortSignals)(
            ...(externalAbortSignal ? [externalAbortSignal] : []),
            ...(timeoutAbortSignal ? [timeoutAbortSignal] : []),
            errorAbortSignal);
const currentSignals = {
externalAbortSignal,
errorAbortSignal,
timeoutAbortSignal,
composedAbortSignal,
};
this.loop.config = (0, index_js_1.patchConfigurable)(this.loop.config, {
[constants_js_1.CONFIG_KEY_ABORT_SIGNALS]: currentSignals,
});
return currentSignals;
}
/**
* Concurrently executes tasks with the requested retry policy, yielding a {@link SettledPregelTask} for each task as it completes.
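     *
     * Starts at most `maxConcurrency` tasks at a time (defaulting to all pending tasks) and races
     * the in-flight task promises against the external/timeout abort signal and the promise
     * barrier. When the barrier resolves (i.e. `call` scheduled a new PUSH task and added its
     * promise to `executingTasksMap`), the loop re-enters the race so that promise is included.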
* @param tasks - The tasks to execute.
* @param options - Options for the execution.
*/
async *_executeTasksWithRetry(tasks, options) {
const { retryPolicy, maxConcurrency, signals } = options ?? {};
const barrier = createPromiseBarrier();
const executingTasksMap = {};
const thisCall = {
executingTasksMap,
barrier,
retryPolicy,
scheduleTask: async (task, writeIdx, call) => this.loop.acceptPush(task, writeIdx, call),
};
if (signals?.composedAbortSignal?.aborted) {
// note: don't use throwIfAborted here because it throws a DOMException,
// which isn't consistent with how we throw on abort below.
throw new Error("Abort");
}
let startedTasksCount = 0;
let listener;
const timeoutOrCancelSignal = signals?.externalAbortSignal || signals?.timeoutAbortSignal
? (0, index_js_1.combineAbortSignals)(...(signals.externalAbortSignal
? [signals.externalAbortSignal]
: []), ...(signals.timeoutAbortSignal ? [signals.timeoutAbortSignal] : []))
: undefined;
const abortPromise = timeoutOrCancelSignal
? new Promise((_resolve, reject) => {
listener = () => reject(new Error("Abort"));
timeoutOrCancelSignal.addEventListener("abort", listener, {
once: true,
});
})
: undefined;
while ((startedTasksCount === 0 || Object.keys(executingTasksMap).length > 0) &&
tasks.length) {
for (; Object.values(executingTasksMap).length <
(maxConcurrency ?? tasks.length) && startedTasksCount < tasks.length; startedTasksCount += 1) {
const task = tasks[startedTasksCount];
                executingTasksMap[task.id] = (0, retry_js_1._runWithRetry)(task, retryPolicy, {
                    [constants_js_1.CONFIG_KEY_CALL]: call?.bind(thisCall, this, task),
                }, signals?.composedAbortSignal).catch((error) => {
return {
task,
error,
signalAborted: signals?.composedAbortSignal?.aborted,
};
});
}
const settledTask = await Promise.race([
...Object.values(executingTasksMap),
...(abortPromise ? [abortPromise] : []),
barrier.wait,
]);
if (settledTask === PROMISE_ADDED_SYMBOL) {
continue;
}
yield settledTask;
delete executingTasksMap[settledTask.task.id];
}
}
/**
* Determines what writes to apply based on whether the task completed successfully, and what type of error occurred.
*
     * Interrupt errors are recorded as INTERRUPT (plus any RESUME) writes, other bubble-up errors
     * keep the task's existing writes, and ordinary errors are recorded as a single ERROR write of
     * the shape `{ message, name }`. Successful tasks have their writes (or a NO_WRITES marker)
     * saved via {@link PregelLoop}#putWrites.
*
* @param task - The task to commit.
* @param error - The error that occurred, if any.
*/
_commit(task, error) {
if (error !== undefined) {
if ((0, errors_js_1.isGraphInterrupt)(error)) {
if (error.interrupts.length) {
const interrupts = error.interrupts.map((interrupt) => [constants_js_1.INTERRUPT, interrupt]);
const resumes = task.writes.filter((w) => w[0] === constants_js_1.RESUME);
if (resumes.length) {
interrupts.push(...resumes);
}
this.loop.putWrites(task.id, interrupts);
}
}
else if ((0, errors_js_1.isGraphBubbleUp)(error) && task.writes.length) {
this.loop.putWrites(task.id, task.writes);
}
else {
this.loop.putWrites(task.id, [
[constants_js_1.ERROR, { message: error.message, name: error.name }],
]);
}
}
else {
if (this.nodeFinished &&
(task.config?.tags == null || !task.config.tags.includes(constants_js_1.TAG_HIDDEN))) {
this.nodeFinished(String(task.name));
}
if (task.writes.length === 0) {
// Add no writes marker
task.writes.push([constants_js_1.NO_WRITES, null]);
}
// Save task writes to checkpointer
this.loop.putWrites(task.id, task.writes);
}
}
}
exports.PregelRunner = PregelRunner;
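/**
 * Handler injected into task configs under CONFIG_KEY_CALL by `_executeTasksWithRetry` (and
 * re-bound recursively below), invoked with the per-superstep context (`thisCall`) as `this`.
 * It schedules a nested PUSH task on the loop via `scheduleTask` -> `acceptPush`. If that task is
 * already executing, the existing promise is reused; if it already has RETURN or ERROR writes
 * (e.g. because the parent task was retried), the recorded outcome is replayed; otherwise the task
 * is started with `_runWithRetry` and the promise barrier is woken so the new promise joins the
 * race in `_executeTasksWithRetry`.
 */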
async function call(runner, task, func, name, input, options = {}) {
// Schedule PUSH tasks, collect promises
const scratchpad = task.config?.configurable?.[constants_js_1.CONFIG_KEY_SCRATCHPAD];
if (!scratchpad) {
throw new Error(`BUG: No scratchpad found on task ${task.name}__${task.id}`);
}
const cnt = scratchpad.callCounter;
scratchpad.callCounter += 1;
// schedule the next task, if the callback returns one
const wcall = new types_js_1.Call({
func,
name,
input,
cache: options.cache,
retry: options.retry,
callbacks: options.callbacks,
});
const nextTask = await this.scheduleTask(task, cnt, wcall);
if (!nextTask)
return undefined;
// Check if this task is already running
const existingPromise = this.executingTasksMap[nextTask.id];
if (existingPromise !== undefined) {
// If the parent task was retried, the next task might already be running
return existingPromise;
}
if (nextTask.writes.length > 0) {
// If it already ran, return the result
const returns = nextTask.writes.filter(([c]) => c === constants_js_1.RETURN);
const errors = nextTask.writes.filter(([c]) => c === constants_js_1.ERROR);
if (returns.length > 0) {
// Task completed successfully
if (returns.length === 1)
return Promise.resolve(returns[0][1]);
// should be unreachable
throw new Error(`BUG: multiple returns found for task ${nextTask.name}__${nextTask.id}`);
}
if (errors.length > 0) {
// Task failed
if (errors.length === 1) {
const errorValue = errors[0][1];
const error =
// eslint-disable-next-line no-instanceof/no-instanceof
errorValue instanceof Error
? errorValue
: new Error(String(errorValue));
return Promise.reject(error);
}
// the only way this should happen is if the task executes multiple times and writes aren't cleared
throw new Error(`BUG: multiple errors found for task ${nextTask.name}__${nextTask.id}`);
}
return undefined;
}
else {
// Schedule the next task with retry
const prom = (0, retry_js_1._runWithRetry)(nextTask, options.retry, {
[constants_js_1.CONFIG_KEY_CALL]: call.bind(this, runner, nextTask),
});
this.executingTasksMap[nextTask.id] = prom;
this.barrier.next();
return prom.then(({ result, error }) => {
if (error)
return Promise.reject(error);
return result;
});
}
}
//# sourceMappingURL=runner.js.map