/*
 * llamaindex — LlamaIndex.TS
 * Data framework for your LLM application.
 * Logo: https://ts.llamaindex.ai/square.svg
 * Bundled JavaScript output: 462 lines (445 loc), ~16 kB.
 */
import { AgentRunner, validateAgentParams, callTool, AgentWorker, createReadableStream, consumeAsyncIterable } from '@llamaindex/core/agent';
export * from '@llamaindex/core/agent';
import { isAsyncIterable, extractText, stringifyJSONToMessageContent } from '@llamaindex/core/utils';
import { AsyncLocalStorage, randomUUID } from '@llamaindex/env';
import { Settings as Settings$1 } from '@llamaindex/core/global';
import { PromptHelper } from '@llamaindex/core/indices';
import { SentenceSplitter } from '@llamaindex/core/node-parser';
/**
 * Builds the ReACT system prompt header for a set of tools.
 *
 * The header lists every tool (name, description, JSON schema of its
 * parameters) and instructs the model to reply in the
 * Thought / Action / Action Input / Observation / Answer format.
 *
 * @param tools - tools whose `metadata` (name, description, parameters)
 *   is embedded into the prompt
 * @returns the system prompt text
 */
const getReACTAgentSystemHeader = (tools) => {
    const toolLines = [];
    const toolNames = [];
    for (const { metadata } of tools) {
        toolLines.push(`- ${metadata.name}: ${metadata.description} with schema: ${JSON.stringify(metadata.parameters)}`);
        toolNames.push(metadata.name);
    }
    const description = toolLines.join("\n");
    const names = toolNames.join(", ");
    return `You are designed to help with a variety of tasks, from answering questions to providing summaries to other types of analyses.
## Tools
You have access to a wide variety of tools. You are responsible for using
the tools in any sequence you deem appropriate to complete the task at hand.
This may require breaking the task into subtasks and using different tools
to complete each subtask.
You have access to the following tools:
${description}
## Output Format
To answer the question, please use the following format.
"""
Thought: I need to use a tool to help me answer the question.
Action: tool name (one of ${names}) if using a tool.
Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}})
"""
Please ALWAYS start with a Thought.
Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}.
If this format is used, the user will respond in the following format:
""""
Observation: tool response
""""
You should keep repeating the above format until you have enough information
to answer the question without using any more tools. At that point, you MUST respond
in the one of the following two formats:
""""
Thought: I can answer without using any more tools.
Answer: [your answer here]
""""
""""
Thought: I cannot answer the question with the provided tools.
Answer: Sorry, I cannot answer your query.
""""
## Current Conversation
Below is the current conversation consisting of interleaving human and assistant messages.`;
};
/**
 * Process-wide configuration facade.
 *
 * Delegates llm / embedModel / callbackManager / chunkSize to the core
 * `Settings` object, and holds prompt, promptHelper, nodeParser and
 * chunkOverlap locally. Each locally-held value can additionally be scoped
 * to an async context via the matching `with*` method (AsyncLocalStorage);
 * the scoped value takes precedence over the stored one.
 * @internal
 */
class GlobalSettings {
    // Locally-held fallbacks; promptHelper / nodeParser are created lazily.
    #prompt = {};
    #promptHelper = null;
    #nodeParser = null;
    #chunkOverlap;
    // Async-context overrides, consulted before the fields above.
    #promptHelperAsyncLocalStorage = new AsyncLocalStorage();
    #nodeParserAsyncLocalStorage = new AsyncLocalStorage();
    #chunkOverlapAsyncLocalStorage = new AsyncLocalStorage();
    #promptAsyncLocalStorage = new AsyncLocalStorage();
    // --- delegated straight to the core Settings object ---
    get debug() {
        return Settings$1.debug;
    }
    get llm() {
        return Settings$1.llm;
    }
    set llm(llm) {
        Settings$1.llm = llm;
    }
    withLLM(llm, fn) {
        return Settings$1.withLLM(llm, fn);
    }
    get embedModel() {
        return Settings$1.embedModel;
    }
    set embedModel(embedModel) {
        Settings$1.embedModel = embedModel;
    }
    withEmbedModel(embedModel, fn) {
        return Settings$1.withEmbedModel(embedModel, fn);
    }
    get callbackManager() {
        return Settings$1.callbackManager;
    }
    set callbackManager(callbackManager) {
        Settings$1.callbackManager = callbackManager;
    }
    withCallbackManager(callbackManager, fn) {
        return Settings$1.withCallbackManager(callbackManager, fn);
    }
    get chunkSize() {
        return Settings$1.chunkSize;
    }
    set chunkSize(chunkSize) {
        Settings$1.chunkSize = chunkSize;
    }
    withChunkSize(chunkSize, fn) {
        return Settings$1.withChunkSize(chunkSize, fn);
    }
    // --- held locally, with lazy defaults and async-context scoping ---
    get promptHelper() {
        // Lazily create the default helper on first access.
        if (this.#promptHelper === null) {
            this.#promptHelper = new PromptHelper();
        }
        return this.#promptHelperAsyncLocalStorage.getStore() ?? this.#promptHelper;
    }
    set promptHelper(promptHelper) {
        this.#promptHelper = promptHelper;
    }
    withPromptHelper(promptHelper, fn) {
        return this.#promptHelperAsyncLocalStorage.run(promptHelper, fn);
    }
    get nodeParser() {
        // Lazily create a SentenceSplitter using the current chunk settings.
        if (this.#nodeParser === null) {
            this.#nodeParser = new SentenceSplitter({
                chunkSize: this.chunkSize,
                chunkOverlap: this.chunkOverlap
            });
        }
        return this.#nodeParserAsyncLocalStorage.getStore() ?? this.#nodeParser;
    }
    set nodeParser(nodeParser) {
        this.#nodeParser = nodeParser;
    }
    withNodeParser(nodeParser, fn) {
        return this.#nodeParserAsyncLocalStorage.run(nodeParser, fn);
    }
    get chunkOverlap() {
        return this.#chunkOverlapAsyncLocalStorage.getStore() ?? this.#chunkOverlap;
    }
    set chunkOverlap(chunkOverlap) {
        // Non-numeric assignments are silently ignored.
        if (typeof chunkOverlap === "number") {
            this.#chunkOverlap = chunkOverlap;
        }
    }
    withChunkOverlap(chunkOverlap, fn) {
        return this.#chunkOverlapAsyncLocalStorage.run(chunkOverlap, fn);
    }
    get prompt() {
        return this.#promptAsyncLocalStorage.getStore() ?? this.#prompt;
    }
    set prompt(prompt) {
        this.#prompt = prompt;
    }
    withPrompt(prompt, fn) {
        return this.#promptAsyncLocalStorage.run(prompt, fn);
    }
}
// Module-level singleton used by the agents below.
const Settings = new GlobalSettings();
/**
 * Renders a ReACT reason object as a prompt line in the ReACT text format.
 *
 * - observation → "Observation: <json>"
 * - action      → "Thought: ...\nAction: ...\nInput: <json>"
 * - response    → "Thought: ...\nAnswer: <text>"
 *
 * @param reason - a reason with a `type` discriminant
 * @returns the formatted string (undefined for an unknown type)
 */
function reasonFormatter(reason) {
    if (reason.type === "observation") {
        return `Observation: ${stringifyJSONToMessageContent(reason.observation)}`;
    }
    if (reason.type === "action") {
        return `Thought: ${reason.thought}\nAction: ${reason.action}\nInput: ${stringifyJSONToMessageContent(reason.input)}`;
    }
    if (reason.type === "response") {
        return `Thought: ${reason.thought}\nAnswer: ${extractText(reason.response.message.content)}`;
    }
}
/**
 * Extracts the outermost `{...}` substring from `text`.
 *
 * Greedy match, so it spans from the first `{` to the last `}` (dotall, so
 * newlines are included).
 *
 * @param text - raw model output expected to contain a JSON object
 * @returns the matched substring
 * @throws {SyntaxError} when no braces are found
 */
function extractJsonStr(text) {
    const match = /\{.*\}/s.exec(text);
    if (match === null) {
        throw new SyntaxError(`Could not extract json string from output: ${text}`);
    }
    return match[0];
}
/**
 * Splits a final ReACT response into its thought and answer parts.
 *
 * Expects text of the form "Thought: ... Answer: ..." (dotall, answer runs
 * to end of input). Both parts are trimmed.
 *
 * @param inputText - raw model output containing the final answer
 * @returns `[thought, answer]`
 * @throws {Error} when the Thought/Answer pattern is absent
 */
function extractFinalResponse(inputText) {
    const match = /\s*Thought:(.*?)Answer:(.*?)$/s.exec(inputText);
    if (!match) {
        throw new Error(`Could not extract final answer from input text: ${inputText}`);
    }
    const [, thought, answer] = match;
    return [thought.trim(), answer.trim()];
}
/**
 * Parses a ReACT tool-use block into thought, action name, and raw input.
 *
 * Expects text of the form:
 *   "Thought: ...\nAction: <name> ... Action Input: {...}"
 * capturing the first `{...}` after the action name as the raw input.
 *
 * Fix: the previous pattern contained `\.*[Input:]*` — `[Input:]` is a
 * character class matching the individual characters I/n/p/u/t/:, not the
 * literal label, and both atoms were optional and followed by a lazy `.*?`,
 * so they matched the same inputs as plain `.*?`. The misleading fragment is
 * removed; the accepted language and capture groups are unchanged.
 *
 * @param inputText - raw model output containing a tool call
 * @returns `[thought, action, actionInput]`, each trimmed
 * @throws {Error} when the pattern is absent
 */
function extractToolUse(inputText) {
    const pattern = /\s*Thought: (.*?)\nAction: ([a-zA-Z0-9_]+).*?(\{.*?\})/s;
    const match = inputText.match(pattern);
    if (!match) {
        throw new Error(`Could not extract tool use from input text: "${inputText}"`);
    }
    const thought = match[1].trim();
    const action = match[2].trim();
    const actionInput = match[3].trim();
    return [
        thought,
        action,
        actionInput
    ];
}
/**
 * Best-effort parser for almost-JSON action input (e.g. single-quoted
 * strings), used as a fallback when `JSON.parse` fails.
 *
 * Single quotes not adjacent to word characters are normalized to double
 * quotes, then `"key": "value"` pairs are collected. Only string-valued
 * pairs are recovered; unquoted values (numbers, booleans) are dropped.
 *
 * Fix: `Object.fromEntries(matches)` treated each match array as an entry,
 * i.e. used `match[0]` (the full matched text) as the key and `match[1]`
 * (the key capture) as the value, yielding objects like
 * `{ '"a": "b"': 'a' }`. Map each match to its `[key, value]` capture
 * groups instead.
 *
 * @param jsonStr - the raw (possibly malformed) action-input string
 * @returns an object of the recovered key/value string pairs
 */
function actionInputParser(jsonStr) {
    const processedString = jsonStr.replace(/(?<!\w)'|'(?!\w)/g, '"');
    const pattern = /"(\w+)":\s*"([^"]*)"/g;
    return Object.fromEntries(
        [...processedString.matchAll(pattern)].map(([, key, value]) => [key, value])
    );
}
/**
 * Parses an LLM chat response written in the ReACT format into a typed
 * `reason` object:
 *   - `{ type: "action", thought, action, input }` — the model requested a
 *     tool call (input parsed via JSON.parse, falling back to
 *     actionInputParser);
 *   - `{ type: "response", thought, response }` — the model produced a final
 *     answer (explicit "Answer:" or an implicit bare thought).
 *
 * Handles both streaming (async iterable of `{ delta }` chunks) and
 * non-streaming (`{ message: { content } }`) responses. In the streaming
 * case the stream is teed: one branch is consumed here for type detection
 * and parsing, the other is handed to `onResolveType` for the caller.
 *
 * @param output - the chat response (async iterable or full response object)
 * @param onResolveType - called once with the detected type
 *   ("action" | "answer" | "thought") and the stream/response to surface
 * @throws if the content cannot be parsed for the detected type
 */
const reACTOutputParser = async (output, onResolveType)=>{
let reason = null;
if (isAsyncIterable(output)) {
// Streaming: tee so one branch ("peak", sic — presumably "peek") can be
// scanned for the type marker while the other goes to the caller untouched.
const [peakStream, finalStream] = createReadableStream(output).tee();
const reader = peakStream.getReader();
let type = null;
let content = "";
// step 1: accumulate deltas and scan for a marker keyword.
// NOTE(review): the loop drains the whole stream before resolving the type,
// even though a marker may appear early — confirm that is intended.
// "Action:" is checked before "Answer:", the reverse of the non-streaming
// branch below; once "Action:" appears in the accumulated content the type
// stays "action".
for(;;){
const { done, value } = await reader.read();
if (done) {
break;
}
content += value.delta;
if (content.includes("Action:")) {
type = "action";
} else if (content.includes("Answer:")) {
type = "answer";
}
}
if (type === null) {
// `Thought:` is always present at the beginning of the output.
type = "thought";
}
// Release the lock so consumeAsyncIterable below can re-read the branch.
reader.releaseLock();
// NOTE(review): unreachable — `type` was defaulted to "thought" just above.
if (!type) {
throw new Error("Could not determine type of output");
}
onResolveType(type, finalStream);
// step 2: do the parsing from content
switch(type){
case "action":
{
// have to consume the stream to get the full content
// (the peek branch is already drained; `content` presumably carries
// the accumulated text — confirm consumeAsyncIterable's contract).
const response = await consumeAsyncIterable(peakStream, content);
const [thought, action, input] = extractToolUse(response.content);
const jsonStr = extractJsonStr(input);
let json;
try {
json = JSON.parse(jsonStr);
} catch (e) {
// Fall back to the lenient parser for single-quoted pseudo-JSON.
json = actionInputParser(jsonStr);
}
reason = {
type: "action",
thought,
action,
input: json
};
break;
}
case "thought":
{
// Bare thought with no Action/Answer marker: treat the whole output
// as an implicit final answer.
const thought = "(Implicit) I can answer without any more tools!";
const response = await consumeAsyncIterable(peakStream, content);
reason = {
type: "response",
thought,
response: {
// NOTE(review): `raw` is the (consumed) peek stream here, but the
// full response object in the "answer" case — confirm intended.
raw: peakStream,
message: response
}
};
break;
}
case "answer":
{
const response = await consumeAsyncIterable(peakStream, content);
const [thought, answer] = extractFinalResponse(response.content);
reason = {
type: "response",
thought,
response: {
raw: response,
message: {
role: "assistant",
content: answer
}
}
};
break;
}
default:
{
throw new Error(`Invalid type: ${type}`);
}
}
} else {
// Non-streaming: the full content is available; classify directly.
// NOTE(review): "Answer:" is checked before "Action:" here, the reverse of
// the streaming branch — outputs containing both resolve differently.
const content = extractText(output.message.content);
const type = content.includes("Answer:") ? "answer" : content.includes("Action:") ? "action" : "thought";
onResolveType(type, output);
// step 2: do the parsing from content
switch(type){
case "action":
{
const [thought, action, input] = extractToolUse(content);
const jsonStr = extractJsonStr(input);
let json;
try {
json = JSON.parse(jsonStr);
} catch (e) {
// Fall back to the lenient parser for single-quoted pseudo-JSON.
json = actionInputParser(jsonStr);
}
reason = {
type: "action",
thought,
action,
input: json
};
break;
}
case "thought":
{
// Implicit final answer: no marker present, return content verbatim.
const thought = "(Implicit) I can answer without any more tools!";
reason = {
type: "response",
thought,
response: {
raw: output,
message: {
role: "assistant",
content: extractText(output.message.content)
}
}
};
break;
}
case "answer":
{
const [thought, answer] = extractFinalResponse(content);
reason = {
type: "response",
thought,
response: {
raw: output,
message: {
role: "assistant",
content: answer
}
}
};
break;
}
default:
{
throw new Error(`Invalid type: ${type}`);
}
}
}
if (reason === null) {
throw new TypeError("Reason is null");
}
return reason;
};
/**
 * Assembles the message list for the next LLM call: the ReACT system
 * header, the conversation so far, then the accumulated reasons rendered
 * back into ReACT text (observations as "user", everything else as
 * "assistant").
 *
 * @param tools - tools embedded into the system header
 * @param messages - the conversation history
 * @param currentReasons - reasons gathered during this task
 * @returns the full message array for llm.chat
 */
const chatFormatter = async (tools, messages, currentReasons) => {
    const systemMessage = {
        role: "system",
        content: getReACTAgentSystemHeader(tools)
    };
    const reasonMessages = [];
    for (const reason of currentReasons) {
        reasonMessages.push({
            // Tool observations read as user turns; thoughts/actions as assistant.
            role: reason.type === "observation" ? "user" : "assistant",
            content: await reasonFormatter(reason)
        });
    }
    return [systemMessage, ...messages, ...reasonMessages];
};
/**
 * Agent worker that delegates each task step to ReActAgent.taskHandler.
 */
class ReACTAgentWorker extends AgentWorker {
    constructor(...args) {
        super(...args);
        this.taskHandler = ReActAgent.taskHandler;
    }
}
/**
 * ReACT-style agent runner: alternates Thought / Action / Observation steps,
 * invoking tools until the LLM emits a final Answer.
 */
class ReActAgent extends AgentRunner {
// @param params - agent options: llm, chatHistory, systemPrompt, verbose,
//   and either `tools` (static list) or `toolRetriever` (dynamic lookup).
constructor(params){
validateAgentParams(params);
super({
// Fall back to the module-level Settings singleton for the LLM.
llm: params.llm ?? Settings.llm,
chatHistory: params.chatHistory ?? [],
runner: new ReACTAgentWorker(),
systemPrompt: params.systemPrompt ?? null,
// Static tool list, or a retriever-backed lookup bound to its retriever.
tools: "tools" in params ? params.tools : params.toolRetriever.retrieve.bind(params.toolRetriever),
verbose: params.verbose ?? false
});
}
// Per-task store: the ReACT reasons accumulated across steps.
createStore() {
return {
reasons: []
};
}
static{
// Handles one ReACT step: format the prompt, call the LLM, parse the
// reason, and (for actions) execute the tool and record its observation.
this.taskHandler = async (step, enqueueOutput)=>{
const { llm, stream, getTools } = step.context;
// NOTE(review): assumes the message history is non-empty — `.at(-1)`
// yields undefined on an empty store and this would throw.
const lastMessage = step.context.store.messages.at(-1).content;
const tools = await getTools(lastMessage);
const messages = await chatFormatter(tools, step.context.store.messages, step.context.store.reasons);
const response = await llm.chat({
// @ts-expect-error boolean
stream,
messages
});
// The callback enqueues this step's output as soon as the type is
// known; only an "action" means further steps follow.
const reason = await reACTOutputParser(response, (type, response)=>{
enqueueOutput({
taskStep: step,
output: response,
isLast: type !== "action"
});
});
step.context.logger.log("current reason: %O", reason);
// Store is treated as immutable: replace the array rather than push.
step.context.store.reasons = [
...step.context.store.reasons,
reason
];
if (reason.type === "action") {
// NOTE(review): `find` may return undefined if the model named an
// unknown tool — presumably callTool reports that; verify.
const tool = tools.find((tool)=>tool.metadata.name === reason.action);
const toolOutput = await callTool(tool, {
id: randomUUID(),
input: reason.input,
name: reason.action
}, step.context.logger);
// Record the observation so the next prompt round includes it.
step.context.store.reasons = [
...step.context.store.reasons,
{
type: "observation",
observation: toolOutput.output
}
];
}
};
}
}
export { ReACTAgentWorker, ReActAgent };