UNPKG

llamaindex

Version: (not captured in this page extract)

<p align="center"> <img height="100" width="100" alt="LlamaIndex logo" src="https://ts.llamaindex.ai/square.svg" /> </p> <h1 align="center">LlamaIndex.TS</h1> <h3 align="center"> Data framework for your LLM application. </h3>

470 lines (452 loc) 16.3 kB
Object.defineProperty(exports, '__esModule', { value: true }); var agent = require('@llamaindex/core/agent'); var utils = require('@llamaindex/core/utils'); var env = require('@llamaindex/env'); var global = require('@llamaindex/core/global'); var indices = require('@llamaindex/core/indices'); var nodeParser = require('@llamaindex/core/node-parser'); const getReACTAgentSystemHeader = (tools)=>{ const description = tools.map((tool)=>`- ${tool.metadata.name}: ${tool.metadata.description} with schema: ${JSON.stringify(tool.metadata.parameters)}`).join("\n"); const names = tools.map((tool)=>tool.metadata.name).join(", "); return `You are designed to help with a variety of tasks, from answering questions to providing summaries to other types of analyses. ## Tools You have access to a wide variety of tools. You are responsible for using the tools in any sequence you deem appropriate to complete the task at hand. This may require breaking the task into subtasks and using different tools to complete each subtask. You have access to the following tools: ${description} ## Output Format To answer the question, please use the following format. """ Thought: I need to use a tool to help me answer the question. Action: tool name (one of ${names}) if using a tool. Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}}) """ Please ALWAYS start with a Thought. Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}. If this format is used, the user will respond in the following format: """" Observation: tool response """" You should keep repeating the above format until you have enough information to answer the question without using any more tools. At that point, you MUST respond in the one of the following two formats: """" Thought: I can answer without using any more tools. 
Answer: [your answer here] """" """" Thought: I cannot answer the question with the provided tools. Answer: Sorry, I cannot answer your query. """" ## Current Conversation Below is the current conversation consisting of interleaving human and assistant messages.`; }; /** * @internal */ class GlobalSettings { #prompt; #promptHelper; #nodeParser; #chunkOverlap; #promptHelperAsyncLocalStorage; #nodeParserAsyncLocalStorage; #chunkOverlapAsyncLocalStorage; #promptAsyncLocalStorage; get debug() { return global.Settings.debug; } get llm() { return global.Settings.llm; } set llm(llm) { global.Settings.llm = llm; } withLLM(llm, fn) { return global.Settings.withLLM(llm, fn); } get promptHelper() { if (this.#promptHelper === null) { this.#promptHelper = new indices.PromptHelper(); } return this.#promptHelperAsyncLocalStorage.getStore() ?? this.#promptHelper; } set promptHelper(promptHelper) { this.#promptHelper = promptHelper; } withPromptHelper(promptHelper, fn) { return this.#promptHelperAsyncLocalStorage.run(promptHelper, fn); } get embedModel() { return global.Settings.embedModel; } set embedModel(embedModel) { global.Settings.embedModel = embedModel; } withEmbedModel(embedModel, fn) { return global.Settings.withEmbedModel(embedModel, fn); } get nodeParser() { if (this.#nodeParser === null) { this.#nodeParser = new nodeParser.SentenceSplitter({ chunkSize: this.chunkSize, chunkOverlap: this.chunkOverlap }); } return this.#nodeParserAsyncLocalStorage.getStore() ?? 
this.#nodeParser; } set nodeParser(nodeParser) { this.#nodeParser = nodeParser; } withNodeParser(nodeParser, fn) { return this.#nodeParserAsyncLocalStorage.run(nodeParser, fn); } get callbackManager() { return global.Settings.callbackManager; } set callbackManager(callbackManager) { global.Settings.callbackManager = callbackManager; } withCallbackManager(callbackManager, fn) { return global.Settings.withCallbackManager(callbackManager, fn); } set chunkSize(chunkSize) { global.Settings.chunkSize = chunkSize; } get chunkSize() { return global.Settings.chunkSize; } withChunkSize(chunkSize, fn) { return global.Settings.withChunkSize(chunkSize, fn); } get chunkOverlap() { return this.#chunkOverlapAsyncLocalStorage.getStore() ?? this.#chunkOverlap; } set chunkOverlap(chunkOverlap) { if (typeof chunkOverlap === "number") { this.#chunkOverlap = chunkOverlap; } } withChunkOverlap(chunkOverlap, fn) { return this.#chunkOverlapAsyncLocalStorage.run(chunkOverlap, fn); } get prompt() { return this.#promptAsyncLocalStorage.getStore() ?? 
this.#prompt; } set prompt(prompt) { this.#prompt = prompt; } withPrompt(prompt, fn) { return this.#promptAsyncLocalStorage.run(prompt, fn); } constructor(){ this.#prompt = {}; this.#promptHelper = null; this.#nodeParser = null; this.#promptHelperAsyncLocalStorage = new env.AsyncLocalStorage(); this.#nodeParserAsyncLocalStorage = new env.AsyncLocalStorage(); this.#chunkOverlapAsyncLocalStorage = new env.AsyncLocalStorage(); this.#promptAsyncLocalStorage = new env.AsyncLocalStorage(); } } const Settings = new GlobalSettings(); function reasonFormatter(reason) { switch(reason.type){ case "observation": return `Observation: ${utils.stringifyJSONToMessageContent(reason.observation)}`; case "action": return `Thought: ${reason.thought}\nAction: ${reason.action}\nInput: ${utils.stringifyJSONToMessageContent(reason.input)}`; case "response": { return `Thought: ${reason.thought}\nAnswer: ${utils.extractText(reason.response.message.content)}`; } } } function extractJsonStr(text) { const pattern = /\{.*\}/s; const match = text.match(pattern); if (!match) { throw new SyntaxError(`Could not extract json string from output: ${text}`); } return match[0]; } function extractFinalResponse(inputText) { const pattern = /\s*Thought:(.*?)Answer:(.*?)$/s; const match = inputText.match(pattern); if (!match) { throw new Error(`Could not extract final answer from input text: ${inputText}`); } const thought = match[1].trim(); const answer = match[2].trim(); return [ thought, answer ]; } function extractToolUse(inputText) { const pattern = /\s*Thought: (.*?)\nAction: ([a-zA-Z0-9_]+).*?\.*[Input:]*.*?(\{.*?\})/s; const match = inputText.match(pattern); if (!match) { throw new Error(`Could not extract tool use from input text: "${inputText}"`); } const thought = match[1].trim(); const action = match[2].trim(); const actionInput = match[3].trim(); return [ thought, action, actionInput ]; } function actionInputParser(jsonStr) { const processedString = jsonStr.replace(/(?<!\w)'|'(?!\w)/g, '"'); 
const pattern = /"(\w+)":\s*"([^"]*)"/g; const matches = [ ...processedString.matchAll(pattern) ]; return Object.fromEntries(matches); } const reACTOutputParser = async (output, onResolveType)=>{ let reason = null; if (utils.isAsyncIterable(output)) { const [peakStream, finalStream] = agent.createReadableStream(output).tee(); const reader = peakStream.getReader(); let type = null; let content = ""; for(;;){ const { done, value } = await reader.read(); if (done) { break; } content += value.delta; if (content.includes("Action:")) { type = "action"; } else if (content.includes("Answer:")) { type = "answer"; } } if (type === null) { // `Thought:` is always present at the beginning of the output. type = "thought"; } reader.releaseLock(); if (!type) { throw new Error("Could not determine type of output"); } onResolveType(type, finalStream); // step 2: do the parsing from content switch(type){ case "action": { // have to consume the stream to get the full content const response = await agent.consumeAsyncIterable(peakStream, content); const [thought, action, input] = extractToolUse(response.content); const jsonStr = extractJsonStr(input); let json; try { json = JSON.parse(jsonStr); } catch (e) { json = actionInputParser(jsonStr); } reason = { type: "action", thought, action, input: json }; break; } case "thought": { const thought = "(Implicit) I can answer without any more tools!"; const response = await agent.consumeAsyncIterable(peakStream, content); reason = { type: "response", thought, response: { raw: peakStream, message: response } }; break; } case "answer": { const response = await agent.consumeAsyncIterable(peakStream, content); const [thought, answer] = extractFinalResponse(response.content); reason = { type: "response", thought, response: { raw: response, message: { role: "assistant", content: answer } } }; break; } default: { throw new Error(`Invalid type: ${type}`); } } } else { const content = utils.extractText(output.message.content); const type = 
content.includes("Answer:") ? "answer" : content.includes("Action:") ? "action" : "thought"; onResolveType(type, output); // step 2: do the parsing from content switch(type){ case "action": { const [thought, action, input] = extractToolUse(content); const jsonStr = extractJsonStr(input); let json; try { json = JSON.parse(jsonStr); } catch (e) { json = actionInputParser(jsonStr); } reason = { type: "action", thought, action, input: json }; break; } case "thought": { const thought = "(Implicit) I can answer without any more tools!"; reason = { type: "response", thought, response: { raw: output, message: { role: "assistant", content: utils.extractText(output.message.content) } } }; break; } case "answer": { const [thought, answer] = extractFinalResponse(content); reason = { type: "response", thought, response: { raw: output, message: { role: "assistant", content: answer } } }; break; } default: { throw new Error(`Invalid type: ${type}`); } } } if (reason === null) { throw new TypeError("Reason is null"); } return reason; }; const chatFormatter = async (tools, messages, currentReasons)=>{ const header = getReACTAgentSystemHeader(tools); const reasonMessages = []; for (const reason of currentReasons){ const response = await reasonFormatter(reason); reasonMessages.push({ role: reason.type === "observation" ? "user" : "assistant", content: response }); } return [ { role: "system", content: header }, ...messages, ...reasonMessages ]; }; class ReACTAgentWorker extends agent.AgentWorker { constructor(...args){ super(...args), this.taskHandler = ReActAgent.taskHandler; } } class ReActAgent extends agent.AgentRunner { constructor(params){ agent.validateAgentParams(params); super({ llm: params.llm ?? Settings.llm, chatHistory: params.chatHistory ?? [], runner: new ReACTAgentWorker(), systemPrompt: params.systemPrompt ?? null, tools: "tools" in params ? params.tools : params.toolRetriever.retrieve.bind(params.toolRetriever), verbose: params.verbose ?? 
false }); } createStore() { return { reasons: [] }; } static{ this.taskHandler = async (step, enqueueOutput)=>{ const { llm, stream, getTools } = step.context; const lastMessage = step.context.store.messages.at(-1).content; const tools = await getTools(lastMessage); const messages = await chatFormatter(tools, step.context.store.messages, step.context.store.reasons); const response = await llm.chat({ // @ts-expect-error boolean stream, messages }); const reason = await reACTOutputParser(response, (type, response)=>{ enqueueOutput({ taskStep: step, output: response, isLast: type !== "action" }); }); step.context.logger.log("current reason: %O", reason); step.context.store.reasons = [ ...step.context.store.reasons, reason ]; if (reason.type === "action") { const tool = tools.find((tool)=>tool.metadata.name === reason.action); const toolOutput = await agent.callTool(tool, { id: env.randomUUID(), input: reason.input, name: reason.action }, step.context.logger); step.context.store.reasons = [ ...step.context.store.reasons, { type: "observation", observation: toolOutput.output } ]; } }; } } exports.ReACTAgentWorker = ReACTAgentWorker; exports.ReActAgent = ReActAgent; Object.keys(agent).forEach(function (k) { if (k !== 'default' && !Object.prototype.hasOwnProperty.call(exports, k)) Object.defineProperty(exports, k, { enumerable: true, get: function () { return agent[k]; } }); });