UNPKG

@hashbrownai/react

Version:

React components for Hashbrown AI

875 lines (853 loc) 30.8 kB
import { jsx } from 'react/jsx-runtime';
import {
  createContext,
  createElement,
  useCallback,
  useContext,
  useEffect,
  useMemo,
  useRef,
  useState,
  useSyncExternalStore,
} from 'react';
import {
  ɵdeepEqual as _deepEqual,
  fryHashbrown,
  ɵcreateRuntimeImpl as _createRuntimeImpl,
  ɵcreateRuntimeFunctionImpl as _createRuntimeFunctionImpl,
  s,
  ɵui as _ui,
} from '@hashbrownai/core';

/**
 * Creates an object used to expose a component for use by the LLM.
 *
 * @example
 * ```ts
 * exposeComponent(
 *   CardComponent, // The React component to be exposed.
 *   {              // The exposed component configuration.
 *     name: 'CardComponent',
 *     description: 'Show a card with children components to the user',
 *     children: 'any',
 *     props: {
 *       title: s.string('The title of the card'),
 *       description: s.streaming.string('The description of the card'),
 *     },
 *   },
 * );
 * ```
 *
 * @param component - The component to be exposed.
 * @param config - The configuration object for the component, excluding the component itself.
 * @returns An object representing the component in order to expose it to the LLM.
 * @public
 */
function exposeComponent(component, config) {
  return { component, ...config };
}

const HashbrownContext = /*#__PURE__*/ createContext(undefined);

/**
 * The context provider for Hashbrown. Stores the URL, middleware, transport,
 * and structured-output emulation flag used when contacting the Hashbrown endpoint.
 *
 * @public
 * @example
 * ```ts
 * <HashbrownProvider url="https://your.api.local/chat">
 *   <App />
 * </HashbrownProvider>
 * ```
 */
const HashbrownProvider = (props) => {
  const { url, middleware, emulateStructuredOutput, transport, children } = props;

  return jsx(HashbrownContext.Provider, {
    value: { url, middleware, emulateStructuredOutput, transport },
    children,
  });
};

/**
 * Connects a Hashbrown Signal to React's reactivity system via
 * `useSyncExternalStore`. Values are compared with deep equality so that
 * structurally-equal emissions do not trigger re-renders.
 *
 * @param signal - The signal to connect to React.
 * @returns The value contained in the signal.
 */
function useHashbrownSignal(signal) {
  const lastValue = useRef(signal());
  // The signal replays its current value on subscribe; skip that first
  // emission so subscribing does not immediately schedule a render.
  const hasSkippedFirstCall = useRef(false);
  const equality = useCallback((a, b) => _deepEqual(a, b), []);

  const read = useCallback(() => {
    const value = signal();
    // Only swap the cached reference when the value is structurally
    // different, so React sees a stable snapshot for equal values.
    if (!equality(value, lastValue.current)) {
      lastValue.current = value;
    }
    return lastValue.current;
  }, [signal, equality]);

  const getServerSnapshot = useCallback(() => lastValue.current, []);

  const subscribe = useCallback(
    (onStoreChange) => {
      let lastRead;
      const cleanup = signal.subscribe((value) => {
        if (!hasSkippedFirstCall.current) {
          hasSkippedFirstCall.current = true;
          return;
        }
        const currentValue = read();
        if (currentValue !== lastRead) {
          lastRead = currentValue;
          onStoreChange();
        }
      });
      return () => {
        cleanup();
        hasSkippedFirstCall.current = false;
      };
    },
    [signal, read],
  );

  return useSyncExternalStore(subscribe, read, getServerSnapshot);
}

/**
 * This React hook creates a chat instance used to interact with the LLM.
 * The result object contains functions and state enabling you to send and
 * receive messages and monitor the state of the chat.
 *
 * The `useChat` hook provides the most basic functionality for un-structured
 * chats, such as general chats and natural language controls.
 *
 * @public
 * @returns An object containing chat state and functions to interact with the chat.
 * @typeParam Tools - The set of tool definitions available to the chat.
 * @example
 * ```tsx
 * const MyChatComponent = () => {
 *   const { messages, sendMessage, status } = useChat({
 *     model: 'gpt-4o',
 *     system: 'You are a helpful assistant.',
 *     tools: [],
 *   });
 *
 *   const handleSendMessage = () => {
 *     sendMessage({ role: 'user', content: 'Hello, how are you?' });
 *   };
 *
 *   return (
 *     <div>
 *       <button onClick={handleSendMessage}>Send Message</button>
 *       <div>Status: {status}</div>
 *       <ul>
 *         {messages.map((msg, index) => (
 *           <li key={index}>{msg.content}</li>
 *         ))}
 *       </ul>
 *     </div>
 *   );
 * };
 * ```
 */
function useChat(options) {
  // The tools array itself is spread as the dependency list, so the memo
  // invalidates when any individual tool changes identity.
  const tools = useMemo(
    () => options.tools ?? [],
    // eslint-disable-next-line react-hooks/exhaustive-deps
    options.tools ?? [],
  );
  const config = useContext(HashbrownContext);

  if (!config) {
    throw new Error('HashbrownContext not found');
  }

  const hashbrownRef = useRef(null);

  // Lazily fry exactly one hashbrown instance per component instance.
  if (!hashbrownRef.current) {
    hashbrownRef.current = fryHashbrown({
      apiUrl: config.url,
      middleware: config.middleware,
      emulateStructuredOutput: config.emulateStructuredOutput,
      debugName: options.debugName,
      model: options.model,
      system: options.system,
      tools,
      debounce: options.debounceTime,
      retries: options.retries,
      transport: options.transport ?? config.transport,
      ui: false,
      threadId: options.threadId,
    });
  }

  function getHashbrown() {
    const instance = hashbrownRef.current;
    if (!instance) {
      throw new Error('Hashbrown not found');
    }
    return instance;
  }

  // sizzle() starts the instance and returns its teardown function.
  useEffect(() => getHashbrown().sizzle(), []);

  // Keep the instance in sync with the latest provider config and options.
  useEffect(() => {
    getHashbrown().updateOptions({
      apiUrl: config.url,
      middleware: config.middleware,
      emulateStructuredOutput: config.emulateStructuredOutput,
      debugName: options.debugName,
      model: options.model,
      system: options.system,
      tools,
      debounce: options.debounceTime,
      retries: options.retries,
      transport: options.transport ?? config.transport,
      ui: false,
      threadId: options.threadId,
    });
  }, [
    config.url,
    config.middleware,
    config.emulateStructuredOutput,
    config.transport,
    options.debounceTime,
    options.debugName,
    options.model,
    options.retries,
    options.system,
    options.transport,
    options.threadId,
    tools,
  ]);

  const internalMessages = useHashbrownSignal(getHashbrown().messages);
  const isReceiving = useHashbrownSignal(getHashbrown().isReceiving);
  const isSending = useHashbrownSignal(getHashbrown().isSending);
  const isGenerating = useHashbrownSignal(getHashbrown().isGenerating);
  const isRunningToolCalls = useHashbrownSignal(getHashbrown().isRunningToolCalls);
  const isLoading = useHashbrownSignal(getHashbrown().isLoading);
  const exhaustedRetries = useHashbrownSignal(getHashbrown().exhaustedRetries);
  const error = useHashbrownSignal(getHashbrown().error);
  const sendingError = useHashbrownSignal(getHashbrown().sendingError);
  const generatingError = useHashbrownSignal(getHashbrown().generatingError);
  const lastAssistantMessage = useHashbrownSignal(getHashbrown().lastAssistantMessage);
  const isLoadingThread = useHashbrownSignal(getHashbrown().isLoadingThread);
  const isSavingThread = useHashbrownSignal(getHashbrown().isSavingThread);
  const threadLoadError = useHashbrownSignal(getHashbrown().threadLoadError);
  const threadSaveError = useHashbrownSignal(getHashbrown().threadSaveError);

  const sendMessage = useCallback((message) => {
    getHashbrown().sendMessage(message);
  }, []);

  const setMessages = useCallback((messages) => {
    getHashbrown().setMessages(messages);
  }, []);

  // Removes the trailing assistant message (if any) so it is regenerated.
  // Returns true when a message was removed. FIX: guard the empty-messages
  // case, which previously threw a TypeError.
  const reload = useCallback(() => {
    const lastMessage = internalMessages[internalMessages.length - 1];
    if (lastMessage?.role === 'assistant') {
      getHashbrown().setMessages(internalMessages.slice(0, -1));
      return true;
    }
    return false;
  }, [internalMessages]);

  const stop = useCallback((clearStreamingMessage = false) => {
    getHashbrown().stop(clearStreamingMessage);
  }, []);

  return {
    messages: internalMessages,
    sendMessage,
    setMessages,
    stop,
    reload,
    error,
    isGenerating,
    isReceiving,
    isSending,
    isRunningToolCalls,
    isLoading,
    exhaustedRetries,
    sendingError,
    generatingError,
    lastAssistantMessage,
    isLoadingThread,
    isSavingThread,
    threadLoadError,
    threadSaveError,
  };
}

/**
 * This React hook creates a completion instance used to interact with the LLM.
 * The result object contains the predicted output and state for monitoring
 * the completion.
 *
 * @public
 * @remarks
 * The `useCompletion` hook provides functionality for completing unstructured
 * inputs with predicted unstructured outputs. This is useful for things like
 * natural language autocompletions.
 *
 * @example
 * ```ts
 * const { output } = useCompletion({
 *   model: 'gpt-4o-mini',
 *   input: firstName,
 *   system: `Help the user generate a last name for the given first name.`,
 * });
 * ```
 */
function useCompletion(options) {
  const { setMessages, ...chat } = useChat({ ...options });

  // Mirror the input into the chat as a single user message.
  useEffect(() => {
    if (!options.input) return;
    setMessages([{ role: 'user', content: options.input }]);
  }, [setMessages, options.input]);

  // The output is the first assistant message with string content and no
  // pending tool calls.
  const output = useMemo(() => {
    const message = chat.messages.find(
      (message) =>
        message.role === 'assistant' &&
        !(message.toolCalls && message.toolCalls.length) &&
        message.content,
    );
    if (!message) return null;
    if (typeof message.content !== 'string') return null;
    return message.content;
  }, [chat.messages]);

  return {
    output,
    reload: chat.reload,
    error: chat.error,
    isLoading: chat.isLoading,
    isReceiving: chat.isReceiving,
    isSending: chat.isSending,
    isGenerating: chat.isGenerating,
    isRunningToolCalls: chat.isRunningToolCalls,
    sendingError: chat.sendingError,
    generatingError: chat.generatingError,
    exhaustedRetries: chat.exhaustedRetries,
    isLoadingThread: chat.isLoadingThread,
    isSavingThread: chat.isSavingThread,
    threadLoadError: chat.threadLoadError,
    threadSaveError: chat.threadSaveError,
  };
}

/* eslint-disable @typescript-eslint/no-explicit-any */
/**
 * Creates a new runtime.
 *
 * @param options - The options for creating the runtime.
 * @returns A reference to the runtime.
 *
 * @public
 */
function useRuntime(options) {
  // The functions array is spread as the dependency list (same trick as
  // tools in useChat) so the memo tracks each function's identity.
  // eslint-disable-next-line react-hooks/exhaustive-deps
  const functions = useMemo(() => options.functions, options.functions ?? []);
  const runtime = useMemo(
    () => _createRuntimeImpl({ functions, timeout: options.timeout }),
    [functions, options.timeout],
  );
  return runtime;
}

/* eslint-disable no-redeclare */
/* eslint-disable @typescript-eslint/no-explicit-any */
/**
 * Creates a stable runtime function from a handler plus optional arg/result
 * schemas. The schemas are captured once and assumed never to change.
 *
 * @public
 */
function useRuntimeFunction(cfg) {
  const argsSchemaRef = useRef('args' in cfg ? cfg.args : undefined);
  const resultSchemaRef = useRef('result' in cfg ? cfg.result : undefined);
  // The handler is only re-created when its declared deps change, which
  // enables the use of anonymous functions as handlers.
  // eslint-disable-next-line react-hooks/exhaustive-deps
  const handler = useCallback(cfg.handler, cfg.deps);
  const fn = useMemo(() => {
    return _createRuntimeFunctionImpl({
      args: argsSchemaRef.current,
      result: resultSchemaRef.current,
      handler,
      name: cfg.name,
      description: cfg.description,
    });
  }, [handler, cfg.name, cfg.description]);
  return fn;
}

/**
 * This React hook creates a chat instance used to interact with the LLM.
 * The result object contains functions and state enabling you to send and
 * receive messages and monitor the state of the chat.
 *
 * @public
 * @remarks
 * The `useStructuredChat` hook provides functionality for structured chats,
 * used when you want the LLM to generate structured data according to a
 * defined schema. Particularly useful for:
 * - Generating typed data structures
 * - Creating form responses
 * - Building UI components
 * - Extracting information into a specific format
 *
 * @returns An object containing chat state and functions to interact with the chat.
 *
 * @example
 * In this example, the LLM responds with a JSON object containing translations
 * of the input message into English, Spanish, and French.
 * ```tsx
 * const { messages, sendMessage } = useStructuredChat({
 *   model: 'gpt-4o',
 *   system: 'You are a helpful translator that provides accurate translations.',
 *   schema: s.object('Translations', {
 *     english: s.string('English translation'),
 *     spanish: s.string('Spanish translation'),
 *     french: s.string('French translation')
 *   }),
 * });
 * ```
 */
function useStructuredChat(options) {
  const config = useContext(HashbrownContext);

  if (!config) {
    throw new Error('HashbrownContext not found');
  }

  // Same spread-as-deps trick as useChat.
  const tools = useMemo(
    () => options.tools ?? [],
    // eslint-disable-next-line react-hooks/exhaustive-deps
    options.tools ?? [],
  );
  // The schema is captured once and assumed never to change.
  const [schema] = useState(options.schema);
  const hashbrown = useRef(null);

  if (!hashbrown.current) {
    hashbrown.current = fryHashbrown({
      apiUrl: config.url,
      middleware: config.middleware,
      emulateStructuredOutput: config.emulateStructuredOutput,
      model: options.model,
      system: options.system,
      responseSchema: schema,
      tools,
      debugName: options.debugName,
      debounce: options.debounceTime,
      retries: options.retries,
      transport: options.transport ?? config.transport,
      ui: options.ui ?? false,
      threadId: options.threadId,
    });
  }

  function getHashbrown() {
    const instance = hashbrown.current;
    if (!instance) {
      throw new Error('Hashbrown not found');
    }
    return instance;
  }

  // sizzle() starts the instance and returns its teardown function.
  useEffect(() => getHashbrown().sizzle(), []);

  // Keep the instance in sync with the latest provider config and options.
  useEffect(() => {
    getHashbrown().updateOptions({
      apiUrl: config.url,
      middleware: config.middleware,
      emulateStructuredOutput: config.emulateStructuredOutput,
      model: options.model,
      system: options.system,
      responseSchema: schema,
      tools,
      debugName: options.debugName,
      debounce: options.debounceTime,
      retries: options.retries,
      transport: options.transport ?? config.transport,
      ui: options.ui ?? false,
      threadId: options.threadId,
    });
  }, [
    config.url,
    config.middleware,
    config.emulateStructuredOutput,
    config.transport,
    options.model,
    options.system,
    options.debugName,
    schema,
    tools,
    options.debounceTime,
    options.retries,
    options.transport,
    options.ui,
    options.threadId,
  ]);

  const internalMessages = useHashbrownSignal(hashbrown.current.messages);
  const isReceiving = useHashbrownSignal(hashbrown.current.isReceiving);
  const isSending = useHashbrownSignal(hashbrown.current.isSending);
  const isGenerating = useHashbrownSignal(hashbrown.current.isGenerating);
  const isRunningToolCalls = useHashbrownSignal(hashbrown.current.isRunningToolCalls);
  const isLoading = useHashbrownSignal(hashbrown.current.isLoading);
  const exhaustedRetries = useHashbrownSignal(hashbrown.current.exhaustedRetries);
  const error = useHashbrownSignal(hashbrown.current.error);
  const sendingError = useHashbrownSignal(hashbrown.current.sendingError);
  const generatingError = useHashbrownSignal(hashbrown.current.generatingError);
  const lastAssistantMessage = useHashbrownSignal(hashbrown.current.lastAssistantMessage);
  const isLoadingThread = useHashbrownSignal(hashbrown.current.isLoadingThread);
  const isSavingThread = useHashbrownSignal(hashbrown.current.isSavingThread);
  const threadLoadError = useHashbrownSignal(hashbrown.current.threadLoadError);
  const threadSaveError = useHashbrownSignal(hashbrown.current.threadSaveError);

  const sendMessage = useCallback((message) => {
    getHashbrown().sendMessage(message);
  }, []);

  const stop = useCallback((clearStreamingMessage = false) => {
    getHashbrown().stop(clearStreamingMessage);
  }, []);

  const resendMessages = useCallback(() => {
    getHashbrown().resendMessages();
  }, []);

  const setMessages = useCallback((messages) => {
    getHashbrown().setMessages(messages);
  }, []);

  // Removes the trailing assistant message (if any) so it is regenerated.
  // Returns true when a message was removed. FIX: guard the empty-messages
  // case, which previously threw a TypeError.
  const reload = useCallback(() => {
    const lastMessage = internalMessages[internalMessages.length - 1];
    if (lastMessage?.role === 'assistant') {
      getHashbrown().setMessages(internalMessages.slice(0, -1));
      return true;
    }
    return false;
  }, [internalMessages]);

  return {
    messages: internalMessages,
    stop,
    sendMessage,
    resendMessages,
    setMessages,
    reload,
    error,
    isGenerating,
    isReceiving,
    isSending,
    isRunningToolCalls,
    isLoading,
    exhaustedRetries,
    sendingError,
    generatingError,
    lastAssistantMessage,
    isLoadingThread,
    isSavingThread,
    threadLoadError,
    threadSaveError,
  };
}

/**
 * This React hook creates a completion instance that predicts structured data
 * based on input context.
 *
 * @public
 * @typeParam Input - The type of the input to predict from.
 * @typeParam Schema - The schema to use for the chat.
 * @remarks
 * The `useStructuredCompletion` hook is particularly useful for:
 * - Smart form field suggestions
 * - Context-aware recommendations
 * - Predictive UI generation
 * - Intelligent defaults
 *
 * @returns An object containing the predicted structured output and completion state.
 *
 * @example
 * In this example, the LLM predicts a color palette based on a given theme or mood.
 * ```tsx
 * const { output } = useStructuredCompletion({
 *   model: 'gpt-4o',
 *   system: `Predict a color palette based on the given mood or theme. For example,
 *   if the theme is "Calm Ocean", suggest appropriate colors.`,
 *   input: theme,
 *   schema: s.object('Color Palette', {
 *     colors: s.array(
 *       'The colors in the palette',
 *       s.string('Hex color code')
 *     )
 *   })
 * });
 * ```
 */
function useStructuredCompletion(options) {
  const { setMessages, ...chat } = useStructuredChat({
    ...options,
    ui: options.ui ?? false,
  });

  // Mirror the input into the chat as a single user message.
  useEffect(() => {
    if (!options.input) return;
    setMessages([{ role: 'user', content: options.input }]);
  }, [setMessages, options.input]);

  const output = useMemo(() => {
    const message = chat.messages.find(
      (message) => message.role === 'assistant' && message.content,
    );
    if (!message) return null;
    return message.content;
  }, [chat.messages]);

  return {
    output,
    reload: chat.reload,
    error: chat.error,
    isLoading: chat.isLoading,
    isReceiving: chat.isReceiving,
    isSending: chat.isSending,
    isGenerating: chat.isGenerating,
    isRunningToolCalls: chat.isRunningToolCalls,
    sendingError: chat.sendingError,
    generatingError: chat.generatingError,
    exhaustedRetries: chat.exhaustedRetries,
    isLoadingThread: chat.isLoadingThread,
    isSavingThread: chat.isSavingThread,
    threadLoadError: chat.threadLoadError,
    threadSaveError: chat.threadSaveError,
  };
}

/* eslint no-redeclare: off */
/* eslint-disable @typescript-eslint/no-explicit-any */
/**
 * Creates a stable tool definition for use with the chat hooks.
 *
 * @public
 */
function useTool(input) {
  const { name, description, handler, deps } = input;
  // Assumes the schema will never change.
  const [schema] = useState(
    'schema' in input ? input.schema : s.object('Empty schema', {}),
  );
  // Assumes the handler should only change if its deps change, which
  // enables the use of anonymous functions in the handler.
  // eslint-disable-next-line react-hooks/exhaustive-deps
  const stableHandler = useCallback(handler, deps);
  const tool = useMemo(() => {
    return { name, description, schema, handler: stableHandler };
  }, [name, description, schema, stableHandler]);
  return tool;
}

/**
 * Creates a tool that allows the LLM to run JavaScript code. It is run in a
 * stateful JavaScript environment, with no access to the internet, the DOM,
 * or any function that you have not explicitly defined.
 *
 * @public
 * @param options - The options for creating the tool.
 * @returns The tool.
 */
function useToolJavaScript({ runtime }) {
  return useTool({
    name: 'javascript',
    description: [
      'Whenever you send a message containing JavaScript code to javascript, it will be',
      'executed in a stateful JavaScript environment. javascript will respond with the output',
      `of the execution or time out after ${runtime.timeout / 1000} seconds. Internet access`,
      'for this session is disabled. Do not make external web requests or API calls as they',
      'will fail.',
      '',
      'Important: Prefer calling javascript once with a large amount of code, rather than calling it',
      'multiple times with smaller amounts of code.',
      '',
      'The following functions are available to you:',
      runtime.describe(),
    ].join('\n'),
    schema: s.streaming.object('The result', {
      code: s.streaming.string('The JavaScript code to run'),
    }),
    deps: [runtime],
    // FIX: native async arrow replaces the transpiled __awaiter(this, ...)
    // wrapper, which referenced an undefined module-scope `this`.
    handler: async ({ code }, abortSignal) => runtime.run(code, abortSignal),
  });
}

/**
 * This React hook creates a chat instance that can generate and render UI
 * components.
 *
 * @public
 * @typeParam Tools - The set of tool definitions available to the chat.
 * @remarks
 * The `useUiChat` hook is particularly useful for:
 * - Dynamic UI generation
 * - Interactive chat interfaces
 * - Component-based responses
 * - Building chat-based UIs
 *
 * @returns An object containing chat state, functions to interact with the
 * chat, and rendered UI components.
 *
 * @example
 * ```tsx
 * const { messages, sendMessage } = useUiChat({
 *   model: 'gpt-4o',
 *   system: 'You are a helpful assistant that can generate UI components.',
 *   components: [
 *     exposeComponent(Button, {
 *       name: 'Button',
 *       description: 'A clickable button component',
 *       props: {
 *         label: s.string('The text to display on the button'),
 *         onClick: s.function('Function to call when clicked')
 *       }
 *     })
 *   ]
 * });
 * ```
 */
const useUiChat = (options) => {
  const { components: initialComponents, ...chatOptions } = options;
  const [components, setComponents] = useState(initialComponents);
  // Flattened once from the initial set; used to resolve $tag -> component.
  const [flattenedComponents] = useState(_ui.flattenComponents(initialComponents));

  const ui = useMemo(() => {
    return s.object('UI', {
      ui: s.streaming.array(
        'List of elements',
        _ui.createComponentSchema(components),
      ),
    });
  }, [components]);

  const systemAsString = useMemo(() => {
    if (typeof chatOptions.system === 'string') {
      return chatOptions.system;
    }
    const output = chatOptions.system.compile(components, ui);
    if (chatOptions.system.diagnostics.length > 0) {
      throw new Error(
        `System prompt has ${chatOptions.system.diagnostics.length} errors: \n\n${chatOptions.system.diagnostics.map((d) => d.message).join('\n\n')}`,
      );
    }
    return output;
  }, [chatOptions.system, components, ui]);

  const chat = useStructuredChat({
    ...chatOptions,
    schema: ui,
    system: systemAsString,
    ui: true,
  });

  // Recursively turns the structured node tree into React elements.
  const buildContent = useCallback(
    (nodes, parentKey = '') => {
      if (typeof nodes === 'string') {
        return nodes;
      }
      const elements = nodes.map((element, index) => {
        const key = `${parentKey}_${index}`;
        const { $tag, $children, $props } = element;
        const componentType = flattenedComponents.get($tag)?.component;
        if ($tag && componentType) {
          const children = element.$children ? buildContent($children, key) : null;
          return /*#__PURE__*/ createElement(componentType, {
            ...$props,
            children,
            key,
          });
        }
        throw new Error(`Unknown element type. ${$tag}`);
      });
      return elements;
    },
    [flattenedComponents],
  );

  // Assistant messages are augmented with a rendered `ui` property.
  const uiChatMessages = useMemo(() => {
    return chat.messages.map((message) => {
      if (message.role === 'assistant') {
        return {
          ...message,
          ui: message.content?.ui ? buildContent(message.content.ui) : null,
        };
      }
      return message;
    });
  }, [buildContent, chat.messages]);

  const lastAssistantMessage = useMemo(() => {
    return uiChatMessages.findLast((message) => message.role === 'assistant');
  }, [uiChatMessages]);

  return { ...chat, messages: uiChatMessages, setComponents, lastAssistantMessage };
};

/**
 * A React hook that generates UI completions using the provided component set.
 *
 * @public
 */
const useUiCompletion = (options) => {
  const { components: initialComponents, system, tools, ...completionOptions } = options;
  const [components, setComponents] = useState(initialComponents);
  // Flattened once from the initial set; used to resolve $tag -> component.
  const [flattenedComponents] = useState(_ui.flattenComponents(initialComponents));

  const uiSchema = useMemo(() => {
    return s.object('UI', {
      ui: s.streaming.array(
        'List of elements',
        _ui.createComponentSchema(components),
      ),
    });
  }, [components]);

  const systemAsString = useMemo(() => {
    if (typeof system === 'string') {
      return system;
    }
    const compiled = system.compile(components, uiSchema);
    if (system.diagnostics.length > 0) {
      throw new Error(
        `System prompt has ${system.diagnostics.length} errors: \n\n${system.diagnostics.map((d) => d.message).join('\n\n')}`,
      );
    }
    return compiled;
  }, [system, components, uiSchema]);

  const structured = useStructuredCompletion({
    ...completionOptions,
    schema: uiSchema,
    system: systemAsString,
    tools,
    ui: true,
  });

  // Recursively turns the structured node tree into React elements.
  const buildContent = useCallback(
    (nodes, parentKey = '') => {
      if (typeof nodes === 'string') {
        return nodes;
      }
      const elements = nodes.map((node, index) => {
        const key = `${parentKey}_${index}`;
        const { $tag, $props, $children } = node;
        const componentType = flattenedComponents.get($tag)?.component;
        if ($tag && componentType) {
          const children = node.$children ? buildContent($children, key) : null;
          return /*#__PURE__*/ createElement(componentType, {
            ...$props,
            children,
            key,
          });
        }
        throw new Error(`Unknown element type. ${$tag}`);
      });
      return elements;
    },
    [flattenedComponents],
  );

  const rawOutput = structured.output;
  const message = useMemo(() => {
    if (!rawOutput) {
      return null;
    }
    return {
      role: 'assistant',
      content: rawOutput,
      toolCalls: [],
      ui: rawOutput.ui ? buildContent(rawOutput.ui) : null,
    };
  }, [rawOutput, buildContent]);

  return {
    ...structured,
    output: message,
    ui: message?.ui ?? null,
    rawOutput,
    setComponents,
  };
};

export {
  HashbrownProvider,
  exposeComponent,
  useChat,
  useCompletion,
  useRuntime,
  useRuntimeFunction,
  useStructuredChat,
  useStructuredCompletion,
  useTool,
  useToolJavaScript,
  useUiChat,
  useUiCompletion,
};