@coursebuilder/core

Core package for Course Builder

import { streamingChatPromptExecutor } from "./chunk-2G25HKU3.js";
import { z } from "./chunk-JLNB6NRA.js";
import { __name } from "./chunk-VLQXSCFN.js";

// src/inngest/co-gardener/resource-chat.ts
import { NonRetriableError } from "inngest";
import { Liquid } from "liquidjs";
var ChatResourceSchema = z.object({
  id: z.string(),
  type: z.string(),
  updatedAt: z.string().nullable(),
  createdAt: z.string().nullable(),
  title: z.string().nullable().optional(),
  body: z.string().nullable().optional(),
  transcript: z.string().nullable().optional(),
  wordLevelSrt: z.string().nullable().optional(),
  resources: z.array(z.any()).optional()
});
var RESOURCE_CHAT_REQUEST_EVENT = "resource/chat-request-event";
var resourceChatConfig = {
  id: `resource-chat`,
  name: "Resource Chat",
  rateLimit: {
    key: "event.user.id",
    limit: 5,
    period: "1m"
  }
};
var resourceChatTrigger = {
  event: RESOURCE_CHAT_REQUEST_EVENT
};
var ChatCompletionRequestMessageRoleEnum = /* @__PURE__ */ function(ChatCompletionRequestMessageRoleEnum2) {
  ChatCompletionRequestMessageRoleEnum2["System"] = "system";
  ChatCompletionRequestMessageRoleEnum2["User"] = "user";
  ChatCompletionRequestMessageRoleEnum2["Assistant"] = "assistant";
  ChatCompletionRequestMessageRoleEnum2["Tool"] = "tool";
  return ChatCompletionRequestMessageRoleEnum2;
}({});
var resourceChatHandler = /* @__PURE__ */ __name(async ({ event, step, openaiProvider, partyProvider, db }) => {
  const resourceId = event.data.resourceId;
  const workflowTrigger = event.data.selectedWorkflow;
  const currentUserMessage = event.data.messages[event.data.messages.length - 1];
  if (currentUserMessage?.content) {
    await step.run(`partykit broadcast user prompt [${resourceId}]`, async () => {
      await partyProvider.broadcastMessage({
        body: {
          body: currentUserMessage.content,
          requestId: resourceId,
          name: "resource.chat.prompted",
          // @ts-expect-error
          userId: event.user.id
        },
        roomId: resourceId
      });
    });
  }
  const resource = await step.run("get the resource", async () => {
    return db.getContentResource(resourceId);
  });
  if (!resource) {
    throw new NonRetriableError(`Resource not found for id (${resourceId})`);
  }
  const videoResource = await step.run("get the video resource", async () => {
    return db.getVideoResource(resource.resources?.[0]?.resource.id);
  });
  const messages = await resourceChatWorkflowExecutor({
    db,
    openaiProvider,
    partyProvider,
    step,
    workflowTrigger,
    resourceId,
    resource: {
      ...videoResource,
      ...resource,
      ...resource.fields,
      resources: resource.resources ?? []
    },
    messages: event.data.messages,
    // @ts-expect-error
    user: event.user
  });
  return { resource: { ...videoResource, ...resource }, messages };
}, "resourceChatHandler");
async function resourceChatWorkflowExecutor({
  step,
  workflowTrigger,
  resourceId,
  messages,
  resource,
  user,
  openaiProvider,
  partyProvider,
  db,
  model
}) {
  const basicPrompt = {
    id: "basic",
    name: "Basic",
    type: "prompt",
    fields: {
      body: `# Instructions

Pause. Take a deep breath and think. This is important. Think step by step.

You are serving as a writing assistant for a content creator that is publishing a {% if transcript %}video based{% endif %} post for software developers. The content creator will ask questions and expect concise answers that aren't corny or generic. Keep responses scoped to the post and it's direct contents. Do not include additional information. Do not include information that is not directly related to the post or the {% if transcript %}video{% endif %}.

Use simple language. Use only most popular 2000 english words {% if transcript %}and words used in the transcript{% endif %}. Simple is better. Flourish is bad. **People will hate you if you try to sound clever.**

{% if wordLevelSrt %}
add screenshots from the video when they are relevant and would be useful to the reader using the following template replacing {{timestampInSeconds}} with the time noted in the transcript:

![{{descriptiveAltTextForVisuallyImpaired}}](https://image.mux.com/{{muxPlaybackId}}/thumbnail.png?time={{timestampInSeconds}})

be precise with the timestamps!

{% if wordLevelSrt %}
use this word level SRT to get the exact timestamp:

word level srt start

{{wordLevelSrt}}

{% endif %}

word level srt end
{% endif %}

Get it right and there's $200 cash gratuity in it for you ;)

{% if transcript %}The goal is to build a really good written version of the existing video, not edit the video itself. The video is done. The post transcript is the final representation of the video. The post transcript is the source of truth.{% endif %}

Keep the language simple and don't use words not in the post {% if transcript %}transcript{% endif %}.

{% if transcript %}Do not make direct references to the video. Do not make direct references to the transcript.{% endif %} Do not make direct references to the content creator. Do not make direct references to 'the post'. Jump right to the point.

Post Type: {{type}}

Post Title: {{title}}

{% if transcript %}Post Transcript: {{transcript}}{% endif %}

Post Body: {{body}}
`
    }
  };
  let prompt = workflowTrigger === "basic" ? basicPrompt : await step.run("Load Prompt", async () => {
    return db.getContentResource(workflowTrigger);
  });
  if (!prompt) {
    prompt = basicPrompt;
  }
  let systemPrompt = {
    role: "system",
    content: prompt.fields.body
  };
  let seedMessages = [];
  try {
    const actionParsed = z.array(
      z.object({
        role: z.enum(["system", "user", "assistant", "tool"]),
        content: z.string()
      })
    ).parse(JSON.parse(prompt.fields.body));
    const actionMessages = [];
    for (const actionMessage of actionParsed) {
      const liquidParsedContent = await step.run("parse json content", async () => {
        const engine = new Liquid();
        const contentString = typeof actionMessage.content === "string" ? actionMessage.content : JSON.stringify(actionMessage.content);
        return await engine.parseAndRender(contentString, { ...resource });
      });
      actionMessages.push({
        role: actionMessage.role,
        content: liquidParsedContent
      });
    }
    if (actionMessages.length > 0) {
      ;[systemPrompt = { role: "system", content: prompt.fields.body }, ...seedMessages] = actionMessages;
    }
  } catch (e) {
    systemPrompt = await step.run(`parse system prompt`, async () => {
      try {
        const engine = new Liquid();
        const originalSystemPrompt = systemPrompt;
        const contentString = typeof originalSystemPrompt.content === "string" ? originalSystemPrompt.content : JSON.stringify(originalSystemPrompt.content);
        return {
          role: originalSystemPrompt.role,
          content: await engine.parseAndRender(contentString || "", resource)
        };
      } catch (e2) {
        console.error(e2.message);
        return {
          role: "system",
          content: prompt.fields.body
        };
      }
    });
  }
  if (messages.length <= 2 && systemPrompt) {
    messages = [systemPrompt, ...seedMessages, ...messages];
  }
  const currentUserMessage = messages[messages.length - 1];
  const currentResourceMetadata = messages[messages.length - 2];
  messages = await step.run("answer the user prompt", async () => {
    if (!currentUserMessage) {
      throw new Error("No user message");
    }
    const engine = new Liquid();
    const currentUserContentString = typeof currentUserMessage.content === "string" ? currentUserMessage.content : JSON.stringify(currentUserMessage.content);
    currentUserMessage.content = await engine.parseAndRender(currentUserContentString ?? "", resource);
    if (currentResourceMetadata) {
      const metadataContentString = typeof currentResourceMetadata.content === "string" ? currentResourceMetadata.content : JSON.stringify(currentResourceMetadata.content);
      currentResourceMetadata.content = await engine.parseAndRender(metadataContentString ?? "", resource);
    }
    return streamingChatPromptExecutor({
      requestId: resourceId,
      promptMessages: messages,
      model: prompt.fields.model || model || "gpt-4-turbo",
      provider: openaiProvider
    });
  });
  await step.run(`partykit broadcast [${resourceId}]`, async () => {
    return await partyProvider.broadcastMessage({
      body: {
        body: messages,
        requestId: resourceId,
        name: "resource.chat.completed",
        metadata: {
          workflow: workflowTrigger
        }
      },
      roomId: resourceId
    });
  });
  return messages;
}
__name(resourceChatWorkflowExecutor, "resourceChatWorkflowExecutor");
var resourceChat = {
  config: resourceChatConfig,
  trigger: resourceChatTrigger,
  handler: resourceChatHandler
};
export {
  ChatResourceSchema,
  RESOURCE_CHAT_REQUEST_EVENT,
  resourceChatConfig,
  resourceChatTrigger,
  ChatCompletionRequestMessageRoleEnum,
  resourceChatHandler,
  resourceChatWorkflowExecutor,
  resourceChat
};
//# sourceMappingURL=chunk-WD7V3XGS.js.map
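
For orientation: this chunk exports resourceChat, which bundles the config (rate limited to 5 requests per user per minute), trigger, and handler of an Inngest function that listens for the "resource/chat-request-event" event. The sketch below is not part of this package; it shows, under assumptions, how an application might dispatch that event with a recent Inngest client. The app id, resource id, user id, and message text are placeholders, and the shape of data is inferred from how resourceChatHandler reads event.data above.

import { Inngest } from "inngest";

// Hypothetical client id; use your own application's Inngest client.
const inngest = new Inngest({ id: "course-builder-app" });

export async function requestResourceChat() {
  await inngest.send({
    // Same value as RESOURCE_CHAT_REQUEST_EVENT exported by this chunk.
    name: "resource/chat-request-event",
    data: {
      resourceId: "post_123", // placeholder; the handler loads this content resource from db
      selectedWorkflow: "basic", // "basic" uses the built-in prompt; any other value is treated as a stored prompt resource id
      messages: [{ role: "user", content: "Draft an intro paragraph for this post." }],
    },
    // The handler broadcasts and rate-limits per event.user.id.
    user: { id: "user_456" },
  });
}

When selectedWorkflow is not "basic", resourceChatWorkflowExecutor loads that resource with db.getContentResource and, if its fields.body parses as a JSON array of { role, content } messages, uses the first entry as the system prompt and the rest as seed messages, rendering each through Liquid against the resource; otherwise the body is rendered as a single Liquid system prompt.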