@coursebuilder/core
Core package for Course Builder
src/providers/openai.ts (source, via the package's source map)
{"version":3,"sources":["../../src/providers/openai.ts"],"sourcesContent":["import { createOpenAI } from '@ai-sdk/openai'\nimport { streamText, type CoreMessage } from 'ai'\n\nimport { AIOutput } from '../types'\n\nexport const STREAM_COMPLETE = `\\\\ok`\n\n/**\n * PartyKit chunk publisher that buffers and sends chunks at intervals\n * to maintain the expected streaming behavior for the UI\n */\nclass PartyKitChunkPublisher {\n\trequestId: string\n\tinterval = 250\n\tbuffer: {\n\t\tcontents: string\n\t\tsignal?: Promise<unknown>\n\t}\n\tpartyUrl: string\n\n\tconstructor(requestId: string, partyUrlBase: string) {\n\t\tthis.requestId = requestId\n\t\tthis.buffer = {\n\t\t\tcontents: '',\n\t\t}\n\t\tthis.partyUrl = `${partyUrlBase}/party/${requestId}`\n\t}\n\n\tasync publishMessage(message: string) {\n\t\tawait this.sendToPartyKit(message, this.requestId, this.partyUrl)\n\t}\n\n\tasync appendToBufferAndPublish(text: string) {\n\t\tlet resolve = (_val?: any) => {}\n\t\tthis.buffer.contents += text\n\n\t\tif (this.buffer.signal) {\n\t\t\t// Already enqueued.\n\t\t\treturn\n\t\t}\n\n\t\tthis.buffer.signal = new Promise((r) => {\n\t\t\tresolve = r\n\t\t})\n\n\t\tsetTimeout(() => {\n\t\t\tif (this.buffer.contents.length === 0) {\n\t\t\t\tresolve()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tthis.sendToPartyKit(this.buffer.contents, this.requestId, this.partyUrl)\n\t\t\tresolve()\n\t\t\tthis.buffer = {\n\t\t\t\tcontents: '',\n\t\t\t}\n\t\t}, this.interval)\n\t}\n\n\tasync waitForBuffer() {\n\t\tawait this.buffer.signal\n\t}\n\n\tprivate async sendToPartyKit(\n\t\tbody: string,\n\t\trequestId: string,\n\t\tpartyUrl: string,\n\t) {\n\t\treturn await fetch(partyUrl, {\n\t\t\tmethod: 'POST',\n\t\t\tbody: JSON.stringify({\n\t\t\t\tbody,\n\t\t\t\trequestId,\n\t\t\t\tname: 'ai.message',\n\t\t\t}),\n\t\t}).catch((e) => {\n\t\t\tconsole.error('Failed to send chunk to PartyKit:', e)\n\t\t})\n\t}\n}\n\nexport interface LlmProviderConfig {\n\tid: string\n\tname: string\n\ttype: string\n\toptions: LlmProviderConsumerConfig\n\tapiKey: string\n\tpartyUrlBase: string\n\tbaseUrl?: string\n\tdefaultModel?: string\n\tcreateChatCompletion: (\n\t\toptions: CreateChatCompletionOptions,\n\t) => Promise<AIOutput | null>\n}\n\nexport type LlmProviderConsumerConfig = Omit<\n\tPartial<LlmProviderConfig>,\n\t'options' | 'type'\n> & {\n\tapiKey: string\n\tpartyUrlBase: string\n\tbaseUrl?: string\n\tdefaultModel?: string\n}\n\nexport type CreateChatCompletionOptions = {\n\tmessages: CoreMessage[]\n\tchatId: string\n\tmodel: string\n}\n\nexport default function OpenAIProvider(\n\toptions: LlmProviderConsumerConfig,\n): LlmProviderConfig {\n\tconst client = createOpenAI({\n\t\tapiKey: options.apiKey,\n\t\t...(options.baseUrl && {\n\t\t\tbaseURL: options.baseUrl,\n\t\t}),\n\t})\n\n\treturn {\n\t\tid: 'openai',\n\t\tname: 'OpenAI',\n\t\ttype: 'llm',\n\t\toptions,\n\t\t...options,\n\t\tcreateChatCompletion: async (\n\t\t\tcreateChatOptions: CreateChatCompletionOptions,\n\t\t) => {\n\t\t\ttry {\n\t\t\t\tconst modelName =\n\t\t\t\t\tcreateChatOptions.model || options.defaultModel || 'gpt-4o'\n\n\t\t\t\t// Create PartyKit publisher with buffering behavior\n\t\t\t\tconst publisher = new PartyKitChunkPublisher(\n\t\t\t\t\tcreateChatOptions.chatId,\n\t\t\t\t\toptions.partyUrlBase,\n\t\t\t\t)\n\n\t\t\t\tconst result = await streamText({\n\t\t\t\t\tmodel: client(modelName),\n\t\t\t\t\tmessages: createChatOptions.messages,\n\t\t\t\t\tonChunk: async ({ chunk }) => {\n\t\t\t\t\t\tif (chunk.type === 'text-delta') {\n\t\t\t\t\t\t\t// Use the buffered 
publisher to maintain expected streaming behavior\n\t\t\t\t\t\t\tawait publisher.appendToBufferAndPublish(chunk.textDelta)\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\t\t// We need to consume the stream to make result.text resolve\n\t\t\t\t// Since we're already handling chunks in onChunk, we can consume textStream to completion\n\t\t\t\tlet fullText = ''\n\t\t\t\tfor await (const textPart of result.textStream) {\n\t\t\t\t\tfullText += textPart\n\t\t\t\t}\n\n\t\t\t\t// Wait for any remaining buffered content to be sent\n\t\t\t\tawait publisher.waitForBuffer()\n\n\t\t\t\t// Send completion signal using the expected format\n\t\t\t\tawait publisher.publishMessage(STREAM_COMPLETE)\n\n\t\t\t\treturn {\n\t\t\t\t\trole: 'assistant',\n\t\t\t\t\tcontent: fullText,\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error('OpenAI streaming error:', error)\n\t\t\t\tthrow error\n\t\t\t}\n\t\t},\n\t} as const\n}\n\nexport const MockOpenAIProvider: LlmProviderConfig = {\n\tid: 'mock-openai' as const,\n\tname: 'Mock OpenAI',\n\ttype: 'llm',\n\toptions: {\n\t\tapiKey: 'mock-api-key',\n\t\tpartyUrlBase: 'mock-callback-url',\n\t},\n\tapiKey: 'mock-api-key',\n\tpartyUrlBase: 'mock-callback-url',\n\tcreateChatCompletion: () => Promise.resolve(null),\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;;;;;;;oBAA6B;AAC7B,gBAA6C;AAItC,IAAMA,kBAAkB;AAL/B;AAWA,IAAMC,0BAAN,WAAMA;EASLC,YAAYC,WAAmBC,cAAsB;AARrDD;AACAE,oCAAW;AACXC;AAIAC;AAGC,SAAKJ,YAAYA;AACjB,SAAKG,SAAS;MACbE,UAAU;IACX;AACA,SAAKD,WAAW,GAAGH,YAAAA,UAAsBD,SAAAA;EAC1C;EAEA,MAAMM,eAAeC,SAAiB;AACrC,UAAM,KAAKC,eAAeD,SAAS,KAAKP,WAAW,KAAKI,QAAQ;EACjE;EAEA,MAAMK,yBAAyBC,MAAc;AAC5C,QAAIC,UAAU,wBAACC,SAAAA;IAAgB,GAAjB;AACd,SAAKT,OAAOE,YAAYK;AAExB,QAAI,KAAKP,OAAOU,QAAQ;AAEvB;IACD;AAEA,SAAKV,OAAOU,SAAS,IAAIC,QAAQ,CAACC,MAAAA;AACjCJ,gBAAUI;IACX,CAAA;AAEAC,eAAW,MAAA;AACV,UAAI,KAAKb,OAAOE,SAASY,WAAW,GAAG;AACtCN,gBAAAA;AACA;MACD;AACA,WAAKH,eAAe,KAAKL,OAAOE,UAAU,KAAKL,WAAW,KAAKI,QAAQ;AACvEO,cAAAA;AACA,WAAKR,SAAS;QACbE,UAAU;MACX;IACD,GAAG,KAAKH,QAAQ;EACjB;EAEA,MAAMgB,gBAAgB;AACrB,UAAM,KAAKf,OAAOU;EACnB;EAEA,MAAcL,eACbW,MACAnB,WACAI,UACC;AACD,WAAO,MAAMgB,MAAMhB,UAAU;MAC5BiB,QAAQ;MACRF,MAAMG,KAAKC,UAAU;QACpBJ;QACAnB;QACAwB,MAAM;MACP,CAAA;IACD,CAAA,EAAGC,MAAM,CAACC,MAAAA;AACTC,cAAQC,MAAM,qCAAqCF,CAAAA;IACpD,CAAA;EACD;AACD,GAnEM5B,sCAAN;AAmGe,SAAf,eACC+B,SAAkC;AAElC,QAAMC,aAASC,4BAAa;IAC3BC,QAAQH,QAAQG;IAChB,GAAIH,QAAQI,WAAW;MACtBC,SAASL,QAAQI;IAClB;EACD,CAAA;AAEA,SAAO;IACNE,IAAI;IACJX,MAAM;IACNY,MAAM;IACNP;IACA,GAAGA;IACHQ,sBAAsB,OACrBC,sBAAAA;AAEA,UAAI;AACH,cAAMC,YACLD,kBAAkBE,SAASX,QAAQY,gBAAgB;AAGpD,cAAMC,YAAY,IAAI5C,uBACrBwC,kBAAkBK,QAClBd,QAAQ5B,YAAY;AAGrB,cAAM2C,SAAS,UAAMC,sBAAW;UAC/BL,OAAOV,OAAOS,SAAAA;UACdO,UAAUR,kBAAkBQ;UAC5BC,SAAS,OAAO,EAAEC,MAAK,MAAE;AACxB,gBAAIA,MAAMZ,SAAS,cAAc;AAEhC,oBAAMM,UAAUjC,yBAAyBuC,MAAMC,SAAS;YACzD;UACD;QACD,CAAA;AAIA,YAAIC,WAAW;AACf,yBAAiBC,YAAYP,OAAOQ,YAAY;AAC/CF,sBAAYC;QACb;AAGA,cAAMT,UAAUxB,cAAa;AAG7B,cAAMwB,UAAUpC,eAAeT,eAAAA;AAE/B,eAAO;UACNwD,MAAM;UACNC,SAASJ;QACV;MACD,SAAStB,OAAO;AACfD,gBAAQC,MAAM,2BAA2BA,KAAAA;AACzC,cAAMA;MACP;IACD;EACD;AACD;AA/DwB2B;AAiEjB,IAAMC,qBAAwC;EACpDrB,IAAI;EACJX,MAAM;EACNY,MAAM;EACNP,SAAS;IACRG,QAAQ;IACR/B,cAAc;EACf;EACA+B,QAAQ;EACR/B,cAAc;EACdoC,sBAAsB,MAAMvB,QAAQH,QAAQ,IAAA;AAC7C;","names":["STREAM_COMPLETE","PartyKitChunkPublisher","constructor","requestId","partyUrlBase","interval","buffer","partyUrl","contents","publishMessage","message","sendToPartyKit","appendToBufferAndPublish","text","resolve","_val","signal","Promise","r","setTimeout","length","waitForBuffer","body","fetch","method","JSON"
,"stringify","name","catch","e","console","error","options","client","createOpenAI","apiKey","baseUrl","baseURL","id","type","createChatCompletion","createChatOptions","modelName","model","defaultModel","publisher","chatId","result","streamText","messages","onChunk","chunk","textDelta","fullText","textPart","textStream","role","content","OpenAIProvider","MockOpenAIProvider"]}