UNPKG

deep-research

Version:

Open source deep research TS/JS library with built in web search, reasoning, and bibliography generation

167 lines (132 loc) 22.5 kB
"use strict";var U=Object.defineProperty;var g=(t,e)=>U(t,"name",{value:e,configurable:!0});var N=require("@openrouter/ai-sdk-provider"),v=require("ai"),_=require("zod"),C=require("ts-retry"),G=require("jigsawstack");const P=class P{constructor({OPENAI_API_KEY:e,DEEPINFRA_API_KEY:r,defaultModel:o,reasoningModel:n,outputModel:a,OPENROUTER_API_KEY:s}){this.providers=new Map,this.models={default:o||N.createOpenRouter({apiKey:s}).languageModel("moonshotai/kimi-k2-0905"),reasoning:n||N.createOpenRouter({apiKey:s}).languageModel("x-ai/grok-4.1-fast",{reasoning:{effort:"medium"}}),output:a||N.createOpenRouter({apiKey:s}).languageModel("moonshotai/kimi-k2-0905")}}static getInstance({OPENAI_API_KEY:e,DEEPINFRA_API_KEY:r,OPENROUTER_API_KEY:o,defaultModel:n,reasoningModel:a,outputModel:s}){return P.instance||(P.instance=new P({OPENAI_API_KEY:e,DEEPINFRA_API_KEY:r,OPENROUTER_API_KEY:o,defaultModel:n,reasoningModel:a,outputModel:s})),P.instance}getModel(e){return this.models[e]}getProvider(e){return this.providers.get(e)}};g(P,"AIProvider");let j=P;const R={max_depth:3,max_breadth:3,max_output_tokens:32e3,logging:{enabled:!1}},Y=g(({prompt:t,queries:e,research_sources:r})=>` You are a world-class context generator. Your task is to generate a context overview for the following queries and sources that relates to the main prompt: Extract all the information from the sources that is relevant to the main prompt. Main Prompt: ${t} Sub-Queries and Sources: ${e?.map(o=>{const n=r?.filter(a=>a.url&&a.url.length>0);return n&&n.length>0?`**${o}** ${n.map(a=>` [${a.reference_number}] ${a.title||"No title"} (${a.url}) Content and Snippets: ${a.content?a.content:a.snippets?.join(` `)}`).join(` `)}`:`**${o}** (No sources found)`}).join(` `)} `.trim(),"CONTEXT_GENERATION_PROMPT"),K=g(({prompt:t,reasoning:e,queries:r,sources:o,config:n})=>{const a=`You are a world-class research planner. Your primary goal is to construct a comprehensive research plan and a set of effective search queries to thoroughly investigate the given prompt. INSTRUCTIONS: 1. A Detailed Research Plan: - Clearly outline the overall research strategy and methodology you propose. - Identify key areas, themes, or sub-topics that need to be investigated to ensure comprehensive coverage of the prompt. - Suggest the types of information, data, or sources (e.g., academic papers, official reports, news articles, expert opinions) that would be most valuable for this research. - The plan should be logical, actionable, and designed for efficient information gathering. 2. A List of Focused Search Queries: - Generate a list of specific and targeted search queries. - These queries should be optimized to yield relevant, high-quality, and diverse search results from search engines. - The set of queries should collectively aim to cover the main aspects identified in your research plan. - Ensure queries are distinct and avoid redundancy. 3. Generate how deep the research should be: - Generate a number to determine how deep the research should be to fully explore this prompt 4. Generate how broad the research should be: - Generate a number to determine how broad the research should be to fully explore this prompt Output in the given JSON schema. `.trim(),s=` ${e?`Reasoning: ${e}`:""} ${r?` Sub-Queries and Sources previously generated: ${r.map(l=>{const h=o?.find(c=>c.query===l);return h&&h.search_results.results.length>0?`**${l}** ${h.search_results.results.map(c=>` [${c.reference_number}] ${c.title||"No title"} (${c.url}) Content and Snippets: ${c.content?c.content:c.snippets?.join(` `)}`).join(` `)}`:`**${l}** (No sources found)`}).join(` `)}`:""} User Prompt: ${t} `.trim(),i=_.z.object({subQueries:_.z.array(_.z.string()).min(1).max(n.max_breadth).describe("An array of high-quality, non-redundant search queries (min 1, max N) that together provide comprehensive research coverage for the user prompt"),researchPlan:_.z.string().describe("A detailed plan explaining the research approach and methodology"),depth:_.z.number().min(1).max(n.max_depth).describe("A number representing the depth of the research"),breadth:_.z.number().min(1).max(n.max_breadth).describe("A number representing the breadth of the research")});return{system:a,user:s,schema:i}},"RESEARCH_PROMPT_TEMPLATE"),z=g(({prompt:t,reasoning:e,queries:r,sources:o,researchPlan:n})=>{const a=` You are a world-class analyst. Your primary purpose is to help decide if the data provided is sufficient to complete the given prompt. Current datetime is: ${new Date().toISOString()} INSTRUCTIONS: - If the reasoning is sufficient to answer the prompt set "isComplete" to true. - In either case, provide a brief explanation in "reason" describing your judgement. Response in the given JSON schema. `.trim(),s=` Research Plan: "${n}" Sub-Queries and Sources previously generated: ${r.map(l=>{const h=o?.find(c=>c.query===l);return h&&h.search_results.results.length>0?`**${l}** ${h.search_results.results.map(c=>` [${c.reference_number}] ${c.title||"No title"} (${c.url}) Content and Snippets: ${c.content?c.content:c.snippets?.join(` `)}`).join(` `)}`:`**${l}** (No sources found)`}).join(` `)} Reasoning generated previously: "${e}" Prompt: "${t}" `.trim(),i=_.z.object({isComplete:_.z.boolean().describe("If the reasoning is sufficient to answer the main prompt set to true."),reason:_.z.string().describe("The reason for the decision")});return{system:a,user:s,schema:i}},"DECISION_MAKING_PROMPT"),F=g(({prompt:t,researchPlan:e,queries:r,sources:o})=>{const n="".trim(),a=` Proposed research plan: "${e}" Context for each query: ${r?.map(s=>{const i=o?.find(l=>l.query===s);return i?`**Query: ${s}** Context: ${i.context}`:`**Query: ${s}** Context: No context found`}).join(` `)} Prompt: "${t}" `.trim();return{system:n,user:a}},"REASONING_SEARCH_RESULTS_PROMPT"),D=g(({prompt:t,sources:e,targetOutputTokens:r,researchPlan:o,reasoning:n,queries:a,phase:s,currentReport:i})=>{const l=r?r*3:void 0,h=l?Math.max(l-i.length,0):void 0,c=l?i.length>=l:void 0,u=` You are a world-class analyst. Your primary purpose is to help users answer their prompt. GENERAL GUIDELINES: - If you are about to reach your output token limit, ensure you properly close all JSON objects and strings to prevent parsing errors. - Only use the sources provided in the context. - Cite every factual claim or statistic with in-text references using the reference numbers by the sources provided (e.g. "[1]"). - **Never repeat a heading that is already present in the Existing Draft.** - When writing mathematical equations, always use single dollar sign syntax ($...$) for inline equations and double dollar signs ($$...$$) for block equations. Do not use (...) or [...] delimiters. INSTRUCTIONS: - generate in the - Make sure your report is addressing the prompt. - Make sure your report is comprehensive and covers all the sub-topics. - Make sure your report is well-researched and well-cited. - Make sure your report is well-written and well-structured. - Make sure your report is well-organized and well-formatted. `;let d="";switch(s){case"initial":d=` Do not generate a reference or conclusion section. Return phase as "continuation" `;break;case"continuation":c===!1?d=` Generate a continuation of the report. No need to include the initial report. ${h?`You still need \u2248${h.toLocaleString()} characters.`:""} Do not generate a reference or conclusion section. Return phase as "continuation" `:d=` - This is your FINAL response for this question. - If the provided sources are insufficient, give your best definitive answer. - YOU MUST conclude your answer now, regardless of whether you feel it's complete. Return phase as "done" `;break}const w=` ${r?`Target length: \u2248 ${(r*3).toLocaleString()} characters (${r} tokens \xD73)`:""} CONTEXT: Latest Research Plan: ${o} Latest Reasoning Snapshot: ${n} Sub-Queries and Sources: ${a?.map(b=>{const y=e?.find(f=>f.query===b);return y&&y.search_results.results.length>0?`**${b}** ${y.search_results.results.map(f=>` [${f.reference_number}] ${f.title||"No title"} (${f.url}) Content and Snippets: ${f.content?f.content:f.snippets?.join(` `)}`).join(` `)}`:`**${b}** (No sources found)`}).join(` `)} ${i?`Current Draft: ${i}`:""} ${d} Prompt: "${t}" `.trim(),m=_.z.object({text:_.z.string().describe("The final report"),phase:_.z.enum(["initial","continuation","done"]).describe("The phase of the report")});return{system:u,user:w,schema:m}},"FINAL_REPORT_PROMPT"),E={research:K,reasoningSearchResults:F,decisionMaking:z,finalReport:D,contextGeneration:Y},$=class ${constructor(){this._enabled=!1}static getInstance(){return $._instance||($._instance=new $),$._instance}setEnabled(e){this._enabled=e}log(...e){this._enabled&&console.log(...e)}error(...e){console.error(...e)}warn(...e){this._enabled&&console.warn(...e)}info(...e){this._enabled&&console.info(...e)}};g($,"Logger");let k=$;const p=k.getInstance(),Q=g(async({reasoning:t,prompt:e,aiProvider:r,queries:o,sources:n,researchPlan:a})=>{const s=E.decisionMaking({reasoning:t,prompt:e,queries:o,sources:n,researchPlan:a}),i=await v.generateObject({model:r.getModel("default"),output:"object",system:s.system,prompt:s.user,schema:s.schema,maxRetries:3});return{decision:i,usage:i.usage}},"decisionMaking"),W=g(async({prompt:t,researchPlan:e,sources:r,queries:o,aiProvider:n})=>{try{const a=E.reasoningSearchResults({prompt:t,researchPlan:e,sources:r,queries:o});p.log("REASONING WITH",a);const s=await v.generateText({model:n.getModel("reasoning"),prompt:a.user});if(s.reasoning)return{reasoning:s.reasoning,usage:s.usage};const i=s.text.match(/<think>([\s\S]*?)<\/think>|<thinking>([\s\S]*?)<\/thinking>/);return i?{reasoning:i[1]||i[2],usage:s.usage}:{reasoning:s.text,usage:s.usage}}catch(a){throw p.error("Fatal error in reasoningSearchResults:",a.message||a),p.error(" Error details:",a),new Error(`reasoning evaluation failed: ${a.message||"Unknown error"}`)}},"reasoningSearchResults"),J=g(async({report:t,sources:e})=>{const r=new Map;e.forEach(i=>{i.search_results&&Array.isArray(i.search_results.results)&&i.search_results.results.forEach(l=>{l.reference_number&&r.set(l.reference_number,l)})}),p.log(`Reference map size: ${r.size}`);const o=/\[(\d+(?:\s*,\s*\d+)*)\]/g,n=t.replace(o,(i,l)=>{const h=l.split(",").map(c=>parseInt(c.trim(),10));if(h.length===1){const c=h[0],u=r.get(c);return u?`[[${c}](${u.url})]`:(p.log(`No source found for citation [${c}]`),i)}else return`[${h.map(u=>{const d=r.get(u);return d?`[${u}](${d.url})`:(p.log(`No source found for citation part ${u}`),`${u}`)}).join(", ")}]`});let a=` ## References `;const s=Array.from(r.entries()).sort((i,l)=>i[0]-l[0]);return p.log(`Generating bibliography with ${s.length} entries`),s.forEach(([i,l])=>{const h=l.title||"No title";a+=`${i}. [${h}](${l.url}) `}),{reportWithSources:n,bibliography:a}},"processReportForSources"),L=g(async({sources:t,prompt:e,maxOutputTokens:r,targetOutputTokens:o,aiProvider:n,reasoning:a,researchPlan:s,queries:i})=>{let l="",h=0,c="initial",u=0;do{p.log(`[Iteration ${h}] phase=${c}`);const m=E.finalReport({currentReport:l,prompt:e,sources:t,targetOutputTokens:o,researchPlan:s,reasoning:a,queries:i,phase:c});p.log(` [Iteration ${h}] phase=${c}`),p.log(`SYSTEM PROMPT: `+m.system),p.log(`USER PROMPT: `+m.user);const b=await v.generateObject({model:n.getModel("output"),system:m.system,prompt:m.user,schema:m.schema,experimental_repairText:g(async({text:y,error:f})=>f&&f.message&&f.message.includes("Unterminated string")?y+'"}':y,"experimental_repairText")});if(c=b.object.phase,l+=b.object.text,p.log(`PHASE==============================: `+b.object.phase),p.log(`MODEL OUTPUT: `+b.object.text),c==="continuation"){const y=o?o*4:void 0;y&&l.length>=y&&(c="done"),r&&l.length>=r/3-2e3&&(c="done")}h++,u+=b.usage.totalTokens}while(c!=="done");const{reportWithSources:d,bibliography:w}=await J({report:l,sources:t});return p.log("Done processing report for sources"),{report:d,bibliography:w,tokenUsage:u}},"generateFinalReport"),H=g(async({aiProvider:t,prompt:e,reasoning:r,queries:o,sources:n,config:a})=>{try{const s=E.research({prompt:e,reasoning:r,queries:o,sources:n,config:a}),i=await v.generateObject({model:t.getModel("default"),maxRetries:3,system:s.system,prompt:s.user,experimental_repairText:g(async({text:l,error:h})=>{console.log("Error with structured output while generating research plan",h);const c=await v.generateObject({model:t.getModel("default"),schema:s.schema,system:"Fix the json error and dont change anything else",prompt:l});return c?JSON.stringify(c.object):null},"experimental_repairText"),schema:s.schema,mode:"json"});return p.log("Research Prompts",E.research({prompt:e,reasoning:r,queries:o,sources:n,config:a})),{subQueries:i.object.subQueries,researchPlan:i.object.researchPlan,depth:i.object.depth,breadth:i.object.breadth,tokenUsage:i.usage}}catch(s){throw p.error(`Error generating research plan: ${s.message||s}`),new Error(`Research evaluation failed: ${s.message||"Unknown error"}`)}},"generateResearchPlan"),T=class T{static cleanContent(e){const r=e.content?this.contentPipeline(e.content):void 0,o=e.snippets?e.snippets.map(n=>this.contentPipeline(n)):void 0;return{...e,content:r,snippets:o}}static contentPipeline(e){return this.contentSteps.reduce((r,o)=>o(r),e)}};g(T,"ContentCleaner");let I=T;I.contentSteps=[t=>t.replace(/<[^>]*>/g," "),t=>t.replace(/\.[A-Za-z][\w-]*\s*\{[^}]*\}/g,""),t=>t.replace(/\.(MJX|mjx)[-\w]*\s*\{[^}]*\}/g,""),t=>t.replace(/@font-face\s*\{[^}]*\}/g,""),t=>t.replace(/\b(display|position|font-family|src|font-weight|font-style|margin|padding|border|width|height|min-width|max-width|text-align|line-height|box-sizing):[^;}]*(;|$)/g,""),t=>t.replace(/\w+(\.\w+)*\s*\{[^{}]*\}/g,""),t=>t.replace(/url\([^)]*\)/g,""),t=>t.replace(/\.mjx-chtml\s*\{[^}]*\}/g,""),t=>t.replace(/\.mjx-[-\w]+/g,""),t=>t.replace(/\s+/g," "),t=>t.replace(/[^\w\s.,!?;:()"'-]/g," "),t=>t.replace(/[""]/g,'"').replace(/['']/g,"'"),t=>t.replace(/(\d+)([a-zA-Z])/g,"$1 $2").replace(/([a-zA-Z])(\d+)/g,"$1 $2").replace(/\.{3,}/g,"...").replace(/\s*-\s*/g," - "),t=>t.replace(/https?:\/\/\S+/g,""),t=>t.replace(/([.!?])\s*([A-Z])/g,"$1 $2"),t=>t.replace(/l\.mjx-chtml/g,""),t=>t.replace(/X\.mjx-chtml/g,""),t=>t.replace(/format\(\'woff\'\)/g,""),t=>t.replace(/format\(\'opentype\'\)/g,""),t=>t.replace(/\{\s*\}/g,""),t=>t.replace(/\s{2,}/g," "),t=>t.trim(),t=>{const e=t.slice(-1);return!".,!?".includes(e)&&t.length>0?t+".":t}];const X=g(({sources:t})=>{const e=new Map;return t.map(r=>({...r,search_results:{results:r.search_results.results.filter(o=>e.has(o.url)?!1:(e.set(o.url,!0),!0))}}))},"deduplicateSearchResults"),Z=g(({sources:t})=>{const e=new Map;let r=1;return t.map(o=>({...o,search_results:{results:o.search_results.results.map(n=>(e.has(n.url)||e.set(n.url,r++),{...n,reference_number:e.get(n.url)||0}))}}))},"mapSearchResultsToNumbers"),x=class x{constructor({apiKey:e}){this.jigsawInstance=G.JigsawStack({apiKey:e||process.env.JIGSAW_API_KEY})}static getInstance({apiKey:e}){return x.instance||(x.instance=new x({apiKey:e})),x.instance}};g(x,"JigsawProvider");let M=x;const S=class S{constructor(e){this.jigsaw=null,this.customSearchFunction=null,e.web_search&&(this.customSearchFunction=e.web_search),e.JIGSAW_API_KEY&&(this.jigsaw=M.getInstance({apiKey:e.JIGSAW_API_KEY}))}static getInstance(e){return S.instance||(S.instance=new S(e)),S.instance}async fireWebSearches(e){const r=e.map(async o=>{try{if(this.customSearchFunction)return await this.customSearchFunction(o);if(this.jigsaw){const n=await C.retryAsync(async()=>await this.jigsaw.jigsawInstance.web.search({query:o,ai_overview:!1,max_results:3}),{delay:C.createExponetialDelay(2e3),maxTry:3,onError:g((s,i)=>(console.warn(`API request failed (attempt ${i}/3):`,s.message),!0),"onError")});if(!n||!n.results)throw console.error("Invalid response structure:",n),new Error("Invalid search response structure");const a=n.results.slice(0,3).map(s=>{const i={...s,content:typeof s.content=="string"?s.content:s.content?.text||""};return{...I.cleanContent(i)}}).filter(s=>s.content&&s.content.length>0||s.snippets&&s.snippets.length>0);return{...n,search_results:{results:a}}}throw new Error("No search method available")}catch(n){return console.error("Full error details:",n),{query:o,search_results:{results:[]}}}});return Promise.all(r)}async searchAndGenerateContext({queries:e,prompt:r,aiProvider:o,sources:n}){const s=(await this.fireWebSearches(e)).filter(u=>u.search_results&&u.search_results.results&&u.search_results.results.length>0);if(s.length===0)return console.warn("No search results found for any query"),[];const i=s.map(u=>u.query),l=await this.contextGenerator({queries:i,sources:s,prompt:r,aiProvider:o}),h=s.map((u,d)=>{const w=u.search_results.results.filter(m=>m.content&&m.content.trim()!==""||m.snippets&&m.snippets.length>0);return w.length===0?null:{query:u.query,search_results:{results:w},context:l[d]||"",geo_results:u.geo_results,image_urls:u.image_urls,links:u.links}}).filter(u=>u!==null);return X({sources:[...n,...h]})}async contextGenerator({queries:e,sources:r,prompt:o,aiProvider:n}){try{return await Promise.all(e.map(async s=>{const l=(r.find(u=>u.query===s)?.search_results.results||[]).map(u=>!u.content||u.content.trim()===""?u.snippets&&u.snippets.length>0?{...u,content:u.snippets.join(` `)}:null:u).filter(u=>u!==null);let h=E.contextGeneration({prompt:o,queries:[s],research_sources:l});return console.log(`${s} Prompt to Model: ${h.length} characters`),(await v.generateText({model:n.getModel("default"),providerOptions:{openai:{reasoning_effort:"minimal"}},prompt:h,maxRetries:3})).text}))}catch(a){return console.error("Error generating context overview:",a),"Error generating context overview."}}};g(S,"WebSearchProvider");let O=S;const q=class q{constructor(e){this.prompt="",this.finalReport="",this.tokenUsage={research_tokens:0,reasoning_tokens:0,report_tokens:0,decision_tokens:0,total_tokens:0},this.researchPlan="",this.reasoning="",this.decision={isComplete:!1,reason:""},this.logger=k.getInstance(),this.queries=[],this.sources=[],this.config=this.validateConfig(e),this.config.logging&&this.config.logging.enabled!==void 0&&this.logger.setEnabled(this.config.logging.enabled),this.webSearchProvider=O.getInstance(this.config),this.aiProvider=j.getInstance({OPENAI_API_KEY:this.config.OPENAI_API_KEY,OPENROUTER_API_KEY:this.config.OPENROUTER_API_KEY,defaultModel:this.config.models?.default,reasoningModel:this.config.models?.reasoning,outputModel:this.config.models?.output})}validateConfig(e){if(e.max_output_tokens&&e.target_output_tokens&&e.max_output_tokens<e.target_output_tokens)throw new Error("maxOutputChars must be greater than targetOutputChars");return{config:{...R,...e||{}},max_output_tokens:e.max_output_tokens||R.max_output_tokens,target_output_tokens:e.target_output_tokens,max_depth:e.max_depth||R.max_depth,max_breadth:e.max_breadth||R.max_breadth,JIGSAW_API_KEY:e.JIGSAW_API_KEY||process.env.JIGSAW_API_KEY||(e.web_search?null:(()=>{throw new Error("JIGSAW_API_KEY must be provided in config")})()),OPENAI_API_KEY:e.OPENAI_API_KEY||process.env.OPENAI_API_KEY||(e.models?.default&&e.models?.output?null:(()=>{throw new Error("Either OPENAI_API_KEY or models.default and models.output must be provided in config")})()),OPENROUTER_API_KEY:e.OPENROUTER_API_KEY||process.env.OPENROUTER_API_KEY||(e.models?.reasoning?null:(()=>{throw new Error("OpenRouter API key must be provided in config")})()),logging:{...R.logging,...e.logging||{}},models:{...e.models||{}},web_search:e.web_search}}async generate(e){p.log(`Running research with prompt: ${e}`),this.prompt=e;let r=0;do{r++,p.log(`[Step 1] Generating research plan... at ${r}`);const{subQueries:s,researchPlan:i,depth:l,breadth:h,tokenUsage:c}=await H({aiProvider:this.aiProvider,prompt:this.prompt,reasoning:this.reasoning,queries:this.queries,sources:this.sources,config:this.config});this.queries=[...this.queries||[],...s],this.researchPlan=i,this.config.max_depth=l,this.config.max_breadth=h,this.tokenUsage.research_tokens=c.totalTokens,p.log(`Research plan: ${this.researchPlan}`),p.log(`Research queries: ${this.queries.join(` `)}`),p.log(`Research depth and breadth: ${this.config.max_depth} ${this.config.max_breadth}`),p.log(`[Step 2] Running initial web searches with ${this.queries.length} queries...`);const u=await this.webSearchProvider.searchAndGenerateContext({queries:this.queries,prompt:this.prompt,aiProvider:this.aiProvider,sources:this.sources});this.sources=u,p.log("[Step 3] Reasoning about the search results...");const d=await W({prompt:this.prompt,researchPlan:this.researchPlan,sources:this.sources,queries:this.queries,aiProvider:this.aiProvider});this.reasoning=d.reasoning,this.tokenUsage.reasoning_tokens=d.usage.totalTokens,p.log(`Reasoning: ${d}`),p.log("[Step 4] Decision making...");const{decision:w,usage:m}=await Q({reasoning:this.reasoning,prompt:this.prompt,queries:this.queries,sources:this.sources,researchPlan:this.researchPlan,aiProvider:this.aiProvider});this.decision=w.object,this.tokenUsage.decision_tokens=m.totalTokens,p.log(`Decision making: ${this.decision.isComplete} ${this.decision.reason}`)}while(!this.decision.isComplete&&r<this.config.max_depth);this.sources=Z({sources:this.sources}),p.log("[Step 5] Generating report...");const{report:o,bibliography:n,tokenUsage:a}=await L({sources:this.sources,prompt:this.prompt,targetOutputTokens:this.config.target_output_tokens,maxOutputTokens:this.config.max_output_tokens,aiProvider:this.aiProvider,reasoning:this.reasoning,researchPlan:this.researchPlan,queries:this.queries});return this.tokenUsage.report_tokens=a,this.tokenUsage.total_tokens=this.tokenUsage.research_tokens+this.tokenUsage.reasoning_tokens+this.tokenUsage.decision_tokens+this.tokenUsage.report_tokens,{status:"success",data:{text:o,bibliography:n,metadata:{prompt:this.prompt,reasoning:this.reasoning,research_plan:this.researchPlan,queries:this.queries,sources:this.sources}},_usage:this.tokenUsage}}};g(q,"DeepResearch");let A=q;const B=g(t=>new A(t),"createDeepResearch");exports.DeepResearch=A,exports.createDeepResearch=B;