/**
 * document-outline-extractor
 * Extract structured outlines from documents with optional AI enhancement.
 * This module provides the OpenAI chat-completion client used for outline generation.
 */
import {
ChatMessage,
ChatCompletionOptions,
ChatCompletionResponse,
OpenAIConfig
} from './types';
export class OpenAIClient {
  /**
   * Normalized configuration: every field except `apiVersion` is guaranteed
   * present after the constructor applies defaults. `apiVersion` stays
   * optional — it is only appended to the URL when set (Azure-style endpoints).
   */
  private config: Required<Omit<OpenAIConfig, 'apiVersion'>> & { apiVersion?: string };

  /**
   * @param config Connection settings. Defaults applied when omitted:
   *   model 'gpt-4o-mini', temperature 0.3, maxTokens 2000.
   *   A trailing slash on `baseUrl` is stripped so URL building is predictable.
   */
  constructor(config: OpenAIConfig) {
    this.config = {
      model: 'gpt-4o-mini',
      temperature: 0.3,
      maxTokens: 2000,
      ...config,
      baseUrl: config.baseUrl.replace(/\/$/, ''),
      apiKey: config.apiKey
    };
  }

  /**
   * Generate an outline using OpenAI.
   *
   * Appends a JSON-structure instruction to the system prompt and requests a
   * JSON-object response. On success, the model output is parsed and
   * re-serialized with 2-space indentation; if the output is not valid JSON,
   * the raw content is returned unchanged as a fallback. Any transport or API
   * error is logged and yields `null` — this method never throws.
   *
   * @param systemPrompt Base system instruction (JSON format note is appended).
   * @param content      Document text to outline, sent as the user message.
   * @param options      Optional per-call overrides for sampling/limits.
   * @returns Pretty-printed JSON outline, raw model text on parse failure,
   *          or `null` on empty response / request error.
   */
  async generateOutline(
    systemPrompt: string,
    content: string,
    options?: { temperature?: number; maxTokens?: number; maxCompletionTokens?: number }
  ): Promise<string | null> {
    try {
      const response = await this.chatCompletion({
        messages: [
          { role: 'system', content: systemPrompt + '\n\nIMPORTANT: Return the response as a valid JSON object with the following structure:\n{\n "outline": [\n {\n "level": 1,\n "title": "Main Title",\n "children": [\n {\n "level": 2,\n "title": "Subtopic",\n "children": []\n }\n ]\n }\n ]\n}' },
          { role: 'user', content: content }
        ],
        responseFormat: { type: 'json_object' },
        // Only forward overrides the caller actually supplied, so
        // chatCompletion's own defaulting (?? this.config.*) still applies.
        ...(options?.temperature !== undefined ? { temperature: options.temperature } : {}),
        ...(options?.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),
        ...(options?.maxCompletionTokens !== undefined ? { maxCompletionTokens: options.maxCompletionTokens } : {})
      });
      const rawContent = this.extractContent(response);
      if (!rawContent) return null;
      // Parse and re-serialize to validate and normalize formatting.
      try {
        const jsonResponse = JSON.parse(rawContent);
        return JSON.stringify(jsonResponse, null, 2);
      } catch (parseError) {
        console.error('Failed to parse JSON response:', parseError);
        return rawContent; // Fallback to original content
      }
    } catch (error) {
      console.error('OpenAI outline generation error:', error);
      return null;
    }
  }

  /**
   * Perform a chat completion request against the configured endpoint.
   *
   * @param options Request options; `messages` is required at runtime.
   * @returns Parsed JSON body of the API response.
   * @throws Error when `messages` is missing/empty or the API responds non-2xx
   *         (message includes status and up to 500 chars of the error body).
   */
  async chatCompletion(options: Partial<ChatCompletionOptions>): Promise<ChatCompletionResponse> {
    const { messages } = options;
    if (!messages || messages.length === 0) {
      // Fail fast with a clear message instead of hiding misuse behind a
      // non-null assertion and a confusing upstream API error.
      throw new Error('chatCompletion requires a non-empty messages array');
    }
    const url = this.buildUrl();
    const body: Record<string, unknown> = {
      model: this.config.model,
      messages,
      temperature: options.temperature ?? this.config.temperature,
      ...(options.responseFormat ? { response_format: options.responseFormat } : {}),
      ...(options.reasoningEffort ? { reasoning_effort: options.reasoningEffort } : {})
    };
    // Newer models take max_completion_tokens; prefer it when both are provided,
    // otherwise fall back to max_tokens from the call or the client default.
    if (options.maxCompletionTokens !== undefined) {
      body.max_completion_tokens = options.maxCompletionTokens;
    } else if (options.maxTokens !== undefined || this.config.maxTokens !== undefined) {
      body.max_tokens = options.maxTokens ?? this.config.maxTokens;
    }
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.config.apiKey}`
      },
      body: JSON.stringify(body)
    });
    if (!response.ok) {
      const errorText = await this.safeReadText(response);
      throw new Error(`OpenAI API error: ${response.status} ${errorText.slice(0, 500)}`);
    }
    return await response.json();
  }

  /**
   * Extract the trimmed assistant content from the first choice.
   * Returns `null` when the response has no choices, no message content,
   * or the content is empty after trimming.
   */
  extractContent(response: ChatCompletionResponse): string | null {
    return response?.choices?.[0]?.message?.content?.trim() || null;
  }

  /** Build the chat-completions URL, URL-encoding apiVersion when present. */
  private buildUrl(): string {
    const base = `${this.config.baseUrl}/chat/completions`;
    if (this.config.apiVersion) {
      return `${base}?api-version=${encodeURIComponent(this.config.apiVersion)}`;
    }
    return base;
  }

  /** Read the response body as text, swallowing read errors (best-effort diagnostics). */
  private async safeReadText(response: Response): Promise<string> {
    try {
      return await response.text();
    } catch {
      return '';
    }
  }
}