/**
 * @metricinsights/concierge
 * Metric Insights Concierge Component — type declarations (index.d.ts).
 */
import { ContentParser } from './parsers';
import { ParserResult } from './parsers/types';
/**
 * Shape of the authentication token payload held and refreshed by {@link ConciergeApi}.
 */
export interface TokenResponse {
  /** The token value sent with API requests. */
  access_token: string;
  /**
   * The type of token.
   *
   * - `legacy` — can be obtained via `/api/get_token`
   * - `access-token` — can be created with the UI on the instance via the `/api-token` path
   */
  token_type: 'legacy' | 'access-token';
  /**
   * The time to live of the token, in seconds.
   *
   * The token will refresh automatically when it reaches 50% of the ttl.
   *
   * 0 means the token never expires.
   *
   * @default 300
   */
  ttl?: number;
}
/**
 * Construction options for {@link ConciergeApi}. All fields are optional;
 * the documented defaults apply when a field is omitted.
 */
export interface ConciergeApiOptions {
  /**
   * The headers to be sent to the API.
   *
   * @default { 'Content-Type': 'application/json', 'Accept': 'application/json' }
   */
  headers?: Record<string, string>;
  /**
   * Whether to use streaming for the API.
   *
   * @default true
   */
  useStreaming?: boolean;
  /**
   * The content parser to be used for the API responses.
   *
   * @default new ContentParser()
   */
  contentParser?: ContentParser;
}
/**
 * A single chunk yielded by the async iterator of
 * {@link ConciergeApi.makeStreamingRequestXHR}.
 *
 * NOTE(review): the first four fields are typed `any`. Judging by the
 * async-iterator element type of `makeStreamingRequest` in this same file,
 * `data`/`raw` are presumably strings and `conversation_ts`/`message_id`
 * are `string | null` — confirm against the implementation before
 * tightening these types (doing so would narrow the public contract).
 */
export interface StreamingChunk {
  /** Content carried by this chunk. */
  data: any;
  /** Timestamp identifying the conversation this chunk belongs to. */
  conversation_ts: any;
  /** Identifier of the message this chunk is part of. */
  message_id: any;
  /** The chunk payload as received, before parsing. */
  raw: any;
  /** Parsed content blocks (markdown, latex, html), when present. */
  blocks?: ParserResult[];
}
/**
 * Client for the Metric Insights Concierge API.
 *
 * Manages a {@link TokenResponse} (acquisition via {@link ConciergeApi.getToken}
 * and internal refresh), and issues both plain and streaming requests whose
 * responses are parsed into {@link ParserResult} blocks by the
 * {@link ConciergeApi.contentParser}.
 */
export declare class ConciergeApi {
  /** Base URL that API requests are issued against. */
  private baseUrl;
  /** Headers sent with API requests (see {@link ConciergeApiOptions.headers}). */
  private headers;
  /** Backing store for the protected `token` accessor pair. */
  private _token;
  // presumably the timer handle driving automatic token refresh
  // (TokenResponse.ttl documents refresh at 50% of ttl) — confirm in implementation
  private tokenRefreshInterval;
  /** Whether streaming is used (see {@link ConciergeApiOptions.useStreaming}). */
  private useStreaming;
  /** Timestamp tying consecutive requests to one conversation; cleared by {@link resetConversationTimestamp}. */
  private conversationTimestamp;
  /** Parser that splits responses into content blocks (see {@link ConciergeApiOptions.contentParser}). */
  readonly contentParser: ContentParser;
  /** Timestamp of the most recent request. */
  private lastRequestTimestamp;
  constructor(options?: ConciergeApiOptions);
  /** Tear down the client — NOTE(review): presumably clears the refresh timer; confirm in implementation. */
  destroy(): void;
  /** The currently held token. Overridable by extending classes. */
  protected set token(value: TokenResponse);
  /** The currently held token. Overridable by extending classes. */
  protected get token(): TokenResponse;
  /**
   * Get a new token from the server and set it.
   *
   * This method is protected and can be overridden by extending classes.
   */
  protected getToken(): Promise<void>;
  /**
   * Refresh the token.
   *
   * This method is private and handles the internal token refresh logic.
   */
  private refreshToken;
  /** Build the headers for an outgoing request. */
  private buildHeaders;
  /** Build the URL search parameters for an outgoing request. */
  private buildSearchParams;
  /**
   * Send the query `q` as a non-streaming request and resolve with the
   * complete parsed response.
   *
   * @param q - The query text to send.
   * @returns Message id, conversation timestamp, the response text (`data`),
   *          its parsed `blocks`, and the unparsed `raw` payload.
   */
  makeRequest(q: string): Promise<{
    messageId: string | null;
    data: string;
    conversation_ts: string | null;
    blocks: ParserResult[];
    raw: string;
  }>;
  /**
   * Fetch additional content from `url` and resolve with the same parsed
   * response shape as {@link makeRequest}.
   *
   * @param url - The URL to load additional content from.
   */
  loadAdditionalContent(url: string): Promise<{
    messageId: string | null;
    data: string;
    conversation_ts: string | null;
    blocks: ParserResult[];
    raw: string;
  }>;
  /**
   * Parse a streaming chunk into blocks (markdown, latex, html)
   */
  private parseStreamingBlocks;
  /**
   * Send the query `q` as a streaming request.
   *
   * @param q - The query text to send.
   * @param signal - Optional abort signal to cancel the stream.
   * @returns Conversation metadata plus an async iterable yielding
   *          incremental parsed chunks.
   */
  makeStreamingRequest(q: string, signal?: AbortSignal): Promise<{
    messageId: string | null;
    conversation_ts: string | null;
    [Symbol.asyncIterator]: () => AsyncGenerator<{
      messageId: string | null;
      data: string;
      conversation_ts: string | null;
      blocks: ParserResult[];
      raw: string;
    }, void, unknown>;
  }>;
  /**
   * Make a streaming request using XMLHttpRequest instead of fetch
   * This provides more detailed debugging capabilities and direct access to raw data
   *
   * @param q - The query text to send.
   * @returns The message id plus an async iterable of {@link StreamingChunk}s.
   */
  makeStreamingRequestXHR(q: string): Promise<{
    messageId: string | null;
    [Symbol.asyncIterator](): AsyncGenerator<StreamingChunk, void, unknown>;
  }>;
  /**
   * Reset the conversation timestamp
   * Call this method when starting a new conversation
   */
  resetConversationTimestamp(): void;
}
export type { ParserResult };
//# sourceMappingURL=index.d.ts.map