openpipe

OpenPipe TypeScript SDK: Fine-Tuning, Inference, and Metrics for Production Apps

import * as openai from 'openai'; import { ChatCompletion } from 'openai/resources'; type ApiRequestOptions = { readonly method: 'GET' | 'PUT' | 'POST' | 'DELETE' | 'OPTIONS' | 'HEAD' | 'PATCH'; readonly url: string; readonly path?: Record<string, any>; readonly cookies?: Record<string, any>; readonly headers?: Record<string, any>; readonly query?: Record<string, any>; readonly formData?: Record<string, any>; readonly body?: any; readonly mediaType?: string; readonly responseHeader?: string; readonly errors?: Record<number, string>; }; interface OnCancel { readonly isResolved: boolean; readonly isRejected: boolean; readonly isCancelled: boolean; (cancelHandler: () => void): void; } declare class CancelablePromise<T> implements Promise<T> { #private; constructor(executor: (resolve: (value: T | PromiseLike<T>) => void, reject: (reason?: any) => void, onCancel: OnCancel) => void); get [Symbol.toStringTag](): string; then<TResult1 = T, TResult2 = never>(onFulfilled?: ((value: T) => TResult1 | PromiseLike<TResult1>) | null, onRejected?: ((reason: any) => TResult2 | PromiseLike<TResult2>) | null): Promise<TResult1 | TResult2>; catch<TResult = never>(onRejected?: ((reason: any) => TResult | PromiseLike<TResult>) | null): Promise<T | TResult>; finally(onFinally?: (() => void) | null): Promise<T>; cancel(): void; get isCancelled(): boolean; } type Resolver<T> = (options: ApiRequestOptions) => Promise<T>; type Headers = Record<string, string>; type OpenAPIConfig = { BASE: string; VERSION: string; WITH_CREDENTIALS: boolean; CREDENTIALS: 'include' | 'omit' | 'same-origin'; TOKEN?: string | Resolver<string> | undefined; USERNAME?: string | Resolver<string> | undefined; PASSWORD?: string | Resolver<string> | undefined; HEADERS?: Headers | Resolver<Headers> | undefined; ENCODE_PATH?: ((path: string) => string) | undefined; }; declare abstract class BaseHttpRequest { readonly config: OpenAPIConfig; constructor(config: OpenAPIConfig); abstract request<T>(options: ApiRequestOptions): CancelablePromise<T>; } declare class DefaultService { readonly httpRequest: BaseHttpRequest; constructor(httpRequest: BaseHttpRequest); /** * @deprecated * DEPRECATED: we no longer support prompt caching. * @param requestBody * @returns any Successful response * @throws ApiError */ checkCache(requestBody: { /** * Unix timestamp in milliseconds */ requestedAt: number; /** * JSON-encoded request payload */ reqPayload?: any; /** * Extra tags to attach to the call for filtering. Eg { "userId": "123", "prompt_id": "populate-title" } */ tags?: Record<string, string>; }): CancelablePromise<{ /** * JSON-encoded response payload */ respPayload?: any; }>; /** * OpenAI-compatible route for generating inference and optionally logging the request. 
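*
* Illustrative sketch (not from the original file): invoking this route on a configured
* `DefaultService` instance. The `client` variable, the model slug, and the metadata keys
* below are placeholders for illustration only.
* @example
* const completion = await client.createChatCompletion({
*   model: "openpipe:my-fine-tuned-model",
*   messages: [{ role: "user", content: "Hello" }],
*   metadata: { prompt_id: "greeting" },
* });
* // The promise may resolve to null, so guard before reading choices.
* console.log(completion?.choices[0]?.message.content);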
* @param requestBody * @returns any Successful response * @throws ApiError */ createChatCompletion(requestBody: Record<string, any>): CancelablePromise<{ id: string; object: 'chat.completion'; created: number; model: string; choices: Array<{ finish_reason: ('length' | 'function_call' | 'tool_calls' | 'stop' | 'content_filter'); index: number; message: { reasoning_content?: string | null; content?: string | null; refusal?: string | null; role: 'assistant'; function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; }; logprobs?: { content?: Array<{ token: string; bytes: Array<number> | null; logprob: number; top_logprobs: Array<{ token: string; bytes: Array<number> | null; logprob: number; }>; }> | null; refusal?: Array<{ token: string; bytes: Array<number> | null; logprob: number; top_logprobs: Array<{ token: string; bytes: Array<number> | null; logprob: number; }>; }> | null; } | null; content_filter_results?: Record<string, any>; criteria_results?: Record<string, ({ status: 'success'; score: number; explanation?: string; errorCode?: number; errorMessage?: string; } | { status: 'error'; score?: number; explanation?: string; errorCode: number; errorMessage: string; })>; }>; usage?: { prompt_tokens: number; completion_tokens: number; total_tokens: number; prompt_cache_hit_tokens?: number; prompt_cache_miss_tokens?: number; completion_tokens_details?: { reasoning_tokens?: number | null; audio_tokens?: number | null; text_tokens?: number | null; accepted_prediction_tokens?: number | null; rejected_prediction_tokens?: number | null; } | null; prompt_tokens_details?: { cached_tokens?: number | null; audio_tokens?: number | null; } | null; criteria?: Record<string, { /** * The total number of tokens used to generate the criterion judgement. Only returned for OpenPipe-trained reward models currently. */ total_tokens: number; }>; }; } | null>; /** * Record request logs from OpenAI models * @param requestBody * @returns any Successful response * @throws ApiError */ report(requestBody: { /** * Unix timestamp in milliseconds */ requestedAt?: number; /** * Unix timestamp in milliseconds */ receivedAt?: number; /** * JSON-encoded request payload */ reqPayload?: any; /** * JSON-encoded response payload */ respPayload?: any; /** * HTTP status code of response */ statusCode?: number; /** * User-friendly error message */ errorMessage?: string; /** * DEPRECATED: use "reqPayload.metadata" to attach extra metadata tags to the call for filtering. 
Eg { "userId": "123", "prompt_id": "populate-title" } */ tags?: Record<string, (string | number | boolean | 'null' | null)>; }): CancelablePromise<{ status: ('ok' | 'error'); }>; /** * Record request logs from Anthropic models * @param requestBody * @returns any Successful response * @throws ApiError */ reportAnthropic(requestBody: { /** * Unix timestamp in milliseconds */ requestedAt?: number; /** * Unix timestamp in milliseconds */ receivedAt?: number; /** * JSON-encoded request payload */ reqPayload?: Record<string, any>; /** * JSON-encoded response payload */ respPayload?: { id: string; content: Array<({ text: string; type: 'text'; citations?: Array<({ cited_text: string; document_index: number; document_title: string | null; end_char_index: number; start_char_index: number; type: 'char_location'; } | { cited_text: string; document_index: number; document_title: string | null; end_page_number: number; start_page_number: number; type: 'page_location'; } | { cited_text: string; document_index: number; document_title: string | null; end_block_index: number; start_block_index: number; type: 'content_block_location'; })> | null; } | { id: string; name: string; type: 'tool_use'; input?: any; } | { thinking: string; signature: string; type: 'thinking'; } | { data: string; type: 'redacted_thinking'; })>; model: string; role: 'assistant'; stop_reason: ('end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use' | 'null' | null); stop_sequence: (string | 'null' | null); type: 'message'; usage: { input_tokens: number; output_tokens: number; cache_creation_input_tokens: number | null; cache_read_input_tokens: number | null; }; }; /** * HTTP status code of response */ statusCode?: number; /** * User-friendly error message */ errorMessage?: string; /** * Extra metadata tags to attach to the call for filtering. Eg { "userId": "123", "prompt_id": "populate-title" } */ metadata?: Record<string, string>; /** * Deprecated: use "metadata" instead */ tags?: Record<string, (string | number | boolean | 'null' | null)>; }): CancelablePromise<{ status: ('ok' | 'error'); }>; /** * @deprecated * DEPRECATED: use "/logs/update-metadata" instead * @param requestBody * @returns any Successful response * @throws ApiError */ updateLogTags(requestBody: { filters: Array<{ /** * The field to filter on. Possible fields include: `model`, `completionId`, and `tags.your_tag_name`. */ field: string; equals: (string | number | boolean); }>; /** * Extra tags to attach to the call for filtering. Eg { "userId": "123", "prompt_id": "populate-title" } */ tags: Record<string, (string | number | boolean | 'null' | null)>; }): CancelablePromise<{ matchedLogs: number; }>; /** * Update tags metadata for logged calls matching the provided filters. * @param requestBody * @returns any Successful response * @throws ApiError */ updateLogMetadata(requestBody: { filters: Array<{ /** * The field to filter on. Possible fields include: `model`, `completionId`, and `metadata.your_tag_name`. */ field: string; equals: (string | number | boolean); }>; /** * Extra metadata to attach to the call for filtering. 
Eg { "userId": "123", "prompt_id": "populate-title" } */ metadata: Record<string, (string | 'null' | null)>; }): CancelablePromise<{ matchedLogs: number; }>; /** * Get the latest logged call (only for local testing) * @returns any Successful response * @throws ApiError */ localTestingOnlyGetLatestLoggedCall(): CancelablePromise<{ createdAt: string; cacheHit: boolean; statusCode: number | null; errorMessage: string | null; reqPayload?: any; respPayload?: any; tags: Record<string, string | null>; metadata: Record<string, string | null>; } | null>; /** * Get a judgement of a completion against the specified criterion * @param requestBody * @returns any Successful response * @throws ApiError */ getCriterionJudgement(requestBody: { /** * The ID of the criterion to judge. */ criterion_id: string; input?: { /** * All messages sent to the model when generating the output. */ messages: Array<({ role: 'system'; content?: (string | Array<{ type: 'text'; text: string; }>); name?: string; } | { role: 'user'; content?: (string | Array<({ type: 'text'; text: string; } | { type: 'image_url'; image_url: { detail?: ('auto' | 'low' | 'high'); url: string; }; } | { type: 'input_audio'; input_audio: { data: string; format: 'wav' | 'mp3'; }; })>); name?: string; } | { role: 'assistant'; audio?: { id: string; } | null; content?: (string | Array<({ type: 'text'; text: string; } | { type: 'refusal'; refusal: string; })> | 'null' | null); function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; name?: string; refusal?: string | null; annotations?: Array<{ type: 'url_citation'; url_citation: { start_index: number; end_index: number; title: string; url: string; }; }>; } | { role: 'developer'; content?: (string | Array<{ type: 'text'; text: string; }>); name?: string; } | { role: 'tool'; content?: (string | Array<{ type: 'text'; text: string; }>); tool_call_id: string; } | { role: 'function'; name: string; content: (string | 'null' | null); })>; /** * The tool choice to use when generating the output, if any. */ tool_choice?: ('none' | 'auto' | 'required' | { type?: 'function'; function?: { name: string; }; }); /** * The tools available to the model when generating the output, if any. */ tools?: Array<{ function: { name: string; parameters?: Record<string, any>; description?: string; strict?: boolean | null; }; type: 'function'; }>; }; /** * The completion message of the model. */ output: { reasoning_content?: string | null; content?: string | null; refusal?: string | null; role: 'assistant'; function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; }; }): CancelablePromise<{ /** * A score of 0 means the output failed this completion, and a score of 1 means it passed. A criteria may also return a decimal scores between 0 and 1, indicating the model's confidence or 'likelihood' that the criteria passed. */ score: number; /** * An explanation of the score including the model's reasoning, if applicable. */ explanation?: string; usage?: { /** * The total number of tokens used to generate the criterion judgement. Only returned for OpenPipe-trained reward models currently. */ total_tokens: number; }; }>; /** * Create a new dataset. 
* @param requestBody * @returns any Successful response * @throws ApiError */ createDataset(requestBody: { name: string; }): CancelablePromise<{ object: 'dataset'; id: string; name: string; created: string; updated: string; dataset_entry_count: number; fine_tune_count: number; }>; /** * List datasets for a project. * @returns any Successful response * @throws ApiError */ listDatasets(): CancelablePromise<{ object: 'list'; data: Array<{ object: 'dataset'; id: string; name: string; created: string; updated: string; dataset_entry_count: number; fine_tune_count: number; }>; }>; /** * Delete a dataset. * @param datasetId * @returns any Successful response * @throws ApiError */ deleteDataset(datasetId: string): CancelablePromise<{ id: string; object: 'dataset'; deleted: boolean; }>; /** * Add new dataset entries. * @param datasetId * @param requestBody * @returns any Successful response * @throws ApiError */ createDatasetEntries(datasetId: string, requestBody: { entries: Array<{ messages: Array<({ role: 'system'; content?: (string | Array<{ type: 'text'; text: string; }>); name?: string; } | { role: 'user'; content?: (string | Array<({ type: 'text'; text: string; } | { type: 'image_url'; image_url: { detail?: ('auto' | 'low' | 'high'); url: string; }; } | { type: 'input_audio'; input_audio: { data: string; format: 'wav' | 'mp3'; }; })>); name?: string; } | { role: 'assistant'; audio?: { id: string; } | null; content?: (string | Array<({ type: 'text'; text: string; } | { type: 'refusal'; refusal: string; })> | 'null' | null); function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; name?: string; refusal?: string | null; annotations?: Array<{ type: 'url_citation'; url_citation: { start_index: number; end_index: number; title: string; url: string; }; }>; } | { role: 'developer'; content?: (string | Array<{ type: 'text'; text: string; }>); name?: string; } | { role: 'tool'; content?: (string | Array<{ type: 'text'; text: string; }>); tool_call_id: string; } | { role: 'function'; name: string; content: (string | 'null' | null); })>; rejected_message?: { reasoning_content?: string | null; content?: string | null; refusal?: string | null; role: 'assistant'; function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; }; tool_choice?: ('none' | 'auto' | 'required' | { type?: 'function'; function?: { name: string; }; }); tools?: Array<{ function: { name: string; parameters?: Record<string, any>; description?: string; strict?: boolean | null; }; type: 'function'; }>; response_format?: ({ type: 'text'; } | { type: 'json_object'; } | { type: 'json_schema'; json_schema: { name: string; description?: string; schema?: Record<string, any>; strict?: boolean | null; }; }); split?: 'TRAIN' | 'TEST'; metadata?: Record<string, string>; }>; }): CancelablePromise<{ object: 'dataset.entries.creation'; entries_created: number; errors: { object: 'list'; data: Array<{ object: 'dataset.entries.creation.error'; entry_index: number; message: string; }>; }; }>; /** * Train a new model. * @param requestBody * @returns any Successful response * @throws ApiError */ createModel(requestBody: { datasetId: string; slug: string; pruningRuleIds?: Array<string>; trainingConfig: ({ provider: 'openpipe'; /** * The base model to train from. This could be a base model name or the slug of a previously trained model. 
Supported base models include: `meta-llama/Meta-Llama-3.1-8B-Instruct`, `meta-llama/Meta-Llama-3.1-70B-Instruct`, `meta-llama/Llama-3.3-70B-Instruct`, `meta-llama/Llama-3.1-8B`, `meta-llama/Llama-3.1-70B`, `Qwen/Qwen2.5-72B-Instruct`, `Qwen/Qwen2.5-Coder-7B-Instruct`, `Qwen/Qwen2.5-Coder-32B-Instruct`, `Qwen/Qwen2.5-1.5B-Instruct`, `Qwen/Qwen2.5-7B-Instruct`, `Qwen/Qwen2-VL-7B-Instruct`, `Qwen/Qwen2.5-14B-Instruct`, `Qwen/Qwen3-8B`, `Qwen/Qwen3-14B`, `mistralai/Mistral-Nemo-Base-2407`, `mistralai/Mistral-Small-24B-Base-2501`, `meta-llama/Llama-3.2-1B-Instruct`, `meta-llama/Llama-3.2-3B-Instruct`, `google/gemma-3-1b-it`, `google/gemma-3-4b-it`, `google/gemma-3-12b-it`, `google/gemma-3-27b-it` */ baseModel: string; /** * Whether to enable SFT training. If true, the model will be trained using SFT. Can be used in conjunction with DPO training. */ enable_sft?: boolean; /** * Whether to enable DPO training. If true, the model will be trained using DPO. Can be used in conjunction with SFT training. */ enable_preference_tuning?: boolean; /** * Hyperparameters for SFT training job. Ensure `enable_sft` is true. If no SFT hyperparameters are provided, default values will be used. */ sft_hyperparameters?: { batch_size?: ('auto' | number); learning_rate_multiplier?: number; num_epochs?: number; }; /** * Hyperparameters for DPO training job. Ensure `enable_preference_tuning` is true. If no preference hyperparameters are provided, default values will be used. */ preference_hyperparameters?: { variant?: ('DPO' | 'APO Zero'); learning_rate_multiplier?: number; num_epochs?: number; training_beta?: number; adapter_weight?: number; }; /** * DEPRECATED: Use the `sft_hyperparameters` and `preference_hyperparameters` fields instead. */ hyperparameters?: { is_sft_enabled?: boolean; batch_size?: ('auto' | number); learning_rate_multiplier?: number; num_epochs?: number; is_preference_tuning_enabled?: boolean; preference_tuning_variant?: ('DPO' | 'APO Zero'); preference_tuning_learning_rate_multiplier?: number; preference_tuning_num_epochs?: number; preference_tuning_training_beta?: number; preference_tuning_adapter_weight?: number; }; } | { provider: 'openpipeReward'; /** * The base model to train from. This could be a base model name or the slug of a previously trained model. Supported base models include: `meta-llama/Llama-3.2-1B-Instruct`, `meta-llama/Llama-3.2-3B-Instruct`, `meta-llama/Meta-Llama-3.1-8B-Instruct`, `Qwen/Qwen2.5-0.5B-Instruct`, `Qwen/Qwen2.5-1.5B-Instruct`, `Qwen/Qwen2.5-3B-Instruct`, `Qwen/Qwen2.5-7B-Instruct`, `Qwen/Qwen3-8B` */ baseModel: string; hyperparameters?: { batch_size?: ('auto' | number); learning_rate_multiplier?: number; num_epochs?: number; }; } | { provider: 'openai'; baseModel: 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-2024-08-06' | 'gpt-3.5-turbo-0125'; /** * Whether to enable SFT training. If true, the model will be trained using SFT. Can be used in conjunction with DPO training. */ enable_sft?: boolean; /** * Whether to enable DPO training. If true, the model will be trained using DPO. Can be used in conjunction with SFT training. */ enable_preference_tuning?: boolean; /** * Hyperparameters for SFT training job. Ensure `enable_sft` is true. If no SFT hyperparameters are provided, default values will be used. */ sft_hyperparameters?: { batch_size?: number; learning_rate_multiplier?: number; n_epochs?: number; }; /** * Hyperparameters for DPO training job. Ensure `enable_preference_tuning` is true. 
If no preference hyperparameters are provided, default values will be used. */ preference_hyperparameters?: { beta?: number; batch_size?: number; learning_rate_multiplier?: number; n_epochs?: number; }; /** * DEPRECATED: Use the `sft_hyperparameters` and `preference_hyperparameters` fields instead. */ hyperparameters?: { is_sft_enabled?: boolean; batch_size?: number; learning_rate_multiplier?: number; n_epochs?: number; is_preference_tuning_enabled?: boolean; preference_tuning_beta?: number; preference_tuning_batch_size?: number; preference_tuning_learning_rate_multiplier?: number; preference_tuning_n_epochs?: number; }; } | { provider: 'gemini'; baseModel: 'models/gemini-1.0-pro-001' | 'models/gemini-1.5-flash-001-tuning'; /** * Hyperparameters for SFT training job. If no SFT hyperparameters are provided, default values will be used. */ sft_hyperparameters?: { epochs?: number; batch_size?: number; learning_rate?: number; learning_rate_multiplier?: number; }; /** * DEPRECATED: Use the `sft_hyperparameters` field instead. */ hyperparameters?: { epochs?: number; batch_size?: number; learning_rate?: number; learning_rate_multiplier?: number; }; }); defaultTemperature?: number; }): CancelablePromise<{ id: string; name: string; object: 'model'; description: string | null; created: string; updated: string; openpipe: { baseModel: string; hyperparameters: Record<string, any> | null; status: 'PENDING' | 'TRAINING' | 'DEPLOYED' | 'ERROR' | 'DEPRECATED' | 'PENDING_DEPRECATION' | 'QUEUED' | 'PROVISIONING'; datasetId: string; errorMessage: string | null; }; contextWindow: number; maxCompletionTokens: number; capabilities: Array<'chat' | 'tools' | 'json'>; pricing: { /** * $/million tokens */ chatIn: number; /** * $/million tokens */ chatOut: number; }; owned_by: string; }>; /** * List all models for a project. * @returns any Successful response * @throws ApiError */ listModels(): CancelablePromise<{ object: 'list'; data: Array<{ id: string; name: string; object: 'model'; description: string | null; created: string; updated: string; openpipe: { baseModel: string; hyperparameters: Record<string, any> | null; status: 'PENDING' | 'TRAINING' | 'DEPLOYED' | 'ERROR' | 'DEPRECATED' | 'PENDING_DEPRECATION' | 'QUEUED' | 'PROVISIONING'; datasetId: string; errorMessage: string | null; }; contextWindow: number; maxCompletionTokens: number; capabilities: Array<'chat' | 'tools' | 'json'>; pricing: { /** * $/million tokens */ chatIn: number; /** * $/million tokens */ chatOut: number; }; owned_by: string; }>; }>; /** * Get a model by ID. * @param modelSlug * @returns any Successful response * @throws ApiError */ getModel(modelSlug: string): CancelablePromise<{ id: string; name: string; object: 'model'; description: string | null; created: string; updated: string; openpipe: { baseModel: string; hyperparameters: Record<string, any> | null; status: 'PENDING' | 'TRAINING' | 'DEPLOYED' | 'ERROR' | 'DEPRECATED' | 'PENDING_DEPRECATION' | 'QUEUED' | 'PROVISIONING'; datasetId: string; errorMessage: string | null; }; contextWindow: number; maxCompletionTokens: number; capabilities: Array<'chat' | 'tools' | 'json'>; pricing: { /** * $/million tokens */ chatIn: number; /** * $/million tokens */ chatOut: number; }; owned_by: string; }>; /** * Delete an existing model. 
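*
* Illustrative sketch (not from the original file), assuming `client` is a configured
* `DefaultService` instance and "my-old-model" is a placeholder model slug.
* @example
* const result = await client.deleteModel("my-old-model");
* if (result.deleted) console.log(`Deleted model ${result.id}`);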
* @param modelSlug * @returns any Successful response * @throws ApiError */ deleteModel(modelSlug: string): CancelablePromise<{ id: string; object: 'model'; deleted: boolean; }>; /** * @deprecated * DEPRECATED: use the `/datasets` endpoint instead * @param requestBody * @returns any Successful response * @throws ApiError */ unstableDatasetCreate(requestBody: { name: string; }): CancelablePromise<{ datasetId: string; }>; /** * @deprecated * DEPRECATED: use the `/datasets/{dataset}` endpoint instead * @param requestBody * @returns any Successful response * @throws ApiError */ unstableDatasetDelete(requestBody: { datasetId: string; }): CancelablePromise<{ datasetId: string; }>; /** * @deprecated * DEPRECATED: use the `/datasets` endpoint instead * @returns any Successful response * @throws ApiError */ unstableDatasetList(): CancelablePromise<Array<{ id: string; name: string; createdAt: string; updatedAt: string; datasetEntryCount: number; fineTuneCount: number; }>>; /** * @deprecated * DEPRECATED: use the `/datasets/{dataset}/entries` endpoint instead * @param requestBody * @returns any Successful response * @throws ApiError */ unstableDatasetEntryCreate(requestBody: { datasetId: string; entries: Array<{ messages: Array<({ role: 'system'; content?: (string | Array<{ type: 'text'; text: string; }>); name?: string; } | { role: 'user'; content?: (string | Array<({ type: 'text'; text: string; } | { type: 'image_url'; image_url: { detail?: ('auto' | 'low' | 'high'); url: string; }; } | { type: 'input_audio'; input_audio: { data: string; format: 'wav' | 'mp3'; }; })>); name?: string; } | { role: 'assistant'; audio?: { id: string; } | null; content?: (string | Array<({ type: 'text'; text: string; } | { type: 'refusal'; refusal: string; })> | 'null' | null); function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; name?: string; refusal?: string | null; annotations?: Array<{ type: 'url_citation'; url_citation: { start_index: number; end_index: number; title: string; url: string; }; }>; } | { role: 'developer'; content?: (string | Array<{ type: 'text'; text: string; }>); name?: string; } | { role: 'tool'; content?: (string | Array<{ type: 'text'; text: string; }>); tool_call_id: string; } | { role: 'function'; name: string; content: (string | 'null' | null); })>; rejected_message?: { reasoning_content?: string | null; content?: string | null; refusal?: string | null; role: 'assistant'; function_call?: { name?: string; arguments?: string; } | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string; }; type: 'function'; }> | null; }; function_call?: ('none' | 'auto' | { name: string; }); functions?: Array<{ name: string; parameters?: Record<string, any>; description?: string; strict?: boolean | null; }>; tool_choice?: ('none' | 'auto' | 'required' | { type?: 'function'; function?: { name: string; }; }); tools?: Array<{ function: { name: string; parameters?: Record<string, any>; description?: string; strict?: boolean | null; }; type: 'function'; }>; response_format?: ({ type: 'text'; } | { type: 'json_object'; } | { type: 'json_schema'; json_schema: { name: string; description?: string; schema?: Record<string, any>; strict?: boolean | null; }; }); split?: 'TRAIN' | 'TEST'; judgement?: 'PASS' | 'FAIL'; metadata?: Record<string, string>; }>; }): CancelablePromise<{ createdEntries: number; errors: Array<{ index: number; message: string; }>; }>; /** * @deprecated * 
DEPRECATED * @param requestBody * @returns any Successful response * @throws ApiError */ unstableFinetuneCreate(requestBody: { datasetId: string; slug: string; /** * The base model to fine-tune from. Supported models include: meta-llama/Meta-Llama-3.1-8B-Instruct, meta-llama/Meta-Llama-3.1-70B-Instruct */ baseModel: string; overrides?: { is_sft_enabled?: boolean; batch_size?: ('auto' | number); learning_rate_multiplier?: number; num_epochs?: number; is_preference_tuning_enabled?: boolean; preference_tuning_variant?: ('DPO' | 'APO Zero'); preference_tuning_learning_rate_multiplier?: number; preference_tuning_num_epochs?: number; preference_tuning_training_beta?: number; preference_tuning_adapter_weight?: number; }; }): CancelablePromise<{ id: string; }>; /** * @deprecated * DEPRECATED: use the `/models/{model}` endpoint instead * @param id * @param slug * @returns any Successful response * @throws ApiError */ unstableFinetuneGet(id?: string, slug?: string): CancelablePromise<{ id: string; status: ('PENDING' | 'STARTED' | 'TRANSFERRING_TRAINING_DATA' | 'TRAINING' | 'DEPLOYING' | 'DEPLOYED' | 'ERROR' | 'DEPRECATED' | 'PENDING_DEPRECATION' | 'QUEUED' | 'PROVISIONING'); slug: string; baseModel: string; errorMessage: string | null; datasetId: string; createdAt: string; }>; /** * @deprecated * DEPRECATED: use the `/models/{model}` endpoint instead * @param requestBody * @returns any Successful response * @throws ApiError */ unstableFinetuneDelete(requestBody: { id?: string; slug?: string; }): CancelablePromise<{ deleted: string; }>; } type OpenPipeConfig = { apiKey?: string; baseUrl?: string; fallbackClient?: openai.OpenAI; }; type CacheSetting = "readWrite" | "readOnly" | "writeOnly"; type OpenPipeArgs = { openpipe?: { logRequest?: boolean; cache?: CacheSetting; tags?: Record<string, string | number | boolean | null>; fallback?: { model: string; timeout?: number; }; criteria?: string[]; }; }; type OpenPipeMeta = { reportingFinished: Promise<void>; }; type CriteriaResultsType = NonNullable<Awaited<ReturnType<DefaultService["createChatCompletion"]>>>["choices"][number]["criteria_results"]; type OpenPipeChatCompletion = ChatCompletion & { choices: (ChatCompletion["choices"][number] & { criteria_results?: CriteriaResultsType; })[]; }; export { BaseHttpRequest as B, CancelablePromise as C, DefaultService as D, type OpenPipeMeta as O, type OpenAPIConfig as a, type OpenPipeConfig as b, type OpenPipeArgs as c, type OpenPipeChatCompletion as d };
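
A minimal end-to-end sketch of how the OpenPipeConfig and OpenPipeArgs types above are typically used. It assumes the package's OpenAI-compatible wrapper is exported from "openpipe/openai" and that per-request options are passed via an extra `openpipe` field; neither entry point is defined in this declaration file, so treat the import path and response shape as assumptions rather than the package's confirmed API. The OpenPipeMeta type also suggests a `reportingFinished` promise that can be awaited before a short-lived process exits, but how it is attached to the response is not shown here.

// Sketch only: the "openpipe/openai" entry point and the `openpipe` request field are
// assumptions based on the OpenPipeConfig / OpenPipeArgs types above, not on this file.
import OpenAI from "openpipe/openai";

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,        // forwarded to OpenAI
  openpipe: {
    apiKey: process.env.OPENPIPE_API_KEY,    // OpenPipeConfig.apiKey
  },
});

async function main() {
  const completion = await client.chat.completions.create({
    model: "openpipe:my-fine-tuned-model",   // placeholder model slug
    messages: [{ role: "user", content: "Classify this ticket: my invoice is wrong." }],
    // OpenPipeArgs: per-request logging controls and filter tags
    openpipe: {
      logRequest: true,
      tags: { prompt_id: "classify-ticket", userId: "123" },
    },
  });
  console.log(completion.choices[0]?.message.content);
}

main();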