sambanova
The official TypeScript library for the SambaNova API
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../core/resource';
import * as CompletionsAPI from './completions';
import { APIPromise } from '../../core/api-promise';
import { Stream } from '../../core/streaming';
import { RequestOptions } from '../../internal/request-options';
export class Completions extends APIResource {
/**
* Create chat-based completion
*
* @example
* ```ts
* const completion = await client.chat.completions.create({
* messages: [
* {
* content: 'create a poem using palindromes',
* role: 'user',
* },
* ],
* model: 'Meta-Llama-3.3-70B-Instruct',
* });
* ```
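*
* @example
* ```ts
* // Streaming sketch: with `stream: true` the call resolves to an
* // async-iterable `Stream` of chunks. The model name is illustrative.
* const stream = await client.chat.completions.create({
*   messages: [{ content: 'create a poem using palindromes', role: 'user' }],
*   model: 'Meta-Llama-3.3-70B-Instruct',
*   stream: true,
* });
* for await (const chunk of stream) {
*   process.stdout.write(chunk.choices?.[0]?.delta.content ?? '');
* }
* ```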
*/
create(
body: CompletionCreateParamsNonStreaming,
options?: RequestOptions,
): APIPromise<CompletionCreateResponse>;
create(
body: CompletionCreateParamsStreaming,
options?: RequestOptions,
): APIPromise<Stream<ChatCompletionStreamResponse>>;
create(
body: CompletionCreateParamsBase,
options?: RequestOptions,
): APIPromise<Stream<ChatCompletionStreamResponse> | CompletionCreateResponse>;
create(
body: CompletionCreateParams,
options?: RequestOptions,
): APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatCompletionStreamResponse>> {
return this._client.post('/chat/completions', { body, ...options, stream: body.stream ?? false }) as
| APIPromise<CompletionCreateResponse>
| APIPromise<Stream<ChatCompletionStreamResponse>>;
}
}
/**
* chat completion response returned by the model
*/
export interface ChatCompletionResponse {
/**
* A unique identifier for the chat completion.
*/
id: string;
choices: Array<ChatCompletionResponse.Choice>;
/**
* The Unix timestamp (in seconds) of when the chat completion was created.
*/
created: number;
/**
* The model used for the chat completion.
*/
model: string;
/**
* The object type, always `chat.completion`.
*/
object: 'chat.completion';
/**
* Backend configuration that the model runs with.
*/
system_fingerprint: string;
/**
* Usage metrics for the completion, embeddings, transcription, or translation
* request
*/
usage: ChatCompletionResponse.Usage | null;
}
export namespace ChatCompletionResponse {
export interface Choice {
/**
* The reason the model stopped generating tokens. Will be `stop` if the model hit
* a natural stop point or a provided stop sequence, `length` if the maximum number
* of tokens specified in the request was reached, `tool_calls` if the model called
* a tool.
*/
finish_reason: 'stop' | 'length' | 'tool_calls';
/**
* The index of the choice in the list of choices
*/
index: number;
message: Choice.Message;
/**
* Completion Log Probs object
*/
logprobs?: Choice.Logprobs | null;
[k: string]: unknown;
}
export namespace Choice {
export interface Message {
/**
* The contents of the assistant message.
*/
content: string | null;
/**
* The role of the message's author
*/
role: 'assistant' | 'user' | 'system' | 'tool';
/**
* The tool calls generated by the model.
*/
tool_calls?: Array<Message.ToolCall> | null;
[k: string]: unknown;
}
export namespace Message {
export interface ToolCall {
/**
* ID of the tool call.
*/
id: string;
/**
* The tool that the model called.
*/
function: ToolCall.Function;
/**
* type of the tool call. only `function` is supported.
*/
type: 'function';
/**
* index of the tool call chunk; only used when streaming
*/
index?: number | null;
[k: string]: unknown;
}
export namespace ToolCall {
/**
* The tool that the model called.
*/
export interface Function {
/**
* The arguments to call the function with, as generated by the model in JSON
* format. Note that the model does not always generate valid JSON, and may
* hallucinate parameters not defined by your function schema. Validate the
* arguments in your code before calling your function.
*/
arguments: string;
/**
* The name of the function to call.
*/
name: string;
[k: string]: unknown;
}
}
}
/**
* Completion Log Probs object
*/
export interface Logprobs {
content: Logprobs.Content;
[k: string]: unknown;
}
export namespace Logprobs {
export interface Content {
token: string;
logprob: number;
top_logprobs: Content.TopLogprobs;
bytes?: Array<number> | null;
[k: string]: unknown;
}
export namespace Content {
export interface TopLogprobs {
token: string;
logprob: number;
bytes?: Array<number> | null;
[k: string]: unknown;
}
}
}
}
/**
* Usage metrics for the completion, embeddings, transcription, or translation
* request
*/
export interface Usage {
/**
* acceptance rate
*/
acceptance_rate?: number;
/**
* number of tokens generated in completion
*/
completion_tokens?: number;
/**
* completion tokens per second after first token generation
*/
completion_tokens_after_first_per_sec?: number;
/**
* completion tokens per second after first token generation (first ten tokens)
*/
completion_tokens_after_first_per_sec_first_ten?: number;
/**
* completion tokens per second after first token generation
*/
completion_tokens_after_first_per_sec_graph?: number;
/**
* completion tokens per second
*/
completion_tokens_per_sec?: number;
/**
* The Unix timestamp (in seconds) of when the generation finished.
*/
end_time?: number;
/**
* whether or not this is the last response; always true for non-streaming responses
*/
is_last_response?: true;
/**
* number of tokens used in the prompt sent
*/
prompt_tokens?: number;
/**
* Extra tokens details
*/
prompt_tokens_details?: Usage.PromptTokensDetails;
/**
* The Unix timestamp (in seconds) of when the generation started.
*/
start_time?: number;
/**
* also known as TTFT, the time (in seconds) taken to generate the first token
*/
time_to_first_token?: number;
/**
* total time (in seconds) taken to complete the full generation
*/
total_latency?: number;
/**
* prompt tokens + completion tokens
*/
total_tokens?: number;
/**
* tokens per second including prompt and completion
*/
total_tokens_per_sec?: number;
[k: string]: unknown;
}
export namespace Usage {
/**
* Extra tokens details
*/
export interface PromptTokensDetails {
/**
* number of cached tokens
*/
cached_tokens?: number;
[k: string]: unknown;
}
}
}
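/*
* Consuming a non-streaming response: a minimal sketch (assumes `completion` is
* a `ChatCompletionResponse` returned by `create`; the handling shown is
* illustrative, not part of the SDK).
*
* ```ts
* const choice = completion.choices[0];
* if (choice.finish_reason === 'tool_calls') {
*   for (const call of choice.message.tool_calls ?? []) {
*     // `arguments` is a model-generated JSON string and may be invalid;
*     // validate before acting on it.
*     console.log(call.function.name, JSON.parse(call.function.arguments));
*   }
* } else {
*   console.log(choice.message.content);
* }
* ```
*/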
/**
* streamed chunk of a chat completion response returned by the model
*/
export interface ChatCompletionStreamResponse {
/**
* A unique identifier for the chat completion.
*/
id: string;
/**
* A list of chat completion choices.
*/
choices: Array<ChatCompletionStreamResponse.Choice> | null;
/**
* The Unix timestamp (in seconds) of when the chat completion was created.
*/
created: number;
/**
* The model used for the chat completion.
*/
model: string;
/**
* The object type, always `chat.completion.chunk`.
*/
object: 'chat.completion.chunk';
/**
* Backend configuration that the model runs with.
*/
system_fingerprint: string;
/**
* Usage metrics for the completion, embeddings, transcription, or translation
* request
*/
usage?: ChatCompletionStreamResponse.Usage | null;
[k: string]: unknown;
}
export namespace ChatCompletionStreamResponse {
export interface Choice {
/**
* completion response delta chunk generated by streamed model responses.
*/
delta: Choice.Delta;
/**
* The index of the choice in the list of choices
*/
index: number;
/**
* The reason the model stopped generating tokens. Will be `stop` if the model hit
* a natural stop point or a provided stop sequence, `length` if the maximum number
* of tokens specified in the request was reached, `tool_calls` if the model called
* a tool.
*/
finish_reason?: 'stop' | 'length' | 'tool_calls' | null;
/**
* Completion Log Probs object
*/
logprobs?: Choice.Logprobs | null;
[k: string]: unknown;
}
export namespace Choice {
/**
* completion response delta chunk generated by streamed model responses.
*/
export interface Delta {
/**
* Channel (returned by reasoning models like gpt-oss)
*/
channel?: string | null;
/**
* The contents of the assistant message.
*/
content?: string | null;
/**
* Reasoning (returned by reasoning models like gpt-oss)
*/
reasoning?: string | null;
/**
* The role of the message's author
*/
role?: 'assistant' | 'user' | 'system' | 'tool' | null;
/**
* The tool calls generated by the model.
*/
tool_calls?: Array<Delta.ToolCall> | null;
[k: string]: unknown;
}
export namespace Delta {
export interface ToolCall {
/**
* ID of the tool call.
*/
id: string;
/**
* The tool that the model called.
*/
function: ToolCall.Function;
/**
* type of the tool call. only `function` is supported.
*/
type: 'function';
/**
* index of the tool call chunk; only used when streaming
*/
index?: number | null;
[k: string]: unknown;
}
export namespace ToolCall {
/**
* The tool that the model called.
*/
export interface Function {
/**
* The arguments to call the function with, as generated by the model in JSON
* format. Note that the model does not always generate valid JSON, and may
* hallucinate parameters not defined by your function schema. Validate the
* arguments in your code before calling your function.
*/
arguments: string;
/**
* The name of the function to call.
*/
name: string;
[k: string]: unknown;
}
}
}
/**
* Completion Log Probs object
*/
export interface Logprobs {
content: Logprobs.Content;
[k: string]: unknown;
}
export namespace Logprobs {
export interface Content {
token: string;
logprob: number;
top_logprobs: Content.TopLogprobs;
bytes?: Array<number> | null;
[k: string]: unknown;
}
export namespace Content {
export interface TopLogprobs {
token: string;
logprob: number;
bytes?: Array<number> | null;
[k: string]: unknown;
}
}
}
}
/**
* Usage metrics for the completion, embeddings, transcription, or translation
* request
*/
export interface Usage {
/**
* acceptance rate
*/
acceptance_rate?: number;
/**
* number of tokens generated in completion
*/
completion_tokens?: number;
/**
* completion tokens per second after first token generation
*/
completion_tokens_after_first_per_sec?: number;
/**
* completion tokens per second after first token generation (first ten tokens)
*/
completion_tokens_after_first_per_sec_first_ten?: number;
/**
* completion tokens per second after first token generation
*/
completion_tokens_after_first_per_sec_graph?: number;
/**
* completion tokens per second
*/
completion_tokens_per_sec?: number;
/**
* The Unix timestamp (in seconds) of when the generation finished.
*/
end_time?: number;
/**
* whether or not this is the last response; always true for non-streaming responses
*/
is_last_response?: true;
/**
* number of tokens used in the prompt sent
*/
prompt_tokens?: number;
/**
* Extra tokens details
*/
prompt_tokens_details?: Usage.PromptTokensDetails;
/**
* The Unix timestamp (in seconds) of when the generation started.
*/
start_time?: number;
/**
* also known as TTFT, the time (in seconds) taken to generate the first token
*/
time_to_first_token?: number;
/**
* total time (in seconds) taken to complete the full generation
*/
total_latency?: number;
/**
* prompt tokens + completion tokens
*/
total_tokens?: number;
/**
* tokens per second including prompt and completion
*/
total_tokens_per_sec?: number;
[k: string]: unknown;
}
export namespace Usage {
/**
* Extra tokens details
*/
export interface PromptTokensDetails {
/**
* number of cached tokens
*/
cached_tokens?: number;
[k: string]: unknown;
}
}
}
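/*
* Accumulating streamed deltas into a full message: a minimal sketch (assumes
* `stream` came from `create({ ..., stream: true })`; usage arrives in a final
* chunk when `stream_options.include_usage` is set, see `StreamOptions` below).
*
* ```ts
* let text = '';
* for await (const chunk of stream) {
*   text += chunk.choices?.[0]?.delta.content ?? '';
*   if (chunk.usage?.is_last_response) {
*     console.log('total tokens:', chunk.usage.total_tokens);
*   }
* }
* console.log(text);
* ```
*/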
/**
* other kinds of errors
*/
export interface GeneralError {
error: GeneralError.Error;
}
export namespace GeneralError {
export interface Error {
/**
* error code
*/
code?: string | null;
/**
* error message
*/
message?: string;
/**
* error params
*/
param?: string | null;
/**
* error type
*/
type?: string;
}
}
/**
* error in model output generation
*/
export interface ModelOutputError {
error: ModelOutputError.Error;
}
export namespace ModelOutputError {
export interface Error {
/**
* error code
*/
code?: string | null;
/**
* error message
*/
message?: string;
/**
* raw output with errors generated by the model
*/
model_output?: string;
/**
* error params
*/
param?: string | null;
/**
* error type
*/
type?: string;
}
}
/**
* chat completion response returned by the model
*/
export type CompletionCreateResponse =
| ChatCompletionResponse
| ChatCompletionStreamResponse
| ModelOutputError
| GeneralError;
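/*
* Because `CompletionCreateResponse` can carry error payloads, callers may want
* to narrow the union before reading choices. A hedged sketch using only the
* shapes declared above:
*
* ```ts
* function unwrap(response: CompletionCreateResponse) {
*   const maybeError = response as Partial<GeneralError>;
*   if (maybeError.error) {
*     // Covers both GeneralError and ModelOutputError payloads.
*     throw new Error(maybeError.error.message ?? 'model request failed');
*   }
*   return response as ChatCompletionResponse | ChatCompletionStreamResponse;
* }
* ```
*/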
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
export interface CompletionCreateParamsBase {
/**
* A list of messages comprising the conversation so far.
*/
messages: Array<
| CompletionCreateParams.SystemMessage
| CompletionCreateParams.UserMessage
| CompletionCreateParams.AssistantMessage
| CompletionCreateParams.ToolMessage
>;
/**
* The model ID to use (e.g. Meta-Llama-3.3-70B-Instruct). See available
* [models](https://docs.sambanova.ai/cloud/docs/get-started/supported-models)
*/
model:
| (string & {})
| 'Meta-Llama-3.3-70B-Instruct'
| 'Meta-Llama-3.2-1B-Instruct'
| 'Meta-Llama-3.2-3B-Instruct'
| 'Llama-3.2-11B-Vision-Instruct'
| 'Llama-3.2-90B-Vision-Instruct'
| 'Meta-Llama-3.1-8B-Instruct'
| 'Meta-Llama-3.1-70B-Instruct'
| 'Meta-Llama-3.1-405B-Instruct'
| 'Qwen2.5-Coder-32B-Instruct'
| 'Qwen2.5-72B-Instruct'
| 'QwQ-32B-Preview'
| 'Meta-Llama-Guard-3-8B'
| 'DeepSeek-R1'
| 'DeepSeek-R1-0528'
| 'DeepSeek-V3-0324'
| 'DeepSeek-V3.1'
| 'DeepSeek-V3.1-Terminus'
| 'DeepSeek-R1-Distill-Llama-70B'
| 'Llama-4-Maverick-17B-128E-Instruct'
| 'Llama-4-Scout-17B-16E-Instruct'
| 'Qwen3-32B'
| 'Llama-3.3-Swallow-70B-Instruct-v0.4'
| 'gpt-oss-120b'
| 'ALLaM-7B-Instruct-preview';
/**
* A dictionary of additional keyword arguments to pass into the chat template. Use
* this to provide extra context or parameters that the model's chat template can
* process. Keys must be strings; values may be any valid JSON type.
*/
chat_template_kwargs?: CompletionCreateParams.ChatTemplateKwargs | null;
/**
* If true, sampling is enabled during output generation. If false, deterministic
* decoding is used.
*/
do_sample?: boolean | null;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
*/
frequency_penalty?: number;
/**
* This is not yet supported by our models. Modify the likelihood of specified
* tokens appearing in the completion.
*/
logit_bias?: { [key: string]: number } | null;
/**
* This is not yet supported by our models. Whether to return log probabilities of
* the output tokens or not. If true, returns the log probabilities of each output
* token returned in the `content` of `message`.
*/
logprobs?: boolean | null;
/**
* The maximum number of tokens that can be generated in the chat completion. The
* total length of input tokens and generated tokens is limited by the model's
* context length.
*/
max_completion_tokens?: number | null;
/**
* The maximum number of tokens that can be generated in the chat completion. The
* total length of input tokens and generated tokens is limited by the model's
* context length.
*/
max_tokens?: number | null;
/**
* This is not yet supported by our models. How many chat completion choices to
* generate for each input message.
*/
n?: number | null;
/**
* Whether to enable parallel function calling during tool use.
*/
parallel_tool_calls?: boolean | null;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
*/
presence_penalty?: number | null;
/**
* Value specifying the amount of reasoning the model is allowed to do. Increasing
* it will increase the number of output reasoning tokens generated by the model,
* but will improve the quality of the responses. Allowed values are 'low',
* 'medium', and 'high'.
*/
reasoning_effort?: 'low' | 'medium' | 'high' | null;
/**
* An object specifying the format that the model must output. Setting to
* `{ "type": "json_object" }` enables JSON mode, which will check that the message
* the model generates is valid JSON. **Important:** when using JSON mode, you
* **must** also instruct the model to produce JSON yourself via a system or user
* message. Setting to `{ "type": "json_schema", "json_schema": {<your_schema>} }`
* enables JSON schema mode, which will check that the message the model generates
* is a valid object of type <your_schema>.
*/
response_format?:
| CompletionCreateParams.ResponseFormatText
| CompletionCreateParams.ResponseFormatJsonObject
| CompletionCreateParams.ResponseFormatJsonSchema
| null;
/**
* This is not yet supported by our models.
*/
seed?: number | null;
/**
* Sequences where the API will stop generating tokens. The returned text will not
* contain the stop sequence.
*/
stop?: string | null | Array<string>;
/**
* If set, partial message deltas will be sent. Tokens will be sent as data-only
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
* as they become available, with the stream terminated by a `data: [DONE]`
* message.
*/
stream?: boolean | null;
/**
* Options for the streaming response. Only set this when `stream` is true
*/
stream_options?: CompletionCreateParams.StreamOptions | null;
/**
* What sampling temperature to use; determines the degree of randomness in the
* response. Between 0 and 2: higher values like 0.8 will make the output more
* random, while lower values like 0.2 will make it more focused and deterministic.
* It is recommended to alter this, top_p, or top_k, but not more than one of these.
*/
temperature?: number | null;
/**
* Controls which (if any) tool is called by the model. `none` means the model will
* not call any tool and instead generates a message. `auto` means the model can
* pick between generating a message or calling one or more tools. `required` means
* the model must call one or more tools. Specifying a particular tool via
* `{"type": "function", "function": {"name": "my_function"}}` forces the model to
* call that tool.
*/
tool_choice?: 'none' | 'auto' | 'required' | CompletionCreateParams.ToolChoiceObject | null;
/**
* A list of tools the model may call. Use this to provide a list of functions the
* model may generate JSON inputs for.
*/
tools?: Array<CompletionCreateParams.Tool> | null;
/**
* Limits the number of token choices. An alternative to sampling with
* temperature: the model considers only the K highest-probability tokens, so 10
* means only the 10 highest-probability tokens are considered. It is recommended
* to alter this, top_p, or temperature, but not more than one of these.
*/
top_k?: number | null;
/**
* This is not yet supported by our models. An integer between 0 and 20 specifying
* the number of most likely tokens to return at each token position, each with an
* associated log probability. `logprobs` must be set to `true` if this parameter
* is used.
*/
top_logprobs?: number | null;
/**
* Cumulative probability for token choices. An alternative to sampling with
* temperature, called nucleus sampling, where the model considers the results of
* the tokens with top_p probability mass. So 0.1 means only the tokens comprising
* the top 10% probability mass are considered. It is recommended to alter this,
* top_k, or temperature, but not more than one of these.
*/
top_p?: number | null;
[k: string]: unknown;
}
export namespace CompletionCreateParams {
export interface SystemMessage {
/**
* The contents of the system message.
*/
content: string | Array<SystemMessage.TextContentPartArray> | null;
/**
* The role of the message's author, in this case `system`.
*/
role: 'system';
[k: string]: unknown;
}
export namespace SystemMessage {
export interface TextContentPartArray {
/**
* string content of the message
*/
text: string;
/**
* type of content to send. in this case `text`.
*/
type: 'text';
[k: string]: unknown;
}
}
export interface UserMessage {
/**
* The contents of the user message.
*/
content:
| string
| Array<UserMessage.TextContent | UserMessage.ImageContent | UserMessage.AudioContent>
| null;
/**
* The role of the message's author, in this case `user`.
*/
role: 'user';
[k: string]: unknown;
}
export namespace UserMessage {
export interface TextContent {
/**
* string content of the message
*/
text: string;
/**
* type of content to send. in this case `text`.
*/
type: 'text';
[k: string]: unknown;
}
export interface ImageContent {
image_url: ImageContent.ImageURL;
/**
* type of content to send. in this case `image_url`.
*/
type: 'image_url';
[k: string]: unknown;
}
export namespace ImageContent {
export interface ImageURL {
/**
* Either a URL of the image or the base64-encoded image data. Currently only
* base64-encoded images are supported.
*/
url?: string;
}
}
export interface AudioContent {
audio_content: AudioContent.AudioContent;
/**
* type of content to send. in this case `audio_content`.
*/
type: 'audio_content';
[k: string]: unknown;
}
export namespace AudioContent {
export interface AudioContent {
/**
* the base64-encoded audio data.
*/
content?: string;
}
}
}
export interface AssistantMessage {
/**
* The contents of the assistant message.
*/
content: string | Array<AssistantMessage.TextContentPartArray> | null;
/**
* The role of the message's author, in this case `assistant`.
*/
role: 'assistant';
/**
* Channel (returned by reasoning models like gpt-oss)
*/
channel?: string | null;
/**
* Reasoning (returned by reasoning models like gpt-oss)
*/
reasoning?: string | null;
/**
* The tool calls generated by the model.
*/
tool_calls?: Array<AssistantMessage.ToolCall> | null;
[k: string]: unknown;
}
export namespace AssistantMessage {
export interface TextContentPartArray {
/**
* string content of the message
*/
text: string;
/**
* type of content to send. in this case `text`.
*/
type: 'text';
[k: string]: unknown;
}
export interface ToolCall {
/**
* ID of the tool call.
*/
id: string;
/**
* The tool that the model called.
*/
function: ToolCall.Function;
/**
* type of the tool call. only `function` is supported.
*/
type: 'function';
/**
* index of the tool call chunk; only used when streaming
*/
index?: number | null;
[k: string]: unknown;
}
export namespace ToolCall {
/**
* The tool that the model called.
*/
export interface Function {
/**
* The arguments to call the function with, as generated by the model in JSON
* format. Note that the model does not always generate valid JSON, and may
* hallucinate parameters not defined by your function schema. Validate the
* arguments in your code before calling your function.
*/
arguments: string;
/**
* The name of the function to call.
*/
name: string;
[k: string]: unknown;
}
}
}
export interface ToolMessage {
/**
* The contents of the tool message.
*/
content: string | Array<ToolMessage.TextContentPartArray>;
/**
* The role of the message's author, in this case `tool`.
*/
role: 'tool';
[k: string]: unknown;
}
export namespace ToolMessage {
export interface TextContentPartArray {
/**
* string content of the message
*/
text: string;
/**
* type of content to send. in this case `text`.
*/
type: 'text';
[k: string]: unknown;
}
}
/**
* A dictionary of additional keyword arguments to pass into the chat template. Use
* this to provide extra context or parameters that the model's chat template can
* process. Keys must be strings; values may be any valid JSON type.
*/
export interface ChatTemplateKwargs {
/**
* Enables the model's internal reasoning or "thinking" mode, if supported by the
* chat template (DeepSeek models).
*/
enable_thinking?: boolean;
[k: string]: unknown;
}
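/*
* Passing template kwargs: a minimal sketch (whether `enable_thinking` has any
* effect depends on the model's chat template; the model name is illustrative).
*
* ```ts
* const completion = await client.chat.completions.create({
*   messages: [{ content: 'Prove that 17 is prime.', role: 'user' }],
*   model: 'DeepSeek-V3.1',
*   chat_template_kwargs: { enable_thinking: true },
* });
* ```
*/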
/**
* Specifies that the model should produce output as plain text.
*/
export interface ResponseFormatText {
type: 'text';
[k: string]: unknown;
}
/**
* Specifies that the model should produce output as a raw JSON object.
*/
export interface ResponseFormatJsonObject {
type: 'json_object';
[k: string]: unknown;
}
/**
* Specifies that the model should produce output conforming to a given JSON
* schema.
*/
export interface ResponseFormatJsonSchema {
/**
* A JSON Schema definition for the model's structured output. Follows standard JSON
* Schema syntax.
*/
json_schema: ResponseFormatJsonSchema.JsonSchema;
type: 'json_schema';
[k: string]: unknown;
}
export namespace ResponseFormatJsonSchema {
/**
* A JSON Schema definition for the model's structured output. Follows standard JSON
* Schema syntax.
*/
export interface JsonSchema {
/**
* name of the object schema
*/
name: string;
/**
* description of the JSON schema
*/
description?: string | null;
/**
* Actual JSON Schema object
*/
schema?: unknown | null;
/**
* whether or not to perform strict validation of the schema
*/
strict?: boolean | null;
[k: string]: unknown;
}
}
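/*
* Requesting schema-constrained output: a minimal sketch (the schema, prompt,
* and model name are illustrative).
*
* ```ts
* const completion = await client.chat.completions.create({
*   messages: [{ content: 'Extract the city and country from: "Paris, France".', role: 'user' }],
*   model: 'Meta-Llama-3.3-70B-Instruct',
*   response_format: {
*     type: 'json_schema',
*     json_schema: {
*       name: 'location',
*       schema: {
*         type: 'object',
*         properties: { city: { type: 'string' }, country: { type: 'string' } },
*         required: ['city', 'country'],
*       },
*       strict: true,
*     },
*   },
* });
* ```
*/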
/**
* Options for the streaming response. Only set this when `stream` is true
*/
export interface StreamOptions {
/**
* Whether or not to include the usage metrics in a final chunk
*/
include_usage?: boolean | null;
[k: string]: unknown;
}
export interface ToolChoiceObject {
/**
* Specifies a tool the model should use. Use it to force the model to call that
* specific tool.
*/
function: ToolChoiceObject.Function;
/**
* The type of the tool. only `function` is supported.
*/
type: 'function';
[k: string]: unknown;
}
export namespace ToolChoiceObject {
/**
* Specifies a tool the model should use. Use it to force the model to call that
* specific tool.
*/
export interface Function {
/**
* the name of the tool expected to be used by the model
*/
name: string;
[k: string]: unknown;
}
}
export interface Tool {
function: Tool.Function;
/**
* The type of the tool. Currently, only `function` is supported.
*/
type: string;
[k: string]: unknown;
}
export namespace Tool {
export interface Function {
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
* underscores and dashes.
*/
name: string;
/**
* A description of what the function does, used by the model to choose when and
* how to call the function.
*/
description?: string | null;
/**
* The parameters the function accepts, described as a JSON Schema object. See the
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
* documentation about the format. Omitting `parameters` defines a function with an
* empty parameter list.
*/
parameters?: { [key: string]: unknown };
[k: string]: unknown;
}
}
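/*
* Declaring a tool and forcing the model to call it: a minimal sketch (the
* `get_weather` function is hypothetical; the model name is illustrative).
*
* ```ts
* const completion = await client.chat.completions.create({
*   messages: [{ content: "What's the weather in Lima?", role: 'user' }],
*   model: 'Meta-Llama-3.3-70B-Instruct',
*   tools: [{
*     type: 'function',
*     function: {
*       name: 'get_weather',
*       description: 'Get the current weather for a city.',
*       parameters: {
*         type: 'object',
*         properties: { city: { type: 'string' } },
*         required: ['city'],
*       },
*     },
*   }],
*   tool_choice: { type: 'function', function: { name: 'get_weather' } },
* });
* ```
*/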
export type CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming;
export type CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming;
}
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase {
/**
* If set, partial message deltas will be sent. Tokens will be sent as data-only
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
* as they become available, with the stream terminated by a `data: [DONE]`
* message.
*/
stream?: false | null;
[k: string]: unknown;
}
export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase {
/**
* If set, partial message deltas will be sent. Tokens will be sent as data-only
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
* as they become available, with the stream terminated by a `data: [DONE]`
* message.
*/
stream: true;
[k: string]: unknown;
}
export declare namespace Completions {
export {
type ChatCompletionResponse as ChatCompletionResponse,
type ChatCompletionStreamResponse as ChatCompletionStreamResponse,
type GeneralError as GeneralError,
type ModelOutputError as ModelOutputError,
type CompletionCreateResponse as CompletionCreateResponse,
type CompletionCreateParams as CompletionCreateParams,
type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,
type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,
};
}