sambanova
The official TypeScript library for the SambaNova API
286 lines • 10.1 kB
TypeScript
import { APIResource } from "../../core/resource.js";
import * as TranscriptionsAPI from "./transcriptions.js";
import { APIPromise } from "../../core/api-promise.js";
import { Stream } from "../../core/streaming.js";
import { type Uploadable } from "../../core/uploads.js";
import { RequestOptions } from "../../internal/request-options.js";
export declare class Transcriptions extends APIResource {
/**
* Transcribes audio into the input language.
*
* @example
* ```ts
* const transcription =
*   await client.audio.transcriptions.create({
*     file: fs.createReadStream('path/to/file'),
*     model: 'Whisper-Large-v3',
*   });
* ```
*/
create(body: TranscriptionCreateParamsNonStreaming, options?: RequestOptions): APIPromise<TranscriptionCreateResponse>;
create(body: TranscriptionCreateParamsStreaming, options?: RequestOptions): APIPromise<Stream<TranscriptionStreamResponse>>;
create(body: TranscriptionCreateParamsBase, options?: RequestOptions): APIPromise<Stream<TranscriptionStreamResponse> | TranscriptionCreateResponse>;
}
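/**
 * Illustrative only: a minimal sketch of a non-streaming call, assuming the package's
 * default export is the client class (a common convention for generated SDKs, not
 * confirmed by this file). `TranscriptionCreateResponse` is a union, so the result is
 * checked before reading `text`.
 *
 * ```ts
 * import SambaNova from 'sambanova'; // assumed default export
 * import fs from 'fs';
 *
 * const client = new SambaNova(); // hypothetical construction; API key assumed to come from the environment
 * const result = await client.audio.transcriptions.create({
 *   file: fs.createReadStream('path/to/file'),
 *   model: 'Whisper-Large-v3',
 * });
 * // Without `stream: true`, the non-streaming overload applies and `result` is a
 * // TranscriptionCreateResponse; check for `text` before using it.
 * if ('text' in result) {
 *   console.log(result.text);
 * }
 * ```
 */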
/**
* Transcription response JSON object
*/
export interface TranscriptionResponse {
/**
* audio file text transcription
*/
text: string;
[key: string]: unknown;
}
/**
* streamed chunk of a transcription response returned by the model
*/
export interface TranscriptionStreamResponse {
/**
* A unique identifier for the chat completion.
*/
id: string;
/**
* A list of transcription or translation choices.
*/
choices: Array<TranscriptionStreamResponse.Choice> | null;
/**
* The Unix timestamp (in seconds) of when the chat completion was created.
*/
created: number;
/**
* The model used for the chat completion.
*/
model: string;
/**
* The object type, always `chat.completion.chunk`.
*/
object: 'chat.completion.chunk';
/**
* Backend configuration that the model runs with.
*/
system_fingerprint: string;
/**
* Usage metrics for the completion, embeddings, transcription or translation
* request
*/
usage?: TranscriptionStreamResponse.Usage | null;
[key: string]: unknown;
}
export declare namespace TranscriptionStreamResponse {
/**
* Transcription or translation chunk choice generated by streamed model responses.
*/
interface Choice {
/**
* Transcription or translation response delta chunk generated by streamed model
* responses.
*/
delta: Choice.Delta;
/**
* The index of the choice in the list of choices
*/
index: number;
/**
* The reason the model stopped generating tokens
*/
finish_reason?: 'stop' | null;
/**
* Completion Log Probs object
*/
logprobs?: Choice.Logprobs | null;
[key: string]: unknown;
}
namespace Choice {
/**
* Transcription or translation response delta chunk generated by streamed model
* responses.
*/
interface Delta {
/**
* The content delta of the transcription or translation.
*/
content: string | null;
/**
* The role of the message's author
*/
role?: 'assistant' | null;
[key: string]: unknown;
}
/**
* Completion Log Probs object
*/
interface Logprobs {
content: Logprobs.Content;
[key: string]: unknown;
}
namespace Logprobs {
interface Content {
token: string;
logprob: number;
top_logprobs: Content.TopLogprobs;
bytes?: Array<number> | null;
[key: string]: unknown;
}
namespace Content {
interface TopLogprobs {
token: string;
logprob: number;
bytes?: Array<number> | null;
[key: string]: unknown;
}
}
}
}
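/**
 * Illustrative only: a sketch of consuming streamed chunks, assuming the same
 * hypothetical `client` as above and that `Stream` is async-iterable (as the
 * streaming return type suggests). Each chunk carries the new text in
 * `choices[0].delta.content`; `finish_reason` is `'stop'` on the final choice.
 *
 * ```ts
 * const stream = await client.audio.transcriptions.create({
 *   file: fs.createReadStream('path/to/file'),
 *   model: 'Whisper-Large-v3',
 *   stream: true,
 * });
 *
 * let transcript = '';
 * for await (const chunk of stream) {
 *   const choice = chunk.choices?.[0];
 *   if (choice?.delta.content) transcript += choice.delta.content;
 *   if (choice?.logprobs) {
 *     // per-token log-probability, when the server includes it
 *     console.log(choice.logprobs.content.token, choice.logprobs.content.logprob);
 *   }
 * }
 * console.log(transcript);
 * ```
 */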
/**
* Usage metrics for the completion, embeddings, transcription or translation
* request
*/
interface Usage {
/**
* acceptance rate
*/
acceptance_rate?: number;
/**
* number of tokens generated in completion
*/
completion_tokens?: number;
/**
* completion tokens per second after first token generation
*/
completion_tokens_after_first_per_sec?: number;
/**
* completion tokens per second after first token generation (first ten tokens)
*/
completion_tokens_after_first_per_sec_first_ten?: number;
/**
* completion tokens per second after first token generation
*/
completion_tokens_after_first_per_sec_graph?: number;
/**
* completion tokens per second
*/
completion_tokens_per_sec?: number;
/**
* The Unix timestamp (in seconds) of when the generation finished.
*/
end_time?: number;
/**
* whether or not this is the last response; always true for non-streaming responses
*/
is_last_response?: true;
/**
* number of tokens used in the prompt sent
*/
prompt_tokens?: number;
/**
* Extra tokens details
*/
prompt_tokens_details?: Usage.PromptTokensDetails;
/**
* The Unix timestamp (in seconds) of when the generation started.
*/
start_time?: number;
/**
* also known as TTFT; time (in seconds) taken to generate the first token
*/
time_to_first_token?: number;
/**
* total time (in seconds) taken to generate the full response
*/
total_latency?: number;
/**
* prompt tokens + completion tokens
*/
total_tokens?: number;
/**
* tokens per second including prompt and completion
*/
total_tokens_per_sec?: number;
[key: string]: unknown;
}
namespace Usage {
/**
* Extra tokens details
*/
interface PromptTokensDetails {
/**
* number of cached tokens
*/
cached_tokens?: number;
[key: string]: unknown;
}
}
}
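/**
 * Illustrative only: reading the optional usage block while continuing the stream from the
 * sketch above, assuming the request set `stream_options: { include_usage: true }` (see
 * StreamOptions below) so the last streamed event carries usage metadata. Every field is
 * optional.
 *
 * ```ts
 * for await (const chunk of stream) {
 *   if (chunk.usage) {
 *     const { prompt_tokens, completion_tokens, total_tokens, total_latency } = chunk.usage;
 *     console.log({ prompt_tokens, completion_tokens, total_tokens, total_latency });
 *   }
 * }
 * ```
 */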
/**
* Transcription response JSON object
*/
export type TranscriptionCreateResponse = TranscriptionResponse | TranscriptionStreamResponse;
export type TranscriptionCreateParams = TranscriptionCreateParamsNonStreaming | TranscriptionCreateParamsStreaming;
export interface TranscriptionCreateParamsBase {
/**
* The audio file object to transcribe or translate, in one of these formats: FLAC,
* MP3, MP4, MPEG, MPGA, M4A, Ogg, WAV, or WebM. File size limit is 25 MB.
*/
file: Uploadable;
/**
* The model ID to use See available
* [models](https://docs.sambanova.ai/cloud/docs/get-started/supported-models)
*/
model: (string & {}) | 'Whisper-Large-v3';
/**
* Optional language of the input audio. Supplying the input language in ISO-639-1
* (e.g. en) format will improve accuracy and latency.
*/
language?: 'en' | 'zh' | 'de' | 'es' | 'ru' | 'ko' | 'fr' | 'ja' | 'pt' | 'tr' | 'pl' | 'ca' | 'nl' | 'ar' | 'sv' | 'it' | 'id' | 'hi' | 'fi' | 'vi' | 'he' | 'uk' | 'el' | 'ms' | 'cs' | 'ro' | 'da' | 'hu' | 'ta' | 'no' | 'th' | 'ur' | 'hr' | 'bg' | 'lt' | 'la' | 'mi' | 'ml' | 'cy' | 'sk' | 'te' | 'fa' | 'lv' | 'bn' | 'sr' | 'az' | 'sl' | 'kn' | 'et' | 'mk' | 'br' | 'eu' | 'is' | 'hy' | 'ne' | 'mn' | 'bs' | 'kk' | 'sq' | 'sw' | 'gl' | 'mr' | 'pa' | 'si' | 'km' | 'sn' | 'yo' | 'so' | 'af' | 'oc' | 'ka' | 'be' | 'tg' | 'sd' | 'gu' | 'am' | 'yi' | 'lo' | 'uz' | 'fo' | 'ht' | 'ps' | 'tk' | 'nn' | 'mt' | 'sa' | 'lb' | 'my' | 'bo' | 'tl' | 'mg' | 'as' | 'tt' | 'haw' | 'ln' | 'ha' | 'ba' | 'jw' | 'su' | 'yue' | null;
/**
* Optional text prompt provided to influence transcription or translation style or
* vocabulary. Example: “Please transcribe carefully, including pauses and
* hesitations.”
*/
prompt?: string | null;
/**
* Output format: JSON or text.
*/
response_format?: 'json' | 'text';
/**
* Enables streaming responses.
*/
stream?: boolean;
/**
* Optional settings that apply when `stream` is true.
*/
stream_options?: TranscriptionCreateParams.StreamOptions | null;
[key: string]: unknown;
}
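/**
 * Illustrative only: a request body exercising the optional fields above, using the same
 * hypothetical `client`. `language` is an ISO-639-1 code and `prompt` is free-form guidance;
 * both may be omitted.
 *
 * ```ts
 * const transcription = await client.audio.transcriptions.create({
 *   file: fs.createReadStream('path/to/file'),
 *   model: 'Whisper-Large-v3',
 *   language: 'en', // ISO-639-1 code of the input audio
 *   prompt: 'Please transcribe carefully, including pauses and hesitations.',
 *   response_format: 'json', // 'json' or 'text'
 * });
 * ```
 */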
export declare namespace TranscriptionCreateParams {
/**
* Optional settings that apply when `stream` is true.
*/
interface StreamOptions {
/**
* If true and `stream` is enabled, optional usage metadata will be included in the
* last streamed response event.
*/
include_usage?: boolean | null;
}
type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming;
type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming;
}
export interface TranscriptionCreateParamsNonStreaming extends TranscriptionCreateParamsBase {
/**
* Enables streaming responses.
*/
stream?: false;
[key: string]: unknown;
}
export interface TranscriptionCreateParamsStreaming extends TranscriptionCreateParamsBase {
/**
* Enables streaming responses.
*/
stream: true;
[key: string]: unknown;
}
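/**
 * Illustrative only: the `stream: true` literal in the params is what selects the streaming
 * overload of `create`, and `stream_options.include_usage` requests usage metadata on the
 * final streamed event.
 *
 * ```ts
 * const stream = await client.audio.transcriptions.create({
 *   file: fs.createReadStream('path/to/file'),
 *   model: 'Whisper-Large-v3',
 *   stream: true, // matches TranscriptionCreateParamsStreaming
 *   stream_options: { include_usage: true },
 * });
 * // `stream` is a Stream<TranscriptionStreamResponse>
 * ```
 */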
export declare namespace Transcriptions {
export { type TranscriptionResponse as TranscriptionResponse, type TranscriptionStreamResponse as TranscriptionStreamResponse, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, };
}
//# sourceMappingURL=transcriptions.d.ts.map