@google-ai/generativelanguage

import type * as gax from 'google-gax';
import type { Callback, CallOptions, Descriptors, ClientOptions } from 'google-gax';
import * as protos from '../../protos/protos';
/**
 * API for using Large Models that generate multimodal content and have
 * additional capabilities beyond text generation.
 * @class
 * @memberof v1beta
 */
export declare class GenerativeServiceClient {
  private _terminated;
  private _opts;
  private _providedCustomServicePath;
  private _gaxModule;
  private _gaxGrpc;
  private _protos;
  private _defaults;
  private _universeDomain;
  private _servicePath;
  private _log;
  auth: gax.GoogleAuth;
  descriptors: Descriptors;
  warn: (code: string, message: string, warnType?: string) => void;
  innerApiCalls: {
    [name: string]: Function;
  };
  pathTemplates: {
    [name: string]: gax.PathTemplate;
  };
  generativeServiceStub?: Promise<{
    [name: string]: Function;
  }>;
  /**
   * Construct an instance of GenerativeServiceClient.
   *
   * @param {object} [options] - The configuration object. The options accepted by the constructor are described in detail in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). The common options are:
   * @param {object} [options.credentials] - Credentials object.
   * @param {string} [options.credentials.client_email]
   * @param {string} [options.credentials.private_key]
   * @param {string} [options.email] - Account email address. Required when using a .pem or .p12 keyFilename.
   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or .p12 key downloaded from the Google Developers Console. If you provide a path to a JSON file, the projectId option below is not necessary. NOTE: .pem and .p12 require you to specify options.email as well.
   * @param {number} [options.port] - The port on which to connect to the remote host.
   * @param {string} [options.projectId] - The project ID from the Google Developer's Console, e.g. 'grape-spaceship-123'. We will also check the environment variable GCLOUD_PROJECT for your project ID. If your app is running in an environment which supports {@link https://cloud.google.com/docs/authentication/application-default-credentials Application Default Credentials}, your project ID will be detected automatically.
   * @param {string} [options.apiEndpoint] - The domain name of the API remote host.
   * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. Follows the structure of {@link gapicConfig}.
   * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode. For more information, please check the {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}.
   * @param {gax} [gaxInstance] - Loaded instance of `google-gax`. Useful if you need to avoid loading the default gRPC version and want to use the fallback HTTP implementation. Load only the fallback version and pass it to the constructor:
   * ```
   * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC
   * const client = new GenerativeServiceClient({fallback: true}, gax);
   * ```
   */
  constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback);
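  /**
   * A minimal construction sketch. The default below assumes Application Default
   * Credentials are available; the API-key variant assumes google-auth-library's
   * `fromAPIKey` helper and the gax `authClient` option, and the key variable is a
   * placeholder rather than a value defined in this file.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const {GoogleAuth} = require('google-auth-library');
   *
   * // Default: picks up Application Default Credentials and the public endpoint.
   * const client = new GenerativeServiceClient();
   *
   * // API-key variant (assumed pattern): wrap the key in an auth client.
   * const keyedClient = new GenerativeServiceClient({
   *   authClient: new GoogleAuth().fromAPIKey(process.env.GOOGLE_API_KEY ?? ''),
   * });
   * ```
   */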
  /**
   * Initialize the client.
   * Performs asynchronous operations (such as authentication) and prepares the client. This function will be called automatically when any class method is called for the first time, but if you need to initialize it before calling an actual method, feel free to call initialize() directly.
   *
   * You can await on this method if you want to make sure the client is initialized.
   *
   * @returns {Promise} A promise that resolves to an authenticated service stub.
   */
  initialize(): Promise<{
    [name: string]: Function;
  }>;
  /**
   * The DNS address for this API service.
   * @deprecated Use the apiEndpoint method of the client instance.
   * @returns {string} The DNS address for this service.
   */
  static get servicePath(): string;
  /**
   * The DNS address for this API service - same as servicePath.
   * @deprecated Use the apiEndpoint method of the client instance.
   * @returns {string} The DNS address for this service.
   */
  static get apiEndpoint(): string;
  /**
   * The DNS address for this API service.
   * @returns {string} The DNS address for this service.
   */
  get apiEndpoint(): string;
  get universeDomain(): string;
  /**
   * The port for this API service.
   * @returns {number} The default port for this service.
   */
  static get port(): number;
  /**
   * The scopes needed to make gRPC calls for every method defined in this service.
   * @returns {string[]} List of default scopes.
   */
  static get scopes(): never[];
  getProjectId(): Promise<string>;
  getProjectId(callback: Callback<string, undefined, undefined>): void;
  /**
   * Generates a model response given an input `GenerateContentRequest`. Refer to the [text generation guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed usage information. Input capabilities differ between models, including tuned models. Refer to the [model guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details.
   *
   * @param {Object} request
   *   The request object that will be sent.
   * @param {string} request.model
   *   Required. The name of the `Model` to use for generating the completion. Format: `models/{model}`.
   * @param {google.ai.generativelanguage.v1beta.Content} [request.systemInstruction]
   *   Optional. Developer set [system instruction(s)](https://ai.google.dev/gemini-api/docs/system-instructions). Currently, text only.
   * @param {number[]} request.contents
   *   Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries like [chat](https://ai.google.dev/gemini-api/docs/text-generation#chat), this is a repeated field that contains the conversation history and the latest request.
   * @param {number[]} [request.tools]
   *   Optional. A list of `Tools` the `Model` may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the `Model`. Supported `Tool`s are `Function` and `code_execution`. Refer to the [Function calling](https://ai.google.dev/gemini-api/docs/function-calling) and the [Code execution](https://ai.google.dev/gemini-api/docs/code-execution) guides to learn more.
   * @param {google.ai.generativelanguage.v1beta.ToolConfig} [request.toolConfig]
   *   Optional. Tool configuration for any `Tool` specified in the request. Refer to the [Function calling guide](https://ai.google.dev/gemini-api/docs/function-calling#function_calling_mode) for a usage example.
   * @param {number[]} [request.safetySettings]
   *   Optional. A list of unique `SafetySetting` instances for blocking unsafe content. This will be enforced on the `GenerateContentRequest.contents` and `GenerateContentResponse.candidates`. There should not be more than one setting for each `SafetyCategory` type. The API will block any contents and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each `SafetyCategory` specified in the safety_settings. If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the default safety setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the [guide](https://ai.google.dev/gemini-api/docs/safety-settings) for detailed information on available safety settings. Also refer to the [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance) to learn how to incorporate safety considerations in your AI applications.
   * @param {google.ai.generativelanguage.v1beta.GenerationConfig} [request.generationConfig]
   *   Optional. Configuration options for model generation and outputs.
   * @param {string} [request.cachedContent]
   *   Optional. The name of the content [cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to serve the prediction. Format: `cachedContents/{cachedContent}`
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Promise} - The promise which resolves to an array. The first element of the array is an object representing {@link protos.google.ai.generativelanguage.v1beta.GenerateContentResponse|GenerateContentResponse}. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.generate_content.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_GenerateContent_async
   */
  generateContent(request?: protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest, options?: CallOptions): Promise<[
    protos.google.ai.generativelanguage.v1beta.IGenerateContentResponse,
    (protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest | undefined),
    {} | undefined
  ]>;
  generateContent(request: protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest, options: CallOptions, callback: Callback<protos.google.ai.generativelanguage.v1beta.IGenerateContentResponse, protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest | null | undefined, {} | null | undefined>): void;
  generateContent(request: protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest, callback: Callback<protos.google.ai.generativelanguage.v1beta.IGenerateContentResponse, protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest | null | undefined, {} | null | undefined>): void;
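  /**
   * A usage sketch for `generateContent`, assuming Application Default Credentials
   * and an illustrative model id ('models/gemini-1.5-flash'); both are assumptions,
   * not values defined in this file.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * async function run() {
   *   // The promise resolves to [response, request, rawResponse]; the response comes first.
   *   const [response] = await client.generateContent({
   *     model: 'models/gemini-1.5-flash',
   *     contents: [{role: 'user', parts: [{text: 'Write a haiku about the sea.'}]}],
   *     generationConfig: {temperature: 0.7},
   *   });
   *   console.log(JSON.stringify(response.candidates?.[0]?.content, null, 2));
   * }
   * run();
   * ```
   */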
  /**
   * Generates a grounded answer from the model given an input `GenerateAnswerRequest`.
   *
   * @param {Object} request
   *   The request object that will be sent.
   * @param {google.ai.generativelanguage.v1beta.GroundingPassages} request.inlinePassages
   *   Passages provided inline with the request.
   * @param {google.ai.generativelanguage.v1beta.SemanticRetrieverConfig} request.semanticRetriever
   *   Content retrieved from resources created via the Semantic Retriever API.
   * @param {string} request.model
   *   Required. The name of the `Model` to use for generating the grounded response. Format: `model=models/{model}`.
   * @param {number[]} request.contents
   *   Required. The content of the current conversation with the `Model`. For single-turn queries, this is a single question to answer. For multi-turn queries, this is a repeated field that contains conversation history and the last `Content` in the list containing the question. Note: `GenerateAnswer` only supports queries in English.
   * @param {google.ai.generativelanguage.v1beta.GenerateAnswerRequest.AnswerStyle} request.answerStyle
   *   Required. Style in which answers should be returned.
   * @param {number[]} [request.safetySettings]
   *   Optional. A list of unique `SafetySetting` instances for blocking unsafe content. This will be enforced on the `GenerateAnswerRequest.contents` and `GenerateAnswerResponse.candidate`. There should not be more than one setting for each `SafetyCategory` type. The API will block any contents and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each `SafetyCategory` specified in the safety_settings. If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the default safety setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT are supported. Refer to the [guide](https://ai.google.dev/gemini-api/docs/safety-settings) for detailed information on available safety settings. Also refer to the [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance) to learn how to incorporate safety considerations in your AI applications.
   * @param {number} [request.temperature]
   *   Optional. Controls the randomness of the output. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model. A low temperature (~0.2) is usually recommended for Attributed-Question-Answering use cases.
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Promise} - The promise which resolves to an array. The first element of the array is an object representing {@link protos.google.ai.generativelanguage.v1beta.GenerateAnswerResponse|GenerateAnswerResponse}. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.generate_answer.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_async
   */
  generateAnswer(request?: protos.google.ai.generativelanguage.v1beta.IGenerateAnswerRequest, options?: CallOptions): Promise<[
    protos.google.ai.generativelanguage.v1beta.IGenerateAnswerResponse,
    (protos.google.ai.generativelanguage.v1beta.IGenerateAnswerRequest | undefined),
    {} | undefined
  ]>;
  generateAnswer(request: protos.google.ai.generativelanguage.v1beta.IGenerateAnswerRequest, options: CallOptions, callback: Callback<protos.google.ai.generativelanguage.v1beta.IGenerateAnswerResponse, protos.google.ai.generativelanguage.v1beta.IGenerateAnswerRequest | null | undefined, {} | null | undefined>): void;
  generateAnswer(request: protos.google.ai.generativelanguage.v1beta.IGenerateAnswerRequest, callback: Callback<protos.google.ai.generativelanguage.v1beta.IGenerateAnswerResponse, protos.google.ai.generativelanguage.v1beta.IGenerateAnswerRequest | null | undefined, {} | null | undefined>): void;
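  /**
   * A sketch of a grounded `generateAnswer` call over inline passages. The model id
   * 'models/aqa', the ABSTRACTIVE answer style, and the passage shape are assumptions
   * for illustration; check the GenerateAnswerRequest proto for the exact fields.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * async function run() {
   *   const [response] = await client.generateAnswer({
   *     model: 'models/aqa',
   *     contents: [{role: 'user', parts: [{text: 'Which planet is largest?'}]}],
   *     answerStyle: 'ABSTRACTIVE',
   *     inlinePassages: {
   *       passages: [
   *         {id: 'p1', content: {parts: [{text: 'Jupiter is the largest planet in the solar system.'}]}},
   *       ],
   *     },
   *   });
   *   console.log(response.answer?.content, response.answerableProbability);
   * }
   * run();
   * ```
   */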
  /**
   * Generates a text embedding vector from the input `Content` using the specified [Gemini Embedding model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding).
   *
   * @param {Object} request
   *   The request object that will be sent.
   * @param {string} request.model
   *   Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`
   * @param {google.ai.generativelanguage.v1beta.Content} request.content
   *   Required. The content to embed. Only the `parts.text` fields will be counted.
   * @param {google.ai.generativelanguage.v1beta.TaskType} [request.taskType]
   *   Optional. Task type for which the embeddings will be used. Not supported on earlier models (`models/embedding-001`).
   * @param {string} [request.title]
   *   Optional. A title for the text. Only applicable when TaskType is `RETRIEVAL_DOCUMENT`. Note: Specifying a `title` for `RETRIEVAL_DOCUMENT` provides better quality embeddings for retrieval.
   * @param {number} [request.outputDimensionality]
   *   Optional. Reduced dimension for the output embedding. If set, excessive values in the output embedding are truncated from the end. Supported by newer models since 2024 only. You cannot set this value if using the earlier model (`models/embedding-001`).
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Promise} - The promise which resolves to an array. The first element of the array is an object representing {@link protos.google.ai.generativelanguage.v1beta.EmbedContentResponse|EmbedContentResponse}. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.embed_content.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_EmbedContent_async
   */
  embedContent(request?: protos.google.ai.generativelanguage.v1beta.IEmbedContentRequest, options?: CallOptions): Promise<[
    protos.google.ai.generativelanguage.v1beta.IEmbedContentResponse,
    (protos.google.ai.generativelanguage.v1beta.IEmbedContentRequest | undefined),
    {} | undefined
  ]>;
  embedContent(request: protos.google.ai.generativelanguage.v1beta.IEmbedContentRequest, options: CallOptions, callback: Callback<protos.google.ai.generativelanguage.v1beta.IEmbedContentResponse, protos.google.ai.generativelanguage.v1beta.IEmbedContentRequest | null | undefined, {} | null | undefined>): void;
  embedContent(request: protos.google.ai.generativelanguage.v1beta.IEmbedContentRequest, callback: Callback<protos.google.ai.generativelanguage.v1beta.IEmbedContentResponse, protos.google.ai.generativelanguage.v1beta.IEmbedContentRequest | null | undefined, {} | null | undefined>): void;
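  /**
   * A sketch of `embedContent`, assuming the illustrative embedding model
   * 'models/text-embedding-004'; the model name and task type are placeholders.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * async function run() {
   *   const [response] = await client.embedContent({
   *     model: 'models/text-embedding-004',
   *     content: {parts: [{text: 'The quick brown fox jumps over the lazy dog.'}]},
   *     taskType: 'RETRIEVAL_DOCUMENT',
   *     title: 'Pangram example',
   *   });
   *   console.log(response.embedding?.values?.length);
   * }
   * run();
   * ```
   */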
  /**
   * Generates multiple embedding vectors from the input `Content` which consists of a batch of strings represented as `EmbedContentRequest` objects.
   *
   * @param {Object} request
   *   The request object that will be sent.
   * @param {string} request.model
   *   Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`
   * @param {number[]} request.requests
   *   Required. Embed requests for the batch. The model in each of these requests must match the model specified in `BatchEmbedContentsRequest.model`.
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Promise} - The promise which resolves to an array. The first element of the array is an object representing {@link protos.google.ai.generativelanguage.v1beta.BatchEmbedContentsResponse|BatchEmbedContentsResponse}. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.batch_embed_contents.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_async
   */
  batchEmbedContents(request?: protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsRequest, options?: CallOptions): Promise<[
    protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsResponse,
    (protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsRequest | undefined),
    {} | undefined
  ]>;
  batchEmbedContents(request: protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsRequest, options: CallOptions, callback: Callback<protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsResponse, protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsRequest | null | undefined, {} | null | undefined>): void;
  batchEmbedContents(request: protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsRequest, callback: Callback<protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsResponse, protos.google.ai.generativelanguage.v1beta.IBatchEmbedContentsRequest | null | undefined, {} | null | undefined>): void;
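  /**
   * A sketch of `batchEmbedContents`; note each inner request repeats the same model
   * name as the top-level request. The model id is again a placeholder.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * async function run() {
   *   const model = 'models/text-embedding-004';
   *   const texts = ['first passage', 'second passage'];
   *   const [response] = await client.batchEmbedContents({
   *     model,
   *     requests: texts.map(text => ({model, content: {parts: [{text}]}})),
   *   });
   *   console.log(response.embeddings?.length); // one embedding per input request
   * }
   * run();
   * ```
   */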
  /**
   * Runs a model's tokenizer on input `Content` and returns the token count. Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens) to learn more about tokens.
   *
   * @param {Object} request
   *   The request object that will be sent.
   * @param {string} request.model
   *   Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`
   * @param {number[]} [request.contents]
   *   Optional. The input given to the model as a prompt. This field is ignored when `generate_content_request` is set.
   * @param {google.ai.generativelanguage.v1beta.GenerateContentRequest} [request.generateContentRequest]
   *   Optional. The overall input given to the `Model`. This includes the prompt as well as other model steering information like [system instructions](https://ai.google.dev/gemini-api/docs/system-instructions), and/or function declarations for [function calling](https://ai.google.dev/gemini-api/docs/function-calling). `Model`s/`Content`s and `generate_content_request`s are mutually exclusive. You can either send `Model` + `Content`s or a `generate_content_request`, but never both.
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Promise} - The promise which resolves to an array. The first element of the array is an object representing {@link protos.google.ai.generativelanguage.v1beta.CountTokensResponse|CountTokensResponse}. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.count_tokens.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_CountTokens_async
   */
  countTokens(request?: protos.google.ai.generativelanguage.v1beta.ICountTokensRequest, options?: CallOptions): Promise<[
    protos.google.ai.generativelanguage.v1beta.ICountTokensResponse,
    (protos.google.ai.generativelanguage.v1beta.ICountTokensRequest | undefined),
    {} | undefined
  ]>;
  countTokens(request: protos.google.ai.generativelanguage.v1beta.ICountTokensRequest, options: CallOptions, callback: Callback<protos.google.ai.generativelanguage.v1beta.ICountTokensResponse, protos.google.ai.generativelanguage.v1beta.ICountTokensRequest | null | undefined, {} | null | undefined>): void;
  countTokens(request: protos.google.ai.generativelanguage.v1beta.ICountTokensRequest, callback: Callback<protos.google.ai.generativelanguage.v1beta.ICountTokensResponse, protos.google.ai.generativelanguage.v1beta.ICountTokensRequest | null | undefined, {} | null | undefined>): void;
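  /**
   * A sketch of `countTokens` for prompt budgeting, assuming the same placeholder
   * model id used above.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * async function run() {
   *   const [response] = await client.countTokens({
   *     model: 'models/gemini-1.5-flash',
   *     contents: [{role: 'user', parts: [{text: 'How many tokens is this prompt?'}]}],
   *   });
   *   console.log(response.totalTokens);
   * }
   * run();
   * ```
   */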
  /**
   * Generates a [streamed response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream) from the model given an input `GenerateContentRequest`.
   *
   * @param {Object} request
   *   The request object that will be sent.
   * @param {string} request.model
   *   Required. The name of the `Model` to use for generating the completion. Format: `models/{model}`.
   * @param {google.ai.generativelanguage.v1beta.Content} [request.systemInstruction]
   *   Optional. Developer set [system instruction(s)](https://ai.google.dev/gemini-api/docs/system-instructions). Currently, text only.
   * @param {number[]} request.contents
   *   Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries like [chat](https://ai.google.dev/gemini-api/docs/text-generation#chat), this is a repeated field that contains the conversation history and the latest request.
   * @param {number[]} [request.tools]
   *   Optional. A list of `Tools` the `Model` may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the `Model`. Supported `Tool`s are `Function` and `code_execution`. Refer to the [Function calling](https://ai.google.dev/gemini-api/docs/function-calling) and the [Code execution](https://ai.google.dev/gemini-api/docs/code-execution) guides to learn more.
   * @param {google.ai.generativelanguage.v1beta.ToolConfig} [request.toolConfig]
   *   Optional. Tool configuration for any `Tool` specified in the request. Refer to the [Function calling guide](https://ai.google.dev/gemini-api/docs/function-calling#function_calling_mode) for a usage example.
   * @param {number[]} [request.safetySettings]
   *   Optional. A list of unique `SafetySetting` instances for blocking unsafe content. This will be enforced on the `GenerateContentRequest.contents` and `GenerateContentResponse.candidates`. There should not be more than one setting for each `SafetyCategory` type. The API will block any contents and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each `SafetyCategory` specified in the safety_settings. If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the default safety setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the [guide](https://ai.google.dev/gemini-api/docs/safety-settings) for detailed information on available safety settings. Also refer to the [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance) to learn how to incorporate safety considerations in your AI applications.
   * @param {google.ai.generativelanguage.v1beta.GenerationConfig} [request.generationConfig]
   *   Optional. Configuration options for model generation and outputs.
   * @param {string} [request.cachedContent]
   *   Optional. The name of the content [cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to serve the prediction. Format: `cachedContents/{cachedContent}`
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Stream} An object stream which emits {@link protos.google.ai.generativelanguage.v1beta.GenerateContentResponse|GenerateContentResponse} on 'data' event. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#server-streaming | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.stream_generate_content.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_async
   */
  streamGenerateContent(request?: protos.google.ai.generativelanguage.v1beta.IGenerateContentRequest, options?: CallOptions): gax.CancellableStream;
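  /**
   * A sketch of server streaming with `streamGenerateContent`: the returned stream
   * emits partial GenerateContentResponse messages on 'data'. The model id is a
   * placeholder.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * const stream = client.streamGenerateContent({
   *   model: 'models/gemini-1.5-flash',
   *   contents: [{role: 'user', parts: [{text: 'Tell me a short story.'}]}],
   * });
   * stream.on('data', (chunk) => {
   *   // Each chunk carries the next slice of candidate content as it is produced.
   *   process.stdout.write(chunk.candidates?.[0]?.content?.parts?.[0]?.text ?? '');
   * });
   * stream.on('error', console.error);
   * stream.on('end', () => console.log('\n[stream complete]'));
   * ```
   */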
  /**
   * Low-latency bidirectional streaming API that supports audio and video streaming inputs and can produce multimodal output streams (audio and text).
   *
   * @param {object} [options]
   *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
   * @returns {Stream} An object stream which is both readable and writable. It accepts objects representing {@link protos.google.ai.generativelanguage.v1beta.BidiGenerateContentClientMessage|BidiGenerateContentClientMessage} for the write() method, and will emit objects representing {@link protos.google.ai.generativelanguage.v1beta.BidiGenerateContentServerMessage|BidiGenerateContentServerMessage} on 'data' event asynchronously. Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#bi-directional-streaming | documentation } for more details and examples.
   * @example <caption>include:samples/generated/v1beta/generative_service.bidi_generate_content.js</caption>
   * region_tag:generativelanguage_v1beta_generated_GenerativeService_BidiGenerateContent_async
   */
  bidiGenerateContent(options?: CallOptions): gax.CancellableStream;
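  /**
   * A heavily hedged sketch of the bidirectional stream: the duplex stream accepts
   * BidiGenerateContentClientMessage objects via write() and emits
   * BidiGenerateContentServerMessage objects on 'data'. The `setup`/`clientContent`
   * message shapes and the model id below are assumptions based on the proto names,
   * not taken from this file; consult the BidiGenerateContent messages before use.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * const stream = client.bidiGenerateContent();
   * stream.on('data', (message) => console.log(JSON.stringify(message)));
   * stream.on('error', console.error);
   * // Assumed handshake: a setup message first, then a client-content turn.
   * stream.write({setup: {model: 'models/gemini-2.0-flash-exp'}});
   * stream.write({
   *   clientContent: {
   *     turns: [{role: 'user', parts: [{text: 'Hello!'}]}],
   *     turnComplete: true,
   *   },
   * });
   * ```
   */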
  /**
   * Return a fully-qualified cachedContent resource name string.
   * @param {string} id
   * @returns {string} Resource name string.
   */
  cachedContentPath(id: string): string;
  /**
   * Parse the id from CachedContent resource.
   * @param {string} cachedContentName A fully-qualified path representing CachedContent resource.
   * @returns {string} A string representing the id.
   */
  matchIdFromCachedContentName(cachedContentName: string): string | number;
  /**
   * Return a fully-qualified chunk resource name string.
   * @param {string} corpus
   * @param {string} document
   * @param {string} chunk
   * @returns {string} Resource name string.
   */
  chunkPath(corpus: string, document: string, chunk: string): string;
  /**
   * Parse the corpus from Chunk resource.
   * @param {string} chunkName A fully-qualified path representing Chunk resource.
   * @returns {string} A string representing the corpus.
   */
  matchCorpusFromChunkName(chunkName: string): string | number;
  /**
   * Parse the document from Chunk resource.
   * @param {string} chunkName A fully-qualified path representing Chunk resource.
   * @returns {string} A string representing the document.
   */
  matchDocumentFromChunkName(chunkName: string): string | number;
  /**
   * Parse the chunk from Chunk resource.
   * @param {string} chunkName A fully-qualified path representing Chunk resource.
   * @returns {string} A string representing the chunk.
   */
  matchChunkFromChunkName(chunkName: string): string | number;
  /**
   * Return a fully-qualified corpus resource name string.
   * @param {string} corpus
   * @returns {string} Resource name string.
   */
  corpusPath(corpus: string): string;
  /**
   * Parse the corpus from Corpus resource.
   * @param {string} corpusName A fully-qualified path representing Corpus resource.
   * @returns {string} A string representing the corpus.
   */
  matchCorpusFromCorpusName(corpusName: string): string | number;
  /**
   * Return a fully-qualified corpusPermission resource name string.
   * @param {string} corpus
   * @param {string} permission
   * @returns {string} Resource name string.
   */
  corpusPermissionPath(corpus: string, permission: string): string;
  /**
   * Parse the corpus from CorpusPermission resource.
   * @param {string} corpusPermissionName A fully-qualified path representing corpus_permission resource.
   * @returns {string} A string representing the corpus.
   */
  matchCorpusFromCorpusPermissionName(corpusPermissionName: string): string | number;
  /**
   * Parse the permission from CorpusPermission resource.
   * @param {string} corpusPermissionName A fully-qualified path representing corpus_permission resource.
   * @returns {string} A string representing the permission.
   */
  matchPermissionFromCorpusPermissionName(corpusPermissionName: string): string | number;
  /**
   * Return a fully-qualified document resource name string.
   * @param {string} corpus
   * @param {string} document
   * @returns {string} Resource name string.
   */
  documentPath(corpus: string, document: string): string;
  /**
   * Parse the corpus from Document resource.
   * @param {string} documentName A fully-qualified path representing Document resource.
   * @returns {string} A string representing the corpus.
   */
  matchCorpusFromDocumentName(documentName: string): string | number;
  /**
   * Parse the document from Document resource.
   * @param {string} documentName A fully-qualified path representing Document resource.
   * @returns {string} A string representing the document.
   */
  matchDocumentFromDocumentName(documentName: string): string | number;
  /**
   * Return a fully-qualified file resource name string.
   * @param {string} file
   * @returns {string} Resource name string.
   */
  filePath(file: string): string;
  /**
   * Parse the file from File resource.
   * @param {string} fileName A fully-qualified path representing File resource.
   * @returns {string} A string representing the file.
   */
  matchFileFromFileName(fileName: string): string | number;
  /**
   * Return a fully-qualified model resource name string.
   * @param {string} model
   * @returns {string} Resource name string.
   */
  modelPath(model: string): string;
  /**
   * Parse the model from Model resource.
   * @param {string} modelName A fully-qualified path representing Model resource.
   * @returns {string} A string representing the model.
   */
  matchModelFromModelName(modelName: string): string | number;
  /**
   * Return a fully-qualified tunedModel resource name string.
   * @param {string} tuned_model
   * @returns {string} Resource name string.
   */
  tunedModelPath(tunedModel: string): string;
  /**
   * Parse the tuned_model from TunedModel resource.
   * @param {string} tunedModelName A fully-qualified path representing TunedModel resource.
   * @returns {string} A string representing the tuned_model.
   */
  matchTunedModelFromTunedModelName(tunedModelName: string): string | number;
  /**
   * Return a fully-qualified tunedModelPermission resource name string.
   * @param {string} tuned_model
   * @param {string} permission
   * @returns {string} Resource name string.
   */
  tunedModelPermissionPath(tunedModel: string, permission: string): string;
  /**
   * Parse the tuned_model from TunedModelPermission resource.
   * @param {string} tunedModelPermissionName A fully-qualified path representing tuned_model_permission resource.
   * @returns {string} A string representing the tuned_model.
   */
  matchTunedModelFromTunedModelPermissionName(tunedModelPermissionName: string): string | number;
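  /**
   * A short sketch of the resource-name helpers: the *Path methods build
   * fully-qualified names and the match* methods parse them back out. The ids used
   * here are placeholders, and the commented output formats are assumptions.
   * ```
   * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
   * const client = new GenerativeServiceClient();
   *
   * const modelName = client.modelPath('gemini-1.5-flash');     // e.g. 'models/gemini-1.5-flash'
   * const modelId = client.matchModelFromModelName(modelName);  // e.g. 'gemini-1.5-flash'
   * const cached = client.cachedContentPath('my-cache-id');     // e.g. 'cachedContents/my-cache-id'
   * console.log(modelName, modelId, cached);
   * ```
   */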
  /**
   * Parse the permission from TunedModelPermission resource.
   * @param {string} tunedModelPermissionName A fully-qualified path representing tuned_model_permission resource.
   * @returns {string} A string representing the permission.
   */
  matchPermissionFromTunedModelPermissionName(tunedModelPermissionName: string): string | number;
  /**
   * Terminate the gRPC channel and close the client.
   *
   * The client will no longer be usable and all future behavior is undefined.
   * @returns {Promise} A promise that resolves when the client is closed.
   */
  close(): Promise<void>;
}
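/**
 * A lifecycle sketch: initialization is lazy, but `initialize()` can be awaited
 * explicitly, and `close()` should be called when the client is no longer needed.
 * The model id is a placeholder.
 * ```
 * const {GenerativeServiceClient} = require('@google-ai/generativelanguage').v1beta;
 *
 * async function main() {
 *   const client = new GenerativeServiceClient();
 *   await client.initialize(); // optional; any RPC call triggers this automatically
 *   const [response] = await client.countTokens({
 *     model: 'models/gemini-1.5-flash',
 *     contents: [{role: 'user', parts: [{text: 'ping'}]}],
 *   });
 *   console.log(response.totalTokens);
 *   await client.close(); // terminate the underlying gRPC channel
 * }
 * main();
 * ```
 */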