// @genkit-ai/ai — Genkit AI framework generative AI APIs.
// Compiled CommonJS output (model-types.js); 265 lines, 13.3 kB, JavaScript.
"use strict";
// Cached Object intrinsics used by the CJS-interop helpers below.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Registers every entry of `all` on `target` as an enumerable lazy getter.
var __export = (target, all) => {
  for (var name in all) {
    __defProp(target, name, { enumerable: true, get: all[name] });
  }
};
// Copies own properties of `from` onto `to` as live getters, skipping
// `except` and any key `to` already owns; each copied key keeps the
// source property's enumerability.
var __copyProps = (to, from, except, desc) => {
  const copyable = from && (typeof from === "object" || typeof from === "function");
  if (copyable) {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, { get: () => from[key], enumerable: !desc || desc.enumerable });
    }
  }
  return to;
};
// Wraps an ESM-shaped namespace object as a CommonJS exports object,
// tagged with a non-enumerable `__esModule: true` for interop detection.
var __toCommonJS = (mod) => {
  const cjs = __defProp({}, "__esModule", { value: true });
  return __copyProps(cjs, mod);
};
// Accumulator onto which all of this module's named exports are registered
// as lazy getters (via `__export`); `__toCommonJS` then snapshots it into
// `module.exports` with an `__esModule` marker.
var model_types_exports = {};
// Each export is a thunk so registration can run before the `const` schema
// declarations below are evaluated; the getter resolves on first access.
__export(model_types_exports, {
  CandidateErrorSchema: () => CandidateErrorSchema,
  CandidateSchema: () => CandidateSchema,
  FinishReasonSchema: () => FinishReasonSchema,
  GenerateActionOptionsSchema: () => GenerateActionOptionsSchema,
  GenerateActionOutputConfig: () => GenerateActionOutputConfig,
  GenerateRequestSchema: () => GenerateRequestSchema,
  GenerateResponseChunkSchema: () => GenerateResponseChunkSchema,
  GenerateResponseSchema: () => GenerateResponseSchema,
  GenerationCommonConfigDescriptions: () => GenerationCommonConfigDescriptions,
  GenerationCommonConfigSchema: () => GenerationCommonConfigSchema,
  GenerationUsageSchema: () => GenerationUsageSchema,
  MessageSchema: () => MessageSchema,
  ModelInfoSchema: () => ModelInfoSchema,
  ModelRequestSchema: () => ModelRequestSchema,
  ModelResponseChunkSchema: () => ModelResponseChunkSchema,
  ModelResponseSchema: () => ModelResponseSchema,
  OutputConfigSchema: () => OutputConfigSchema,
  PartSchema: () => PartSchema,
  RoleSchema: () => RoleSchema,
  ToolDefinitionSchema: () => ToolDefinitionSchema
});
module.exports = __toCommonJS(model_types_exports);
var import_core = require("@genkit-ai/core");
var import_document = require("./document.js");
// Any single piece of message content: text, media, tool request/response,
// structured data, custom payloads, model reasoning, or resource references.
// NOTE: zod tries union members in order, so the member order is preserved.
const PartSchema = (() => {
  const d = import_document;
  return import_core.z.union([
    d.TextPartSchema,
    d.MediaPartSchema,
    d.ToolRequestPartSchema,
    d.ToolResponsePartSchema,
    d.DataPartSchema,
    d.CustomPartSchema,
    d.ReasoningPartSchema,
    d.ResourcePartSchema
  ]);
})();
// The author of a conversation message.
const RoleSchema = import_core.z.enum(["system", "user", "model", "tool"]);
// One conversation turn: a role plus its ordered content parts and
// optional free-form metadata.
const MessageSchema = (() => {
  const { z } = import_core;
  return z.object({
    role: RoleSchema,
    content: z.array(PartSchema),
    metadata: z.record(z.unknown()).optional()
  });
})();
// Static metadata a plugin declares about a model: accepted versions,
// display label, config schema, capability flags, and lifecycle stage.
const ModelInfoSchema = (() => {
  const { z } = import_core;
  return z.object({
    /** Acceptable names for this model (e.g. different versions). */
    versions: z.array(z.string()).optional(),
    /** Friendly label for this model (e.g. "Google AI - Gemini Pro") */
    label: z.string().optional(),
    /** Model Specific configuration. */
    configSchema: z.record(z.any()).optional(),
    /** Supported model capabilities. */
    supports: z.object({
      /** Model can process historical messages passed with a prompt. */
      multiturn: z.boolean().optional(),
      /** Model can process media as part of the prompt (multimodal input). */
      media: z.boolean().optional(),
      /** Model can perform tool calls. */
      tools: z.boolean().optional(),
      /** Model can accept messages with role "system". */
      systemRole: z.boolean().optional(),
      /** Model can output this type of data. */
      output: z.array(z.string()).optional(),
      /** Model supports output in these content types. */
      contentType: z.array(z.string()).optional(),
      /** Model can natively support document-based context grounding. */
      context: z.boolean().optional(),
      /** Model can natively support constrained generation. */
      constrained: z.enum(["none", "all", "no-tools"]).optional(),
      /** Model supports controlling tool choice, e.g. forced tool calling. */
      toolChoice: z.boolean().optional()
    }).optional(),
    /** At which stage of development this model is.
     * - `featured` models are recommended for general use.
     * - `stable` models are well-tested and reliable.
     * - `unstable` models are experimental and may change.
     * - `legacy` models are no longer recommended for new projects.
     * - `deprecated` models are deprecated by the provider and may be removed in future versions.
     */
    stage: z.enum(["featured", "stable", "unstable", "legacy", "deprecated"]).optional()
  });
})();
// A tool made available to the model: name, description, and JSON Schemas
// for its input/output, plus optional metadata.
const ToolDefinitionSchema = (() => {
  const { z } = import_core;
  // Fresh open record per field, matching the generated per-call instances.
  const jsonRecord = () => z.record(z.any());
  return z.object({
    name: z.string(),
    description: z.string(),
    inputSchema: jsonRecord().describe("Valid JSON Schema representing the input of the tool.").nullish(),
    outputSchema: jsonRecord().describe("Valid JSON Schema describing the output of the tool.").nullish(),
    metadata: jsonRecord().describe("additional metadata for this tool definition").optional()
  });
})();
// Human-readable descriptions shared by the common generation config fields.
const GenerationCommonConfigDescriptions = {
  temperature: "Controls the degree of randomness in token selection. A lower value is good for a more predictable response. A higher value leads to more diverse or unexpected results.",
  maxOutputTokens: "The maximum number of tokens to include in the response.",
  topK: "The maximum number of tokens to consider when sampling.",
  topP: "Decides how many possible words to consider. A higher value means that the model looks at more possible words, even the less likely ones, which makes the generated text more diverse."
};
// Config knobs common across model families. `.passthrough()` lets
// provider-specific extra keys flow through unvalidated.
const GenerationCommonConfigSchema = (() => {
  const { z } = import_core;
  const d = GenerationCommonConfigDescriptions;
  return z.object({
    version: z.string().describe(
      "A specific version of a model family, e.g. `gemini-2.0-flash` for the `googleai` family."
    ).optional(),
    temperature: z.number().describe(d.temperature).optional(),
    maxOutputTokens: z.number().describe(d.maxOutputTokens).optional(),
    topK: z.number().describe(d.topK).optional(),
    topP: z.number().describe(d.topP).optional(),
    stopSequences: z.array(z.string()).max(5).describe(
      "Set of character sequences (up to 5) that will stop output generation."
    ).optional()
  }).passthrough();
})();
// Desired output shape of a request (format, JSON schema, constrained
// decoding flag, and content type).
const OutputConfigSchema = (() => {
  const { z } = import_core;
  return z.object({
    format: z.string().optional(),
    schema: z.record(z.any()).optional(),
    constrained: z.boolean().optional(),
    contentType: z.string().optional()
  });
})();
// A fully-resolved request handed to a model implementation.
const ModelRequestSchema = (() => {
  const { z } = import_core;
  return z.object({
    messages: z.array(MessageSchema),
    config: z.any().optional(),
    tools: z.array(ToolDefinitionSchema).optional(),
    toolChoice: z.enum(["auto", "required", "none"]).optional(),
    output: OutputConfigSchema.optional(),
    docs: z.array(import_document.DocumentDataSchema).optional()
  });
})();
// ModelRequestSchema plus the legacy candidate count.
const GenerateRequestSchema = ModelRequestSchema.extend({
  /** @deprecated All responses now return a single candidate. This will always be `undefined`. */
  candidates: import_core.z.number().optional()
});
// Token/character/media usage accounting reported for a generation.
const GenerationUsageSchema = (() => {
  const { z } = import_core;
  // Fresh optional-number schema per field, matching the generated
  // per-field instances.
  const count = () => z.number().optional();
  return z.object({
    inputTokens: count(),
    outputTokens: count(),
    totalTokens: count(),
    inputCharacters: count(),
    outputCharacters: count(),
    inputImages: count(),
    outputImages: count(),
    inputVideos: count(),
    outputVideos: count(),
    inputAudioFiles: count(),
    outputAudioFiles: count(),
    custom: z.record(z.number()).optional(),
    thoughtsTokens: count(),
    cachedContentTokens: count()
  });
})();
// Why generation stopped.
const FinishReasonSchema = import_core.z.enum([
  "stop",
  "length",
  "blocked",
  "interrupted",
  "other",
  "unknown"
]);
// A single (legacy, multi-candidate era) generation candidate.
const CandidateSchema = (() => {
  const { z } = import_core;
  return z.object({
    index: z.number(),
    message: MessageSchema,
    usage: GenerationUsageSchema.optional(),
    finishReason: FinishReasonSchema,
    finishMessage: z.string().optional(),
    custom: z.unknown()
  });
})();
// Error produced in place of a candidate (e.g. blocked content).
const CandidateErrorSchema = (() => {
  const { z } = import_core;
  return z.object({
    index: z.number(),
    code: z.enum(["blocked", "other", "unknown"]),
    message: z.string().optional()
  });
})();
// A model's full (non-streaming) response.
const ModelResponseSchema = (() => {
  const { z } = import_core;
  return z.object({
    message: MessageSchema.optional(),
    finishReason: FinishReasonSchema,
    finishMessage: z.string().optional(),
    latencyMs: z.number().optional(),
    usage: GenerationUsageSchema.optional(),
    /** @deprecated use `raw` instead */
    custom: z.unknown(),
    raw: z.unknown(),
    request: GenerateRequestSchema.optional(),
    operation: import_core.OperationSchema.optional()
  });
})();
// ModelResponseSchema plus legacy multi-candidate fields; finishReason
// becomes optional because legacy responses carried it per candidate.
const GenerateResponseSchema = ModelResponseSchema.extend({
  /** @deprecated All responses now return a single candidate. Only the first candidate will be used if supplied. Return `message`, `finishReason`, and `finishMessage` instead. */
  candidates: import_core.z.array(CandidateSchema).optional(),
  finishReason: FinishReasonSchema.optional()
});
// One streamed chunk of a model response.
const ModelResponseChunkSchema = (() => {
  const { z } = import_core;
  return z.object({
    role: RoleSchema.optional(),
    /** index of the message this chunk belongs to. */
    index: z.number().optional(),
    /** The chunk of content to stream right now. */
    content: z.array(PartSchema),
    /** Model-specific extra information attached to this chunk. */
    custom: z.unknown().optional(),
    /** If true, the chunk includes all data from previous chunks. Otherwise, considered to be incremental. */
    aggregated: z.boolean().optional()
  });
})();
// Generate-level chunks share the model chunk shape exactly.
const GenerateResponseChunkSchema = ModelResponseChunkSchema;
// Output configuration accepted by the generate action (format,
// content type, formatter instructions, JSON schema, constrained flag).
const GenerateActionOutputConfig = (() => {
  const { z } = import_core;
  return z.object({
    format: z.string().optional(),
    contentType: z.string().optional(),
    instructions: z.union([z.boolean(), z.string()]).optional(),
    jsonSchema: z.any().optional(),
    constrained: z.boolean().optional()
  });
})();
// Fully-resolved options for one invocation of the generate action.
const GenerateActionOptionsSchema = (() => {
  const { z } = import_core;
  return z.object({
    /** A model name (e.g. `vertexai/gemini-1.0-pro`). */
    model: z.string(),
    /** Retrieved documents to be used as context for this generation. */
    docs: z.array(import_document.DocumentDataSchema).optional(),
    /** Conversation history for multi-turn prompting when supported by the underlying model. */
    messages: z.array(MessageSchema),
    /** List of registered tool names for this generation if supported by the underlying model. */
    tools: z.array(z.string()).optional(),
    /** Tool calling mode. `auto` lets the model decide whether to use tools, `required` forces the model to choose a tool, and `none` forces the model not to use any tools. Defaults to `auto`. */
    toolChoice: z.enum(["auto", "required", "none"]).optional(),
    /** Configuration for the generation request. */
    config: z.any().optional(),
    /** Configuration for the desired output of the request. Defaults to the model's default output if unspecified. */
    output: GenerateActionOutputConfig.optional(),
    /** Options for resuming an interrupted generation. */
    resume: z.object({
      respond: z.array(import_document.ToolResponsePartSchema).optional(),
      restart: z.array(import_document.ToolRequestPartSchema).optional(),
      metadata: z.record(z.any()).optional()
    }).optional(),
    /** When true, return tool calls for manual processing instead of automatically resolving them. */
    returnToolRequests: z.boolean().optional(),
    /** Maximum number of tool call iterations that can be performed in a single generate call (default 5). */
    maxTurns: z.number().optional(),
    /** Custom step name for this generate call to display in trace views. Defaults to "generate". */
    stepName: z.string().optional()
  });
})();
// Annotate the CommonJS export names for ESM import in node:
// NOTE: intentionally dead code — `0 && (...)` never executes. The object
// literal is presumably scanned statically by Node's CJS named-export
// detection so `import { MessageSchema } from ...` works from ESM; do not
// reformat or remove this pattern.
0 && (module.exports = {
  CandidateErrorSchema,
  CandidateSchema,
  FinishReasonSchema,
  GenerateActionOptionsSchema,
  GenerateActionOutputConfig,
  GenerateRequestSchema,
  GenerateResponseChunkSchema,
  GenerateResponseSchema,
  GenerationCommonConfigDescriptions,
  GenerationCommonConfigSchema,
  GenerationUsageSchema,
  MessageSchema,
  ModelInfoSchema,
  ModelRequestSchema,
  ModelResponseChunkSchema,
  ModelResponseSchema,
  OutputConfigSchema,
  PartSchema,
  RoleSchema,
  ToolDefinitionSchema
});
//# sourceMappingURL=model-types.js.map