// @genkit-ai/googleai — Genkit AI framework plugin for Google AI APIs,
// including Gemini APIs. (Compiled module: veo.mjs)
import {
GenkitError,
z
} from "genkit";
import {
modelRef
} from "genkit/model";
import { getApiKeyFromEnvVar } from "./common.js";
import { checkOp, predictModel } from "./predict.js";
// Configuration accepted by Veo video-generation models. `.passthrough()`
// forwards unknown keys untouched, so new API parameters can be used before
// the plugin models them explicitly.
// NOTE: Documentation notes numberOfVideos parameter to pick the number of
// output videos, but this setting does not seem to work
const VeoConfigSchema = z
  .object({
    negativePrompt: z.string().optional(),
    aspectRatio: z
      .enum(["9:16", "16:9"])
      .describe("Desired aspect ratio of the output video.")
      .optional(),
    personGeneration: z
      .enum(["dont_allow", "allow_adult", "allow_all"])
      .describe("Control if/how images of people will be generated by the model.")
      .optional(),
    durationSeconds: z
      .number()
      .step(1)
      .min(5)
      .max(8)
      .describe("Length of each output video in seconds, between 5 and 8.")
      .optional(),
    enhance_prompt: z
      .boolean()
      .describe("Enable or disable the prompt rewriter. Enabled by default.")
      .optional()
  })
  .passthrough();
/**
 * Concatenates the text parts of the request's final message into the Veo
 * prompt string. Non-text parts (e.g. media) contribute an empty string.
 *
 * Fix: the original dereferenced `request.messages.at(-1).content` without
 * guards and threw a TypeError on an empty message list — inconsistent with
 * `extractImage`, which already uses optional chaining for the same access.
 *
 * @param {object} request - Genkit generate request with a `messages` array.
 * @returns {string} The combined prompt text ("" when there is no content).
 */
function extractText(request) {
  const content = request.messages.at(-1)?.content ?? [];
  return content.map((part) => part.text || "").join("");
}
/**
 * Builds the Veo API `parameters` object from the request config, dropping
 * only entries that are null/undefined.
 *
 * Fix: the original used a truthiness test (`if (!out[k]) delete out[k]`),
 * which also stripped legitimate falsy values — most notably
 * `enhance_prompt: false`, making it impossible to disable the prompt
 * rewriter that the config schema explicitly documents as toggleable.
 *
 * @param {object|undefined} request - Genkit request; `request.config` holds
 *   the VeoConfigSchema values (may be absent).
 * @returns {object} A shallow copy of the config without nullish entries.
 */
function toParameters(request) {
  const params = { ...request?.config };
  for (const key of Object.keys(params)) {
    // `== null` matches both null and undefined, nothing else.
    if (params[key] == null) delete params[key];
  }
  return params;
}
/**
 * Pulls an optional reference image out of the request's final message.
 * Veo accepts a single image as a base64 payload plus its mime type.
 *
 * @param {object} request - Genkit generate request.
 * @returns {{bytesBase64Encoded: string, mimeType: string}|undefined}
 */
function extractImage(request) {
  const lastMessage = request.messages.at(-1);
  const mediaPart = lastMessage?.content.find((part) => !!part.media);
  if (!mediaPart) {
    return undefined;
  }
  const { url, contentType } = mediaPart.media;
  // Data URLs look like "data:<mime>;base64,<payload>" — keep the payload.
  const base64Payload = url.split(",")[1];
  return {
    bytesBase64Encoded: base64Payload,
    mimeType: contentType
  };
}
// Model metadata shared by every Veo model this plugin registers.
// Veo is a long-running video-generation API: it accepts media input,
// emits media output, and has no multiturn/tool/system-prompt support.
const GENERIC_VEO_INFO = {
  label: "Google AI - Generic Veo",
  supports: {
    media: true, // a reference image may accompany the text prompt
    multiturn: false,
    tools: false,
    systemRole: false,
    output: ["media"], // responses are video URLs, not text
    longRunning: true // start/check operation lifecycle
  }
};
/**
 * Registers a Veo model with Genkit as a background (long-running) model.
 *
 * @param {object} ai - Genkit instance used for registration.
 * @param {string} name - Bare model name; registered as `googleai/<name>`.
 * @param {string|false|undefined} apiKey - API key; `false` opts out of key
 *   resolution, undefined falls back to the environment.
 * @throws {GenkitError} FAILED_PRECONDITION when no key can be resolved.
 */
function defineVeoModel(ai, name, apiKey) {
  // Resolve the key once up front; `false` explicitly skips key handling.
  let resolvedApiKey = apiKey;
  if (resolvedApiKey !== false) {
    resolvedApiKey = resolvedApiKey || getApiKeyFromEnvVar();
    if (!resolvedApiKey) {
      throw new GenkitError({
        status: "FAILED_PRECONDITION",
        message: "Please pass in the API key or set the GEMINI_API_KEY or GOOGLE_API_KEY environment variable.\nFor more details see https://genkit.dev/docs/plugins/google-genai"
      });
    }
  }
  const modelName = `googleai/${name}`;
  const model = modelRef({
    name: modelName,
    info: {
      ...GENERIC_VEO_INFO,
      label: `Google AI - ${name}`
    },
    configSchema: VeoConfigSchema
  });
  return ai.defineBackgroundModel({
    name: modelName,
    ...model.info,
    configSchema: VeoConfigSchema,
    // Kick off a long-running generation and return the operation handle.
    async start(request) {
      const instance = { prompt: extractText(request) };
      const referenceImage = extractImage(request);
      if (referenceImage) {
        instance.image = referenceImage;
      }
      const predict = predictModel(model.version || name, resolvedApiKey, "predictLongRunning");
      const apiOperation = await predict([instance], toParameters(request));
      return toGenkitOp(apiOperation);
    },
    // Poll the pending operation by id and translate its latest state.
    async check(operation) {
      return toGenkitOp(await checkOp(operation.id, resolvedApiKey));
    }
  });
}
/**
 * Translates a Google long-running operation payload into Genkit's
 * operation shape ({ id, done?, error?, output? }).
 *
 * @param {object} apiOp - Raw operation from predictLongRunning / checkOp.
 * @returns {object} Genkit operation; `output` is present only once the
 *   response carries generated video samples.
 */
function toGenkitOp(apiOp) {
  const op = { id: apiOp.name };
  if (apiOp.done !== undefined) {
    op.done = apiOp.done;
  }
  if (apiOp.error) {
    // Surface only the message; other error fields are dropped.
    op.error = { message: apiOp.error.message };
  }
  const samples = apiOp.response?.generateVideoResponse?.generatedSamples;
  if (apiOp.response && samples) {
    const content = samples.map((sample) => ({
      media: { url: sample.video.uri }
    }));
    op.output = {
      finishReason: "stop",
      raw: apiOp.response,
      message: { role: "model", content }
    };
  }
  return op;
}
export {
GENERIC_VEO_INFO,
VeoConfigSchema,
defineVeoModel
};
//# sourceMappingURL=veo.mjs.map