genkitx-azure-openai
Firebase Genkit AI framework plugin for Azure OpenAI APIs.
JavaScript
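// gpt.js — compiled CommonJS output. Bundler-generated runtime shims
// (object spread, re-export wiring, async downleveling) come first; the
// module body defining the Azure OpenAI GPT models starts at gpt_exports.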
var __defProp = Object.defineProperty;
var __defProps = Object.defineProperties;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getOwnPropSymbols = Object.getOwnPropertySymbols;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __propIsEnum = Object.prototype.propertyIsEnumerable;
var __knownSymbol = (name, symbol) => (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name);
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
var __spreadValues = (a, b) => {
for (var prop in b || (b = {}))
if (__hasOwnProp.call(b, prop))
__defNormalProp(a, prop, b[prop]);
if (__getOwnPropSymbols)
for (var prop of __getOwnPropSymbols(b)) {
if (__propIsEnum.call(b, prop))
__defNormalProp(a, prop, b[prop]);
}
return a;
};
var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
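// async/await downleveling: drives a generator, resolving each awaited
// value and stepping the generator until it completes or throws.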
var __async = (__this, __arguments, generator) => {
return new Promise((resolve, reject) => {
var fulfilled = (value) => {
try {
step(generator.next(value));
} catch (e) {
reject(e);
}
};
var rejected = (value) => {
try {
step(generator.throw(value));
} catch (e) {
reject(e);
}
};
var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
step((generator = generator.apply(__this, __arguments)).next());
});
};
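// for-await downleveling: uses Symbol.asyncIterator when available, otherwise
// wraps the sync iterator's next/return in promise-returning methods.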
var __forAwait = (obj, it, method) => (it = obj[__knownSymbol("asyncIterator")]) ? it.call(obj) : (obj = obj[__knownSymbol("iterator")](), it = {}, method = (key, fn) => (fn = obj[key]) && (it[key] = (arg) => new Promise((yes, no, done) => (arg = fn.call(obj, arg), done = arg.done, Promise.resolve(arg.value).then((value) => yes({ value, done }), no)))), method("next"), method("return"), it);
var gpt_exports = {};
__export(gpt_exports, {
OpenAiConfigSchema: () => OpenAiConfigSchema,
SUPPORTED_GPT_MODELS: () => SUPPORTED_GPT_MODELS,
gpt35Turbo: () => gpt35Turbo,
gpt4: () => gpt4,
gpt45: () => gpt45,
gpt4o: () => gpt4o,
gptModel: () => gptModel,
gptO1: () => gptO1,
gptO1Mini: () => gptO1Mini,
gptO1Preview: () => gptO1Preview,
gptO3Mini: () => gptO3Mini,
toOpenAiMessages: () => toOpenAiMessages,
toOpenAiRequestBody: () => toOpenAiRequestBody,
toOpenAiTextAndMedia: () => toOpenAiTextAndMedia
});
module.exports = __toCommonJS(gpt_exports);
var import_genkit = require("genkit");
var import_model = require("genkit/model");
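// Model/deployment names that accept the `response_format` request parameter.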
const MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT = [
"gpt-4",
"gpt-4-32k",
"gpt-4o",
"gpt-4o-mini",
"gpt-o1",
"gpt-o1-mini",
"gpt-o1-preview",
"gpt-3.5-turbo",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-26k"
];
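// OpenAI-specific generation options layered on top of Genkit's common
// generation config (penalties, logit bias, logprobs, seed, user tag, and
// the detail level used for image parts).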
const OpenAiConfigSchema = import_genkit.GenerationCommonConfigSchema.extend({
frequencyPenalty: import_genkit.z.number().min(-2).max(2).optional(),
logitBias: import_genkit.z.record(import_genkit.z.string(), import_genkit.z.number().min(-100).max(100)).optional(),
logProbs: import_genkit.z.boolean().optional(),
presencePenalty: import_genkit.z.number().min(-2).max(2).optional(),
seed: import_genkit.z.number().int().optional(),
topLogProbs: import_genkit.z.number().int().min(0).max(20).optional(),
user: import_genkit.z.string().optional(),
visualDetailLevel: import_genkit.z.enum(["auto", "low", "high"]).optional()
});
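// Model references: static capability metadata (supported versions, tool and
// media support, output formats) registered under the azure-openai/ namespace.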
const gpt4o = (0, import_model.modelRef)({
name: "azure-openai/gpt-4o",
info: {
versions: ["gpt-4o", "gpt-4o-mini"],
label: "OpenAI - GPT-4o",
supports: {
multiturn: true,
tools: true,
media: true,
systemRole: true,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt4 = (0, import_model.modelRef)({
name: "azure-openai/gpt-4",
info: {
versions: ["gpt-4", "gpt-4-32k"],
label: "OpenAI - GPT-4",
supports: {
multiturn: true,
tools: true,
media: true,
systemRole: true,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt45 = (0, import_model.modelRef)({
name: "azure-openai/gpt-4.5",
info: {
versions: ["gpt-4.5-preview"],
label: "OpenAI - GPT-4.5",
supports: {
multiturn: true,
tools: true,
media: true,
systemRole: true,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gptO1 = (0, import_model.modelRef)({
name: "azure-openai/gpt-o1",
info: {
versions: ["gpt-o1"],
label: "OpenAI - GPT-o1",
supports: {
multiturn: true,
tools: false,
media: true,
systemRole: true,
output: ["text"]
}
},
configSchema: OpenAiConfigSchema
});
const gptO1Mini = (0, import_model.modelRef)({
name: "azure-openai/gpt-o1-mini",
info: {
versions: ["o1-mini"],
label: "OpenAI - GPT-o1 Mini",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["text"]
}
},
configSchema: OpenAiConfigSchema
});
const gptO1Preview = (0, import_model.modelRef)({
name: "azure-openai/gpt-o1-preview",
info: {
versions: ["o1-preview"],
label: "OpenAI - GPT-o1 Preview",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["text"]
}
},
configSchema: OpenAiConfigSchema
});
const gptO3Mini = (0, import_model.modelRef)({
name: "azure-openai/gpt-o3-mini",
info: {
versions: ["o3-mini"],
label: "OpenAI - GPT-o3 Mini",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["text"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt35Turbo = (0, import_model.modelRef)({
name: "azure-openai/gpt-3.5-turbo",
info: {
versions: ["gpt-3.5-turbo", "gpt-35-turbo-instruct", "gpt-35-turbo-16k"],
label: "OpenAI - GPT-3.5 Turbo",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["json", "text"]
}
},
configSchema: OpenAiConfigSchema
});
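// Lookup table from bare model name to its modelRef; consulted by
// toOpenAiRequestBody and gptModel.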
const SUPPORTED_GPT_MODELS = {
"gpt-o1-mini": gptO1Mini,
"gpt-o1-preview": gptO1Preview,
"gpt-o1": gptO1,
"gpt-o3-mini": gptO3Mini,
"gpt-4": gpt4,
"gpt-4o": gpt4o,
"gpt-4.5": gpt45,
"gpt-3.5-turbo": gpt35Turbo
};
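/** Maps a Genkit message role to the corresponding OpenAI chat role. */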
function toOpenAIRole(role) {
switch (role) {
case "user":
return "user";
case "model":
return "assistant";
case "system":
return "system";
case "tool":
return "tool";
default:
throw new Error(`role ${role} doesn't map to an OpenAI role.`);
}
}
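/** Converts a Genkit tool definition into an OpenAI function-calling tool spec. */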
function toOpenAiTool(tool) {
return {
type: "function",
function: {
name: tool.name,
parameters: tool.inputSchema !== null ? tool.inputSchema : void 0
}
};
}
function toOpenAiTextAndMedia(part, visualDetailLevel) {
if (part.text) {
return {
type: "text",
text: part.text
};
} else if (part.media) {
return {
type: "image_url",
image_url: {
url: part.media.url,
detail: visualDetailLevel
}
};
}
  throw Error(
    `Unsupported Genkit part fields encountered for current message role: ${JSON.stringify(part)}.`
  );
}
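/**
 * Converts Genkit messages into the OpenAI chat format. Assistant tool
 * requests become tool_calls entries; tool responses fan out into one
 * OpenAI message per response part.
 */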
function toOpenAiMessages(messages, visualDetailLevel = "auto") {
const openAiMsgs = [];
for (const message of messages) {
const msg = new import_genkit.Message(message);
const role = toOpenAIRole(message.role);
switch (role) {
case "user":
openAiMsgs.push({
role,
content: msg.content.map(
(part) => toOpenAiTextAndMedia(part, visualDetailLevel)
)
});
break;
case "system":
openAiMsgs.push({
role,
content: msg.text
});
break;
case "assistant":
const toolCalls = msg.content.filter((part) => part.toolRequest).map((part) => {
if (!part.toolRequest) {
          throw Error(
            "Mapping Genkit message to an OpenAI tool call, but part.toolRequest is not provided."
          );
}
return {
id: part.toolRequest.ref || "",
type: "function",
function: {
name: part.toolRequest.name,
arguments: JSON.stringify(part.toolRequest.input)
}
};
});
        if (toolCalls.length > 0) {
openAiMsgs.push({
role,
tool_calls: toolCalls
});
} else {
openAiMsgs.push({
role,
content: msg.text
});
}
break;
case "tool":
const toolResponseParts = msg.toolResponseParts();
        toolResponseParts.forEach((part) => {
openAiMsgs.push({
role,
tool_call_id: part.toolResponse.ref || "",
content: typeof part.toolResponse.output === "string" ? part.toolResponse.output : JSON.stringify(part.toolResponse.output)
});
});
break;
default:
throw new Error("unrecognized role");
}
}
return openAiMsgs;
}
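// OpenAI finish_reason → Genkit finishReason; unmapped values become "other".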
const finishReasonMap = {
length: "length",
stop: "stop",
tool_calls: "stop",
content_filter: "blocked"
};
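/** Converts an OpenAI tool call into a Genkit toolRequest part. */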
function fromOpenAiToolCall(toolCall) {
if (!toolCall.function) {
    throw Error(
      "Unexpected OpenAI tool call: the function definition is missing."
    );
}
const f = toolCall.function;
return {
toolRequest: {
name: f.name,
ref: toolCall.id,
input: f.arguments ? JSON.parse(f.arguments) : f.arguments
}
};
}
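/**
 * Maps a non-streaming completion choice to a Genkit candidate. When
 * jsonMode is set, the message content is parsed as JSON into a data part.
 */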
function fromOpenAiChoice(choice, jsonMode = false) {
var _a;
const toolRequestParts = (_a = choice.message.tool_calls) == null ? void 0 : _a.map(fromOpenAiToolCall);
return {
index: choice.index,
finishReason: finishReasonMap[choice.finish_reason] || "other",
message: {
role: "model",
      // Tool calls take precedence over text/JSON content.
      content: toolRequestParts ? toolRequestParts : [
jsonMode ? { data: JSON.parse(choice.message.content) } : { text: choice.message.content }
]
},
custom: {}
};
}
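/**
 * Maps a streamed chunk choice to a Genkit candidate. Deltas without a
 * finish_reason report "unknown" since the final reason is not yet known.
 */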
function fromOpenAiChunkChoice(choice, jsonMode = false) {
var _a;
const toolRequestParts = (_a = choice.delta.tool_calls) == null ? void 0 : _a.map(fromOpenAiToolCall);
return {
index: choice.index,
finishReason: choice.finish_reason ? finishReasonMap[choice.finish_reason] || "other" : "unknown",
message: {
role: "model",
      // Tool calls take precedence over text/JSON content.
      content: toolRequestParts ? toolRequestParts : [
jsonMode ? { data: JSON.parse(choice.delta.content) } : { text: choice.delta.content }
]
},
custom: {}
};
}
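/**
 * Builds the chat.completions request body for the given model: converts
 * messages and tools, merges camelCase custom config as snake_case, applies
 * response_format where the model supports it, then strips unset fields.
 */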
function toOpenAiRequestBody(modelName, request) {
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
const mapToSnakeCase = (obj) => {
return Object.entries(obj).reduce((acc, [key, value]) => {
const snakeCaseKey = key.replace(
/[A-Z]/g,
(letter) => `_${letter.toLowerCase()}`
);
acc[snakeCaseKey] = value;
return acc;
}, {});
};
const model = SUPPORTED_GPT_MODELS[modelName];
if (!model) throw new Error(`Unsupported model: ${modelName}`);
const openAiMessages = toOpenAiMessages(
request.messages,
(_a = request.config) == null ? void 0 : _a.visualDetailLevel
);
const mappedModelName = ((_b = request.config) == null ? void 0 : _b.version) || modelName;
const body = __spreadValues({
messages: openAiMessages,
tools: (_c = request.tools) == null ? void 0 : _c.map(toOpenAiTool),
model: mappedModelName,
max_tokens: (_d = request.config) == null ? void 0 : _d.maxOutputTokens,
temperature: (_e = request.config) == null ? void 0 : _e.temperature,
top_p: (_f = request.config) == null ? void 0 : _f.topP,
n: request.candidates,
stop: (_g = request.config) == null ? void 0 : _g.stopSequences
}, mapToSnakeCase(((_h = request.config) == null ? void 0 : _h.custom) || {}));
const response_format = (_i = request.output) == null ? void 0 : _i.format;
if (response_format && MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT.includes(mappedModelName)) {
if (response_format === "json" && ((_k = (_j = model.info.supports) == null ? void 0 : _j.output) == null ? void 0 : _k.includes("json"))) {
body.response_format = {
type: "json_object"
};
} else if (response_format === "text" && ((_m = (_l = model.info.supports) == null ? void 0 : _l.output) == null ? void 0 : _m.includes("text"))) {
body.response_format = {
type: "text"
};
} else {
throw new Error(
`${response_format} format is not supported for GPT models currently`
);
}
}
  for (const key in body) {
    // Drop unset fields and empty arrays; `== null` (rather than a bare
    // falsiness check) preserves legitimate falsy values such as temperature: 0.
    if (body[key] == null || Array.isArray(body[key]) && !body[key].length)
      delete body[key];
  }
return body;
}
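/**
 * Defines a Genkit model action for the named GPT model, backed by the given
 * Azure OpenAI client. With a streaming callback, chunks are forwarded as
 * they arrive via client.beta.chat.completions.stream; otherwise a single
 * completion request is made.
 *
 * Usage sketch (hypothetical wiring, not taken from this file: `ai` is
 * assumed to be a configured Genkit instance and `client` an AzureOpenAI
 * client constructed from the `openai` package):
 *
 *   const model = gptModel(ai, "gpt-4o", client);
 *   const { text } = await ai.generate({ model, prompt: "Hello!" });
 */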
function gptModel(ai, name, client) {
const modelId = `azure-openai/${name}`;
const model = SUPPORTED_GPT_MODELS[name];
if (!model) throw new Error(`Unsupported model: ${name}`);
return ai.defineModel(
__spreadProps(__spreadValues({
name: modelId
}, model.info), {
configSchema: SUPPORTED_GPT_MODELS[name].configSchema
}),
(request, streamingCallback) => __async(this, null, function* () {
var _a, _b, _c, _d;
let response;
const body = toOpenAiRequestBody(name, request);
if (streamingCallback) {
const stream = client.beta.chat.completions.stream(__spreadProps(__spreadValues({}, body), {
stream: true
}));
try {
for (var iter = __forAwait(stream), more, temp, error; more = !(temp = yield iter.next()).done; more = false) {
const chunk = temp.value;
        (_a = chunk.choices) == null ? void 0 : _a.forEach((choice) => {
          const c = fromOpenAiChunkChoice(choice);
streamingCallback({
index: c.index,
content: c.message.content
});
});
}
} catch (temp) {
error = [temp];
} finally {
try {
more && (temp = iter.return) && (yield temp.call(iter));
} finally {
if (error)
throw error[0];
}
}
response = yield stream.finalChatCompletion();
} else {
response = yield client.chat.completions.create(body);
}
return {
candidates: response.choices.map(
(c) => {
var _a2;
return fromOpenAiChoice(c, ((_a2 = request.output) == null ? void 0 : _a2.format) === "json");
}
),
usage: {
inputTokens: (_b = response.usage) == null ? void 0 : _b.prompt_tokens,
outputTokens: (_c = response.usage) == null ? void 0 : _c.completion_tokens,
totalTokens: (_d = response.usage) == null ? void 0 : _d.total_tokens
},
custom: response
};
})
);
}
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
OpenAiConfigSchema,
SUPPORTED_GPT_MODELS,
gpt35Turbo,
gpt4,
gpt45,
gpt4o,
gptModel,
gptO1,
gptO1Mini,
gptO1Preview,
gptO3Mini,
toOpenAiMessages,
toOpenAiRequestBody,
toOpenAiTextAndMedia
});
//# sourceMappingURL=gpt.js.map