/**
 * @arizeai/phoenix-client
 * A client for the Phoenix API
 * (compiled output — 181 lines • 5.93 kB, JavaScript)
 */
import { createClient } from "../client.js";
import { assertUnreachable } from "../utils/assertUnreachable.js";
/**
 * Create a prompt and store it in Phoenix.
 *
 * If a prompt with the same name exists, a new version of the prompt will be appended to the history.
 *
 * @param params - The parameters to create a prompt, including optional metadata.
 * @returns The created prompt version.
 * @throws {Error} If the API reports an error or returns no prompt version;
 *   the server-side error detail is included in the message and `cause`.
 * @example
 * ```typescript
 * await createPrompt({
 *   name: "my-prompt",
 *   description: "A helpful prompt",
 *   metadata: { environment: "production", team: "ai" },
 *   version: promptVersion({
 *     modelProvider: "OPENAI",
 *     modelName: "gpt-4",
 *     template: [{ role: "user", content: "Hello {{name}}" }]
 *   })
 * });
 * ```
 */
export async function createPrompt({ client: _client, version, ...promptParams }) {
    // Fall back to a default client when the caller does not supply one.
    const client = _client ?? createClient();
    const { data, error } = await client.POST("/v1/prompts", {
        body: {
            prompt: promptParams,
            version,
        },
    });
    // Surface the API's error detail instead of swallowing it in a generic message.
    if (error != null) {
        const detail = typeof error === "string" ? error : JSON.stringify(error);
        throw new Error(`Failed to create prompt: ${detail}`, { cause: error });
    }
    const createdPromptVersion = data?.data;
    if (!createdPromptVersion) {
        throw new Error("Failed to create prompt");
    }
    return createdPromptVersion;
}
/**
 * A helper function to construct a prompt version declaratively.
 *
 * The output of this function can be used to create a prompt version in Phoenix.
 *
 * @param params - The parameters to create a prompt version.
 * @returns Structured prompt version data, not yet persisted to Phoenix.
 */
export function promptVersion(params) {
    const { description = "", modelProvider: model_provider, modelName: model_name, template: templateMessages, templateFormat: template_format = "MUSTACHE", invocationParameters: invocation_parameters, } = params;
    // Discriminant key used inside `invocation_parameters` for each provider.
    // Collapses eight near-identical switch cases (which differed only in this
    // key) into a single lookup table.
    const PROVIDER_INVOCATION_KEY = {
        OPENAI: "openai",
        AZURE_OPENAI: "azure_openai",
        ANTHROPIC: "anthropic",
        GOOGLE: "google",
        DEEPSEEK: "deepseek",
        XAI: "xai",
        OLLAMA: "ollama",
        AWS: "aws",
    };
    const invocationKey = PROVIDER_INVOCATION_KEY[model_provider];
    if (invocationKey === undefined) {
        // Unknown provider: mirror the original switch's default branch.
        return assertUnreachable(model_provider);
    }
    // NOTE(review): the original defaulted missing invocation parameters to {}
    // for every provider EXCEPT Anthropic. That asymmetry is preserved here —
    // presumably Anthropic parameters are required upstream; confirm before
    // normalizing.
    const providerParams = model_provider === "ANTHROPIC"
        ? invocation_parameters
        : (invocation_parameters ?? {});
    return {
        description,
        model_provider,
        model_name,
        template_type: "CHAT",
        template_format,
        template: {
            type: "chat",
            messages: templateMessages,
        },
        invocation_parameters: {
            type: invocationKey,
            [invocationKey]: providerParams,
        },
    };
}
//# sourceMappingURL=createPrompt.js.map