/**
 * @heroku/plugin-ai — Heroku CLI plugin for the Heroku AI add-on.
 * Compiled JavaScript output (190 lines, 7.69 kB) for the `ai:agents:call` command.
 */
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const command_1 = require("@heroku-cli/command");
const core_1 = require("@oclif/core");
const promises_1 = tslib_1.__importDefault(require("node:fs/promises"));
const base_1 = tslib_1.__importDefault(require("../../../lib/base"));
const stream_1 = require("../../../lib/ai/agents/stream");
/**
 * `heroku ai:agents:call` — makes an inference request to the Heroku
 * Inference Agents API for a given model resource and streams the
 * completions back, optionally writing the result to a file or as JSON.
 */
class Call extends base_1.default {
    static args = {
        model_resource: core_1.Args.string({
            description: 'resource ID or alias of model (--app flag required if alias is used)',
            required: false,
            default: 'heroku-inference',
        }),
    };
    static description = 'make an inference request to the Heroku Inference Agents API';
    static examples = [
        'heroku ai:agents:call my_llm --app my-app --prompt "What is the current time?"',
        'heroku ai:agents:call my_llm --app my-app --messages \'[{"role":"user","content":"What is the current time?"}]\'',
    ];
    static flags = {
        app: command_1.flags.app({
            required: false,
            description: 'name or ID of app (required if alias is used)',
        }),
        json: command_1.flags.boolean({
            char: 'j',
            description: 'output response as JSON',
            exclusive: ['output'],
        }),
        optfile: command_1.flags.string({
            description: 'additional options for model inference, provided as a JSON config file',
            required: false,
            exclusive: ['opts'],
            exactlyOne: ['prompt', 'messages'],
        }),
        opts: command_1.flags.string({
            description: 'additional options for model inference, provided as a JSON string',
            required: false,
            exclusive: ['optfile'],
            exactlyOne: ['prompt', 'messages'],
        }),
        output: command_1.flags.string({
            char: 'o',
            description: 'file path where command writes the model response',
            required: false,
            exclusive: ['json'],
        }),
        prompt: command_1.flags.string({
            char: 'p',
            description: 'input prompt for model (will be converted to a user message)',
            required: false,
            exclusive: ['messages'],
            exactlyOne: ['optfile', 'opts'],
        }),
        messages: command_1.flags.string({
            description: 'JSON array of messages to send to the model',
            required: false,
            exclusive: ['prompt'],
            exactlyOne: ['optfile', 'opts'],
        }),
        remote: command_1.flags.remote(),
    };
    /**
     * Command entry point: parses args/flags, resolves the model resource,
     * calls the agent endpoint, and renders the response.
     */
    async run() {
        let flags = {};
        let args = {};
        try {
            ({ args, flags } = await this.parse(Call));
        }
        catch (error) {
            // The flag matrix above (exclusive/exactlyOne) can reject
            // combinations we still want to accept; when the parser error
            // carries its parsed output, recover it and continue. Anything
            // else (including errors without `.parse.output`) is a genuine
            // failure — rethrow it instead of crashing on the destructure.
            const parsedOutput = error?.parse?.output;
            if (!parsedOutput)
                throw error;
            ({ args, flags } = parsedOutput);
        }
        const { model_resource: modelResource } = args;
        const { app, json, optfile, opts, output, prompt, messages } = flags;
        // Configure the client to send a request for the target model resource
        await this.configureHerokuAIClient(modelResource, app);
        // Get config vars to find the model resource
        const { body: config } = await this.heroku.get(`/apps/${this.addon.app?.id}/config-vars`);
        const configVarNames = Object.keys(config);
        // Look for model resource in config vars
        const modelResourceKey = configVarNames.find(key => key.startsWith('INFERENCE_') && key.endsWith('_MODEL_ID'));
        if (!modelResourceKey) {
            // `app` is undefined when the default model resource was used
            // without --app; fall back to the resource name in the message.
            throw new Error(`No model resource found for ${app ?? modelResource}. Check the Heroku Inference documentation for setup instructions: https://devcenter.heroku.com/articles/heroku-inference`);
        }
        const options = await this.parseOptions(optfile, opts);
        // Create the agent request
        const agentRequest = this.createAgentRequest(prompt, messages, options);
        // Stream to stdout only when neither --json nor --output is requested.
        const response = await this.callAgent(agentRequest, !json && !output);
        await this.displayAgentResponse(response, output, json);
    }
    /**
     * Merges inference options from an optional JSON file (--optfile) and an
     * optional inline JSON string (--opts). Inline opts win on key conflicts
     * because they are applied last.
     *
     * @param {string|undefined} optfile - path to a JSON config file
     * @param {string|undefined} opts - inline JSON string
     * @returns {Promise<object>} merged options object
     * @throws {Error} with a user-facing message when either input is invalid JSON
     */
    async parseOptions(optfile, opts) {
        const options = {};
        if (optfile) {
            const optfileContents = await promises_1.default.readFile(optfile);
            try {
                Object.assign(options, JSON.parse(optfileContents.toString()));
            }
            catch (error) {
                if (error instanceof SyntaxError) {
                    const { message } = error;
                    throw new Error(`Invalid JSON in ${optfile}. Check the formatting in your file.\n${message}`);
                }
                throw error;
            }
        }
        if (opts) {
            try {
                Object.assign(options, JSON.parse(opts));
            }
            catch (error) {
                if (error instanceof SyntaxError) {
                    const { message } = error;
                    throw new Error(`Invalid JSON. Check the formatting in your --opts value.\n${message}`);
                }
                throw error;
            }
        }
        return options;
    }
    /**
     * Builds the agent request payload. A --messages JSON array replaces any
     * `messages` found in the parsed options; a --prompt is appended as a
     * user message.
     *
     * @param {string|undefined} prompt - single user prompt
     * @param {string|undefined} messagesStr - JSON-encoded message array
     * @param {object} [options] - merged inference options
     * @returns {object} request body for the agents endpoint
     * @throws {Error} when --messages is not valid JSON
     */
    createAgentRequest(prompt, messagesStr, options = {}) {
        // Copy so pushing the prompt below never mutates `options.messages`.
        let messages = [...(options.messages || [])];
        if (messagesStr) {
            try {
                messages = JSON.parse(messagesStr);
            }
            catch (error) {
                if (error instanceof SyntaxError) {
                    const { message } = error;
                    throw new Error(`Invalid JSON in --messages. Check the formatting.\n${message}`);
                }
                throw error;
            }
        }
        if (prompt) {
            messages.push({ role: 'user', content: prompt });
        }
        return {
            ...options,
            messages,
            model: this.apiModelId || '',
        };
    }
    /**
     * POSTs the request to the Heroku agents endpoint and consumes the
     * server-sent-event stream, collecting every completion.
     *
     * @param {object} request - agent request payload
     * @param {boolean} [writeToStdout=true] - echo formatted messages as they arrive
     * @returns {Promise<Array>} all completion objects received on the stream
     * @throws {Error} when the API returns no response body
     */
    async callAgent(request, writeToStdout = true) {
        const response = await fetch(this.apiUrl + '/v1/agents/heroku', {
            method: 'POST',
            body: JSON.stringify(request),
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${this.apiKey}`,
                Accept: 'text/event-stream',
                'User-Agent': `heroku-cli-plugin-ai/${process.env.npm_package_version} ${this.config.platform}`,
            },
        });
        if (!response?.body) {
            throw new Error('No response body received from the API');
        }
        const completions = [];
        await (0, stream_1.handleAgentStream)(response.body, {
            onMessage: completion => {
                completions.push(completion);
                const message = (0, stream_1.formatCompletionMessage)(completion);
                if (message && writeToStdout) {
                    core_1.ux.log(message);
                }
            },
        });
        return completions;
    }
    /**
     * Renders the collected completions: writes JSON or the final assistant
     * message to --output, prints styled JSON for --json, or does nothing
     * (the stream was already echoed to stdout by callAgent).
     *
     * @param {Array} completions - completion objects from callAgent
     * @param {string|undefined} output - destination file path
     * @param {boolean} [json=false] - emit raw JSON instead of message text
     */
    async displayAgentResponse(completions, output, json = false) {
        if (output) {
            if (json) {
                await promises_1.default.writeFile(output, JSON.stringify(completions, null, 2));
            }
            else {
                // Write only the final assistant message content. Guard every
                // step: a completion may arrive with an empty `choices` array.
                const finalAssistantMessage = completions
                    .filter(c => c.object === 'chat.completion')
                    .pop()?.choices?.[0]?.message?.content ?? '';
                await promises_1.default.writeFile(output, finalAssistantMessage);
            }
        }
        else if (json) {
            core_1.ux.styledJSON(completions);
        }
    }
}
// Default export so oclif can discover and run the command class.
// (Removed the stray empty statement left behind by the compiler.)
exports.default = Call;