sesterce-cli
A powerful command-line interface tool for managing Sesterce Cloud services. Sesterce CLI provides easy access to GPU cloud instances, AI inference services, container registries, and SSH key management directly from your terminal.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createAIInferenceInstanceUpdateCommand = createAIInferenceInstanceUpdateCommand;
const get_instance_details_1 = require("@/modules/ai-inference/use-cases/get-instance-details");
const list_inference_hardwares_1 = require("@/modules/ai-inference/use-cases/list-inference-hardwares");
const list_inference_models_1 = require("@/modules/ai-inference/use-cases/list-inference-models");
const list_inference_regions_1 = require("@/modules/ai-inference/use-cases/list-inference-regions");
const list_instances_1 = require("@/modules/ai-inference/use-cases/list-instances");
const preview_instance_pricing_1 = require("@/modules/ai-inference/use-cases/preview-instance-pricing");
const update_instance_1 = require("@/modules/ai-inference/use-cases/update-instance");
const list_registries_1 = require("@/modules/registries/use-cases/list-registries");
const prompts_1 = require("@inquirer/prompts");
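// Formats an instance as a one-line prompt label: name, features, address, status, hourly price.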
const printInstance = (instance) => {
return `${instance.name} (${instance.features.join(", ")}) | ${instance.address} | ${instance.status} | ${instance.hourlyPrice.toFixed(2)}`;
};
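// Formats a registry as a one-line prompt label: name, URL, username.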
const printRegistry = (registry) => {
return `${registry.name} - ${registry.url} - ${registry.username}`;
};
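/**
 * Registers the `update` subcommand on the AI inference instance command and
 * walks the user through an interactive wizard: instance selection, model or
 * registry, port, hardware, regions, scaling, triggers, environment variables,
 * pricing preview, and final confirmation.
 *
 * Hypothetical invocation (the parent command path is assumed, not confirmed here):
 *   sesterce ai-inference instance update --instance-id <id>
 */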
function createAIInferenceInstanceUpdateCommand(aiInferenceInstanceCommand) {
aiInferenceInstanceCommand
.command("update")
.description("Update an AI inference instance")
.option("-i, --instance-id <instanceId>", "The ID of the instance to update")
.action(async (args) => {
var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
let instanceId = args.instanceId;
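        // No --instance-id flag: list the inference instances and let the user pick one interactively.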
if (!instanceId) {
console.log("Loading inference instances...");
const inferenceInstancesResult = await list_instances_1.listInferenceInstances.execute();
if (inferenceInstancesResult.isLeft()) {
console.error(inferenceInstancesResult.value.message);
return;
}
const instances = inferenceInstancesResult.value;
if (instances.length === 0) {
console.log("No instances found");
return;
}
const selectedInstance = await (0, prompts_1.search)({
message: "Select an instance to update",
source: async (input, { signal }) => {
if (!input) {
return instances.map((instance) => ({
name: printInstance(instance),
value: instance,
}));
}
return instances
.filter((instance) => instance.name.toLowerCase().includes(input.toLowerCase()) ||
instance.address
.toLowerCase()
.includes(input.toLowerCase()) ||
                    instance.features.some((feature) => feature.toLowerCase().includes(input.toLowerCase())) ||
instance.status.toLowerCase().includes(input.toLowerCase()) ||
instance.hourlyPrice.toFixed(2).includes(input))
.map((instance) => ({
name: printInstance(instance),
value: instance,
}));
},
});
instanceId = selectedInstance._id;
}
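        // Load the instance's current configuration; its values are reused as prompt defaults below.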
console.log("Loading instance details...");
const instanceDetailsResult = await get_instance_details_1.getInstanceDetails.execute(instanceId);
if (instanceDetailsResult.isLeft()) {
console.error(instanceDetailsResult.value.message);
return;
}
const instanceToUpdate = instanceDetailsResult.value;
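        // Hardware types and regions are fetched in parallel; both are needed for the prompts that follow.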
console.log("Loading offers...");
const [hardwaresResult, regionsResult] = await Promise.all([
list_inference_hardwares_1.listInferenceHardwares.execute(),
list_inference_regions_1.listInferenceRegions.execute(),
]);
if (hardwaresResult.isLeft()) {
console.error((_a = hardwaresResult.value) === null || _a === void 0 ? void 0 : _a.message);
return;
}
if (regionsResult.isLeft()) {
console.error((_b = regionsResult.value) === null || _b === void 0 ? void 0 : _b.message);
return;
}
const hardwares = hardwaresResult.value;
const regions = regionsResult.value;
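        // A checkbox prompt is used here, but only the first selection is read, so it acts as a
        // single choice between a public catalogue model and a private registry image.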
const answer = await (0, prompts_1.checkbox)({
message: "Deploy public or private model?",
choices: [
{ name: "Public model", value: "public", checked: true },
{ name: "Private model (registry)", value: "private" },
],
});
const modelType = answer[0];
let model = null;
let registry = null;
if (modelType === "public") {
console.log("Loading public models...");
const modelsResult = await list_inference_models_1.listInferenceModels.execute();
if (modelsResult.isLeft()) {
console.error(modelsResult.value.message);
return;
}
const models = modelsResult.value;
const selectedModel = await (0, prompts_1.search)({
message: "Select a model",
source: async (input, { signal }) => {
if (!input)
return models.map((model) => ({
name: model.name,
value: model,
}));
return models
.filter((model) => model.name.toLowerCase().includes(input.toLowerCase()))
.map((model) => ({
name: model.name,
value: model,
}));
},
});
model = selectedModel;
}
else {
console.log("Loading registries...");
const registriesResult = await list_registries_1.listRegistries.execute();
if (registriesResult.isLeft()) {
console.error(registriesResult.value.message);
return;
}
const registries = registriesResult.value;
const selectedRegistry = await (0, prompts_1.search)({
message: "Select a registry",
source: async (input, { signal }) => {
if (!input)
return registries.map((registry) => ({
name: printRegistry(registry),
value: registry,
}));
return registries
.filter((registry) => registry.name.toLowerCase().includes(input.toLowerCase()))
.map((registry) => ({
name: printRegistry(registry),
value: registry,
}));
},
});
registry = selectedRegistry;
}
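        // Port the container exposes; defaults to the selected model's port, or 80 when none is set.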
const containerPort = await (0, prompts_1.number)({
message: "Port to expose the model",
required: true,
default: (_c = model === null || model === void 0 ? void 0 : model.port) !== null && _c !== void 0 ? _c : 80,
});
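        // Flag each hardware type as available when at least one region still has capacity for it.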
const availableHardwares = hardwares.map((hardware) => ({
...hardware,
available: regions.some((region) => region.capacities.some((capacity) => capacity.hardwareName === hardware.name && capacity.capacity > 0)),
}));
const hardware = await (0, prompts_1.search)({
message: `Select a hardware (current: ${instanceToUpdate.hardwareName})`,
source: async (input, { signal }) => {
if (!input)
return availableHardwares.map((hardware) => ({
name: hardware.name,
value: hardware,
disabled: !hardware.available ? "(Out of stock)" : false,
}));
return availableHardwares
.filter((hardware) => hardware.name.toLowerCase().includes(input.toLowerCase()))
.map((hardware) => ({
name: hardware.name,
value: hardware,
disabled: !hardware.available ? "(Out of stock)" : false,
}));
},
});
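        // Keep only regions with remaining capacity for the chosen hardware, then preview per-region pricing.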
console.log("Loading regions...");
const hardwareRegions = regions.filter((region) => region.capacities.some((capacity) => capacity.hardwareName === hardware.name && capacity.capacity > 0));
const pricingResult = await preview_instance_pricing_1.previewInstancePricing.execute({
hardwareName: hardware.name,
regions: hardwareRegions.map((region) => region.id),
maxContainers: 1,
});
if (pricingResult.isLeft()) {
console.error(pricingResult.value.message);
return;
}
const regionPricingDict = pricingResult.value.pricesPerRegion.reduce((acc, { regionId, ...pricing }) => {
acc[regionId] = pricing;
return acc;
}, {});
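        // Region choices show hourly pricing when known; regions the instance already runs in are pre-checked.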
const selectedRegions = await (0, prompts_1.checkbox)({
message: "Select one or more regions",
required: true,
choices: hardwareRegions.map((region) => ({
name: regionPricingDict[region.id]
? `${region.name} - $${regionPricingDict[region.id].pricePerHour}/hour`
: region.name,
value: region,
checked: instanceToUpdate.containers.some((container) => container.regionId === region.id),
})),
});
const startupCommand = await (0, prompts_1.input)({
message: "Startup command",
default: instanceToUpdate.startupCommand,
});
// all containers have the same scale config, so we can use the first one
const [firstContainer] = instanceToUpdate.containers;
const minContainers = await (0, prompts_1.number)({
message: "Minimum number of containers (0-3)",
default: firstContainer.scale.min,
validate: (value) => {
if (value === undefined) {
return true;
}
if (value < 0 || value > 3) {
return "Minimum number of containers must be between 0 and 3";
}
return true;
},
});
const maxContainers = await (0, prompts_1.number)({
message: "Maximum number of containers (1-25)",
required: true,
default: firstContainer.scale.max,
validate: (value) => {
if (value === undefined) {
return true;
}
if (value < 1 || value > 25) {
return "Maximum number of containers must be between 1 and 25";
}
return true;
},
});
const cooldownPeriod = await (0, prompts_1.number)({
message: "Cooldown period (in seconds)",
default: firstContainer.scale.cooldownPeriod,
});
const timeout = await (0, prompts_1.number)({
message: "Timeout (in seconds)",
default: instanceToUpdate.podLifetime,
});
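        // Autoscaling triggers: thresholds for CPU, GPU utilization, GPU memory, RAM, and HTTP
        // request rate, defaulting to the instance's current configuration.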
console.log("Container Deployment Triggers");
const cpu = await (0, prompts_1.number)({
message: "CPU usage (%)",
default: (_e = (_d = firstContainer.scale.triggers) === null || _d === void 0 ? void 0 : _d.cpu) === null || _e === void 0 ? void 0 : _e.threshold,
});
const gpuUtilization = await (0, prompts_1.number)({
message: "GPU usage (%)",
default: (_g = (_f = firstContainer.scale.triggers) === null || _f === void 0 ? void 0 : _f.gpuUtilization) === null || _g === void 0 ? void 0 : _g.threshold,
});
const gpuMemory = await (0, prompts_1.number)({
message: "GPU memory (%)",
default: (_j = (_h = firstContainer.scale.triggers) === null || _h === void 0 ? void 0 : _h.gpuMemory) === null || _j === void 0 ? void 0 : _j.threshold,
});
const memory = await (0, prompts_1.number)({
message: "RAM usage (%)",
default: (_l = (_k = firstContainer.scale.triggers) === null || _k === void 0 ? void 0 : _k.memory) === null || _l === void 0 ? void 0 : _l.threshold,
});
const http = await (0, prompts_1.number)({
message: "HTTP requests (/sec)",
default: (_o = (_m = firstContainer.scale.triggers) === null || _m === void 0 ? void 0 : _m.http) === null || _o === void 0 ? void 0 : _o.rate,
});
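        // Environment variables are edited as KEY=VALUE lines, pre-filled with the current values;
        // lines missing a key or value are dropped when parsed back.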
const envVars = await (0, prompts_1.editor)({
message: "Environment Variables",
default: Object.entries(instanceToUpdate.envs)
.map(([key, value]) => `${key}=${value}`)
.join("\n"),
});
let envs = {};
if (envVars.length > 0) {
envs = envVars.split("\n").reduce((acc, env) => {
const [key, value] = env.split("=");
if (key && value) {
acc[key] = value;
}
return acc;
}, {});
}
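        // Recompute pricing for the final region selection and container ceiling before asking for confirmation.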
console.log("Calculating final price...");
const finalPrice = await preview_instance_pricing_1.previewInstancePricing.execute({
hardwareName: hardware.name,
regions: selectedRegions.map((region) => region.id),
maxContainers,
});
if (finalPrice.isLeft()) {
console.error(finalPrice.value.message);
return;
}
console.log(`Final price: $${finalPrice.value.totalPrice.pricePerHour}/hour | $${finalPrice.value.totalPrice.pricePerMonth}/month`);
const confirmDeployment = await (0, prompts_1.confirm)({
message: "Are you sure you want to deploy this instance?",
});
if (!confirmDeployment) {
console.log("Deployment cancelled");
return;
}
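        // Submit the update; either modelId or registryId is set (the other is null), depending on the deployment type chosen above.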
console.log("Updating instance...");
const updateInstanceResult = await update_instance_1.updateInferenceInstance.execute({
instanceId,
modelId: (_p = model === null || model === void 0 ? void 0 : model.id) !== null && _p !== void 0 ? _p : null,
registryId: (_q = registry === null || registry === void 0 ? void 0 : registry._id) !== null && _q !== void 0 ? _q : null,
containerPort,
regionsIds: selectedRegions.map((region) => region.id),
minContainers: minContainers !== null && minContainers !== void 0 ? minContainers : 1,
maxContainers,
cooldownPeriod,
triggers: {
cpu,
gpuUtilization,
gpuMemory,
memory,
http,
},
timeout,
envs,
hardwareName: hardware.name,
startupCommand: startupCommand !== null && startupCommand !== void 0 ? startupCommand : null,
});
if (updateInstanceResult.isLeft()) {
console.error(updateInstanceResult.value.message);
return;
}
console.log("Instance updated successfully!");
});
}
//# sourceMappingURL=update.js.map