/**
 * @autobe/agent — AI backend server code generator
 */
import { IAgenticaController, MicroAgentica } from "@agentica/core";
import {
AutoBeInterfaceEndpointsEvent,
AutoBeOpenApi,
} from "@autobe/interface";
import { AutoBeInterfaceGroup } from "@autobe/interface/src/histories/contents/AutoBeInterfaceGroup";
import { ILlmApplication, ILlmSchema } from "@samchon/openapi";
import { HashSet, IPointer } from "tstl";
import typia from "typia";
import { AutoBeContext } from "../../context/AutoBeContext";
import { assertSchemaModel } from "../../context/assertSchemaModel";
import { enforceToolCall } from "../../utils/enforceToolCall";
import { transformInterfaceEndpointHistories } from "./histories/transformInterfaceEndpointHistories";
import { IAutoBeInterfaceEndpointApplication } from "./structures/IAutoBeInterfaceEndpointApplication";
import { OpenApiEndpointComparator } from "./utils/OpenApiEndpointComparator";
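/**
 * Generates API endpoints for every interface group in parallel, then
 * merges the per-group results and removes duplicates (endpoints with the
 * same path and method).
 */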
export async function orchestrateInterfaceEndpoints<
Model extends ILlmSchema.Model,
>(
ctx: AutoBeContext<Model>,
groups: AutoBeInterfaceGroup[],
content: string = `Make endpoints for the given assets`,
): Promise<AutoBeOpenApi.IEndpoint[]> {
const progress: IProgress = {
total: groups.length,
completed: 0,
};
const endpoints: AutoBeOpenApi.IEndpoint[] = (
await Promise.all(groups.map((g) => process(ctx, g, content, progress)))
).flat();
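  // Drop duplicate endpoints that independent groups may have produced.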
return new HashSet(
endpoints,
OpenApiEndpointComparator.hashCode,
OpenApiEndpointComparator.equals,
).toJSON();
}
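/**
 * Runs a single MicroAgentica session for one interface group, collects the
 * endpoints produced through tool calls, and dispatches a progress event.
 */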
async function process<Model extends ILlmSchema.Model>(
ctx: AutoBeContext<Model>,
group: AutoBeInterfaceGroup,
content: string,
progress: IProgress,
): Promise<AutoBeOpenApi.IEndpoint[]> {
const start: Date = new Date();
const pointer: IPointer<AutoBeOpenApi.IEndpoint[] | null> = {
value: null,
};
const agentica: MicroAgentica<Model> = new MicroAgentica({
model: ctx.model,
vendor: ctx.vendor,
config: {
...(ctx.config ?? {}),
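      // Disable the post-call "describe" step; only the tool-call result
      // is needed here.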
executor: {
describe: null,
},
},
histories: transformInterfaceEndpointHistories(ctx.state(), group),
controllers: [
createApplication({
model: ctx.model,
      build: (endpoints) => {
        // Start from an empty array so repeated tool calls accumulate;
        // assigning `endpoints` directly would push the batch onto itself.
        pointer.value ??= [];
        pointer.value.push(...endpoints);
      },
}),
],
});
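  // Force the agent to answer through a function call rather than text.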
enforceToolCall(agentica);
await agentica.conversate(content).finally(() => {
const tokenUsage = agentica.getTokenUsage();
ctx.usage().record(tokenUsage, ["interface"]);
});
if (pointer.value === null) throw new Error("Failed to generate endpoints."); // unreachable
const event: AutoBeInterfaceEndpointsEvent = {
type: "interfaceEndpoints",
endpoints: new HashSet(
pointer.value,
OpenApiEndpointComparator.hashCode,
OpenApiEndpointComparator.equals,
).toJSON(),
created_at: start.toISOString(),
step: ctx.state().analyze?.step ?? 0,
completed: ++progress.completed,
total: progress.total,
};
ctx.dispatch(event);
return pointer.value;
}
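/**
 * Wraps the endpoint-building application as an Agentica class controller
 * for the given schema model.
 */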
function createApplication<Model extends ILlmSchema.Model>(props: {
model: Model;
build: (endpoints: AutoBeOpenApi.IEndpoint[]) => void;
}): IAgenticaController.IClass<Model> {
assertSchemaModel(props.model);
const application: ILlmApplication<Model> = collection[
props.model
] as unknown as ILlmApplication<Model>;
return {
protocol: "class",
name: "interface",
application,
execute: {
makeEndpoints: (next) => {
props.build(next.endpoints);
},
} satisfies IAutoBeInterfaceEndpointApplication,
};
}
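// typia compiles one function-calling schema per vendor; the Claude schema
// is reused for the llama, deepseek, and generic "3.1" schema models.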
const claude = typia.llm.application<
IAutoBeInterfaceEndpointApplication,
"claude",
{ reference: true }
>();
const collection = {
chatgpt: typia.llm.application<
IAutoBeInterfaceEndpointApplication,
"chatgpt",
{ reference: true }
>(),
claude,
llama: claude,
deepseek: claude,
"3.1": claude,
};
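/** Mutable counter shared across the parallel `process()` calls. */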
interface IProgress {
total: number;
completed: number;
}
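/*
 * Usage sketch (hypothetical caller — `ctx` and `groups` would come from
 * the surrounding orchestration pipeline, not from this file):
 *
 *   const endpoints: AutoBeOpenApi.IEndpoint[] =
 *     await orchestrateInterfaceEndpoints(ctx, groups);
 *   // -> deduplicated endpoints covering every interface group
 */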