inference-server
Libraries and a server for building AI applications. Adapters to various native bindings allow local inference. Integrate it into your application, or run it as a standalone microservice.
models.js (JavaScript)
// Handler for GET /v1/models, mirroring the OpenAI model list endpoint:
// https://platform.openai.com/docs/api-reference/models/list
export function createModelsHandler(inferenceServer) {
  return async (req, res) => {
    // Map each loaded model's status entry to an OpenAI-style model object.
    const models = inferenceServer.store.getStatus();
    const data = Object.entries(models).map(([id, info]) => {
      return {
        object: 'model',
        id,
        created: 0,
        owned_by: info.engine,
      };
    });
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify({ object: 'list', data }, null, 2));
  };
}
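
Wiring the handler into a plain Node HTTP server is enough to serve the endpoint. A minimal sketch: only createModelsHandler comes from the file above; the inferenceServer stub (an object whose store.getStatus() returns a map of model id to { engine, ... }), the model id 'llama3-8b', and the engine name are illustrative assumptions, not the package's real construction API.

import { createServer } from 'node:http';
import { createModelsHandler } from './models.js';

// Hypothetical stand-in for a real inference server instance; the handler
// only relies on store.getStatus() returning id -> { engine, ... } entries.
const inferenceServer = {
  store: {
    getStatus: () => ({
      'llama3-8b': { engine: 'node-llama-cpp' },
    }),
  },
};

const handleModels = createModelsHandler(inferenceServer);

createServer((req, res) => {
  if (req.method === 'GET' && req.url === '/v1/models') {
    return handleModels(req, res);
  }
  res.writeHead(404);
  res.end();
}).listen(3000);

// GET http://localhost:3000/v1/models then responds with:
// { "object": "list",
//   "data": [ { "object": "model", "id": "llama3-8b",
//               "created": 0, "owned_by": "node-llama-cpp" } ] }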