inference-server
Version:
Libraries and a server for building AI applications. Adapters for various native bindings enable local inference. Integrate it with your application, or use it as a microservice.
29 lines • 985 B
JavaScript
import path from 'node:path';
/**
 * Parse a model path into its identifying parts.
 *
 * Accepts both '/'- and '\'-separated paths so results are consistent
 * across platforms. (Previously this split only on the host OS
 * `path.sep`, so '/'-delimited paths failed to parse on Windows and
 * '\'-delimited paths failed on POSIX.)
 *
 * @param {string} modelPath - e.g. 'huggingface.co/org/repo-main'
 * @returns {{domain: string, name: string, org?: string, branch?: string}}
 *   For 'huggingface.co' paths whose final segment contains a '-', the
 *   text after the LAST '-' is returned as `branch`, the text before it
 *   as `name`, and the second segment as `org`. Otherwise only `domain`
 *   (first segment) and `name` (last segment) are returned.
 * @throws {Error} If the path contains no non-empty segments.
 */
export function parseModelPathInfo(modelPath) {
    // Split on either separator; filter drops empty segments from
    // leading/trailing/doubled separators.
    const parts = modelPath.split(/[\\/]/).filter(Boolean);
    if (parts.length === 0) {
        throw new Error('Invalid model path: empty path');
    }
    // Special handling for huggingface.co paths with the branch encoded
    // as '<repo>-<branch>' in the final segment.
    if (parts[0] === 'huggingface.co') {
        const lastPart = parts[parts.length - 1];
        if (lastPart.includes('-')) {
            // NOTE(review): repo names themselves often contain '-'
            // (e.g. 'Llama-2-7b'); splitting on the last '-' assumes the
            // branch name never does — confirm against the path producer.
            const lastMinusIndex = lastPart.lastIndexOf('-');
            return {
                domain: parts[0],
                org: parts[1],
                name: lastPart.slice(0, lastMinusIndex),
                branch: lastPart.slice(lastMinusIndex + 1),
            };
        }
    }
    return {
        domain: parts[0],
        name: parts[parts.length - 1],
    };
}
//# sourceMappingURL=parseModelPathInfo.js.map