inference-server
Libraries and a server for building AI applications. Adapters to various native bindings enable local inference. Integrate it into your application, or run it as a microservice.
import * as TransformersJs from '@huggingface/transformers';
// The TransformersJs namespace is used below to resolve class names
// (e.g. 'CLIPFeatureExtractor') to the exported classes.
/** Fetch a remote resource and return its body as a Node Buffer. Throws on non-2xx responses. */
export async function fetchBuffer(url) {
    const response = await fetch(url);
    if (!response.ok) {
        throw new Error(`Failed to fetch ${url}: ${response.statusText}`);
    }
    return Buffer.from(await response.arrayBuffer());
}
/** Check whether a remote file exists via an HTTP HEAD request. Returns false on network errors. */
export async function remoteFileExists(url) {
    try {
        const response = await fetch(url, { method: 'HEAD' });
        return response.ok;
    }
    catch (error) {
        console.error('Error checking remote file:', error);
        return false;
    }
}
/**
 * Parse a Hugging Face Hub model URL, like https://huggingface.co/jinaai/jina-clip-v1
 * (optionally with a /tree/<branch> suffix), into a model id and branch name.
 */
export function parseHuggingfaceModelIdAndBranch(url) {
    const parsedUrl = new URL(url);
    // Pathname is /<org>/<name>[/tree/<branch>]; index 0 is the empty segment before the leading slash.
    const urlSegments = parsedUrl.pathname.split('/');
    const repoOrg = urlSegments[1];
    const repoName = urlSegments[2];
    const branch = urlSegments[4] || 'main';
    return {
        modelId: `${repoOrg}/${repoName}`,
        branch,
    };
}
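// Example:
//   parseHuggingfaceModelIdAndBranch('https://huggingface.co/jinaai/jina-clip-v1/tree/main')
//   // => { modelId: 'jinaai/jina-clip-v1', branch: 'main' }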
/**
 * Resolve a Transformers.js class from a string name (looked up on the
 * TransformersJs namespace), a class/function passed directly, or a fallback.
 */
export function normalizeTransformersJsClass(classLike, fallback) {
    if (typeof classLike === 'string' && classLike in TransformersJs) {
        return TransformersJs[classLike];
    }
    if (typeof classLike === 'function') {
        return classLike;
    }
    if (fallback) {
        return fallback;
    }
    throw new Error(`Invalid TransformersJs class: ${classLike}`);
}
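// Example (assuming 'CLIPVisionModelWithProjection' is exported by @huggingface/transformers):
//   normalizeTransformersJsClass('CLIPVisionModelWithProjection')      // => the class itself
//   normalizeTransformersJsClass(undefined, TransformersJs.AutoModel)  // => the fallback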
//# sourceMappingURL=util.js.map
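A minimal sketch of how these helpers compose, assuming this module is imported as ./util.js and using the Hub's resolve/<branch>/<file> download layout; the model id and file name are illustrative:

import { fetchBuffer, remoteFileExists, parseHuggingfaceModelIdAndBranch } from './util.js';

const { modelId, branch } = parseHuggingfaceModelIdAndBranch('https://huggingface.co/jinaai/jina-clip-v1');
// Hub files are served from /<repo>/resolve/<branch>/<path>; config.json exists for most models.
const configUrl = `https://huggingface.co/${modelId}/resolve/${branch}/config.json`;
if (await remoteFileExists(configUrl)) {
    const config = JSON.parse((await fetchBuffer(configUrl)).toString('utf8'));
    console.log('Model type:', config.model_type);
}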