inference-server
Version:
Libraries and server to build AI applications. Adapters to various native bindings allowing local inference. Integrate it with your application, or use as a microservice.
25 lines • 1.11 kB
JavaScript
import fs from 'node:fs';
import path from 'node:path';
import { env } from '@huggingface/transformers';
import { acquireFileLock } from '../../lib/acquireFileLock.js';
import { parseHuggingfaceModelIdAndBranch } from './util.js';
/**
 * Acquire exclusive file locks on a model's cache directory — and, when a
 * vision processor from a (potentially different) Huggingface repo is
 * configured, on that repo's cache directory too — so concurrent processes
 * do not download into the same directories simultaneously.
 *
 * @param {object} config - Model configuration; reads `config.id` and
 *   optionally `config.visionModel.processor.url`.
 * @param {AbortSignal} signal - Forwarded to each `acquireFileLock` call so a
 *   pending lock wait can be cancelled.
 * @returns {Promise<() => void>} Resolves to a release function that frees
 *   every acquired lock.
 * @throws Rethrows the first lock-acquisition failure. Any locks that were
 *   already acquired are released before rethrowing, so none leak.
 */
export async function acquireModelFileLocks(config, signal) {
	const requestedLocks = [];
	const modelCacheDir = path.join(env.cacheDir, config.id);
	fs.mkdirSync(modelCacheDir, { recursive: true });
	requestedLocks.push(acquireFileLock(modelCacheDir, signal));
	if (config.visionModel?.processor?.url) {
		const { modelId } = parseHuggingfaceModelIdAndBranch(config.visionModel.processor.url);
		// NOTE(review): if the processor repo id equals config.id this requests
		// a second lock on the same directory — confirm acquireFileLock
		// tolerates re-entrant acquisition, otherwise dedupe here.
		const processorCacheDir = path.join(env.cacheDir, modelId);
		fs.mkdirSync(processorCacheDir, { recursive: true });
		requestedLocks.push(acquireFileLock(processorCacheDir, signal));
	}
	// Promise.all would abandon already-acquired locks when a later
	// acquisition rejects; allSettled lets us release the successes before
	// surfacing the failure.
	const settled = await Promise.allSettled(requestedLocks);
	const acquiredLocks = [];
	let firstError;
	for (const result of settled) {
		if (result.status === 'fulfilled') {
			acquiredLocks.push(result.value);
		} else if (firstError === undefined) {
			firstError = result.reason;
		}
	}
	if (firstError !== undefined) {
		for (const releaseLock of acquiredLocks) {
			releaseLock();
		}
		throw firstError;
	}
	return () => {
		for (const releaseLock of acquiredLocks) {
			releaseLock();
		}
	};
}
//# sourceMappingURL=acquireModelFileLocks.js.map