inference-server
Version:
Libraries and a server for building AI applications. Adapters to various native bindings allow local inference. Integrate it with your application, or use it as a microservice.
11 lines (10 loc) • 378 B
TypeScript
import { FileDownloadProgress } from '../types/index.js';
/**
 * Options for {@link downloadModelFile}.
 */
interface DownloadArgs {
/** Source URL of the model file to download. */
url: string;
/**
 * Destination path for the downloaded file.
 * NOTE(review): optional — presumably derived from `url` and
 * `modelsCachePath` when omitted; confirm against the implementation.
 */
filePath?: string;
/** Base path of the local models cache. */
modelsCachePath: string;
/** Invoked with progress updates while the file downloads. */
onProgress?: (progress: FileDownloadProgress) => void;
/** Lets the caller cancel an in-flight download via an AbortController. */
signal?: AbortSignal;
}
/**
 * Downloads a model file from `url` into the models cache.
 *
 * Resolves when the download has finished.
 * NOTE(review): error/abort behavior (rejection type when `signal` fires or
 * the transfer fails) is not visible from this declaration — confirm in the
 * implementation.
 *
 * @param args - Download options; see {@link DownloadArgs}.
 * @returns A promise that settles when the download completes.
 */
export declare function downloadModelFile({ url, filePath, modelsCachePath, onProgress, signal }: DownloadArgs): Promise<void>;
export {};