node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
13 lines • 692 B
JavaScript
import { maximumParallelismStrategy } from "./batchItemsPrioritizationStrategies/maximumParallelismStrategy.js";
import { firstInFirstOutStrategy } from "./batchItemsPrioritizationStrategies/firstInFirstOutStrategy.js";
/**
 * Resolve a batch items prioritization strategy option into a concrete
 * strategy function.
 *
 * @param {Function | "maximumParallelism" | "firstInFirstOut"} strategy - either
 *     a custom strategy function (returned as-is) or the name of a built-in strategy
 * @returns {Function} the resolved prioritization strategy function
 * @throws {Error} when `strategy` is neither a function nor a known strategy name
 */
export function resolveBatchItemsPrioritizationStrategy(strategy) {
    // `typeof` is preferred over `instanceof Function`: it also recognizes
    // callables created in another realm (vm context, iframe), which fail
    // an `instanceof` check against this realm's `Function`.
    if (typeof strategy === "function")
        return strategy;
    else if (strategy === "maximumParallelism")
        return maximumParallelismStrategy;
    else if (strategy === "firstInFirstOut")
        return firstInFirstOutStrategy;
    throw new Error(`Unknown batch items prioritize strategy: ${strategy}`);
}
//# sourceMappingURL=resolveBatchItemsPrioritizationStrategy.js.map