node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model output at the generation level.
19 lines • 577 B
JavaScript
/**
 * Invoke each function in `options` in order and return the result of the
 * first one that completes without throwing.
 *
 * Errors thrown by all but the final function are discarded. The final
 * function is called outside of any try/catch, so if it throws, its error
 * propagates unchanged to the caller. An empty array throws
 * `Error("All options failed")`.
 */
export function getFirstValidResult(options) {
    const lastIndex = options.length - 1;
    for (const [index, option] of options.entries()) {
        // Let the last option's error surface to the caller untouched.
        if (index === lastIndex)
            return option();

        try {
            return option();
        }
        catch {
            // swallow and fall through to the next option
        }
    }

    throw new Error("All options failed");
}
//# sourceMappingURL=getFirstValidResult.js.map