@revenium/perplexity
Node.js middleware for Perplexity's AI API.
import { PerplexityReveniumMiddleware } from "../src";

// Streams a chat completion from Perplexity's sonar-pro model through the
// Revenium middleware and prints each token as it arrives.
const chatCompletionStreamingExample = async () => {
  console.log("=".repeat(50));
  try {
    const middleware = new PerplexityReveniumMiddleware();
    const model = middleware.getGenerativeModel("sonar-pro");
    const result = await model.createChatCompletionStream({
      messages: [
        {
          role: "user",
          content: "Hello world",
        },
      ],
    });
    // Each chunk follows the OpenAI-style streaming shape; write the delta text as it arrives.
    for await (const chunk of result) {
      process.stdout.write(chunk.choices[0]?.delta?.content || "");
    }
  } catch (error) {
    console.error(error);
  }
};

chatCompletionStreamingExample();
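
As a variation on the example above, the sketch below streams a multi-turn request through the same API surface and collects the reply into a string instead of writing to stdout. It is a minimal sketch: the system-role message and the chunk shape are assumptions based on the OpenAI-style message format used in this file, not a documented part of the middleware.

import { PerplexityReveniumMiddleware } from "../src";

// Sketch: same middleware and streaming call as the example above, but the
// streamed chunks are accumulated into a single string before printing.
// The system-role message is an assumption based on the OpenAI-style format.
const streamToStringExample = async () => {
  const middleware = new PerplexityReveniumMiddleware();
  const model = middleware.getGenerativeModel("sonar-pro");
  const result = await model.createChatCompletionStream({
    messages: [
      { role: "system", content: "Answer in one short sentence." },
      { role: "user", content: "What is middleware?" },
    ],
  });

  let reply = "";
  for await (const chunk of result) {
    reply += chunk.choices[0]?.delta?.content || "";
  }
  console.log(reply);
};

streamToStringExample().catch(console.error);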