inference-server
Libraries and a server for building AI applications. Adapters to various native bindings enable local inference. Integrate it into your application, or run it as a microservice.
JavaScript
import { flattenMessageTextContent } from '../../lib/flattenMessageTextContent.js';

/**
 * Converts an array of incoming messages into a chat message array:
 * user and assistant messages are passed through with their text content
 * flattened, while all system messages are merged into a single system
 * prompt that is prepended to the result.
 */
export function createChatMessageArray(messages) {
    const chatMessages = [];
    let systemPrompt;
    for (const message of messages) {
        if (message.role === 'user' || message.role === 'assistant') {
            chatMessages.push({
                role: message.role,
                content: flattenMessageTextContent(message.content),
            });
        } else if (message.role === 'system') {
            // Merge multiple system messages into one prompt, separated by blank lines.
            if (systemPrompt) {
                systemPrompt += '\n\n' + flattenMessageTextContent(message.content);
            } else {
                systemPrompt = flattenMessageTextContent(message.content);
            }
        }
    }
    // Prepend the combined system prompt so it leads the conversation.
    if (systemPrompt) {
        chatMessages.unshift({
            role: 'system',
            content: systemPrompt,
        });
    }
    return chatMessages;
}
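For context, a small usage sketch. The input follows OpenAI-style chat messages; the exact content shapes accepted by flattenMessageTextContent are an assumption here, not taken from the library's documentation.

// Hypothetical usage sketch, assuming plain-string content passes through unchanged.
const messages = [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Hello!' },
    { role: 'system', content: 'Answer briefly.' },
];
const chatMessages = createChatMessageArray(messages);
// Both system messages are merged and moved to the front:
// [
//   { role: 'system', content: 'You are a helpful assistant.\n\nAnswer briefly.' },
//   { role: 'user', content: 'Hello!' },
// ]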
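The imported flattenMessageTextContent helper is not shown above. A minimal sketch of what such a helper might look like, assuming content is either a plain string or an array of { type: 'text', text } parts; this is an assumption for illustration, not the library's actual implementation.

// Hypothetical sketch of the helper, under the assumptions stated above.
export function flattenMessageTextContent(content) {
    if (typeof content === 'string') return content;
    if (Array.isArray(content)) {
        return content
            .filter((part) => part.type === 'text' && typeof part.text === 'string')
            .map((part) => part.text)
            .join('\n');
    }
    return '';
}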