inference-server
Version:
Libraries and a server to build AI applications. Adapters to various native bindings allow local inference. Integrate it with your application, or use it as a microservice.
20 lines (17 loc) • 468 B
text/typescript
import {
ChatMessage,
MessageContentPart,
MessageTextContentPart,
} from '#package/types/index.js'
/**
 * Type guard narrowing a message content part to a text part.
 * A part counts as text exactly when its discriminant is 'text'.
 */
function isTextContentPart(
	part: MessageContentPart,
): part is MessageTextContentPart {
	const { type } = part
	return type === 'text'
}
/**
 * Collapses a chat message's content into a single plain string.
 *
 * Plain-string content is returned unchanged. Structured content (an
 * array of parts) keeps only its text parts, whose texts are joined
 * with newlines; non-text parts are dropped.
 *
 * @param content - A chat message's content: a string or an array of parts.
 * @returns The flattened text of the message.
 */
export function flattenMessageTextContent(content: ChatMessage['content']): string {
	// Fast path: already a plain string.
	if (typeof content === 'string') {
		return content
	}
	// Structured content: collect the text of every text part in order.
	const texts: string[] = []
	for (const part of content) {
		if (isTextContentPart(part)) {
			texts.push(part.text)
		}
	}
	return texts.join('\n')
}