inference-server
Version:
Libraries and a server for building AI applications, with adapters to various native bindings that allow local inference. Integrate it with your application, or use it as a microservice.
16 lines (14 loc) • 447 B
text/typescript
import { CompletionFinishReason, ChatMessage } from '#package/types/index.js'
import OpenAI from 'openai'
/**
 * Maps internal completion finish reasons to the `finish_reason` values
 * of the OpenAI chat completion API.
 *
 * `satisfies` validates that the map covers every `CompletionFinishReason`
 * key and that each value is a valid OpenAI `finish_reason`, while keeping
 * the literal value types. (The previous explicit `Record<...>` annotation
 * widened the values, which defeated the trailing `as const`.)
 */
export const finishReasonMap = {
	maxTokens: 'length',
	toolCalls: 'tool_calls',
	// Every other internal reason is reported to clients as a plain 'stop'.
	eogToken: 'stop',
	stopTrigger: 'stop',
	timeout: 'stop',
	cancel: 'stop',
	abort: 'stop',
} as const satisfies Record<CompletionFinishReason, OpenAI.ChatCompletion.Choice['finish_reason']>
/**
 * Translates message role names to their OpenAI-compatible equivalents.
 * Currently covers only the legacy 'function' role, which the OpenAI API
 * renamed to 'tool'.
 */
export const messageRoleMap: Record<string, string> = {
	'function': 'tool',
}