@lobehub/chat
Lobe Chat - an open-source, high-performance chatbot framework with support for speech synthesis, multimodal input, and an extensible Function Call plugin system. It supports one-click, free deployment of your private ChatGPT/LLM web application.
import OpenAI from 'openai';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { imageUrlToBase64 } from '@/utils/imageToBase64';
import {
convertMessageContent,
convertOpenAIMessages,
convertOpenAIResponseInputs,
} from './openaiHelpers';
import { parseDataUri } from './uriParser';
// Mock dependencies
vi.mock('@/utils/imageToBase64');
vi.mock('./uriParser');
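// parseDataUri and imageUrlToBase64 are mocked so each test can control whether a URL is
// treated as a remote image and what base64 payload the conversion produces.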
describe('convertMessageContent', () => {
beforeEach(() => {
vi.resetAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should return the same content if not image_url type', async () => {
const content = { type: 'text', text: 'Hello' } as OpenAI.ChatCompletionContentPart;
const result = await convertMessageContent(content);
expect(result).toEqual(content);
});
it('should convert image URL to base64 when necessary', async () => {
// Set the environment variable that enables base64 conversion
process.env.LLM_VISION_IMAGE_USE_BASE64 = '1';
const content = {
type: 'image_url',
image_url: { url: 'https://example.com/image.jpg' },
} as OpenAI.ChatCompletionContentPart;
vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
vi.mocked(imageUrlToBase64).mockResolvedValue({
base64: 'base64String',
mimeType: 'image/jpeg',
});
const result = await convertMessageContent(content);
expect(result).toEqual({
type: 'image_url',
image_url: { url: 'data:image/jpeg;base64,base64String' },
});
expect(parseDataUri).toHaveBeenCalledWith('https://example.com/image.jpg');
expect(imageUrlToBase64).toHaveBeenCalledWith('https://example.com/image.jpg');
});
it('should not convert image URL when not necessary', async () => {
// Unset the flag; assigning undefined would store the string 'undefined'
delete process.env.LLM_VISION_IMAGE_USE_BASE64;
const content = {
type: 'image_url',
image_url: { url: 'https://example.com/image.jpg' },
} as OpenAI.ChatCompletionContentPart;
vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
const result = await convertMessageContent(content);
expect(result).toEqual(content);
expect(imageUrlToBase64).not.toHaveBeenCalled();
});
});
describe('convertOpenAIMessages', () => {
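// convertOpenAIMessages maps convertMessageContent over every message and, for array content,
// over every content part; the Promise.all spy below asserts both levels of parallel mapping.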
it('should convert string content messages', async () => {
const messages = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
] as OpenAI.ChatCompletionMessageParam[];
const result = await convertOpenAIMessages(messages);
expect(result).toEqual(messages);
});
it('should convert array content messages', async () => {
const messages = [
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } },
],
},
] as OpenAI.ChatCompletionMessageParam[];
vi.spyOn(Promise, 'all');
vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
vi.mocked(imageUrlToBase64).mockResolvedValue({
base64: 'base64String',
mimeType: 'image/jpeg',
});
process.env.LLM_VISION_IMAGE_USE_BASE64 = '1';
const result = await convertOpenAIMessages(messages);
expect(result).toEqual([
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{
type: 'image_url',
image_url: { url: 'data:image/jpeg;base64,base64String' },
},
],
},
]);
expect(Promise.all).toHaveBeenCalledTimes(2); // once for the message array, once for the content array
delete process.env.LLM_VISION_IMAGE_USE_BASE64;
});
it('should not convert image URLs when LLM_VISION_IMAGE_USE_BASE64 is not set', async () => {
const messages = [
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } },
],
},
] as OpenAI.ChatCompletionMessageParam[];
vi.spyOn(Promise, 'all');
vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
vi.mocked(imageUrlToBase64).mockResolvedValue({
base64: 'base64String',
mimeType: 'image/jpeg',
});
const result = await convertOpenAIMessages(messages);
expect(result).toEqual(messages);
expect(Promise.all).toHaveBeenCalledTimes(2); // once for the message array, once for the content array
});
});
describe('convertOpenAIResponseInputs', () => {
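// The Responses API input format keeps plain messages as-is, flattens assistant tool_calls into
// 'function_call' items, and turns tool messages into 'function_call_output' items, as asserted below.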
it('should convert plain text messages correctly', async () => {
const messages: OpenAI.ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' },
];
const result = await convertOpenAIResponseInputs(messages);
expect(result).toEqual([
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' },
]);
});
it('should convert messages with tool calls correctly', async () => {
const messages: OpenAI.ChatCompletionMessageParam[] = [
{
role: 'assistant',
content: '',
tool_calls: [
{
id: 'call_123',
type: 'function',
function: {
name: 'test_function',
arguments: '{"key": "value"}',
},
},
],
},
];
const result = await convertOpenAIResponseInputs(messages);
expect(result).toEqual([
{
arguments: '{"key": "value"}',
call_id: 'call_123',
name: 'test_function',
type: 'function_call',
},
]);
});
it('should convert tool response messages correctly', async () => {
const messages: OpenAI.ChatCompletionMessageParam[] = [
{
role: 'tool',
content: 'Function result',
tool_call_id: 'call_123',
},
];
const result = await convertOpenAIResponseInputs(messages);
expect(result).toEqual([
{
call_id: 'call_123',
output: 'Function result',
type: 'function_call_output',
},
]);
});
it('should convert messages containing images correctly', async () => {
const messages: OpenAI.ChatCompletionMessageParam[] = [
{
role: 'user',
content: [
{ type: 'text', text: 'Here is an image' },
{
type: 'image_url',
image_url: {
url: 'data:image/jpeg;base64,test123',
},
},
],
},
];
const result = await convertOpenAIResponseInputs(messages);
expect(result).toEqual([
{
role: 'user',
content: [
{ type: 'input_text', text: 'Here is an image' },
{
type: 'input_image',
image_url: 'data:image/jpeg;base64,test123',
},
],
},
]);
});
it('should handle mixed message sequences correctly', async () => {
const messages: OpenAI.ChatCompletionMessageParam[] = [
{ role: 'user', content: 'I need help with a function' },
{
role: 'assistant',
content: '',
tool_calls: [
{
id: 'call_456',
type: 'function',
function: {
name: 'get_data',
arguments: '{}',
},
},
],
},
{
role: 'tool',
content: '{"result": "success"}',
tool_call_id: 'call_456',
},
];
const result = await convertOpenAIResponseInputs(messages);
expect(result).toEqual([
{ role: 'user', content: 'I need help with a function' },
{
arguments: '{}',
call_id: 'call_456',
name: 'get_data',
type: 'function_call',
},
{
call_id: 'call_456',
output: '{"result": "success"}',
type: 'function_call_output',
},
]);
});
});
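For reference, the behavior these tests pin down for convertMessageContent can be summarized in a minimal sketch. This is a reconstruction from the assertions above, assuming the LLM_VISION_IMAGE_USE_BASE64 flag is the only switch; the real helper in ./openaiHelpers may differ.

import OpenAI from 'openai';

import { imageUrlToBase64 } from '@/utils/imageToBase64';
import { parseDataUri } from './uriParser';

export const convertMessageContent = async (
  content: OpenAI.ChatCompletionContentPart,
): Promise<OpenAI.ChatCompletionContentPart> => {
  // Non-image parts pass through untouched.
  if (content.type !== 'image_url') return content;

  const { type } = parseDataUri(content.image_url.url);

  // Remote URLs are inlined as data URIs only when LLM_VISION_IMAGE_USE_BASE64 is set to '1'.
  if (type === 'url' && process.env.LLM_VISION_IMAGE_USE_BASE64 === '1') {
    const { base64, mimeType } = await imageUrlToBase64(content.image_url.url);
    return {
      ...content,
      image_url: { ...content.image_url, url: `data:${mimeType};base64,${base64}` },
    };
  }

  return content;
};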