/**
 * @lobehub/chat
 * Version: (unspecified)
 * Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.
 * 116 lines (96 loc) • 4.19 kB — text/typescript
 */
import { TRPCLink, createTRPCClient, httpBatchLink } from '@trpc/client';
import { createTRPCReact } from '@trpc/react-query';
import { observable } from '@trpc/server/observable';
import debug from 'debug';
import { ModelProvider } from 'model-bank';
import superjson from 'superjson';
import { isDesktop } from '@/const/version';
import type { LambdaRouter } from '@/server/routers/lambda';
// Namespaced debug logger for this module (enable with DEBUG=lobe-image:lambda-client).
const log = debug('lobe-image:lambda-client');
// 401 error debouncing: prevent showing multiple login notifications in short time
// Module-level mutable state shared by the error-handling link below.
let last401Time = 0; // epoch ms of the most recent 401 notification shown
const MIN_401_INTERVAL = 5000; // 5 seconds
// handle error
/**
 * tRPC link that intercepts errors from downstream links.
 *
 * - Abort/cancellation errors are passed through silently.
 * - 401 responses trigger a (debounced) login-required redirect and are
 *   flagged as non-retryable so SWR does not loop on them.
 * - All other statuses surface a generic fetch-error notification, unless
 *   the operation opted out via `context.showNotification === false`.
 *
 * The original error is always re-emitted to the observer afterwards.
 */
const errorHandlingLink: TRPCLink<LambdaRouter> = () => {
  return ({ op, next }) =>
    observable((observer) =>
      next(op).subscribe({
        complete: () => observer.complete(),
        error: async (err) => {
          // Abort-triggered failures are expected (user navigation, manual
          // cancellation) and never actionable, so they get no notification.
          const aborted =
            err.name === 'AbortError' ||
            err.cause?.name === 'AbortError' ||
            err.message.includes('aborted') ||
            err.message.includes('signal is aborted without reason');

          const shouldNotify = (op.context?.showNotification as boolean) ?? true;
          const httpStatus = err.data?.httpStatus as number;

          if (shouldNotify && !aborted) {
            const { loginRequired } = await import('@/components/Error/loginRequiredNotification');
            const { fetchErrorNotification } = await import(
              '@/components/Error/fetchErrorNotification'
            );

            if (httpStatus === 401) {
              // Debounce: only show login notification once every 5 seconds
              const now = Date.now();
              if (now - last401Time > MIN_401_INTERVAL) {
                last401Time = now;
                loginRequired.redirect();
              }
              // Mark error as non-retryable to prevent SWR infinite retry loop
              err.meta = { ...err.meta, shouldRetry: false };
            } else {
              fetchErrorNotification.error({ errorMessage: err.message, status: httpStatus });
            }
          }

          observer.error(err);
        },
        next: (value) => observer.next(value),
      }),
    );
};
// 2. httpBatchLink
// 2. httpBatchLink
/**
 * Batching HTTP transport for the lambda router.
 *
 * - On desktop builds, requests are first offered to the Electron remote RPC
 *   bridge; when it declines (returns a falsy result) we fall back to the
 *   regular browser `fetch`.
 * - Headers are built per request via `createHeaderWithAuth`; on the /image
 *   page the active provider is read from the image store instead of the
 *   OpenAI default.
 *
 * Fix: the "Getting provider from store for image page" debug line was
 * previously logged unconditionally for every request; it now only fires on
 * the /image page, where it is accurate.
 */
const customHttpBatchLink = httpBatchLink({
  fetch: async (input, init) => {
    if (isDesktop) {
      const { desktopRemoteRPCFetch } = await import('@/utils/electron/desktopRemoteRPCFetch');
      // eslint-disable-next-line no-undef
      const res = await desktopRemoteRPCFetch(input as string, init as RequestInit);
      if (res) return res;
    }
    // eslint-disable-next-line no-undef
    return await fetch(input, init as RequestInit);
  },
  headers: async () => {
    // dynamic import to avoid circular dependency
    const { createHeaderWithAuth } = await import('@/services/_auth');
    let provider: ModelProvider = ModelProvider.OpenAI;
    // for image page, we need to get the provider from the store
    if (location.pathname === '/image') {
      log('Getting provider from store for image page: %s', location.pathname);
      const { getImageStoreState } = await import('@/store/image');
      const { imageGenerationConfigSelectors } = await import(
        '@/store/image/slices/generationConfig/selectors'
      );
      provider = imageGenerationConfigSelectors.provider(getImageStoreState()) as ModelProvider;
      log('Getting provider from store for image page: %s', provider);
    }
    // TODO: we need to support provider select for chat page
    const headers = await createHeaderWithAuth({ provider });
    log('Headers: %O', headers);
    return headers;
  },
  maxURLLength: 2083,
  transformer: superjson,
  url: '/trpc/lambda',
});
// 3. assembly links
// 3. assembly links
// Shared link chain: error handling runs first, then the batching HTTP transport.
const links = [errorHandlingLink, customHttpBatchLink];

/** Plain tRPC client for imperative (non-React) calls against the lambda router. */
export const lambdaClient = createTRPCClient<LambdaRouter>({ links });

/** React-Query flavored tRPC bindings for the lambda router. */
export const lambdaQuery = createTRPCReact<LambdaRouter>();

/** Query client wired to the same link chain as `lambdaClient`. */
export const lambdaQueryClient = lambdaQuery.createClient({ links });