nb-scraper

Community scraper library by Newbie Scrape

{"version":3,"sources":["../app/utils.ts","../app/scrapers/blackbox.ts","../app/scrapers/threads.ts","../app/scrapers/pinterest.ts","../app/scrapers/exomlapi.ts","../app/scrapers/dreamanalysis.ts","../app/scrapers/pollinations.ts","../app/scrapers/soundcloud.ts","../app/scrapers/deepinfra.ts","../app/scrapers/animeindo.ts","../app/scrapers/facebook.ts","../app/scrapers/anydownloader.ts","../app/scrapers/youtube.ts","../app/scrapers/liputan6.ts","../app/scrapers/laratranslate.ts","../app/scrapers/savegram.ts","../app/scrapers/wheaterMaster.ts","../app/scrapers/youtube-post.ts","../app/scrapers/lyrics-generator.ts","../app/scrapers/apkpure.ts","../app/scrapers/writeCanvas.ts","../app/scrapers/bacakomik.ts","../app/scrapers/unaimytext.ts","../app/scrapers/ssyoutube.ts","../app/scrapers/ffstalk.ts","../app/scrapers/tiktok.ts","../app/scrapers/translate-image.ts","../app/scrapers/periksadata.ts","../app/scrapers/terabox.ts","../app/scrapers/tutwuri.ts","../app/scrapers/ytmp3mobi.ts","../app/scrapers/textcraft.ts","../app/scrapers/groupda.ts","../app/index.ts"],"sourcesContent":["/**\n * @fileoverview Utility functions for NB Scraper\n * @author Er Project\n * @version 1.0.0\n */\n\nimport axios, { AxiosError, AxiosRequestConfig, AxiosResponse } from 'axios';\nimport { NBScraperResponse, ScraperError, ScraperErrorType, RequestConfig } from './types';\n\n/**\n * Default configuration for the scraper\n */\nexport const DEFAULT_CONFIG = {\n timeout: 30000,\n retries: 3,\n retryDelay: 1000,\n userAgent: 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Mobile Safari/537.36'\n} as const;\n\n/**\n * Creator information\n */\nexport const CREATOR = 'nb-scraper';\n\n/**\n * Sleep function for delays\n * @param ms - Milliseconds to sleep\n * @returns Promise that resolves after the specified time\n */\nexport const sleep = (ms: number): Promise<void> => \n new Promise(resolve => setTimeout(resolve, ms));\n\n/**\n * Validates if a string is a valid URL\n * @param url - The URL string to validate\n * @returns True if the URL is valid, false otherwise\n */\nexport function isValidUrl(url: string): boolean {\n try {\n new URL(url);\n return true;\n } catch {\n return false;\n }\n}\n\n/**\n * Sanitizes a string by removing potentially harmful characters\n * @param input - The input string to sanitize\n * @returns Sanitized string\n */\nexport function sanitizeString(input: string): string {\n return input\n .replace(/[<>'\"]/g, '') // Remove potential XSS characters\n .trim()\n .slice(0, 10000); // Limit length to prevent memory issues\n}\n\n/**\n * Creates a standardized error response\n * @param error - The error that occurred\n * @param context - Additional context about the error\n * @returns Standardized error response\n */\nexport function createErrorResponse(\n error: ScraperError | Error | string,\n context?: Record<string, unknown>\n): NBScraperResponse<never> {\n let errorMessage: string;\n let errorType: ScraperErrorType = ScraperErrorType.UNKNOWN_ERROR;\n\n if (typeof error === 'string') {\n errorMessage = error;\n } else if (error instanceof Error) {\n errorMessage = error.message;\n \n // Classify error type based on error properties\n if (error.name === 'AxiosError' || error.message.includes('network')) {\n errorType = ScraperErrorType.NETWORK_ERROR;\n } else if (error.message.includes('timeout')) {\n errorType = ScraperErrorType.NETWORK_ERROR;\n } else if (error.message.includes('rate') || error.message.includes('limit')) {\n errorType = 
ScraperErrorType.RATE_LIMITED;\n }\n } else {\n errorMessage = error.message;\n errorType = error.type;\n }\n\n return {\n creator: CREATOR,\n status: false,\n error: `[${errorType}] ${errorMessage}${context ? ` | Context: ${JSON.stringify(context)}` : ''}`\n };\n}\n\n/**\n * Creates a standardized success response\n * @param data - The data to include in the response\n * @returns Standardized success response\n */\nexport function createSuccessResponse<T>(data: T): NBScraperResponse<T> {\n return {\n creator: CREATOR,\n status: true,\n data\n };\n}\n\n/**\n * Makes an HTTP request with retry logic and error handling\n * @param config - Axios request configuration\n * @param options - Additional options for the request\n * @returns Promise that resolves to the response data\n */\nexport async function makeRequest<T = unknown>(\n config: AxiosRequestConfig,\n options: RequestConfig = {}\n): Promise<AxiosResponse<T>> {\n const {\n timeout = DEFAULT_CONFIG.timeout,\n retries = DEFAULT_CONFIG.retries,\n retryDelay = DEFAULT_CONFIG.retryDelay,\n headers = {}\n } = options;\n\n const requestConfig: AxiosRequestConfig = {\n ...config,\n timeout,\n headers: {\n 'User-Agent': DEFAULT_CONFIG.userAgent,\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'id-ID,id;q=0.9,en;q=0.8',\n ...headers,\n ...config.headers\n }\n };\n\n let lastError: Error | undefined;\n\n for (let attempt = 0; attempt <= retries; attempt++) {\n try {\n const response = await axios(requestConfig);\n return response;\n } catch (error) {\n lastError = error as Error;\n \n // Don't retry on certain error types\n if (axios.isAxiosError(error)) {\n const axiosError = error as AxiosError;\n \n // Don't retry on 4xx errors (except 429 - Too Many Requests)\n if (axiosError.response?.status && \n axiosError.response.status >= 400 && \n axiosError.response.status < 500 && \n axiosError.response.status !== 429) {\n throw error;\n }\n }\n\n // If this is the last attempt, throw the error\n if (attempt === retries) {\n throw lastError;\n }\n\n // Wait before retrying\n await sleep(retryDelay * (attempt + 1)); // Exponential backoff\n }\n }\n\n throw lastError;\n}\n\n/**\n * Validates required parameters for scraper functions\n * @param params - Object containing parameters to validate\n * @param required - Array of required parameter names\n * @throws Error if any required parameter is missing or invalid\n */\nexport function validateRequiredParams(\n params: Record<string, unknown>,\n required: string[]\n): void {\n for (const param of required) {\n const value = params[param];\n \n if (value === undefined || value === null || value === '') {\n throw new Error(`Parameter '${param}' is required and cannot be empty`);\n }\n \n if (typeof value === 'string' && value.trim() === '') {\n throw new Error(`Parameter '${param}' cannot be an empty string`);\n }\n }\n}\n\n/**\n * Safely parses JSON string, returning null if parsing fails\n * @param jsonString - The JSON string to parse\n * @returns Parsed object or null if parsing failed\n */\nexport function safeJsonParse<T = unknown>(jsonString: string): T | null {\n try {\n return JSON.parse(jsonString) as T;\n } catch {\n return null;\n }\n}\n\n/**\n * Extracts domain from URL\n * @param url - The URL to extract domain from\n * @returns Domain string or null if invalid URL\n */\nexport function extractDomain(url: string): string | null {\n try {\n const urlObj = new URL(url);\n return urlObj.hostname;\n } catch {\n return null;\n }\n}\n\n/**\n * Formats bytes to 
human readable format\n * @param bytes - Number of bytes\n * @param decimals - Number of decimal places\n * @returns Formatted string\n */\nexport function formatBytes(bytes: number, decimals = 2): string {\n if (bytes === 0) return '0 Bytes';\n\n const k = 1024;\n const dm = decimals < 0 ? 0 : decimals;\n const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];\n\n const i = Math.floor(Math.log(bytes) / Math.log(k));\n\n return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];\n}","/**\n * @fileoverview BlackBox AI scraper implementation\n * @author From NB\n * @version 1.0.0\n */\n\nimport { \n NBScraperResponse, \n BlackBoxAIData, \n BlackBoxAIOptions, \n BlackBoxSource,\n} from '../types';\nimport { \n createErrorResponse, \n createSuccessResponse, \n makeRequest, \n validateRequiredParams,\n safeJsonParse\n} from '../utils';\n\n/**\n * Default BlackBox AI configuration\n */\nconst DEFAULT_BLACKBOX_CONFIG = {\n maxTokens: 1024,\n temperature: null,\n webSearchMode: false,\n memoryEnabled: false\n} as const;\n\n/**\n * Scrapes BlackBox AI for responses to queries\n * \n * @example\n * ```typescript\n * import { blackboxAi } from 'nb-scraper';\n * \n * const result = await blackboxAi('What is TypeScript?');\n * if (result.status) {\n * console.log(result.data.response);\n * console.log(result.data.source);\n * }\n * ```\n * \n * @param query - The query to send to BlackBox AI\n * @param options - Optional configuration for the request\n * @returns Promise resolving to the AI response with sources\n * \n * @throws Will not throw errors, returns error response instead\n * \n * @author Pratama\n */\nexport async function blackboxAi(\n query: string,\n options: BlackBoxAIOptions = {}\n): Promise<NBScraperResponse<BlackBoxAIData>> {\n try {\n // Validate input parameters\n validateRequiredParams({ query }, ['query']);\n\n // Sanitize and validate query\n const sanitizedQuery = query.trim();\n if (sanitizedQuery.length === 0) {\n return createErrorResponse('Query cannot be empty', { query });\n }\n\n if (sanitizedQuery.length > 10000) {\n return createErrorResponse('Query too long (max 10,000 characters)', { \n queryLength: sanitizedQuery.length \n });\n }\n\n // Merge options with defaults\n const config = { ...DEFAULT_BLACKBOX_CONFIG, ...options };\n\n // Prepare request headers\n const headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'id-ID,id;q=0.9',\n 'Content-Type': 'application/json',\n 'Origin': 'https://www.blackbox.ai',\n 'Referer': 'https://www.blackbox.ai/',\n 'Sec-Ch-Ua': '\"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"',\n 'Sec-Ch-Ua-Mobile': '?1',\n 'Sec-Ch-Ua-Platform': '\"Android\"',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n ...options.headers\n };\n\n // Prepare request payload\n const payload = {\n messages: [{ \n role: 'user', \n content: sanitizedQuery, \n id: generateRandomId() \n }],\n id: generateRandomId(),\n previewToken: null,\n userId: null,\n codeModelMode: true,\n trendingAgentMode: {},\n isMicMode: false,\n userSystemPrompt: null,\n maxTokens: config.maxTokens,\n playgroundTopP: null,\n playgroundTemperature: config.temperature,\n isChromeExt: false,\n githubToken: '',\n clickedAnswer2: false,\n clickedAnswer3: false,\n clickedForceWebSearch: config.webSearchMode,\n visitFromDelta: false,\n isMemoryEnabled: config.memoryEnabled,\n mobileClient: false,\n userSelectedModel: null,\n validated: generateUUID(),\n imageGenerationMode: 
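How a scraper composes these helpers, as a minimal sketch; `fetchTitle` and its target URL are illustrative and not part of the package:

// Hypothetical scraper built on the utilities above.
import { NBScraperResponse } from './types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams
} from './utils';

export async function fetchTitle(url: string): Promise<NBScraperResponse<{ title: string }>> {
  try {
    validateRequiredParams({ url }, ['url']);
    // Timeout, retries, and default headers all come from DEFAULT_CONFIG.
    const response = await makeRequest<string>({ url, method: 'GET' });
    const title = /<title>(.*?)<\/title>/i.exec(response.data)?.[1] ?? '';
    return createSuccessResponse({ title });
  } catch (error) {
    // Errors become the standard envelope instead of propagating.
    return createErrorResponse(error as Error, { url });
  }
}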

// ../app/scrapers/blackbox.ts

/**
 * @fileoverview BlackBox AI scraper implementation
 * @author From NB
 * @version 1.0.0
 */

import {
  NBScraperResponse,
  BlackBoxAIData,
  BlackBoxAIOptions,
  BlackBoxSource,
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams,
  safeJsonParse
} from '../utils';

/**
 * Default BlackBox AI configuration
 */
const DEFAULT_BLACKBOX_CONFIG = {
  maxTokens: 1024,
  temperature: null,
  webSearchMode: false,
  memoryEnabled: false
} as const;

/**
 * Scrapes BlackBox AI for responses to queries
 *
 * @example
 * ```typescript
 * import { blackboxAi } from 'nb-scraper';
 *
 * const result = await blackboxAi('What is TypeScript?');
 * if (result.status) {
 *   console.log(result.data.response);
 *   console.log(result.data.source);
 * }
 * ```
 *
 * @param query - The query to send to BlackBox AI
 * @param options - Optional configuration for the request
 * @returns Promise resolving to the AI response with sources
 *
 * @throws Will not throw errors, returns error response instead
 *
 * @author Pratama
 */
export async function blackboxAi(
  query: string,
  options: BlackBoxAIOptions = {}
): Promise<NBScraperResponse<BlackBoxAIData>> {
  try {
    // Validate input parameters
    validateRequiredParams({ query }, ['query']);

    // Sanitize and validate query
    const sanitizedQuery = query.trim();
    if (sanitizedQuery.length === 0) {
      return createErrorResponse('Query cannot be empty', { query });
    }

    if (sanitizedQuery.length > 10000) {
      return createErrorResponse('Query too long (max 10,000 characters)', {
        queryLength: sanitizedQuery.length
      });
    }

    // Merge options with defaults
    const config = { ...DEFAULT_BLACKBOX_CONFIG, ...options };

    // Prepare request headers
    const headers = {
      'Accept': '*/*',
      'Accept-Encoding': 'gzip, deflate, br',
      'Accept-Language': 'id-ID,id;q=0.9',
      'Content-Type': 'application/json',
      'Origin': 'https://www.blackbox.ai',
      'Referer': 'https://www.blackbox.ai/',
      'Sec-Ch-Ua': '"Chromium";v="137", "Not/A)Brand";v="24"',
      'Sec-Ch-Ua-Mobile': '?1',
      'Sec-Ch-Ua-Platform': '"Android"',
      'Sec-Fetch-Dest': 'empty',
      'Sec-Fetch-Mode': 'cors',
      'Sec-Fetch-Site': 'same-origin',
      ...options.headers
    };

    // Prepare request payload
    const payload = {
      messages: [{
        role: 'user',
        content: sanitizedQuery,
        id: generateRandomId()
      }],
      id: generateRandomId(),
      previewToken: null,
      userId: null,
      codeModelMode: true,
      trendingAgentMode: {},
      isMicMode: false,
      userSystemPrompt: null,
      maxTokens: config.maxTokens,
      playgroundTopP: null,
      playgroundTemperature: config.temperature,
      isChromeExt: false,
      githubToken: '',
      clickedAnswer2: false,
      clickedAnswer3: false,
      clickedForceWebSearch: config.webSearchMode,
      visitFromDelta: false,
      isMemoryEnabled: config.memoryEnabled,
      mobileClient: false,
      userSelectedModel: null,
      validated: generateUUID(),
      imageGenerationMode: false,
      webSearchModePrompt: config.webSearchMode,
      deepSearchMode: false,
      domains: null,
      vscodeClient: false,
      codeInterpreterMode: false,
      customProfile: {
        name: '',
        occupation: '',
        traits: [],
        additionalInfo: '',
        enableNewChats: false
      },
      webSearchModeOption: {
        autoMode: true,
        webMode: config.webSearchMode,
        offlineMode: !config.webSearchMode
      },
      session: null,
      isPremium: false,
      subscriptionCache: null,
      beastMode: false,
      reasoningMode: false,
      designerMode: false,
      workspaceId: '',
      asyncMode: false,
      isTaskPersistent: false
    };

    // Make the request
    const response = await makeRequest(
      {
        method: 'POST',
        url: 'https://www.blackbox.ai/api/chat',
        data: payload,
        headers
      },
      {
        timeout: options.timeout,
        retries: options.retries,
        retryDelay: options.retryDelay
      }
    );

    // Validate response
    if (!response.data || typeof response.data !== 'string') {
      return createErrorResponse('Invalid response format from BlackBox AI', {
        responseType: typeof response.data,
        status: response.status
      });
    }

    // Parse response
    const rawResponse = response.data;
    const parsedData = parseBlackBoxResponse(rawResponse);

    if (!parsedData) {
      return createErrorResponse('Failed to parse BlackBox AI response', {
        rawResponse: rawResponse.substring(0, 100) + '...'
      });
    }

    return createSuccessResponse(parsedData);
  } catch (error) {
    return createErrorResponse(error as Error, {
      query: query.substring(0, 100),
      options: { ...options, headers: undefined } // Don't log headers for security
    });
  }
}

/**
 * Parses the raw response from BlackBox AI
 * @param rawResponse - The raw response string
 * @returns Parsed data or null if parsing failed
 * @internal
 */
function parseBlackBoxResponse(rawResponse: string): BlackBoxAIData | null {
  try {
    const parsed = rawResponse.split('$~~~$');

    if (parsed.length === 1) {
      // Simple response without sources
      const response = parsed[0]?.trim();
      if (!response) {
        return null;
      }

      return {
        response,
        source: []
      };
    } else if (parsed.length >= 3) {
      // Response with sources
      const response = parsed[2]?.trim();
      const sourcesData = parsed[1];

      if (!response || !sourcesData) {
        return null;
      }

      const sources = safeJsonParse<BlackBoxSource[]>(sourcesData);
      if (!Array.isArray(sources)) {
        // If sources parsing fails, return response without sources
        return {
          response,
          source: []
        };
      }

      // Validate and sanitize sources
      const validSources = sources
        .filter(source =>
          source &&
          typeof source === 'object' &&
          typeof source.link === 'string' &&
          typeof source.title === 'string'
        )
        .map(source => ({
          link: source.link,
          title: source.title,
          snippet: source.snippet || '',
          position: typeof source.position === 'number' ? source.position : 0
        }));

      return {
        response,
        source: validSources
      };
    }

    return null;
  } catch {
    return null;
  }
}

/**
 * Generates a random ID for requests
 * @returns Random ID string
 * @internal
 */
function generateRandomId(): string {
  const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
  let result = '';
  for (let i = 0; i < 7; i++) {
    result += chars.charAt(Math.floor(Math.random() * chars.length));
  }
  return result;
}

/**
 * Generates a UUID v4
 * @returns UUID string
 * @internal
 */
function generateUUID(): string {
  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
    const r = Math.random() * 16 | 0;
    const v = c === 'x' ? r : (r & 0x3 | 0x8);
    return v.toString(16);
  });
}

// ../app/scrapers/threads.ts

/**
 * @fileoverview Threads media scraper implementation
 * @author NB Team
 * @version 1.0.0
 */

import {
  NBScraperResponse,
  ThreadsMediaData,
  ThreadsOptions
} from '../types.js';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams,
  isValidUrl,
  extractDomain
} from '../utils.js';

/**
 * @alpha
 * Scrapes media URLs from Threads posts
 *
 * @example
 * ```typescript
 * import { threads } from 'nb-scraper';
 *
 * const result = await threads('https://www.threads.net/@username/post/123456789');
 * if (result.status) {
 *   console.log('Images:', result.data.image_urls);
 *   console.log('Videos:', result.data.video_urls);
 * }
 * ```
 *
 * @param url - The Threads post URL to scrape
 * @param options - Optional configuration for the request
 * @returns Promise resolving to media URLs from the post
 *
 * @throws Will not throw errors, returns error response instead
 *
 * @author Rian
 */
export async function threads(
  url: string,
  options: ThreadsOptions = {}
): Promise<NBScraperResponse<ThreadsMediaData>> {
  try {
    // Validate input parameters
    validateRequiredParams({ url }, ['url']);

    // Validate URL format
    const sanitizedUrl = url.trim();
    if (!isValidUrl(sanitizedUrl)) {
      return createErrorResponse('Invalid URL format', { url: sanitizedUrl });
    }

    // Check if URL is from Threads
    const domain = extractDomain(sanitizedUrl);
    if (!domain?.includes('threads.net')) {
      return createErrorResponse('URL must be from threads.net', { domain, url: sanitizedUrl });
    }

    // Prepare API URL
    const apiUrl = `https://api.threadsphotodownloader.com/v2/media?url=${encodeURIComponent(sanitizedUrl)}`;

    // Prepare request headers
    const headers = {
      'User-Agent': '5.0',
      'Accept': 'application/json',
      'Accept-Language': 'id-ID,id;q=0.9,en;q=0.8',
      ...options.headers
    };

    // Make the request
    const response = await makeRequest(
      {
        method: 'GET',
        url: apiUrl,
        headers
      },
      {
        timeout: options.timeout,
        retries: options.retries,
        retryDelay: options.retryDelay
      }
    );

    // Validate response
    if (!response.data || typeof response.data !== 'object') {
      return createErrorResponse('Invalid response format from Threads API', {
        responseType: typeof response.data,
        status: response.status
      });
    }

    const rawData = response.data as Record<string, unknown>;

    // Parse and validate media URLs
    const imageUrls = parseMediaUrls(rawData.image_urls);
    const videoUrls = parseMediaUrls(rawData.video_urls);

    // Apply filters based on options
    const filteredData: ThreadsMediaData = {
      image_urls: imageUrls,
      video_urls: videoUrls
    };

    if (options.imagesOnly) {
      filteredData.video_urls = [];
    } else if (options.videosOnly) {
      filteredData.image_urls = [];
    }

    // Check if any media was found
    if (filteredData.image_urls.length === 0 && filteredData.video_urls.length === 0) {
      return createErrorResponse('No media found in the Threads post', {
        url: sanitizedUrl,
        rawImageCount: imageUrls.length,
        rawVideoCount: videoUrls.length
      });
    }

    return createSuccessResponse(filteredData);

  } catch (error) {
    return createErrorResponse(error as Error, {
      url: url.substring(0, 100),
      options: { ...options, headers: undefined }
    });
  }
}

/**
 * Parses and validates media URLs from API response
 * @param urls - Raw URLs data from API
 * @returns Array of valid URL strings
 * @internal
 */
function parseMediaUrls(urls: unknown): string[] {
  if (!Array.isArray(urls)) {
    return [];
  }

  return urls
    .filter((url): url is string =>
      typeof url === 'string' &&
      url.trim().length > 0 &&
      isValidUrl(url.trim())
    )
    .map(url => url.trim())
    .slice(0, 50); // Limit to prevent memory issues
}
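Every scraper returns the same `NBScraperResponse` envelope instead of throwing, so callers branch on `status`; a small consumer sketch (the query and the sample error string are illustrative):

import { blackboxAi } from 'nb-scraper';

async function demo(): Promise<void> {
  const result = await blackboxAi('What is TypeScript?', { webSearchMode: true });
  if (!result.status) {
    // e.g. "[NETWORK_ERROR] timeout of 30000ms exceeded | Context: {...}"
    console.error(result.error);
    return;
  }
  console.log(result.data.response);
  for (const src of result.data.source) {
    console.log(`${src.position}. ${src.title} - ${src.link}`);
  }
}

demo();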

// ../app/scrapers/pinterest.ts

/**
 * @fileoverview Pinterest Scraper
 * @author Wolep
 * @version 1.0.0
 */

import { NBScraperResponse, PinterestData, ScraperErrorType, RequestConfig } from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams
} from '../utils';

/**
 * @alpha
 * Search Pinterest for the given query
 *
 * @example
 * ```typescript
 * import { pinterest } from 'nb-scraper';
 *
 * const result = await pinterest('jagung');
 * if (result.status) {
 *   console.log(result.data.result);
 * }
 * ```
 *
 * @param query - The query to search content on Pinterest
 * @param options - Optional request configuration
 * @returns Promise resolving to an array of image URLs
 *
 * @throws Will not throw errors, returns error response instead
 *
 * @author Wolep
 */
export async function pinterest(
  query: string,
  options?: RequestConfig
): Promise<NBScraperResponse<PinterestData>> {
  try {
    // Validate required parameters
    validateRequiredParams({ query }, ['query']);

    // Prepare the request
    const searchUrl = "https://www.pinterest.com/resource/BaseSearchResource/get/";
    const params = new URLSearchParams({
      data: JSON.stringify({
        options: {
          query: query
        }
      })
    });

    // A HEAD request is enough: Pinterest echoes result image URLs
    // in the response's Link header
    const response = await makeRequest({
      url: `${searchUrl}?${params.toString()}`,
      method: 'HEAD',
      headers: {
        'screen-dpr': '4',
        'x-pinterest-pws-handler': 'www/search/[scope].js',
        ...options?.headers
      }
    }, options);

    // Extract links from response headers
    const linkHeader = response.headers.link;
    if (!linkHeader) {
      return createErrorResponse('No results found for the query', {
        type: ScraperErrorType.INVALID_RESPONSE,
        context: { query }
      });
    }

    // Parse the links from the header
    const links = [...linkHeader.matchAll(/<(.*?)>/gm)].map(v => v[1]);

    return createSuccessResponse<PinterestData>({
      result: links
    });

  } catch (error) {
    return createErrorResponse(error as Error, {
      type: ScraperErrorType.NETWORK_ERROR,
      context: { query }
    });
  }
}
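The HEAD trick above works because Pinterest returns result image URLs in the standard `Link` response header; a toy run of the same regex extraction, with a made-up header value:

// Made-up header value; real responses carry Pinterest CDN URLs.
const linkHeader =
  '<https://i.pinimg.com/originals/aa/bb/cc.jpg>; rel=preload, ' +
  '<https://i.pinimg.com/originals/dd/ee/ff.jpg>; rel=preload';

const links = [...linkHeader.matchAll(/<(.*?)>/gm)].map(v => v[1]);
console.log(links);
// ['https://i.pinimg.com/originals/aa/bb/cc.jpg',
//  'https://i.pinimg.com/originals/dd/ee/ff.jpg']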

// ../app/scrapers/exomlapi.ts

/**
 * @fileoverview ExomlAPI - AI Text Completion Service
 * Base URL: https://exomlapi.com/
 *
 * Features:
 * - AI text completion with conversation support
 * - Multiple model support (GPT-4.1, GPT-4o, Llama, etc.)
 * - Text-only responses
 *
 * Note: Some models may hang with slow fetch times
 *
 * @author wolep
 * @version 1.0.0
 * @lastUpdated 2025-06-07
 */

import {
  NBScraperResponse,
  CharSetOptions,
  ExomlAPIData,
  ExomlAPIOptions,
  ExomlAPIMessage,
  ExomlAPIRandomData,
  ScraperErrorType
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams
} from '../utils';

const BASE_URL = 'https://exomlapi.com/api/chat';

// Available models
export const EXOML_MODELS = [
  'llama',
  'gemma',
  'qwen-3-235b',
  'gpt-4.1',
  'gpt-4o',
  'gpt-4o-mini',
  'llama-4-scout',
  'llama-4-maverick',
  'deepseek-r1',
  'qwq-32b'
] as const;

export type ExomlModel = typeof EXOML_MODELS[number];

/**
 * Generate random IDs for the API request
 */
function generateRandomIds(): ExomlAPIRandomData {
  const gen = (length: number, charSet: CharSetOptions = {}): string => {
    const l = "abcdefghijklmnopqrstuvwxyz";
    const u = l.toUpperCase();
    const s = "-_";
    const n = "0123456789";

    const {
      lowerCase = false,
      upperCase = false,
      symbol = false,
      number = false
    } = charSet;

    // Build character set based on options
    let cs = "";
    if (!lowerCase && !upperCase && !symbol && !number) {
      cs = l + u + s + n; // Default: include everything
    } else {
      if (lowerCase) cs += l;
      if (upperCase) cs += u;
      if (symbol) cs += s;
      if (number) cs += n;
    }

    // Generate random string
    return Array.from({ length }, () =>
      cs[Math.floor(Math.random() * cs.length)]
    ).join("");
  };

  // Generate IDs with specific character sets
  const id = gen(16, { upperCase: true, lowerCase: true, number: true });
  const timestamp = new Date().getTime();
  const chatId = `chat-${timestamp}-${gen(9, { lowerCase: true, number: true })}`;
  const userId = `local-user-${timestamp}-${gen(9, { lowerCase: true, number: true })}`;
  const antiBotId = `${gen(32)}-${gen(8, { number: true, lowerCase: true })}`;

  return { id, chatId, userId, antiBotId };
}

/**
 * @alpha
 * Create a conversation message
 *
 * @example
 * ```typescript
 * import { createExomlMessage } from 'nb-scraper';
 *
 * const message = createExomlMessage("user", "Hello, how are you?");
 * ```
 *
 * @param role - Message role
 * @param content - Message content
 * @returns ExomlAPIMessage
 * @author Wolep
 */
export function createExomlMessage(
  role: "user" | "assistant" | "system",
  content: string
): ExomlAPIMessage {
  return { role, content };
}

/**
 * Generate AI response
 *
 * @example
 * ```typescript
 * import { generateExomlResponse } from 'nb-scraper';
 *
 * const result = await generateExomlResponse({
 *   messages: [
 *     createExomlMessage("user", "Hello, how are you?")
 *   ],
 *   model: "gpt-4.1"
 * });
 *
 * if (result.status) {
 *   console.log(result.data.content);
 * }
 * ```
 *
 * @param options - Configuration for the AI request
 * @returns Promise<NBScraperResponse<ExomlAPIData>>
 * @author Wolep
 */
export async function generateExomlResponse(
  options: ExomlAPIOptions
): Promise<NBScraperResponse<ExomlAPIData>> {
  try {
    validateRequiredParams(options, ['messages']);

    const { messages, systemPrompt = "", model = "gpt-4.1" } = options;

    if (!EXOML_MODELS.includes(model as ExomlModel)) {
      return createErrorResponse(
        `Invalid model. Available models: ${EXOML_MODELS.join(', ')}`, {
          type: ScraperErrorType.INVALID_PARAMETER,
          context: { model }
        });
    }

    const body = JSON.stringify({
      messages,
      systemPrompt,
      model,
      isAuthenticated: true,
      ...generateRandomIds()
    });

    const response = await makeRequest<string>({
      url: BASE_URL,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      data: body
    });

    if (typeof response.data !== 'string') {
      return createErrorResponse('Invalid response format from server', {
        type: ScraperErrorType.INVALID_RESPONSE,
        context: { rawResponse: response.data }
      });
    }

    // Parse the streamed response (parsing might be imperfect)
    const data = response.data;
    const content = [...data.matchAll(/^0:"(.*?)"$/gm)]
      .map(v => v[1])
      .join("")
      .replaceAll("\\n", "\n")
      .replaceAll("\\\"", "\"");

    if (!content) {
      return createErrorResponse('Failed to parse message from server', {
        type: ScraperErrorType.PARSE_ERROR,
        context: { rawResponse: data }
      });
    }

    return createSuccessResponse<ExomlAPIData>({ content });
  } catch (error) {
    return createErrorResponse(error as Error, {
      type: ScraperErrorType.API_ERROR,
      context: { service: 'ExomlAPI' }
    });
  }
}
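Because `messages` is a plain array, multi-turn chat is just appending the previous reply before the next call; a sketch using the exported helpers (the prompts are illustrative):

import { createExomlMessage, generateExomlResponse } from 'nb-scraper';

async function chat(): Promise<void> {
  const history = [
    createExomlMessage('system', 'You are a concise assistant.'),
    createExomlMessage('user', 'Name three sorting algorithms.')
  ];

  const first = await generateExomlResponse({ messages: history, model: 'gpt-4o-mini' });
  if (!first.status) return;

  // Feed the assistant's reply back in to continue the conversation.
  history.push(createExomlMessage('assistant', first.data.content));
  history.push(createExomlMessage('user', 'Which is fastest on nearly-sorted data?'));

  const second = await generateExomlResponse({ messages: history, model: 'gpt-4o-mini' });
  if (second.status) console.log(second.data.content);
}

chat();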

// ../app/scrapers/dreamanalysis.ts

/**
 * @fileoverview DreamAnalysis - Dream Interpretation Service
 * Base URL: https://safe-coast-53976-cd772af9b056.herokuapp.com/
 *
 * Features:
 * - Analyze and interpret dreams from text descriptions
 * - Premium analysis features available
 * - Returns detailed interpretation with symbols, emotions, and themes
 *
 * @author NB Team
 * @version 1.0.0
 */

import {
  NBScraperResponse,
  DreamAnalysisData,
  DreamAnalysisOptions,
  ScraperErrorType
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams
} from '../utils';

const BASE_URL = 'https://safe-coast-53976-cd772af9b056.herokuapp.com/';

function parseDreamResponse(raw: unknown): DreamAnalysisData | null {
  if (typeof raw !== 'string') return null;

  try {
    const data = JSON.parse(raw);
    return {
      analysis: data.analysis,
      interpretation: data.interpretation,
      symbols: data.symbols || [],
      emotions: data.emotions || [],
      themes: data.themes || [],
      metadata: data.metadata || {}
    };
  } catch {
    return null;
  }
}

/**
 * Analyze dream text and get interpretation
 *
 * @example
 * ```typescript
 * import { analyzeDream } from 'nb-scraper';
 *
 * const result = await analyzeDream({
 *   text: "I dreamed I was flying over mountains",
 *   isPremium: true
 * });
 *
 * if (result.status) {
 *   console.log(result.data.interpretation);
 * }
 * ```
 *
 * @param options - Configuration for dream analysis
 * @returns Promise<NBScraperResponse<DreamAnalysisData>>
 * @author NajmyW
 */
export async function analyzeDream(
  options: DreamAnalysisOptions
): Promise<NBScraperResponse<DreamAnalysisData>> {
  try {
    validateRequiredParams(options, ['text']);

    const { text, isPremium = true } = options;

    const response = await makeRequest({
      url: BASE_URL,
      method: 'POST',
      headers: {
        'Accept-Encoding': 'gzip',
        'Connection': 'Keep-Alive',
        'Content-Type': 'application/json',
        'Host': 'safe-coast-53976-cd772af9b056.herokuapp.com',
        'User-Agent': 'okhttp/4.9.2'
      },
      data: JSON.stringify({ text, isPremium })
    });

    const rawResponse = response.data;

    // Type guard for rawResponse
    if (typeof rawResponse !== 'string') {
      return createErrorResponse('Invalid response format', {
        type: ScraperErrorType.INVALID_RESPONSE,
        context: {
          service: 'DreamAnalysis',
          rawResponse: String(rawResponse).substring(0, 100) + '...'
        }
      });
    }

    const parsedData = parseDreamResponse(rawResponse);

    if (!parsedData) {
      return createErrorResponse('Failed to parse Dream response', {
        type: ScraperErrorType.PARSE_ERROR,
        context: {
          rawResponse: rawResponse.substring(0, 100) + '...'
        }
      });
    }

    return createSuccessResponse<DreamAnalysisData>(parsedData);
  } catch (error) {
    return createErrorResponse(error as Error, {
      type: ScraperErrorType.API_ERROR,
      context: { service: 'DreamAnalysis' }
    });
  }
}

/**
 * Quick analysis with basic interpretation
 *
 * @example
 * ```typescript
 * import { quickDreamAnalysis } from 'nb-scraper';
 *
 * const result = await quickDreamAnalysis("I dreamed of being chased");
 * if (result.status) {
 *   console.log(result.data);
 * }
 * ```
 *
 * @param text - Dream description text
 * @returns Promise<NBScraperResponse<DreamAnalysisData>>
 * @author NajmyW
 */
export async function quickDreamAnalysis(
  text: string
): Promise<NBScraperResponse<DreamAnalysisData>> {
  return analyzeDream({ text, isPremium: false });
}

/**
 * Premium analysis with detailed interpretation
 *
 * @example
 * ```typescript
 * import { premiumDreamAnalysis } from 'nb-scraper';
 *
 * const result = await premiumDreamAnalysis("I dreamed I could breathe underwater");
 * if (result.status) {
 *   console.log(result.data.symbols);
 * }
 * ```
 *
 * @param text - Dream description text
 * @returns Promise<NBScraperResponse<DreamAnalysisData>>
 * @author NajmyW
 */
export async function premiumDreamAnalysis(
  text: string
): Promise<NBScraperResponse<DreamAnalysisData>> {
  return analyzeDream({ text, isPremium: true });
}
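The parsed payload separates the free-text interpretation from the structured lists, which keeps rendering simple; a consumer sketch, assuming the list fields hold strings:

import { premiumDreamAnalysis } from 'nb-scraper';

async function main(): Promise<void> {
  const result = await premiumDreamAnalysis('I dreamed I was flying over mountains');
  if (!result.status) {
    console.error(result.error);
    return;
  }

  console.log(result.data.interpretation);
  // symbols/emotions/themes default to [] when absent, so iteration is always safe.
  console.log('symbols:', result.data.symbols.join(', '));
  console.log('themes:', result.data.themes.join(', '));
}

main();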

// ../app/scrapers/pollinations.ts

/**
 * @fileoverview Pollinations AI Image Generation Service
 * Base URL: https://image.pollinations.ai/
 *
 * Features:
 * - Generate images from text prompts
 * - Upload generated images to Catbox.moe for permanent hosting
 * - No logo option available
 *
 * @author NB Team
 * @version 1.0.1
 */

import FormData from 'form-data';
import fs from 'fs';
import path from 'path';
import os from 'os';
import {
  NBScraperResponse,
  PollinationsData,
  PollinationsOptions,
  ScraperErrorType
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams,
} from '../utils';

const BASE_URL = 'https://image.pollinations.ai/prompt/';
const UPLOAD_URL = 'https://catbox.moe/user/api.php';

/**
 * @alpha
 * Generate image from prompt and upload to Catbox
 *
 * @example
 * ```typescript
 * import { generatePollinationsImage } from 'nb-scraper';
 *
 * const result = await generatePollinationsImage({
 *   prompt: "a beautiful sunset over mountains",
 *   nologo: true
 * });
 *
 * if (result.status) {
 *   console.log(result.data.url); // Catbox.moe URL
 * }
 * ```
 *
 * @param options - Configuration for image generation
 * @returns Promise<NBScraperResponse<PollinationsData>>
 * @author Jul
 */
export async function generatePollinationsImage(
  options: PollinationsOptions
): Promise<NBScraperResponse<PollinationsData>> {
  try {
    // Validate required parameters
    validateRequiredParams(options, ['prompt']);

    const { prompt, nologo = true } = options;
    const encodedPrompt = encodeURIComponent(prompt);
    const imageUrl = `${BASE_URL}${encodedPrompt}${nologo ? '?nologo=true' : ''}`;
    const tempPath = path.join(os.tmpdir(), `pollinations_${Date.now()}.jpg`);

    // Download image to a temporary file
    const response = await makeRequest<ArrayBuffer>({
      url: imageUrl,
      responseType: 'arraybuffer'
    });

    fs.writeFileSync(tempPath, Buffer.from(response.data));

    try {
      // Upload to Catbox.moe
      const form = new FormData();
      form.append('reqtype', 'fileupload');
      form.append('fileToUpload', fs.createReadStream(tempPath));

      const upload = await makeRequest<string>({
        method: 'POST',
        url: UPLOAD_URL,
        data: form,
        headers: form.getHeaders()
      });

      if (typeof upload.data !== 'string') {
        throw new Error('Invalid upload response');
      }

      return createSuccessResponse<PollinationsData>({
        url: upload.data,
        directUrl: imageUrl
      });
    } finally {
      // Clean up the temporary file even if the upload fails
      fs.unlinkSync(tempPath);
    }
  } catch (error) {
    return createErrorResponse(error as Error, {
      type: ScraperErrorType.IMAGE_GENERATION_ERROR,
      context: { service: 'Pollinations' }
    });
  }
}

/**
 * Get direct image URL (without upload to Catbox)
 *
 * @example
 * ```typescript
 * import { getPollinationsDirectUrl } from 'nb-scraper';
 *
 * const url = getPollinationsDirectUrl({
 *   prompt: "a beautiful sunset over mountains",
 *   nologo: true
 * });
 *
 * console.log(url); // Direct Pollinations image URL
 * ```
 *
 * @param options - Configuration for image generation
 * @returns string - Direct image URL
 * @author Jul
 */
export function getPollinationsDirectUrl(options: PollinationsOptions): string {
  validateRequiredParams(options, ['prompt']);

  const { prompt, nologo = true } = options;
  const encodedPrompt = encodeURIComponent(prompt);
  return `${BASE_URL}${encodedPrompt}${nologo ? '?nologo=true' : ''}`;
}
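The direct URL is deterministic (base + encoded prompt + optional `nologo` flag), so it needs no network call; only the Catbox re-hosting does I/O. A short contrast sketch:

import { generatePollinationsImage, getPollinationsDirectUrl } from 'nb-scraper';

// Pure string construction; no request is made here.
const direct = getPollinationsDirectUrl({ prompt: 'a beautiful sunset over mountains' });
console.log(direct);
// https://image.pollinations.ai/prompt/a%20beautiful%20sunset%20over%20mountains?nologo=true

// This call downloads the render and re-hosts it on catbox.moe.
generatePollinationsImage({ prompt: 'a beautiful sunset over mountains' })
  .then(result => {
    if (result.status) console.log(result.data.url, result.data.directUrl);
  });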

// ../app/scrapers/soundcloud.ts

/**
 * @fileoverview SoundCloud Music Search and Track Information
 * Base URL: https://soundcloud.com/
 *
 * Features:
 * - Search tracks with detailed information
 * - Auto client_id extraction and caching
 * - Format duration, numbers, and dates
 * - Track metadata including plays, likes, downloads
 *
 * @author NB Team
 * @version 1.0.0
 */

import {
  NBScraperResponse,
  SoundCloudData,
  SoundCloudSearchOptions,
  SoundCloudTrack,
  SoundCloudCache,
  SoundCloudApiResponse,
  SoundCloudApiTrack,
  ScraperErrorType
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams,
  formatBytes
} from '../utils';

const BASE_URL = 'https://soundcloud.com/';
const API_URL = 'https://api-v2.soundcloud.com/search/tracks';

// Module-level cache
let cache: SoundCloudCache = { version: '', id: '' };

/**
 * Format duration from milliseconds to MM:SS
 * @param ms - Duration in milliseconds
 * @returns Formatted duration string
 */
function formatDuration(ms: number): string {
  const sec = Math.floor(ms / 1000);
  const min = Math.floor(sec / 60);
  const remainder = sec % 60;
  return `${min}:${remainder.toString().padStart(2, '0')}`;
}

/**
 * Format large numbers with K/M suffixes
 * @param n - Number to format
 * @returns Formatted number string
 */
function formatNumber(n: number): string {
  if (n >= 1e6) return (n / 1e6).toFixed(1).replace(/\.0$/, '') + 'M';
  if (n >= 1e3) return (n / 1e3).toFixed(1).replace(/\.0$/, '') + 'K';
  return n.toString();
}

/**
 * Format date to YYYY-MM-DD
 * @param dateStr - Date string to format
 * @returns Formatted date string or null
 */
function formatDate(dateStr: string | null): string | null {
  if (!dateStr) return null;
  const d = new Date(dateStr);
  return d.toISOString().split('T')[0];
}

/**
 * Get SoundCloud client ID by parsing the website
 * @returns Promise<string | null>
 */
async function getClientID(): Promise<string | null> {
  try {
    const response = await makeRequest({
      url: BASE_URL,
      method: 'GET'
    });

    if (typeof response.data !== 'string') {
      throw new Error('invalid html data');
    }

    const html = response.data;
    const version = html.match(
      /<script>window\.__sc_version="(\d{10})"<\/script>/)?.[1];
    if (!version) return null;

    // Return cached ID if version matches
    if (cache.version === version) return cache.id;

    // Extract script URLs and find client_id
    const scriptMatches = [...html.matchAll(
      /<script.*?src="(https:\/\/a-v2\.sndcdn\.com\/assets\/[^"]+)"/g)];

    for (const [, scriptUrl] of scriptMatches) {
      const { data: js } = await makeRequest({
        url: scriptUrl,
        method: 'GET'
      });

      if (typeof js !== 'string') {
        continue;
      }

      const idMatch = js.match(/client_id:"([a-zA-Z0-9]{32})"/);
      if (idMatch) {
        cache = { version, id: idMatch[1] };
        return idMatch[1];
      }
    }
  } catch (error) {
    throw new Error(
      `Failed to get client_id: ${error instanceof Error ? error.message : 'Unknown error'}`
    );
  }

  return null;
}

/**
 * Search SoundCloud tracks
 *
 * @example
 * ```typescript
 * import { searchSoundCloud } from 'nb-scraper';
 *
 * const result = await searchSoundCloud({
 *   query: "lofi chill",
 *   limit: 10
 * });
 *
 * if (result.status) {
 *   console.log(result.data.tracks);
 * }
 * ```
 *
 * @param options - Search configuration
 * @returns Promise<NBScraperResponse<SoundCloudData>>
 * @author Rian
 */
export async function searchSoundCloud(
  options: SoundCloudSearchOptions
): Promise<NBScraperResponse<SoundCloudData>> {

  const { query, limit = 3 } = options;
  let clientId: string | null = null;

  try {
    validateRequiredParams(options, ['query']);

    // Assign (not shadow) the outer clientId so the catch block can report it
    clientId = await getClientID();

    if (!clientId) {
      return createErrorResponse('Failed to obtain client_id', {
        type: ScraperErrorType.AUTH_ERROR,
        context: { service: 'SoundCloud' }
      });
    }

    const response = await makeRequest<SoundCloudApiResponse>({
      url: API_URL,
      method: 'GET',
      params: {
        q: query,
        client_id: clientId,
        limit
      }
    });

    if (!response.data?.collection) {
      return createErrorResponse('Invalid SoundCloud API response', {
        type: ScraperErrorType.INVALID_RESPONSE
      });
    }

    const tracks: SoundCloudTrack[] = response.data.collection.map(track => {
      const durationMs = track.duration;
      const duration = formatDuration(durationMs);

      const likeCount = formatNumber(track.likes_count);
      const playCount = formatNumber(track.playback_count);
      const downloadCount = formatNumber(track.download_count);

      const releaseDate = track.release_date || track.created_at;

      return {
        id: track.id,
        title: track.title,
        url: track.permalink_url,
        duration,
        thumbnail: track.artwork_url,
        author: {
          name: track.user.username,
          url: track.user.permalink_url
        },
        like_count: likeCount,
        download_count: downloadCount,
        play_count: playCount,
        release_date: formatDate(releaseDate)
      };
    });

    return createSuccessResponse<SoundCloudData>({ tracks });
  } catch (error) {
    return createErrorResponse(error as Error, {
      type: ScraperErrorType.API_ERROR,
      context: { service: 'SoundCloud', query, clientId: clientId ? '*****' : 'null' }
    });
  }
}

/**
 * @alpha
 * Get cached client ID and version info
 *
 * @example
 * ```typescript
 * import { getSoundCloudCacheInfo } from 'nb-scraper';
 *
 * const cacheInfo = getSoundCloudCacheInfo();
 * console.log(cacheInfo);
 * ```
 *
 * @returns Current cache state
 * @author Rian
 */
export function getSoundCloudCacheInfo(): SoundCloudCache {
  return { ...cache };
}
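Because the client_id cache is keyed on SoundCloud's build version, repeated searches skip the expensive script scan until the site ships a new build; an illustrative check:

import { searchSoundCloud, getSoundCloudCacheInfo } from 'nb-scraper';

async function main(): Promise<void> {
  const first = await searchSoundCloud({ query: 'lofi chill', limit: 5 });
  if (first.status) {
    first.data.tracks.forEach(t =>
      console.log(`${t.title} (${t.duration}) - ${t.play_count} plays`));
  }

  const { version, id } = getSoundCloudCacheInfo();
  console.log(`cached build ${version}, client_id ${id ? 'present' : 'missing'}`);

  // A second search reuses the cached client_id (no asset scan).
  await searchSoundCloud({ query: 'synthwave' });
}

main();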

// ../app/scrapers/deepinfra.ts

/**
 * @fileoverview DeepInfra AI Chat Service
 * Base URL: https://ai-sdk-starter-deepinfra.vercel.app/api/chat
 *
 * Features:
 * - AI text generation with multiple model support
 * - Handles various response formats
 * - Fallback for empty responses
 *
 * @author Woi
 * @version 1.0.0
 */

import {
  NBScraperResponse,
  DeepInfraAIData,
  DeepInfraAIOptions,
  DeepInfraAIMessage,
  DeepInfraAIModel,
  DeepInfraAIRequest,
  DeepInfraAIResponse,
  ScraperErrorType
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams
} from '../utils';

const BASE_URL = 'https://ai-sdk-starter-deepinfra.vercel.app/api/chat';

/**
 * Available DeepInfra AI models
 */
export const DEEPINFRA_MODELS = [
  'meta-llama/Llama-3.3-70B-Instruct-Turbo',
  'deepseek-ai/DeepSeek-R1',
  'Qwen/Qwen2.5-72B-Instruct'
] as const;

/**
 * Generate AI response using DeepInfra
 *
 * @example
 * ```typescript
 * import { generateDeepInfraResponse } from 'nb-scraper';
 *
 * const result = await generateDeepInfraResponse({
 *   prompt: "Explain JavaScript in simple terms",
 *   model: "deepseek-ai/DeepSeek-R1"
 * });
 *
 * if (result.status) {
 *   console.log(result.data.response);
 * }
 * ```
 *
 * @param options - Configuration for the AI request
 * @returns Promise<NBScraperResponse<DeepInfraAIData>>
 * @author Woi
 */
export async function generateDeepInfraResponse(
  options: DeepInfraAIOptions
): Promise<NBScraperResponse<DeepInfraAIData>> {
  try {
    validateRequiredParams(options, ['prompt']);

    const { prompt, model = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' } = options;

    const body = {
      id: Math.random().toString(36).slice(2),
      selectedModel: model,
      messages: [{
        role: 'user',
        content: prompt,
        parts: [{ type: 'text', text: prompt }]
      }]
    };

    const response = await makeRequest<DeepInfraAIResponse>({
      url: BASE_URL,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      data: body
    });

    // Process response parts
    const parts: string[] = [];
    const responseData = response.data;

    if (responseData && typeof responseData === 'object') {
      const data = responseData as DeepInfraAIResponse;

      if (data.g) {
        parts.push(...(Array.isArray(data.g) ? data.g : [data.g]));
      }
      if (data.f) {
        parts.push(...(Array.isArray(data.f) ? data.f : [data.f]));
      }
      if (data['0']) {
        parts.push(...(Array.isArray(data['0']) ? data['0'] : [data['0']]));
      }
    } else if (typeof responseData === 'string') {
      parts.push(responseData);
    }

    const result = parts.join('').trim() || 'No response generated';

    return createSuccessResponse<DeepInfraAIData>({
      response: result
    });

  } catch (error) {
    return createErrorResponse(error as Error, {
      type: ScraperErrorType.API_ERROR,
      context: {
        service: 'DeepInfraAI',
        model: options.model
      }
    });
  }
}
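The service expects one of the IDs in `DEEPINFRA_MODELS`, so iterating the exported list is a safe way to compare outputs; a sketch (the prompt is illustrative):

import { generateDeepInfraResponse, DEEPINFRA_MODELS } from 'nb-scraper';

async function compare(): Promise<void> {
  for (const model of DEEPINFRA_MODELS) {
    const res = await generateDeepInfraResponse({
      prompt: 'Explain JavaScript in one sentence.',
      model
    });
    console.log(`${model}: ${res.status ? res.data.response : res.error}`);
  }
}

compare();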

// ../app/scrapers/animeindo.ts

/**
 * @fileoverview Anime Indo Scraper
 * Base URL: https://anime-indo.lol
 *
 * Features:
 * - Search anime
 * - Get anime details
 * - Download episodes
 *
 * @author Jul
 * @version 1.0.1
 */

import axios from 'axios';
import { URL } from 'url';
import * as cheerio from 'cheerio';
import {
  NBScraperResponse,
  AnimeIndoSearchResult,
  AnimeIndoDetail,
  AnimeIndoDownloadInfo,
  AnimeIndoEpisode,
  ScraperErrorType
} from '../types';
import {
  createErrorResponse,
  createSuccessResponse,
  makeRequest,
  validateRequiredParams
} from '../utils';

const BASE_URL = 'https://anime-indo.lol';

export const animeIndo = {
  /**
   * Search anime
   *
   * @example
   * ```typescript
   * const result = await animeIndo.search("Naruto");
   * if (result.status) {
   *   console.log(result.data);
   * }
   * ```
   * @author Jul
   */
  async search(query: string): Promise<NBScraperResponse<AnimeIndoSearchResult[]>> {
    try {
      validateRequiredParams({ query }, ['query']);

      const url = `${BASE_URL}/search/${encodeURIComponent(query)}/`;
      const response = await makeRequest<string>({ url });

      const $ = cheerio.load(response.data);
      const results: AnimeIndoSearchResult[] = [];

      $("table.otable").each((_index: number, el: cheerio.Element) => {
        const element = $(el);
        const title = element.find(".videsc a").text().trim();
        const link = BASE_URL + element.find(".videsc a").attr("href");
        const image = BASE_URL + element.find("img").attr("src");
        const description = element.find("p.des").text().trim();
        const labelEls = element.find(".label");
        const year = labelEls.last().text().trim();

        results.push({
          title,
          link,
          image,
          year,
          description,
        });
      });

      return createSuccessResponse(results);
    } catch (error) {
      return createErrorResponse(error as Error, {
        type: ScraperErrorType.API_ERROR,
        context: { service: 'AnimeIndo', query }
      });
    }
  },

  /**
   * Get anime details
   *
   * @example
   * ```typescript
   * const result = await animeIndo.detail("https://anime-indo.lol/anime/naruto");
   * if (result.status) {
   *   console.log(result.data);
   * }
   * ```
   * @author Jul
   */
  async detail(url: string): Promise<NBScraperResponse<AnimeIndoDetail>> {
    try {
      validateRequiredParams({ url }, ['url']);

      const response = await makeRequest<string>({ url });
      const $ = cheerio.load(response.data);

      const title = $("h1.title").text().trim();

      let imageSrc = $(".detail img").attr("src") || "";
      if (imageSrc.startsWith("/")) {
        imageSrc = BASE_URL + imageSrc;
      }

      const genres: string[] = [];
      $(".detail li a").each((_index: number, el: cheerio.Element) => {
        genres.push($(el).text().trim());
      });

      const description = $(".detail p").text().trim();

      const episodes: AnimeIndoEpisode[] = [];
      $(".ep a").each((_index: number, el: cheerio.Element) => {
        let epLink = $(el).attr("href");
        if (epLink && epLink.startsWith("/")) {
          epLink = BASE_URL + epLink;
        }
        episodes.push({
          episode: $(el).text().trim(),
          link: epLink || "",
        });
      });

      return createSuccessResponse({
        title,
        image: imageSrc,
        genres,
        description,
        episodes,
      });
    } catch (error) {
      return createErrorResponse(error as Error, {
        type: ScraperErrorType.API_ERROR,
        context: { service: 'AnimeIndo', url }
      });
    }
  },

  /**
   * Download episode
   *
   * @example
   * ```typescript
   * const result = await animeIndo.download("https://anime-indo.lol/episode/naruto-1");
   * if (result.status) {
   *   console.log(result.data.downloadUrl);
   * }
   * ```
   * @author Jul
   */
  async download(episodeUrl: string): Promise<NBScraperResponse<AnimeIndoDownloadInfo>> {
    try {
      validateRequiredParams({ episodeUrl }, ['episodeUrl']);

      // Get episode page
      const { data: episodeHtml } = await makeRequest<string>({
        url: episodeUrl,
        headers: { 'User-Agent': 'Mozilla/5.0' }
      });
      const $ = cheerio.load(episodeHtml);

      const title = $('h1.title').first().text().trim();
      const description = $('.detail p').text().trim();

      //