UNPKG

@spider-cloud/spider-client

Version:

Isomorphic JavaScript SDK for Spider Cloud services

82 lines (81 loc) 3.35 kB
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.setBaseUrl = exports.APISchema = exports.APIRoutes = exports.ApiVersion = exports.Collection = exports.RedirectPolicy = void 0; // The HTTP redirect policy to use. Loose allows all domains and Strict only allows relative requests to the domain. var RedirectPolicy; (function (RedirectPolicy) { RedirectPolicy["Loose"] = "Loose"; RedirectPolicy["Strict"] = "Strict"; })(RedirectPolicy || (exports.RedirectPolicy = RedirectPolicy = {})); // records that you can query var Collection; (function (Collection) { Collection["Websites"] = "websites"; Collection["Pages"] = "pages"; Collection["PagesMetadata"] = "pages_metadata"; // Leads Collection["Contacts"] = "contacts"; Collection["CrawlState"] = "crawl_state"; Collection["CrawlLogs"] = "crawl_logs"; Collection["Profiles"] = "profiles"; Collection["Credits"] = "credits"; Collection["Webhooks"] = "webhooks"; Collection["APIKeys"] = "api_keys"; })(Collection || (exports.Collection = Collection = {})); // The API version for Spider var ApiVersion; (function (ApiVersion) { ApiVersion["V1"] = "v1"; })(ApiVersion || (exports.ApiVersion = ApiVersion = {})); // The API routes paths. var APIRoutes; (function (APIRoutes) { // Crawl a website to collect the contents. Can be one page or many. APIRoutes["Crawl"] = "crawl"; // Crawl a website to collect the links. Can be one page or many. APIRoutes["Links"] = "links"; // Crawl a website to collect screenshots. Can be one page or many. APIRoutes["Screenshot"] = "screenshot"; // Search for something and optionally crawl the pages or get the results of the search. APIRoutes["Search"] = "search"; // Transform HTML to markdown or text. APIRoutes["Transform"] = "transform"; // Pipeline extract leads for a website - emails, phones, etc. APIRoutes["PiplineExtractLeads"] = "pipeline/extract-contacts"; // Pipeline label a website by category using AI and metadata. 
APIRoutes["PiplineLabel"] = "pipeline/label"; // Dynamic collection routes. APIRoutes["Data"] = "data"; // The last crawl state of a website. APIRoutes["DataCrawlState"] = "data/crawl_state"; // Sign a file from storage based on the exact url path of the storage or domain - pathname. APIRoutes["DataSignUrl"] = "data/sign-url"; // Download a file from storage based on the exact url path of the storage or domain - pathname. APIRoutes["DataDownload"] = "data/download"; // Perform a query on the global database to grab content without crawling if available. APIRoutes["DataQuery"] = "data/query"; // Get the credits remaining for an account. APIRoutes["DataCredits"] = "data/credits"; })(APIRoutes || (exports.APIRoutes = APIRoutes = {})); // The base API target info for Spider Cloud. exports.APISchema = { url: "https://api.spider.cloud", versions: { current: ApiVersion.V1, v1: { routes: APIRoutes, end_date: "", }, latest: { routes: APIRoutes, end_date: "", }, }, }; // Adjust the Spider Cloud endpoint. const setBaseUrl = (url) => { if (url) { exports.APISchema["url"] = url; } }; exports.setBaseUrl = setBaseUrl;