@mseep/hyperbrowser-mcp
Hyperbrowser Model Context Protocol Server
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.crawlWebpagesToolDescription = exports.crawlWebpagesToolName = exports.crawlWebpagesTool = void 0;
const utils_1 = require("../utils");
/**
 * Crawl a site starting from `url` via the Hyperbrowser crawl API and
 * translate each crawled page into MCP tool-response content items.
 */
async function crawlWebpagesTool({ url, sessionOptions, outputFormat, ignoreSitemap, followLinks, maxPages, }) {
    try {
        const client = await (0, utils_1.getClient)();
        // Start the crawl and block until it completes (or fails).
        const result = await client.crawl.startAndWait({
            url,
            sessionOptions,
            scrapeOptions: {
                formats: outputFormat,
            },
            maxPages,
            ignoreSitemap,
            followLinks,
        });
        // Surface API-reported failures as an MCP error result.
        if (result.error) {
            return {
                isError: true,
                content: [
                    {
                        type: "text",
                        text: result.error,
                    },
                ],
            };
        }
        const response = {
            content: [],
            isError: false,
        };
        // Each crawled page may carry any combination of the requested
        // formats; emit one content item per available format.
        result.data?.forEach((page) => {
            if (page?.markdown) {
                response.content.push({
                    type: "text",
                    text: page.markdown,
                });
            }
            if (page?.html) {
                response.content.push({
                    type: "text",
                    text: page.html,
                });
            }
            if (page?.links) {
                // Expose discovered links as MCP resources so clients can
                // follow up on individual URLs.
                page.links.forEach((link) => {
                    response.content.push({
                        type: "resource",
                        resource: {
                            uri: link,
                            text: link,
                        },
                    });
                });
            }
            if (page?.screenshot) {
                // Screenshots are returned as image content tagged WebP.
                response.content.push({
                    type: "image",
                    data: page.screenshot,
                    mimeType: "image/webp",
                });
            }
        });
        return response;
    }
    catch (error) {
        // Report unexpected failures (network, auth, etc.) as error text.
        return {
            content: [{ type: "text", text: `${error}` }],
            isError: true,
        };
    }
}
exports.crawlWebpagesTool = crawlWebpagesTool;
exports.crawlWebpagesToolName = "crawl_webpages";
exports.crawlWebpagesToolDescription = "Crawl a website starting from a URL and explore its linked pages. This tool systematically collects content from multiple pages within a domain. Use it for larger data-collection tasks, content indexing, or site mapping.";
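// Usage sketch (an assumption, not part of this module): the MCP server
// elsewhere in this package presumably wires these three exports together
// when dispatching tool calls. A hypothetical handler, with illustrative
// parameter values only, might look like:
//
//   const tool = require("./tools/crawlWebpages");
//   // ...inside a CallToolRequest handler, dispatch on the tool name:
//   if (request.params.name === tool.crawlWebpagesToolName) {
//       return tool.crawlWebpagesTool({
//           url: "https://example.com",
//           outputFormat: ["markdown", "links"],
//           ignoreSitemap: false,
//           followLinks: true,
//           maxPages: 10,
//       });
//   }
//
// The require path, format strings, and parameter values above are
// hypothetical; the real server setup and input schema live elsewhere
// in this package.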