/**
 * @hechtcarmel/vertica-mcp
 * MCP server for Vertica database operations with configurable readonly mode.
 */
import { z } from "zod";
import { VerticaService } from "../services/vertica-service.js";
import { getDatabaseConfig } from "../config/database.js";
import { safeJsonStringify } from "../utils/response-formatter.js";
/**
 * MCP tool that streams large Vertica query results in batches.
 *
 * The advertised description and input schema adapt to the server's readonly
 * configuration: in readonly mode only SELECT/SHOW/DESCRIBE/EXPLAIN/WITH are
 * allowed; otherwise all SQL is permitted. If configuration cannot be read,
 * both getters fall back to a neutral wording rather than throwing.
 */
export default class StreamQueryTool {
    name = "stream_query";

    /**
     * Hard cap on the number of batches collected in a single call, to bound
     * response size. If the stream has more data past this cap, the response
     * carries `truncated: true`.
     */
    static MAX_BATCHES = 100;

    /**
     * Tool description, phrased according to the current readonly setting.
     * Falls back to a configuration-agnostic wording if config is unavailable.
     * @returns {string}
     */
    get description() {
        try {
            const config = getDatabaseConfig();
            // Default to the safer readonly wording when the flag is absent.
            const isReadonly = config.readonlyMode ?? true;
            if (isReadonly) {
                return "Stream large readonly query results in batches to handle datasets efficiently. Only SELECT, SHOW, DESCRIBE, EXPLAIN, and WITH queries are allowed.";
            }
            else {
                return "Stream large query results in batches to handle datasets efficiently. All SQL operations are allowed including data modification queries.";
            }
        }
        catch {
            return "Stream large query results in batches to handle datasets efficiently. Query restrictions depend on configuration.";
        }
    }

    /**
     * Builds the JSON-Schema-style input descriptor with the given `sql`
     * field description; the batchSize/maxRows properties are identical in
     * every variant, so they live here instead of being duplicated.
     * @param {string} sqlDescription - Description text for the `sql` property.
     * @returns {object} JSON-Schema-like object describing the tool's input.
     */
    #schemaFor(sqlDescription) {
        return {
            type: "object",
            properties: {
                sql: {
                    type: "string",
                    description: sqlDescription,
                },
                batchSize: {
                    type: "number",
                    minimum: 1,
                    maximum: 10000,
                    default: 1000,
                    description: "Number of rows per batch (1-10000, default: 1000).",
                },
                maxRows: {
                    type: "number",
                    minimum: 1,
                    maximum: 1000000,
                    description: "Maximum total rows to fetch (optional).",
                },
            },
            required: ["sql"],
        };
    }

    /**
     * Input schema for the tool, with the `sql` field description phrased
     * according to the current readonly setting. Falls back to a neutral
     * wording if configuration cannot be read.
     * @returns {object}
     */
    get inputSchema() {
        try {
            const config = getDatabaseConfig();
            const isReadonly = config.readonlyMode ?? true;
            const sqlDescription = isReadonly
                ? "SQL query to execute. Only readonly queries are allowed: SELECT, SHOW, DESCRIBE, EXPLAIN, and WITH."
                : "SQL query to execute. All SQL operations are allowed including INSERT, UPDATE, DELETE, CREATE, DROP, etc.";
            return this.#schemaFor(sqlDescription);
        }
        catch {
            return this.#schemaFor("SQL query to execute. Query restrictions depend on configuration.");
        }
    }

    /**
     * Executes the query via VerticaService.streamQuery and collects batches.
     *
     * Never throws: failures are reported as a JSON payload with
     * `success: false`. The service connection is always released in
     * `finally`, and cleanup failures are logged rather than masking the
     * primary result.
     *
     * @param {unknown} input - Raw tool input; validated by {@link parseInput}.
     * @returns {Promise<string>} Pretty-printed JSON result. On success the
     *   payload includes `truncated`, which is true when the stream was cut
     *   off at {@link StreamQueryTool.MAX_BATCHES} with more data remaining.
     */
    async execute(input) {
        const parsed = this.parseInput(input);
        let verticaService = null;
        try {
            const config = getDatabaseConfig();
            verticaService = new VerticaService(config);
            const batches = [];
            let totalRows = 0;
            let truncated = false;
            // parseInput's zod schema defaults batchSize to 1000; ?? is a
            // belt-and-braces guard (and unlike ||, would not clobber a valid 0).
            const batchSize = parsed.batchSize ?? 1000;
            for await (const batch of verticaService.streamQuery(parsed.sql, {
                batchSize,
                maxRows: parsed.maxRows,
            })) {
                batches.push({
                    batchNumber: batch.batchNumber,
                    rowCount: batch.batch.length,
                    rows: batch.batch,
                    hasMore: batch.hasMore,
                    fields: batch.fields.map((field) => ({
                        name: field.name,
                        dataType: field.format,
                    })),
                });
                totalRows += batch.batch.length;
                if (batches.length >= StreamQueryTool.MAX_BATCHES) {
                    // Stop at the cap; breaking out of for-await closes the
                    // generator, and only flag truncation if data remained.
                    truncated = batch.hasMore;
                    break;
                }
            }
            return safeJsonStringify({
                success: true,
                query: parsed.sql,
                totalRows,
                batchCount: batches.length,
                batchSize,
                truncated,
                batches,
                executedAt: new Date().toISOString(),
            }, 2);
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            return safeJsonStringify({
                success: false,
                error: errorMessage,
                query: parsed.sql,
                executedAt: new Date().toISOString(),
            }, 2);
        }
        finally {
            if (verticaService) {
                try {
                    await verticaService.disconnect();
                }
                catch (error) {
                    // Cleanup failure must not override the query result.
                    console.error("Warning during service cleanup:", error instanceof Error ? error.message : String(error));
                }
            }
        }
    }

    /**
     * Validates raw tool input against the zod schema.
     * @param {unknown} input - Raw input object.
     * @returns {{ sql: string, batchSize: number, maxRows?: number }}
     * @throws {z.ZodError} When the input does not match the schema.
     */
    parseInput(input) {
        const schema = z.object({
            sql: z.string(),
            batchSize: z.number().int().min(1).max(10000).default(1000),
            maxRows: z.number().int().min(1).max(1000000).optional(),
        });
        return schema.parse(input);
    }
}
//# sourceMappingURL=stream-query.js.map