"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.retryMessages = exports.ackAndDelete = exports.deleteMessages = exports.acknowledgeMessages = exports.fetchMessages = exports.buildPublishSQL = exports.publishMessages = void 0;
const utils_1 = require("../../../../modules/utils");
/**
* Publish messages to a stream. Can be used within a transaction.
*
* When a transaction is provided, the SQL is added to the transaction
* and executed atomically with other operations.
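*
* @example
* // A minimal sketch (assumes a connected `pg` Client and an existing
* // stream table; names and values below are illustrative only):
* const ids = await publishMessages(
*   client,
*   'stream_messages',                  // hypothetical table name
*   'myapp:work',                       // no trailing ':' => WORKER group
*   [JSON.stringify({ task: 'demo' })],
*   undefined,                          // no transaction / retry policy
*   console,                            // any logger with error(tag, meta)
* );
* // => ['1'] (inserted row ids as strings)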
*/
async function publishMessages(client, tableName, streamName, messages, options, logger) {
const { sql, params } = buildPublishSQL(tableName, streamName, messages, options);
if (options?.transaction &&
typeof options.transaction.addCommand === 'function') {
// Add to transaction and return the transaction object
options.transaction.addCommand(sql, params, 'array', (rows) => rows.map((row) => row.id.toString()));
return options.transaction;
}
else {
try {
const ids = [];
const res = await client.query(sql, params);
for (const row of res.rows) {
ids.push(row.id.toString());
}
return ids;
}
catch (error) {
logger.error(`postgres-stream-publish-error-${streamName}`, {
error,
});
throw error;
}
}
}
exports.publishMessages = publishMessages;
/**
* Build SQL for publishing messages with retry policies and visibility delays.
* Optimizes the INSERT statement based on whether retry config is present.
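*
* @example
* // Shape of the generated SQL for two plain messages (no retry config,
* // no visibility delay); `stream_messages` is a placeholder table name:
* //   INSERT INTO stream_messages (stream_name, group_name, message)
* //   VALUES ($1, $2, $3), ($1, $2, $4)
* //   RETURNING id
* // params: [streamName, groupName, message1, message2]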
*/
function buildPublishSQL(tableName, streamName, messages, options) {
const groupName = streamName.endsWith(':') ? 'ENGINE' : 'WORKER';
// Parse messages to extract retry config and visibility options
const parsedMessages = messages.map(msg => {
const data = JSON.parse(msg);
const retryConfig = data._streamRetryConfig;
const visibilityDelayMs = data._visibilityDelayMs;
const retryAttempt = data._retryAttempt;
// Remove internal fields from message payload
delete data._streamRetryConfig;
delete data._visibilityDelayMs;
delete data._retryAttempt;
// Determine if this message has explicit retry config
const hasExplicitConfig = (retryConfig && 'max_retry_attempts' in retryConfig) || options?.retryPolicy;
let normalizedPolicy = null;
if (retryConfig && 'max_retry_attempts' in retryConfig) {
normalizedPolicy = retryConfig;
}
else if (options?.retryPolicy) {
normalizedPolicy = (0, utils_1.normalizeRetryPolicy)(options.retryPolicy, {
maximumAttempts: 3,
backoffCoefficient: 10,
maximumInterval: 120,
});
}
return {
message: JSON.stringify(data),
hasExplicitConfig,
retryPolicy: normalizedPolicy,
visibilityDelayMs: visibilityDelayMs || 0,
retryAttempt: retryAttempt || 0,
};
});
const params = [streamName, groupName];
let valuesClauses = [];
let insertColumns;
// Check if ALL messages have explicit config or ALL don't
const allHaveConfig = parsedMessages.every(pm => pm.hasExplicitConfig);
const noneHaveConfig = parsedMessages.every(pm => !pm.hasExplicitConfig);
const hasVisibilityDelays = parsedMessages.some(pm => pm.visibilityDelayMs > 0);
if (noneHaveConfig && !hasVisibilityDelays) {
// Omit retry columns entirely - let DB defaults apply
insertColumns = '(stream_name, group_name, message)';
parsedMessages.forEach((pm, idx) => {
const base = idx; // one bound parameter per message ($3, $4, ...)
valuesClauses.push(`($1, $2, $${base + 3})`);
params.push(pm.message);
});
}
else if (noneHaveConfig && hasVisibilityDelays) {
// Only visibility delays, no retry config
insertColumns = '(stream_name, group_name, message, visible_at, retry_attempt)';
parsedMessages.forEach((pm, idx) => {
const base = idx * 2;
if (pm.visibilityDelayMs > 0) {
const visibleAtSQL = `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`;
valuesClauses.push(`($1, $2, $${base + 3}, ${visibleAtSQL}, $${base + 4})`);
params.push(pm.message, pm.retryAttempt);
}
else {
valuesClauses.push(`($1, $2, $${base + 3}, DEFAULT, $${base + 4})`);
params.push(pm.message, pm.retryAttempt);
}
});
}
else {
// Include retry columns and optionally visibility
insertColumns = '(stream_name, group_name, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, visible_at, retry_attempt)';
parsedMessages.forEach((pm, idx) => {
const visibleAtClause = pm.visibilityDelayMs > 0
? `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`
: 'DEFAULT';
if (pm.hasExplicitConfig) {
const paramOffset = params.length + 1; // Current param count + 1 for next param
valuesClauses.push(`($1, $2, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, ${visibleAtClause}, $${paramOffset + 4})`);
params.push(pm.message, pm.retryPolicy.max_retry_attempts, pm.retryPolicy.backoff_coefficient, pm.retryPolicy.maximum_interval_seconds, pm.retryAttempt);
}
else {
// This message doesn't have config but others do - use DEFAULT keyword
const paramOffset = params.length + 1;
valuesClauses.push(`($1, $2, $${paramOffset}, DEFAULT, DEFAULT, DEFAULT, ${visibleAtClause}, $${paramOffset + 1})`);
params.push(pm.message, pm.retryAttempt);
}
});
}
return {
sql: `INSERT INTO ${tableName} ${insertColumns}
VALUES ${valuesClauses.join(', ')}
RETURNING id`,
params,
};
}
exports.buildPublishSQL = buildPublishSQL;
/**
* Fetch messages from the stream with optional exponential backoff.
* Uses SKIP LOCKED for high-concurrency consumption.
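*
* @example
* // A minimal sketch (illustrative names; assumes the same `pg` Client):
* const messages = await fetchMessages(
*   client, 'stream_messages', 'myapp:work', 'WORKER', 'consumer-1',
*   { batchSize: 10, reservationTimeout: 30, enableBackoff: true },
*   console,
* );
* // Each entry is { id, data, retryPolicy? }; claimed rows stay hidden from
* // other consumers until the reservation times out or they are deleted.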
*/
async function fetchMessages(client, tableName, streamName, groupName, consumerName, options = {}, logger) {
const enableBackoff = options?.enableBackoff ?? false;
const initialBackoff = options?.initialBackoff ?? 100; // Default initial backoff: 100ms
const maxBackoff = options?.maxBackoff ?? 3000; // Default max backoff: 3 seconds
const maxRetries = options?.maxRetries ?? 3; // Default fetch attempts (polling iterations), not message retries
let backoff = initialBackoff;
let retries = 0;
try {
while (retries < maxRetries) {
retries++;
const batchSize = options?.batchSize || 1;
const reservationTimeout = options?.reservationTimeout || 30;
// Simplified query for better performance - especially for notification-triggered fetches
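// Claim-and-reserve in one statement: the UPDATE stamps reserved_at /
// reserved_by, while SKIP LOCKED lets concurrent consumers pass over rows
// another transaction is claiming instead of blocking on them.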
const res = await client.query(`UPDATE ${tableName}
SET reserved_at = NOW(), reserved_by = $4
WHERE id IN (
SELECT id FROM ${tableName}
WHERE stream_name = $1
AND group_name = $2
AND (reserved_at IS NULL OR reserved_at < NOW() - INTERVAL '${reservationTimeout} seconds')
AND expired_at IS NULL
AND visible_at <= NOW()
ORDER BY id
LIMIT $3
FOR UPDATE SKIP LOCKED
)
RETURNING id, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, retry_attempt`, [streamName, groupName, batchSize, consumerName]);
const messages = res.rows.map((row) => {
const data = (0, utils_1.parseStreamMessage)(row.message);
// Inject the retry policy only when it differs from the known default
// values; defaults signal that the legacy retry mechanism (policies.retry) applies
const hasDefaultRetryPolicy = (row.max_retry_attempts === 3 || row.max_retry_attempts === 5) &&
parseFloat(row.backoff_coefficient) === 10 &&
row.maximum_interval_seconds === 120;
if (row.max_retry_attempts !== null && !hasDefaultRetryPolicy) {
data._streamRetryConfig = {
max_retry_attempts: row.max_retry_attempts,
backoff_coefficient: parseFloat(row.backoff_coefficient),
maximum_interval_seconds: row.maximum_interval_seconds,
};
}
// Inject retry_attempt from database
if (row.retry_attempt !== undefined && row.retry_attempt !== null) {
data._retryAttempt = row.retry_attempt;
}
return {
id: row.id.toString(),
data,
retryPolicy: (row.max_retry_attempts !== null && !hasDefaultRetryPolicy) ? {
maximumAttempts: row.max_retry_attempts,
backoffCoefficient: parseFloat(row.backoff_coefficient),
maximumInterval: row.maximum_interval_seconds,
} : undefined,
};
});
if (messages.length > 0 || !enableBackoff) {
return messages;
}
// Apply backoff if enabled and no messages found
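// e.g., with defaults: sleep 100ms, then 200ms, doubling up to the 3000ms cap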
await (0, utils_1.sleepFor)(backoff);
backoff = Math.min(backoff * 2, maxBackoff); // Exponential backoff
}
// Return empty array if maxRetries is reached and still no messages
return [];
}
catch (error) {
logger.error(`postgres-stream-consumer-error-${streamName}`, {
error,
});
throw error;
}
}
exports.fetchMessages = fetchMessages;
/**
* Acknowledge messages (no-op for PostgreSQL; removal is handled by the soft delete in deleteMessages).
*/
async function acknowledgeMessages(messageIds) {
// No-op for this implementation
return messageIds.length;
}
exports.acknowledgeMessages = acknowledgeMessages;
/**
* Delete messages by soft-deleting them (setting expired_at).
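*
* @example
* // A minimal sketch; ids are the strings returned by fetchMessages:
* await deleteMessages(client, 'stream_messages', 'myapp:work', 'WORKER', ['42'], console);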
*/
async function deleteMessages(client, tableName, streamName, groupName, messageIds, logger) {
try {
const ids = messageIds.map((id) => parseInt(id, 10));
// Perform a soft delete by setting `expired_at` to the current timestamp
await client.query(`UPDATE ${tableName}
SET expired_at = NOW()
WHERE stream_name = $1 AND id = ANY($2::bigint[]) AND group_name = $3`, [streamName, ids, groupName]);
return messageIds.length;
}
catch (error) {
logger.error(`postgres-stream-delete-error-${streamName}`, {
error,
});
throw error;
}
}
exports.deleteMessages = deleteMessages;
/**
* Acknowledge and delete messages in one operation.
*/
async function ackAndDelete(client, tableName, streamName, groupName, messageIds, logger) {
return await deleteMessages(client, tableName, streamName, groupName, messageIds, logger);
}
exports.ackAndDelete = ackAndDelete;
/**
* Retry messages (placeholder for future implementation).
*/
async function retryMessages(streamName, groupName, options) {
// Placeholder: retry logic is not yet implemented for this provider
return [];
}
exports.retryMessages = retryMessages;