/**
 * harperdb
 * HarperDB is a distributed database, caching service, streaming broker, and
 * application development platform focused on performance and ease of use.
 */
type ActionCallback = (action: Action) => void;
export type Value = number | boolean | ActionCallback;
interface Action {
    total?: number;
    values?: Float32Array;
    count?: number;
    callback?: ActionCallback;
    description?: {
        metric: string;
        path: string;
        method: string;
        type: string;
    };
}
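/*
 * Illustrative only (not part of the declarations): a rough sketch of the shape an Action
 * might take once values have been recorded for it. The concrete numbers and the metric
 * name below are assumptions.
 *
 * const exampleAction: Action = {
 *     total: 1523.4, // e.g. a running sum of recorded values (such as response times in ms)
 *     count: 42, // how many values were recorded
 *     description: { metric: 'http-request', path: '/api/dogs', method: 'GET', type: 'duration' },
 * };
 */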
export declare function setAnalyticsEnabled(enabled: boolean): void;
/**
 * Record an action for analytics (like an HTTP request, replication, or MQTT message)
 * @param value the value to record for this action
 * @param metric the name of the metric the action belongs to
 * @param path the path associated with the action (such as an HTTP request path)
 * @param method the method associated with the action (such as an HTTP method)
 * @param type the type of value being recorded
 */
export declare function recordAction(value: Value, metric: string, path?: string, method?: string, type?: string): void;
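/*
 * Usage sketch (illustrative): recording a timed HTTP request. The import specifier and the
 * argument values are assumptions, not something this file defines.
 *
 * import { recordAction } from 'harperdb'; // hypothetical import path
 * recordAction(12.5, 'http-request', '/api/dogs', 'GET', 'duration'); // e.g. a 12.5 ms GET request
 */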
export declare function recordActionBinary(value: any, metric: any, path?: any, method?: any, type?: any): void;
export declare function addAnalyticsListener(callback: any): void;
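/*
 * Listener sketch (illustrative): addAnalyticsListener presumably delivers aggregated Action
 * objects to the callback; the logging below is only an assumed way of consuming them.
 *
 * addAnalyticsListener((action) => {
 *     const { metric, path, method } = action.description ?? {};
 *     console.log(`analytics: ${metric} ${method ?? ''} ${path ?? ''} count=${action.count ?? 0}`);
 * });
 */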
interface ResourceUsage extends Partial<NodeJS.ResourceUsage> {
    time?: number;
    period?: number;
    cpuUtilization?: number;
}
/** calculateCPUUtilization takes a ResourceUsage with at least userCPUTime & systemCPUTime set
* with millisecond values (NB: Node's process.resourceUsage returns values in microseconds for
* these fields so divide them by 1000 to get milliseconds) and a time period in milliseconds
* and returns the percentage of that time the CPU was being utilized as a decimal value
* between 0 and 1. So for example, 50% utilization will be returned as 0.5.
*/
export declare function calculateCPUUtilization(resourceUsage: ResourceUsage, period: number): number;
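/*
 * Worked example (illustrative, assuming the function simply sums user and system CPU time and
 * divides by the period): 250 ms of user CPU time plus 50 ms of system CPU time over a 1000 ms
 * period is 30% utilization, returned as 0.3.
 *
 * const utilization = calculateCPUUtilization({ userCPUTime: 250, systemCPUTime: 50 }, 1000);
 * // expected: 0.3
 */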
/** diffResourceUsage takes a ResourceUsage representing the previously stored usage and a new
 * process.resourceUsage() return value, then normalizes and diffs the two to return the
 * values for this time period.
 */
export declare function diffResourceUsage(lastResourceUsage: ResourceUsage, resourceUsage: NodeJS.ResourceUsage): ResourceUsage;
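/*
 * Sketch (illustrative): one way the two helpers above might be combined to report utilization for
 * a sampling period. The baseline handling is an assumption: since lastResourceUsage is typed as
 * this module's ResourceUsage (millisecond values), the raw microsecond baseline is divided by
 * 1000 before being passed in.
 *
 * const startTime = Date.now();
 * const raw = process.resourceUsage();
 * const baseline = { userCPUTime: raw.userCPUTime / 1000, systemCPUTime: raw.systemCPUTime / 1000 };
 * // ... later, at the end of the sampling period ...
 * const diff = diffResourceUsage(baseline, process.resourceUsage());
 * const utilization = calculateCPUUtilization(diff, Date.now() - startTime);
 */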
export {};
/**
 * This section contains a possible/experimental approach to bucketing values as they arrive,
 * instead of pushing them all into an array and sorting it.
 *
const BUCKET_COUNT = 100;
function addToBucket(action, value) {
    if (!action.buckets) {
        action.buckets = newBuckets();
    }
    const { counts, values, totalCount } = action.buckets;
    // binary-search through the bucket boundary values to find where this value belongs
    let jump = BUCKET_COUNT >> 1; // amount to jump with each iteration
    let position = jump; // start at halfway point
    while ((jump = jump >> 1) > 0) {
        const bucket_value = values[position];
        if (bucket_value === 0) {
            // unused slot, immediately put our value in and stop searching
            counts[position] = 1;
            values[position] = value;
            return;
        }
        if (value > bucket_value) {
            position += jump;
        } else {
            position -= jump;
        }
    }
    const count = counts[position] + 1;
    if (position === BUCKET_COUNT) {
        // if we go beyond the last item, increase the bucket (max) value
        position--;
        values[position] = value;
    }
    // threshold is assumed to be defined elsewhere; once a bucket's count exceeds it,
    // redistribute the counts so no single bucket dominates
    if (count > threshold) {
        rebalance(action.buckets, false);
    } else {
        counts[position] = count;
    }
}
function newBuckets() {
    // a single ArrayBuffer backs both arrays: float bucket values in the first half,
    // uint32 counts in the second half
    const ab = new ArrayBuffer(8 * BUCKET_COUNT);
    return {
        values: new Float32Array(ab, 0, BUCKET_COUNT),
        counts: new Uint32Array(ab, BUCKET_COUNT * 4, BUCKET_COUNT),
        totalCount: 0,
    };
}
let balancing_buckets; // reusable scratch buckets for rebalancing
/**
 * Rebalance the buckets; the counts can also be reset at the same time, if this occurred after a delivery.
 * @param buckets the { counts, values, totalCount } object to rebalance
 * @param reset_counts whether to reset the counts after rebalancing
 *
function rebalance({ counts, values, totalCount }, reset_counts: boolean) {
    const count_per_bucket = totalCount / BUCKET_COUNT;
    let target_position = 0;
    let target_count = 0;
    let last_target_value = 0;
    const { values: target_values, counts: target_counts } = balancing_buckets || (balancing_buckets = newBuckets());
    for (let i = 0; i < BUCKET_COUNT; i++) {
        // iterate through the existing buckets, filling up the target buckets in a balanced way
        let count = counts[i];
        while ((count_per_bucket - target_count) < count) {
            const value = values[i];
            last_target_value = ((count_per_bucket - target_count) / count) * (value - last_target_value) + last_target_value;
            target_values[target_position] = last_target_value;
            target_counts[target_position] = count_per_bucket;
            count -= count_per_bucket;
            target_position++;
            target_count = 0;
        }
        target_count += count;
    }
    // now copy the balanced buckets back into the original buckets
    values.set(target_values);
    if (reset_counts) counts.fill(0);
    else counts.set(target_counts);
}
*/