reconstruct
parent 20002030ad, commit ca8bded949
17 changed files with 1268 additions and 1110 deletions
133 services/analysis_pipelines.ts Normal file
@@ -0,0 +1,133 @@
// analysis_pipelines.ts - High-level workflows for common analysis tasks.

import { SignalProcessor } from './signal_processing_convolution';
import { TimeSeriesAnalyzer, STLDecomposition } from './timeseries';

/**
 * The comprehensive result of a denoise and detrend operation.
 */
export interface DenoiseAndDetrendResult {
  original: number[];
  smoothed: number[];
  decomposition: STLDecomposition;
}

/**
 * The result of an automatic SARIMA parameter search.
 */
export interface AutoArimaResult {
  bestModel: {
    p: number;
    d: number;
    q: number;
    P: number;
    D: number;
    Q: number;
    s: number;
    aic: number;
  };
  searchLog: { p: number; d: number; q: number; P: number; D: number; Q: number; s: number; aic: number }[];
}

/**
 * A class containing high-level analysis pipelines that combine
 * functions from various processing libraries.
 */
export class AnalysisPipelines {

  /**
   * A full pipeline to take a raw signal, smooth it to remove noise,
   * and then decompose it into trend, seasonal, and residual components.
   * @param series The original time series data.
   * @param period The seasonal period for STL decomposition.
   * @param smoothWindow The window size for the initial smoothing (denoising) pass.
   * @returns An object containing the original, smoothed, and decomposed series.
   */
  static denoiseAndDetrend(series: number[], period: number, smoothWindow: number = 5): DenoiseAndDetrendResult {
    // Ensure the window is odd so the smoothing kernel is symmetric
    if (smoothWindow > 1 && smoothWindow % 2 === 0) {
      smoothWindow++;
    }
    const smoothed = SignalProcessor.smooth(series, {
      method: 'gaussian',
      windowSize: smoothWindow
    });

    const decomposition = TimeSeriesAnalyzer.stlDecomposition(smoothed, period);

    return {
      original: series,
      smoothed: smoothed,
      decomposition: decomposition,
    };
  }
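
  // Usage sketch (illustrative; the weekly sample data are made up and the
  // decomposition fields follow the timeseries module's STLDecomposition):
  //
  //   const noisyWeekly = Array.from({ length: 56 }, (_, i) =>
  //     100 + 0.5 * i + 10 * Math.sin((2 * Math.PI * i) / 7) + (Math.random() - 0.5) * 4);
  //   const result = AnalysisPipelines.denoiseAndDetrend(noisyWeekly, 7, 5);
  //   // result.smoothed is the denoised series; result.decomposition holds
  //   // the trend, seasonal, and residual components.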

  /**
   * Performs a full grid search to find the optimal SARIMA parameters.
   * The seasonal period 's' is included in every logged result.
   * @param series The original time series data.
   * @param seasonalPeriod The seasonal period of the data (e.g., 7 for weekly, 12 for monthly).
   * @returns An object containing the best model parameters and a log of the search.
   */
  static findBestArimaParameters(
    series: number[],
    seasonalPeriod: number,
    maxD: number = 1,
    maxP: number = 2,
    maxQ: number = 2,
    maxSeasonalD: number = 1,
    maxSeasonalP: number = 2,
    maxSeasonalQ: number = 2
  ): AutoArimaResult {

    const searchLog: AutoArimaResult['searchLog'] = [];
    let bestModel: any = { aic: Infinity };

    const calculateAIC = (residuals: number[], numParams: number): number => {
      const n = residuals.length;
      if (n === 0) return Infinity;
      const sse = residuals.reduce((sum, r) => sum + r * r, 0);
      if (sse < 1e-9) return -Infinity; // Perfect fit
      const logLikelihood = -n / 2 * (Math.log(2 * Math.PI) + Math.log(sse / n)) - n / 2;
      return 2 * numParams - 2 * logLikelihood;
    };

    // Grid search over all parameter combinations
    for (let d = 0; d <= maxD; d++) {
      for (let p = 0; p <= maxP; p++) {
        for (let q = 0; q <= maxQ; q++) {
          for (let D = 0; D <= maxSeasonalD; D++) {
            for (let P = 0; P <= maxSeasonalP; P++) {
              for (let Q = 0; Q <= maxSeasonalQ; Q++) {
                // Skip the trivial model where nothing is estimated
                if (p === 0 && d === 0 && q === 0 && P === 0 && D === 0 && Q === 0) continue;

                const options = { p, d, q, P, D, Q, s: seasonalPeriod };
                try {
                  const { residuals } = TimeSeriesAnalyzer.arimaForecast(series, options, 0);
                  const numParams = p + q + P + Q;
                  const aic = calculateAIC(residuals, numParams);

                  // Construct the full model info object, ensuring 's' is included
                  const modelInfo = { p, d, q, P, D, Q, s: seasonalPeriod, aic };
                  searchLog.push(modelInfo);

                  if (modelInfo.aic < bestModel.aic) {
                    bestModel = modelInfo;
                  }
                } catch (error) {
                  // Skip invalid parameter combinations that cause errors
                }
              }
            }
          }
        }
      }
    }

    if (bestModel.aic === Infinity) {
      throw new Error("Could not find a suitable SARIMA model. The data may be too short or complex.");
    }

    // Sort the log by AIC for easier reading
    searchLog.sort((a, b) => a.aic - b.aic);

    return { bestModel, searchLog };
  }
}
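A minimal sketch of driving the grid search, assuming the timeseries module behaves as imported above; the monthly figures are made-up sample data, and the printed fields follow the AutoArimaResult interface:

import { AnalysisPipelines } from './analysis_pipelines';

const monthlySales = [
  112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
  115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
];

const { bestModel, searchLog } = AnalysisPipelines.findBestArimaParameters(monthlySales, 12);
console.log(`Best SARIMA(${bestModel.p},${bestModel.d},${bestModel.q})` +
            `(${bestModel.P},${bestModel.D},${bestModel.Q})[${bestModel.s}], AIC=${bestModel.aic.toFixed(1)}`);
console.log(`${searchLog.length} candidate models evaluated`);
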
208 services/analytics_engine.ts Normal file
@@ -0,0 +1,208 @@
import * as math from 'mathjs';
import * as _ from 'lodash';
import { DataSeries, DataMatrix, Condition, ApiResponse } from '../types/index';
import { RollingWindow } from './rolling_window';
import { KMeans, KMeansOptions } from './kmeans';
import { getWeekNumber, getSameWeekDayLastYear } from './time-helper';
import { calculateLinearRegression, generateForecast, calculatePredictionIntervals, ForecastResult } from './prediction';

export const handleError = (error: unknown): string => {
  return error instanceof Error ? error.message : 'Unknown error';
};
export const validateSeries = (series: DataSeries): void => {
  if (!series || !Array.isArray(series.values) || series.values.length === 0) {
    throw new Error('Series must contain at least one value');
  }
};

export const validateMatrix = (matrix: DataMatrix): void => {
  if (!matrix || !Array.isArray(matrix.data) || matrix.data.length === 0) {
    throw new Error('Matrix must contain at least one row');
  }
};

export class AnalyticsEngine {

  private applyConditions(series: DataSeries, conditions: Condition[] = []): number[] {
    if (conditions.length === 0) return series.values;
    return series.values; // TODO: Implement filtering
  }

  // Basic statistical functions
  unique(series: DataSeries): number[] {
    validateSeries(series);
    return _.uniq(series.values);
  }

  mean(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');
    return Number(math.mean(filteredValues));
  }

  count(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');
    return filteredValues.length;
  }

  distinctCount(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    const uniqueValues = _.uniq(filteredValues);
    return uniqueValues.length;
  }

  variance(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');
    return Number(math.variance(filteredValues));
  }

  standardDeviation(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');
    return Number(math.std(filteredValues));
  }

  percentile(series: DataSeries, percent: number, ascending: boolean = true, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');

    const sorted = ascending ? _.sortBy(filteredValues) : _.sortBy(filteredValues).reverse();
    const index = (percent / 100) * (sorted.length - 1);
    const lower = Math.floor(index);
    const upper = Math.ceil(index);
    const weight = index % 1;

    return sorted[lower] * (1 - weight) + sorted[upper] * weight;
  }

  median(series: DataSeries, conditions: Condition[] = []): number {
    return this.percentile(series, 50, true, conditions);
  }

  mode(series: DataSeries, conditions: Condition[] = []): number[] {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    const frequency = _.countBy(filteredValues);
    const maxFreq = Math.max(...Object.values(frequency));

    return Object.keys(frequency)
      .filter(key => frequency[key] === maxFreq)
      .map(Number);
  }

  max(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');
    return Math.max(...filteredValues);
  }

  min(series: DataSeries, conditions: Condition[] = []): number {
    validateSeries(series);
    const filteredValues = this.applyConditions(series, conditions);
    if (filteredValues.length === 0) throw new Error('No data points match conditions');
    return Math.min(...filteredValues);
  }

  correlation(series1: DataSeries, series2: DataSeries): number {
    validateSeries(series1);
    validateSeries(series2);

    if (series1.values.length !== series2.values.length) {
      throw new Error('Series must have same length for correlation');
    }

    const x = series1.values;
    const y = series2.values;
    const n = x.length;

    const sumX = _.sum(x);
    const sumY = _.sum(y);
    const sumXY = _.sum(x.map((xi, i) => xi * y[i]));
    const sumX2 = _.sum(x.map(xi => xi * xi));
    const sumY2 = _.sum(y.map(yi => yi * yi));

    const numerator = n * sumXY - sumX * sumY;
    const denominator = Math.sqrt((n * sumX2 - sumX * sumX) * (n * sumY2 - sumY * sumY));

    // A zero denominator means at least one series is constant, so the
    // correlation coefficient is undefined.
    if (denominator === 0) throw new Error('Correlation is undefined for constant series');

    return numerator / denominator;
  }

  // Rolling window functions
  rolling(series: DataSeries, windowSize: number): RollingWindow {
    validateSeries(series);
    if (windowSize <= 0) {
      throw new Error('Window size must be a positive number.');
    }
    if (series.values.length < windowSize) {
      return new RollingWindow([]);
    }

    const windows: number[][] = [];
    for (let i = 0; i <= series.values.length - windowSize; i++) {
      const window = series.values.slice(i, i + windowSize);
      windows.push(window);
    }
    return new RollingWindow(windows);
  }

  movingAverage(series: DataSeries, windowSize: number): number[] {
    return this.rolling(series, windowSize).mean();
  }

  // K-means wrapper (uses the mini-batch KMeans class imported above)
  kmeans(matrix: DataMatrix, nClusters: number, options: KMeansOptions = {}): { clusters: number[][][], centroids: number[][] } {
    validateMatrix(matrix);
    const points: number[][] = matrix.data;

    const kmeans = new KMeans(points, nClusters, options);
    const result = kmeans.run();

    const centroids = result.clusters.map(c => c.centroid);
    const clusters = result.clusters.map(c => c.points);

    return { clusters, centroids };
  }

  // Time helper wrapper functions
  getWeekNumber(dateString: string): number {
    return getWeekNumber(dateString);
  }

  getSameWeekDayLastYear(dateString: string): string {
    return getSameWeekDayLastYear(dateString);
  }

  // ========================================
  // Prediction functions
  // ========================================

  timeSeriesForecast(series: DataSeries, forecastPeriods: number): ForecastResult {
    validateSeries(series);

    const model = calculateLinearRegression(series.values);
    const forecast = generateForecast(model, series.values.length, forecastPeriods);
    const predictionIntervals = calculatePredictionIntervals(series.values, model, forecast);

    return {
      forecast,
      predictionIntervals,
      modelParameters: {
        slope: model.slope,
        intercept: model.intercept,
      },
    };
  }
}

export const analytics = new AnalyticsEngine();
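A usage sketch for the singleton export, assuming DataSeries is shaped like { values: number[] } as the validators above suggest:

import { analytics } from './analytics_engine';

const sales = { values: [10, 12, 9, 14, 15, 13, 18] };

console.log(analytics.mean(sales));             // 13
console.log(analytics.median(sales));           // 13
console.log(analytics.movingAverage(sales, 3)); // rolling three-point means
const { forecast } = analytics.timeSeriesForecast(sales, 2); // two periods ahead
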
415 services/convolution.ts Normal file
@@ -0,0 +1,415 @@
// convolution.ts - Convolution operations for 1D and 2D data

export interface ConvolutionOptions {
  mode?: 'full' | 'same' | 'valid';
  boundary?: 'zero' | 'reflect' | 'symmetric';
}

export interface ConvolutionResult1D {
  values: number[];
  originalLength: number;
  kernelLength: number;
  mode: string;
}

export interface ConvolutionResult2D {
  matrix: number[][];
  originalDimensions: [number, number];
  kernelDimensions: [number, number];
  mode: string;
}

/**
 * Validates input array for convolution operations
 */
function validateArray(arr: number[], name: string): void {
  if (!Array.isArray(arr) || arr.length === 0) {
    throw new Error(`${name} must be a non-empty array`);
  }
  if (arr.some(val => typeof val !== 'number' || !isFinite(val))) {
    throw new Error(`${name} must contain only finite numbers`);
  }
}

/**
 * Validates 2D matrix for convolution operations
 */
function validateMatrix(matrix: number[][], name: string): void {
  if (!Array.isArray(matrix) || matrix.length === 0) {
    throw new Error(`${name} must be a non-empty 2D array`);
  }

  const rowLength = matrix[0].length;
  if (rowLength === 0) {
    throw new Error(`${name} rows must be non-empty`);
  }

  for (let i = 0; i < matrix.length; i++) {
    if (!Array.isArray(matrix[i]) || matrix[i].length !== rowLength) {
      throw new Error(`${name} must be a rectangular matrix`);
    }
    if (matrix[i].some(val => typeof val !== 'number' || !isFinite(val))) {
      throw new Error(`${name} must contain only finite numbers`);
    }
  }
}

/**
 * Applies boundary conditions to extend an array
 */
function applyBoundary1D(signal: number[], padding: number, boundary: string): number[] {
  if (padding <= 0) return signal;

  let result = [...signal];

  switch (boundary) {
    case 'zero':
      result = new Array(padding).fill(0).concat(result).concat(new Array(padding).fill(0));
      break;
    case 'reflect': {
      const leftPad = [];
      const rightPad = [];
      for (let i = 0; i < padding; i++) {
        leftPad.unshift(signal[Math.min(i + 1, signal.length - 1)]);
        rightPad.push(signal[Math.max(signal.length - 2 - i, 0)]);
      }
      result = leftPad.concat(result).concat(rightPad);
      break;
    }
    case 'symmetric': {
      const leftSymPad = [];
      const rightSymPad = [];
      for (let i = 0; i < padding; i++) {
        leftSymPad.unshift(signal[Math.min(i, signal.length - 1)]);
        rightSymPad.push(signal[Math.max(signal.length - 1 - i, 0)]);
      }
      result = leftSymPad.concat(result).concat(rightSymPad);
      break;
    }
    default:
      throw new Error(`Unsupported boundary condition: ${boundary}`);
  }

  return result;
}

/**
 * Performs 1D convolution between signal and kernel
 *
 * @param signal - Input signal array
 * @param kernel - Convolution kernel array
 * @param options - Convolution options (mode, boundary)
 * @returns Convolution result with metadata
 */
export function convolve1D(
  signal: number[],
  kernel: number[],
  options: ConvolutionOptions = {}
): ConvolutionResult1D {
  validateArray(signal, 'Signal');
  validateArray(kernel, 'Kernel');

  const { mode = 'full', boundary = 'zero' } = options;
  const flippedKernel = [...kernel].reverse();

  const signalLen = signal.length;
  const kernelLen = flippedKernel.length;

  const outputLength = mode === 'full' ? signalLen + kernelLen - 1 :
                       mode === 'same' ? signalLen :
                       signalLen - kernelLen + 1;

  const result: number[] = new Array(outputLength);

  const halfKernelLen = Math.floor(kernelLen / 2);

  for (let i = 0; i < outputLength; i++) {
    let sum = 0;
    for (let j = 0; j < kernelLen; j++) {
      let signalIdx: number;

      switch (mode) {
        case 'full':
          signalIdx = i - j;
          break;
        case 'same':
          signalIdx = i - halfKernelLen + j;
          break;
        case 'valid':
          signalIdx = i + j;
          break;
      }

      // Handle boundary conditions
      if (signalIdx >= 0 && signalIdx < signalLen) {
        sum += signal[signalIdx] * flippedKernel[j];
      } else if (boundary !== 'zero' && (mode === 'full' || mode === 'same')) {
        // Simplified boundary handling: map the out-of-range index back
        // into the signal and clamp it to the valid range.
        let boundaryIdx = signalIdx;
        if (signalIdx < 0) {
          boundaryIdx = boundary === 'reflect' ? -signalIdx - 1 : -signalIdx;
        } else if (signalIdx >= signalLen) {
          boundaryIdx = boundary === 'reflect' ? 2 * signalLen - signalIdx - 1 : 2 * signalLen - signalIdx - 2;
        }
        boundaryIdx = Math.max(0, Math.min(signalLen - 1, boundaryIdx));
        sum += signal[boundaryIdx] * flippedKernel[j];
      }
      // If boundary is 'zero', out-of-range samples contribute nothing.
    }
    result[i] = sum;
  }

  return {
    values: result,
    originalLength: signalLen,
    kernelLength: kernelLen,
    mode
  };
}
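
// Usage sketch (illustrative): how `mode` affects the output, hand-checked
// for a 5-sample signal and a 3-tap difference kernel.
//
//   const signal = [1, 2, 3, 4, 5];
//   const kernel = [1, 0, -1];
//   convolve1D(signal, kernel, { mode: 'full' }).values.length;  // 5 + 3 - 1 = 7
//   convolve1D(signal, kernel, { mode: 'same' }).values.length;  // 5
//   convolve1D(signal, kernel, { mode: 'valid' }).values;        // [2, 2, 2]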

/**
 * Performs 2D convolution between matrix and kernel
 *
 * @param matrix - Input 2D matrix
 * @param kernel - 2D convolution kernel
 * @param options - Convolution options (mode, boundary)
 * @returns 2D convolution result with metadata
 */
export function convolve2D(
  matrix: number[][],
  kernel: number[][],
  options: ConvolutionOptions = {}
): ConvolutionResult2D {
  validateMatrix(matrix, 'Matrix');
  validateMatrix(kernel, 'Kernel');

  const { mode = 'same', boundary = 'reflect' } = options;

  // Flip kernel for convolution
  const flippedKernel = kernel.map(row => [...row].reverse()).reverse();

  const matrixRows = matrix.length;
  const matrixCols = matrix[0].length;
  const kernelRows = flippedKernel.length;
  const kernelCols = flippedKernel[0].length;

  // Calculate output dimensions
  let outputRows: number, outputCols: number;
  let padTop: number, padLeft: number;

  switch (mode) {
    case 'full':
      outputRows = matrixRows + kernelRows - 1;
      outputCols = matrixCols + kernelCols - 1;
      padTop = kernelRows - 1;
      padLeft = kernelCols - 1;
      break;
    case 'same':
      outputRows = matrixRows;
      outputCols = matrixCols;
      padTop = Math.floor(kernelRows / 2);
      padLeft = Math.floor(kernelCols / 2);
      break;
    case 'valid':
      outputRows = Math.max(0, matrixRows - kernelRows + 1);
      outputCols = Math.max(0, matrixCols - kernelCols + 1);
      padTop = 0;
      padLeft = 0;
      break;
    default:
      throw new Error(`Unsupported convolution mode: ${mode}`);
  }

  // Create padded matrix based on boundary conditions
  const totalPadRows = mode === 'valid' ? 0 : kernelRows - 1;
  const totalPadCols = mode === 'valid' ? 0 : kernelCols - 1;

  const paddedMatrix: number[][] = [];

  // Initialize padded matrix with boundary conditions
  for (let i = -padTop; i < matrixRows + totalPadRows - padTop; i++) {
    const row: number[] = [];
    for (let j = -padLeft; j < matrixCols + totalPadCols - padLeft; j++) {
      let value = 0;

      if (i >= 0 && i < matrixRows && j >= 0 && j < matrixCols) {
        value = matrix[i][j];
      } else if (boundary !== 'zero') {
        // Apply boundary conditions
        let boundaryI = i;
        let boundaryJ = j;

        if (boundary === 'reflect') {
          boundaryI = i < 0 ? -i - 1 : i >= matrixRows ? 2 * matrixRows - i - 1 : i;
          boundaryJ = j < 0 ? -j - 1 : j >= matrixCols ? 2 * matrixCols - j - 1 : j;
        } else if (boundary === 'symmetric') {
          boundaryI = i < 0 ? -i : i >= matrixRows ? 2 * matrixRows - i - 2 : i;
          boundaryJ = j < 0 ? -j : j >= matrixCols ? 2 * matrixCols - j - 2 : j;
        }

        boundaryI = Math.max(0, Math.min(boundaryI, matrixRows - 1));
        boundaryJ = Math.max(0, Math.min(boundaryJ, matrixCols - 1));
        value = matrix[boundaryI][boundaryJ];
      }

      row.push(value);
    }
    paddedMatrix.push(row);
  }

  // Perform 2D convolution
  const result: number[][] = [];

  for (let i = 0; i < outputRows; i++) {
    const row: number[] = [];
    for (let j = 0; j < outputCols; j++) {
      let sum = 0;

      for (let ki = 0; ki < kernelRows; ki++) {
        for (let kj = 0; kj < kernelCols; kj++) {
          const matrixI = i + ki;
          const matrixJ = j + kj;

          if (matrixI >= 0 && matrixI < paddedMatrix.length &&
              matrixJ >= 0 && matrixJ < paddedMatrix[0].length) {
            sum += paddedMatrix[matrixI][matrixJ] * flippedKernel[ki][kj];
          }
        }
      }

      row.push(sum);
    }
    result.push(row);
  }

  return {
    matrix: result,
    originalDimensions: [matrixRows, matrixCols],
    kernelDimensions: [kernelRows, kernelCols],
    mode
  };
}

/**
 * Creates common convolution kernels
 */
export class ConvolutionKernels {
  /**
   * Creates a Gaussian blur kernel
   */
  static gaussian(size: number, sigma: number = 1.0): number[][] {
    if (size % 2 === 0) {
      throw new Error('Kernel size must be odd');
    }

    const kernel: number[][] = [];
    const center = Math.floor(size / 2);
    let sum = 0;

    for (let i = 0; i < size; i++) {
      const row: number[] = [];
      for (let j = 0; j < size; j++) {
        const x = i - center;
        const y = j - center;
        const value = Math.exp(-(x * x + y * y) / (2 * sigma * sigma));
        row.push(value);
        sum += value;
      }
      kernel.push(row);
    }

    // Normalize kernel
    return kernel.map(row => row.map(val => val / sum));
  }

  /**
   * Creates a Sobel edge detection kernel
   */
  static sobel(direction: 'x' | 'y' = 'x'): number[][] {
    if (direction === 'x') {
      return [
        [-1, 0, 1],
        [-2, 0, 2],
        [-1, 0, 1]
      ];
    } else {
      return [
        [-1, -2, -1],
        [ 0,  0,  0],
        [ 1,  2,  1]
      ];
    }
  }

  /**
   * Creates a Laplacian edge detection kernel
   */
  static laplacian(): number[][] {
    return [
      [ 0, -1,  0],
      [-1,  4, -1],
      [ 0, -1,  0]
    ];
  }

  /**
   * Creates a box/average blur kernel
   */
  static box(size: number): number[][] {
    if (size % 2 === 0) {
      throw new Error('Kernel size must be odd');
    }

    const value = 1 / (size * size);
    const kernel: number[][] = [];

    for (let i = 0; i < size; i++) {
      kernel.push(new Array(size).fill(value));
    }

    return kernel;
  }

  /**
   * Creates a 1D Gaussian kernel
   */
  static gaussian1D(size: number, sigma: number = 1.0): number[] {
    if (size % 2 === 0) {
      throw new Error('Kernel size must be odd');
    }

    const kernel: number[] = [];
    const center = Math.floor(size / 2);
    let sum = 0;

    for (let i = 0; i < size; i++) {
      const x = i - center;
      const value = Math.exp(-(x * x) / (2 * sigma * sigma));
      kernel.push(value);
      sum += value;
    }

    // Normalize kernel
    return kernel.map(val => val / sum);
  }

  /**
   * Creates a 1D difference kernel for edge detection
   */
  static difference1D(): number[] {
    return [-1, 0, 1];
  }

  /**
   * Creates a 1D moving average kernel
   */
  static average1D(size: number): number[] {
    if (size <= 0) {
      throw new Error('Kernel size must be positive');
    }

    const value = 1 / size;
    return new Array(size).fill(value);
  }
}
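A short sketch combining the kernel factory with convolve2D; the 4x4 "image" is made up, and the 3x3 Gaussian weights sum to 1 so the blur preserves overall intensity:

import { convolve2D, ConvolutionKernels } from './convolution';

const image = [
  [0, 0, 0, 0],
  [0, 9, 9, 0],
  [0, 9, 9, 0],
  [0, 0, 0, 0],
];

const g = ConvolutionKernels.gaussian(3, 1.0);
const blurred = convolve2D(image, g, { mode: 'same', boundary: 'zero' });
console.log(blurred.matrix.map(row => row.map(v => v.toFixed(2))));
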
144 services/kmeans.ts Normal file
@@ -0,0 +1,144 @@
export type Point = number[];

export interface Cluster {
  centroid: Point;
  points: Point[];
}

export interface KMeansOptions {
  batchSize?: number;
  maxIterations?: number;
  tolerance?: number;
}

export interface KMeansResult {
  clusters: Cluster[];
  iterations: number;
  converged: boolean;
}

export class KMeans {
  private readonly k: number;
  private readonly batchSize: number;
  private readonly maxIterations: number;
  private readonly tolerance: number;
  private readonly data: Point[];
  private centroids: Point[] = [];

  constructor(data: Point[], k: number, options: KMeansOptions = {}) {
    this.data = data;
    this.k = k;
    this.batchSize = options.batchSize ?? 32;
    this.maxIterations = options.maxIterations ?? 100;
    this.tolerance = options.tolerance ?? 0.0001;
  }

  private static euclideanDistance(p1: Point, p2: Point): number {
    return Math.sqrt(p1.reduce((sum, val, i) => sum + (val - p2[i]) ** 2, 0));
  }

  private initializeCentroids(): void {
    // Guard: sampling k distinct points is impossible if k exceeds the data size
    if (this.k > this.data.length) {
      throw new Error('k cannot exceed the number of data points');
    }
    const dataCopy = [...this.data];
    for (let i = 0; i < this.k; i++) {
      const randomIndex = Math.floor(Math.random() * dataCopy.length);
      this.centroids.push([...dataCopy[randomIndex]]);
      dataCopy.splice(randomIndex, 1);
    }
  }

  /**
   * Creates a random sample of the data.
   */
  private createMiniBatch(): Point[] {
    const miniBatch: Point[] = [];
    const dataCopy = [...this.data];
    for (let i = 0; i < this.batchSize && dataCopy.length > 0; i++) {
      const randomIndex = Math.floor(Math.random() * dataCopy.length);
      miniBatch.push(dataCopy[randomIndex]);
      dataCopy.splice(randomIndex, 1);
    }
    return miniBatch;
  }

  /**
   * Assigns all points in the full dataset to the final centroids.
   */
  private assignFinalClusters(): Cluster[] {
    const clusters: Cluster[] = this.centroids.map(c => ({ centroid: c, points: [] }));

    for (const point of this.data) {
      let minDistance = Infinity;
      let closestClusterIndex = -1;
      for (let i = 0; i < this.centroids.length; i++) {
        const distance = KMeans.euclideanDistance(point, this.centroids[i]);
        if (distance < minDistance) {
          minDistance = distance;
          closestClusterIndex = i;
        }
      }
      if (closestClusterIndex !== -1) {
        clusters[closestClusterIndex].points.push(point);
      }
    }
    return clusters;
  }

  public run(): KMeansResult {
    this.initializeCentroids();

    const clusterPointCounts = new Array(this.k).fill(0);
    let converged = false;
    let iterations = 0;

    for (let i = 0; i < this.maxIterations; i++) {
      iterations = i + 1;
      const miniBatch = this.createMiniBatch();
      const previousCentroids = this.centroids.map(c => [...c]);

      // Assign points in the batch and update centroids gradually
      for (const point of miniBatch) {
        let minDistance = Infinity;
        let closestClusterIndex = -1;

        for (let j = 0; j < this.k; j++) {
          const distance = KMeans.euclideanDistance(point, this.centroids[j]);
          if (distance < minDistance) {
            minDistance = distance;
            closestClusterIndex = j;
          }
        }

        if (closestClusterIndex !== -1) {
          clusterPointCounts[closestClusterIndex]++;
          const learningRate = 1 / clusterPointCounts[closestClusterIndex];
          const centroidToUpdate = this.centroids[closestClusterIndex];

          // Move the centroid slightly towards the new point
          for (let dim = 0; dim < centroidToUpdate.length; dim++) {
            centroidToUpdate[dim] = (1 - learningRate) * centroidToUpdate[dim] + learningRate * point[dim];
          }
        }
      }

      // Check for convergence
      let totalMovement = 0;
      for (let j = 0; j < this.k; j++) {
        totalMovement += KMeans.euclideanDistance(previousCentroids[j], this.centroids[j]);
      }

      if (totalMovement < this.tolerance) {
        converged = true;
        break;
      }
    }

    // After training, assign all points to the final centroids
    const finalClusters = this.assignFinalClusters();

    return {
      clusters: finalClusters,
      iterations,
      converged
    };
  }
}
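A self-contained usage sketch; because centroids are seeded randomly and batches are sampled randomly, the ordering of the two recovered groups can swap between runs:

import { KMeans } from './kmeans';

const points = [
  [1.0, 1.0], [1.2, 0.8], [0.9, 1.1],  // group near (1, 1)
  [8.0, 8.0], [8.2, 7.9], [7.8, 8.1],  // group near (8, 8)
];

const result = new KMeans(points, 2, { batchSize: 4, maxIterations: 50 }).run();
console.log(result.converged, result.iterations);
result.clusters.forEach(c => console.log(c.centroid, c.points.length));
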
36 services/pivot_table.ts Normal file
@@ -0,0 +1,36 @@
export interface PivotOptions {
  index: string[];
  columns: string[];
  values: string;
  aggFunc?: (items: number[]) => number; // Aggregation over a cell's raw values (defaults to sum)
}

export function pivotTable(
  data: Record<string, any>[],
  options: PivotOptions
): Record<string, Record<string, number>> {
  const { index, columns, values, aggFunc = arr => arr.reduce((a, b) => a + b, 0) } = options;
  const cellMap: Record<string, Record<string, number[]>> = {};

  data.forEach(row => {
    const rowKey = index.map(k => row[k]).join('|');
    const colKey = columns.map(k => row[k]).join('|');

    if (!cellMap[rowKey]) cellMap[rowKey] = {};
    if (!cellMap[rowKey][colKey]) cellMap[rowKey][colKey] = [];
    cellMap[rowKey][colKey].push(row[values]);
  });

  // Apply aggregation function to each cell
  const result: Record<string, Record<string, number>> = {};
  Object.entries(cellMap).forEach(([rowKey, cols]) => {
    result[rowKey] = {};
    Object.entries(cols).forEach(([colKey, valuesArr]) => {
      result[rowKey][colKey] = aggFunc(valuesArr);
    });
  });

  return result;
}
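A hand-checkable sketch: pivoting a small sales table to total revenue by region and quarter with the default sum aggregator:

import { pivotTable } from './pivot_table';

const rows = [
  { region: 'north', quarter: 'Q1', revenue: 100 },
  { region: 'north', quarter: 'Q1', revenue: 50 },
  { region: 'north', quarter: 'Q2', revenue: 80 },
  { region: 'south', quarter: 'Q1', revenue: 70 },
];

const table = pivotTable(rows, { index: ['region'], columns: ['quarter'], values: 'revenue' });
console.log(table); // { north: { Q1: 150, Q2: 80 }, south: { Q1: 70 } }
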
101 services/prediction.ts Normal file
@@ -0,0 +1,101 @@
import * as math from 'mathjs';

// The structure for the returned regression model
export interface LinearRegressionModel {
  slope: number;
  intercept: number;
  predict: (x: number) => number;
}

// The structure for the full forecast output
export interface ForecastResult {
  forecast: number[];
  predictionIntervals: {
    upperBound: number[];
    lowerBound: number[];
  };
  modelParameters: {
    slope: number;
    intercept: number;
  };
}

/**
 * Calculates the linear regression model from a time series.
 * @param yValues The historical data points (e.g., sales per month).
 * @returns {LinearRegressionModel} An object containing the model's parameters and a predict function.
 */
export function calculateLinearRegression(yValues: number[]): LinearRegressionModel {
  if (yValues.length < 2) {
    throw new Error('At least two data points are required for linear regression.');
  }

  const xValues = Array.from({ length: yValues.length }, (_, i) => i);

  const meanX = Number(math.mean(xValues));
  const meanY = Number(math.mean(yValues));
  const stdDevX = Number(math.std(xValues, 'uncorrected'));
  const stdDevY = Number(math.std(yValues, 'uncorrected'));

  // Guard against division by zero. A zero stdDevX cannot happen with
  // sequential x values, and a zero stdDevY means the series is constant;
  // in either case a flat model (zero slope) is a safe fallback.
  if (stdDevX === 0 || stdDevY === 0) {
    return { slope: 0, intercept: meanY, predict: () => meanY };
  }

  // Cast the result of math.sum to a Number
  const correlationNumerator = Number(math.sum(xValues.map((x, i) => (x - meanX) * (yValues[i] - meanY))));

  const correlation = correlationNumerator / (xValues.length * stdDevX * stdDevY);

  const slope = correlation * (stdDevY / stdDevX);
  const intercept = meanY - slope * meanX;

  const predict = (x: number): number => slope * x + intercept;

  return { slope, intercept, predict };
}

/**
 * Generates a forecast for a specified number of future periods.
 * @param model The calculated linear regression model.
 * @param historicalDataLength The number of historical data points.
 * @param forecastPeriods The number of future periods to predict.
 * @returns {number[]} An array of forecasted values.
 */
export function generateForecast(model: LinearRegressionModel, historicalDataLength: number, forecastPeriods: number): number[] {
  const forecast: number[] = [];
  const startPeriod = historicalDataLength;

  for (let i = 0; i < forecastPeriods; i++) {
    const futureX = startPeriod + i;
    forecast.push(model.predict(futureX));
  }
  return forecast;
}

/**
 * Calculates prediction intervals to show the range of uncertainty.
 * @param yValues The original historical data.
 * @param model The calculated linear regression model.
 * @param forecast The array of forecasted values.
 * @returns An object with upperBound and lowerBound arrays.
 */
export function calculatePredictionIntervals(yValues: number[], model: LinearRegressionModel, forecast: number[]) {
  const n = yValues.length;
  const residualsSquaredSum = yValues.reduce((sum, y, i) => {
    const predictedY = model.predict(i);
    return sum + (y - predictedY) ** 2;
  }, 0);
  // With fewer than three points there are no degrees of freedom left, so
  // fall back to a zero-width interval instead of dividing by zero.
  const stdError = n > 2 ? Math.sqrt(residualsSquaredSum / (n - 2)) : 0;

  const zScore = 1.96; // For a 95% confidence level
  const marginOfError = zScore * stdError;

  const upperBound = forecast.map(val => val + marginOfError);
  const lowerBound = forecast.map(val => val - marginOfError);

  return { upperBound, lowerBound };
}
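A worked sketch on a perfectly linear series (y = 2x + 1), so every number can be verified by hand:

import { calculateLinearRegression, generateForecast, calculatePredictionIntervals } from './prediction';

const history = [1, 3, 5, 7, 9];                              // x = 0..4
const model = calculateLinearRegression(history);
console.log(model.slope, model.intercept);                    // 2, 1

const forecast = generateForecast(model, history.length, 2);  // predicts x = 5 and 6
console.log(forecast);                                        // [11, 13]
console.log(calculatePredictionIntervals(history, model, forecast)); // zero-width: perfect fit
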
77 services/retail_metrics.ts Normal file
@@ -0,0 +1,77 @@
export function purchaseIndex(totalItemsSold: number, numberOfCustomers: number): number {
  if (numberOfCustomers === 0) {
    throw new Error('Number of customers cannot be zero');
  }
  return (totalItemsSold / numberOfCustomers) * 1000;
}

export function purchaseRate(productPurchases: number, totalTransactions: number): number;
export function purchaseRate(productPurchases: number[], totalTransactions: number[]): number[];
export function purchaseRate(productPurchases: number | number[], totalTransactions: number | number[]): number | number[] {
  if (Array.isArray(productPurchases) && Array.isArray(totalTransactions)) {
    if (productPurchases.length !== totalTransactions.length) throw new Error('Arrays must have the same length');
    return productPurchases.map((pp, i) => purchaseRate(pp, totalTransactions[i]));
  }
  if (typeof productPurchases === 'number' && typeof totalTransactions === 'number') {
    if (totalTransactions === 0) throw new Error('Total transactions cannot be zero');
    return (productPurchases / totalTransactions) * 100;
  }
  throw new Error('Input types must match');
}

export function liftValue(jointPurchaseRate: number, productAPurchaseRate: number, productBPurchaseRate: number): number;
export function liftValue(jointPurchaseRate: number[], productAPurchaseRate: number[], productBPurchaseRate: number[]): number[];
export function liftValue(jointPurchaseRate: number | number[], productAPurchaseRate: number | number[], productBPurchaseRate: number | number[]): number | number[] {
  if (Array.isArray(jointPurchaseRate) && Array.isArray(productAPurchaseRate) && Array.isArray(productBPurchaseRate)) {
    if (jointPurchaseRate.length !== productAPurchaseRate.length || jointPurchaseRate.length !== productBPurchaseRate.length) throw new Error('Arrays must have the same length');
    return jointPurchaseRate.map((jpr, i) => liftValue(jpr, productAPurchaseRate[i], productBPurchaseRate[i]));
  }
  if (typeof jointPurchaseRate === 'number' && typeof productAPurchaseRate === 'number' && typeof productBPurchaseRate === 'number') {
    const expectedJointRate = productAPurchaseRate * productBPurchaseRate;
    if (expectedJointRate === 0) throw new Error('Expected joint rate cannot be zero');
    return jointPurchaseRate / expectedJointRate;
  }
  throw new Error('Input types must match');
}

export function costRatio(cost: number, salePrice: number): number;
export function costRatio(cost: number[], salePrice: number[]): number[];
export function costRatio(cost: number | number[], salePrice: number | number[]): number | number[] {
  if (Array.isArray(cost) && Array.isArray(salePrice)) {
    if (cost.length !== salePrice.length) throw new Error('Arrays must have the same length');
    return cost.map((c, i) => costRatio(c, salePrice[i]));
  }
  if (typeof cost === 'number' && typeof salePrice === 'number') {
    if (salePrice === 0) throw new Error('Sale price cannot be zero');
    return cost / salePrice;
  }
  throw new Error('Input types must match');
}

export function grossMarginRate(salePrice: number, cost: number): number;
export function grossMarginRate(salePrice: number[], cost: number[]): number[];
export function grossMarginRate(salePrice: number | number[], cost: number | number[]): number | number[] {
  if (Array.isArray(salePrice) && Array.isArray(cost)) {
    if (salePrice.length !== cost.length) throw new Error('Arrays must have the same length');
    return salePrice.map((sp, i) => grossMarginRate(sp, cost[i]));
  }
  if (typeof salePrice === 'number' && typeof cost === 'number') {
    if (salePrice === 0) throw new Error('Sale price cannot be zero');
    return (salePrice - cost) / salePrice;
  }
  throw new Error('Input types must match');
}

export function averageSpendPerCustomer(totalRevenue: number, numberOfCustomers: number): number;
export function averageSpendPerCustomer(totalRevenue: number[], numberOfCustomers: number[]): number[];
export function averageSpendPerCustomer(totalRevenue: number | number[], numberOfCustomers: number | number[]): number | number[] {
  if (Array.isArray(totalRevenue) && Array.isArray(numberOfCustomers)) {
    if (totalRevenue.length !== numberOfCustomers.length) throw new Error('Arrays must have the same length');
    return totalRevenue.map((tr, i) => averageSpendPerCustomer(tr, numberOfCustomers[i]));
  }
  if (typeof totalRevenue === 'number' && typeof numberOfCustomers === 'number') {
    if (numberOfCustomers === 0) throw new Error('Number of customers cannot be zero');
    return totalRevenue / numberOfCustomers;
  }
  throw new Error('Input types must match');
}
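A worked example of the association metrics with illustrative counts. A lift above 1 means the two products co-occur more often than independent purchasing would predict; note purchaseRate returns a percentage, hence the division by 100:

import { purchaseRate, liftValue } from './retail_metrics';

const rateA = purchaseRate(120, 1000) / 100;     // product A in 12% of transactions
const rateB = purchaseRate(80, 1000) / 100;      // product B in 8%
const jointRate = purchaseRate(24, 1000) / 100;  // both together in 2.4%

console.log(liftValue(jointRate, rateA, rateB)); // 0.024 / (0.12 * 0.08) = 2.5
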
30 services/rolling_window.ts Normal file
@@ -0,0 +1,30 @@
import * as math from 'mathjs';
import * as _ from 'lodash';

export class RollingWindow {
  private windows: number[][];

  constructor(windows: number[][]) {
    this.windows = windows;
  }

  mean(): number[] {
    return this.windows.map(window => Number(math.mean(window)));
  }

  sum(): number[] {
    return this.windows.map(window => _.sum(window));
  }

  min(): number[] {
    return this.windows.map(window => Math.min(...window));
  }

  max(): number[] {
    return this.windows.map(window => Math.max(...window));
  }

  toArray(): number[][] {
    return this.windows;
  }
}
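A quick sketch; the windows below are what AnalyticsEngine.rolling would produce for [1, 2, 3, 4, 5] with windowSize 3:

import { RollingWindow } from './rolling_window';

const windows = new RollingWindow([
  [1, 2, 3],
  [2, 3, 4],
  [3, 4, 5],
]);

console.log(windows.mean()); // [2, 3, 4]
console.log(windows.max());  // [3, 4, 5]
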
671 services/signal_processing_convolution.ts Normal file
@@ -0,0 +1,671 @@
// signal_processing_convolution.ts - Convolution-based signal processing functions
import { convolve1D, convolve2D, ConvolutionKernels, ConvolutionOptions } from './convolution';

export interface SmoothingOptions {
  method?: 'gaussian' | 'moving_average';
  windowSize?: number;
  sigma?: number;
}

export interface EdgeDetectionOptions {
  method?: 'sobel' | 'laplacian' | 'canny';
  threshold?: number;
}

export interface FilterOptions {
  type: 'lowpass' | 'highpass' | 'bandpass' | 'bandstop';
  cutoffLow?: number;
  cutoffHigh?: number;
  order?: number;
}

export interface DerivativeOptions {
  order?: 1 | 2;
  method?: 'gradient' | 'laplacian';
}

/**
 * Convolution-Based Signal Processing Library
 * Functions that leverage convolution operations for signal processing
 */
export class SignalProcessor {

  /**
   * Smooth a 1D signal using convolution-based methods
   */
  static smooth(signal: number[], options: SmoothingOptions = {}): number[] {
    const { method = 'gaussian', windowSize = 5, sigma = 1.0 } = options;

    if (signal.length === 0) {
      throw new Error('Signal cannot be empty');
    }

    let kernel: number[];

    switch (method) {
      case 'gaussian':
        kernel = ConvolutionKernels.gaussian1D(windowSize, sigma);
        break;

      case 'moving_average':
        kernel = ConvolutionKernels.average1D(windowSize);
        break;

      default:
        throw new Error(`Unsupported smoothing method: ${method}`);
    }

    return convolve1D(signal, kernel, { mode: 'same' }).values;
  }
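
  // Usage sketch (illustrative): 'same' mode keeps the output the same
  // length as the input; the window and sigma values here are arbitrary.
  //
  //   const noisy = Array.from({ length: 50 }, (_, i) => i + (Math.random() - 0.5) * 5);
  //   const smoothed = SignalProcessor.smooth(noisy, { method: 'gaussian', windowSize: 7, sigma: 1.5 });
  //   smoothed.length === noisy.length; // true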

  /**
   * Detect edges in 2D image data using convolution-based methods
   */
  static detectEdges2D(image: number[][], options: EdgeDetectionOptions = {}): number[][] {
    const { method = 'sobel', threshold = 0.1 } = options;

    let kernelX: number[][];
    let kernelY: number[][];

    switch (method) {
      case 'sobel':
        kernelX = ConvolutionKernels.sobel('x');
        kernelY = ConvolutionKernels.sobel('y');
        break;

      case 'laplacian': {
        const laplacianKernel = ConvolutionKernels.laplacian();
        return convolve2D(image, laplacianKernel, { mode: 'same' }).matrix.map(row =>
          row.map(val => Math.abs(val) > threshold ? Math.abs(val) : 0)
        );
      }

      default:
        throw new Error(`Unsupported edge detection method: ${method}`);
    }

    // Apply both kernels and combine results
    const edgesX = convolve2D(image, kernelX, { mode: 'same' }).matrix;
    const edgesY = convolve2D(image, kernelY, { mode: 'same' }).matrix;

    // Calculate gradient magnitude
    const result: number[][] = [];
    for (let i = 0; i < edgesX.length; i++) {
      result[i] = [];
      for (let j = 0; j < edgesX[i].length; j++) {
        const magnitude = Math.sqrt(edgesX[i][j] ** 2 + edgesY[i][j] ** 2);
        result[i][j] = magnitude > threshold ? magnitude : 0;
      }
    }

    return result;
  }

  /**
   * Apply digital filters using convolution
   */
  static filter(signal: number[], options: FilterOptions): number[] {
    const { type, cutoffLow = 0.1, cutoffHigh = 0.5, order = 4 } = options;

    switch (type) {
      case 'lowpass': {
        // Low-pass filter using a Gaussian kernel
        const kernel = ConvolutionKernels.gaussian1D(order * 4 + 1, order / 2);
        return convolve1D(signal, kernel, { mode: 'same' }).values;
      }

      case 'highpass': {
        // High-pass filter as the residual of a Gaussian low-pass
        const lpKernel = ConvolutionKernels.gaussian1D(order * 4 + 1, order / 2);
        const smoothed = convolve1D(signal, lpKernel, { mode: 'same' }).values;
        return signal.map((val, i) => val - smoothed[i]);
      }

      case 'bandpass': {
        // Band-pass as combination of high-pass and low-pass
        const hp = this.filter(signal, { type: 'highpass', cutoffLow, order });
        return this.filter(hp, { type: 'lowpass', cutoffLow: cutoffHigh, order });
      }

      case 'bandstop': {
        // Band-stop as original minus band-pass
        const bp = this.filter(signal, { type: 'bandpass', cutoffLow, cutoffHigh, order });
        return signal.map((val, i) => val - bp[i]);
      }

      default:
        throw new Error(`Unsupported filter type: ${type}`);
    }
  }

  /**
   * Calculate derivatives using convolution with derivative kernels
   */
  static derivative(signal: number[], options: DerivativeOptions = {}): number[] {
    const { order = 1, method = 'gradient' } = options;

    let kernel: number[];

    if (method === 'gradient') {
      switch (order) {
        case 1:
          // Central-difference gradient kernel. Note: convolve1D flips the
          // kernel, so this yields (f[i-1] - f[i+1]) / 2, the negated
          // forward gradient; flip the sign if the conventional orientation
          // is needed.
          kernel = [-0.5, 0, 0.5];
          break;
        case 2:
          // Simple second-derivative kernel (symmetric, so flipping is harmless)
          kernel = [1, -2, 1];
          break;
        default:
          throw new Error(`Unsupported derivative order: ${order}`);
      }
    } else if (method === 'laplacian' && order === 2) {
      // 1D Laplacian
      kernel = [1, -2, 1];
    } else {
      throw new Error(`Unsupported derivative method: ${method}`);
    }

    return convolve1D(signal, kernel, { mode: 'same' }).values;
  }

  /**
   * Blur 2D image using Gaussian convolution
   */
  static blur2D(image: number[][], sigma: number = 1.0, kernelSize?: number): number[][] {
    const size = kernelSize || Math.ceil(sigma * 6) | 1; // bitwise OR with 1 forces an odd size
    const kernel = ConvolutionKernels.gaussian(size, sigma);

    return convolve2D(image, kernel, { mode: 'same' }).matrix;
  }

  /**
   * Sharpen 2D image using unsharp masking (convolution-based)
   */
  static sharpen2D(image: number[][], strength: number = 1.0): number[][] {
    const sharpenKernel = [
      [0, -strength, 0],
      [-strength, 1 + 4 * strength, -strength],
      [0, -strength, 0]
    ];

    return convolve2D(image, sharpenKernel, { mode: 'same' }).matrix;
  }

  /**
   * Apply emboss effect using convolution
   */
  static emboss2D(image: number[][], direction: 'ne' | 'nw' | 'se' | 'sw' = 'ne'): number[][] {
    const embossKernels = {
      ne: [[-2, -1, 0], [-1, 1, 1], [0, 1, 2]],
      nw: [[0, -1, -2], [1, 1, -1], [2, 1, 0]],
      se: [[0, 1, 2], [-1, 1, 1], [-2, -1, 0]],
      sw: [[2, 1, 0], [1, 1, -1], [0, -1, -2]]
    };

    const kernel = embossKernels[direction];
    return convolve2D(image, kernel, { mode: 'same' }).matrix;
  }

  /**
   * Apply motion blur using a directional convolution kernel
   */
  static motionBlur(signal: number[], direction: number, length: number = 9): number[] {
    // Create motion blur kernel (direction is currently unused for 1D signals)
    const kernel = new Array(length).fill(1 / length);

    return convolve1D(signal, kernel, { mode: 'same' }).values;
  }

  /**
   * Detect impulse response using convolution with known impulse
   */
  static matchedFilter(signal: number[], template: number[]): number[] {
    // Matched filter using cross-correlation (convolution with reversed template)
    const reversedTemplate = [...template].reverse();
    return convolve1D(signal, reversedTemplate, { mode: 'same' }).values;
  }

  /**
   * Apply median filtering (note: not convolution-based, but commonly used with other filters)
   */
  static medianFilter(signal: number[], windowSize: number = 3): number[] {
    const result: number[] = [];
    const halfWindow = Math.floor(windowSize / 2);

    for (let i = 0; i < signal.length; i++) {
      const window: number[] = [];

      for (let j = Math.max(0, i - halfWindow); j <= Math.min(signal.length - 1, i + halfWindow); j++) {
        window.push(signal[j]);
      }

      window.sort((a, b) => a - b);
      const median = window[Math.floor(window.length / 2)];
      result.push(median);
    }

    return result;
  }

  /**
   * Cross-correlation using convolution
   */
  static crossCorrelate(signal1: number[], signal2: number[]): number[] {
    // Cross-correlation is convolution with one signal reversed
    const reversedSignal2 = [...signal2].reverse();
    return convolve1D(signal1, reversedSignal2, { mode: 'full' }).values;
  }

  /**
   * Auto-correlation using convolution
   */
  static autoCorrelate(signal: number[]): number[] {
    return this.crossCorrelate(signal, signal);
  }

  /**
   * Detects peaks (local maxima) in a 1D signal, optionally smoothing it
   * first to reduce noise.
   */
  static detectPeaksConvolution(signal: number[], options: {
    smoothWindow?: number;
    threshold?: number;
    minDistance?: number;
  } = {}): { index: number; value: number }[] {
    const { smoothWindow = 0, threshold = -Infinity, minDistance = 1 } = options;

    let processedSignal = signal;
    // Optionally smooth the signal first to reduce noise
    if (smoothWindow > 1) {
      processedSignal = this.smooth(signal, { method: 'gaussian', windowSize: smoothWindow });
    }

    const peaks: { index: number; value: number }[] = [];

    // Find all points that are higher than their immediate neighbors
    for (let i = 1; i < processedSignal.length - 1; i++) {
      const prev = processedSignal[i - 1];
      const curr = processedSignal[i];
      const next = processedSignal[i + 1];

      if (curr > prev && curr > next && curr > threshold) {
        peaks.push({ index: i, value: signal[i] }); // Store index and ORIGINAL value
      }
    }

    // Check boundaries: is the first or last point a peak?
    if (processedSignal[0] > processedSignal[1] && processedSignal[0] > threshold) {
      peaks.unshift({ index: 0, value: signal[0] });
    }
    const last = processedSignal.length - 1;
    if (processedSignal[last] > processedSignal[last - 1] && processedSignal[last] > threshold) {
      peaks.push({ index: last, value: signal[last] });
    }

    // Enforce minimum distance between peaks
    if (minDistance < 2 || peaks.length <= 1) {
      return peaks;
    }

    // Sort peaks by value, highest first
    peaks.sort((a, b) => b.value - a.value);

    const finalPeaks: { index: number; value: number }[] = [];
    const removed = new Array(peaks.length).fill(false);

    for (let i = 0; i < peaks.length; i++) {
      if (!removed[i]) {
        finalPeaks.push(peaks[i]);
        // Remove other peaks within the minimum distance
        for (let j = i + 1; j < peaks.length; j++) {
          if (!removed[j] && Math.abs(peaks[i].index - peaks[j].index) < minDistance) {
            removed[j] = true;
          }
        }
      }
    }

    return finalPeaks.sort((a, b) => a.index - b.index);
  }
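
  // Usage sketch (hand-checked): two bumps are reported; the shoulder at
  // index 4 is suppressed because it lies within minDistance of index 2.
  //
  //   const signal = [0, 1, 5, 4, 4.5, 1, 0, 2, 8, 2, 0];
  //   SignalProcessor.detectPeaksConvolution(signal, { threshold: 1, minDistance: 3 });
  //   // => [{ index: 2, value: 5 }, { index: 8, value: 8 }]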
|
||||
|
||||
|
||||
/**
|
||||
* [REWRITTEN] Detects valleys (local minima) in a 1D signal.
|
||||
*/
|
||||
static detectValleysConvolution(signal: number[], options: {
|
||||
smoothWindow?: number;
|
||||
threshold?: number;
|
||||
minDistance?: number;
|
||||
} = {}): { index: number; value: number }[] {
|
||||
const invertedSignal = signal.map(x => -x);
|
||||
const invertedThreshold = options.threshold !== undefined ? -options.threshold : undefined;
|
||||
|
||||
const invertedPeaks = this.detectPeaksConvolution(invertedSignal, { ...options, threshold: invertedThreshold });
|
||||
|
||||
return invertedPeaks.map(peak => ({
|
||||
index: peak.index,
|
||||
value: -peak.value,
|
||||
}));
|
||||
}

  /**
   * [REWRITTEN] Detects outliers using more reliable and statistically sound
   * convolution-based methods.
   */
  static detectOutliersConvolution(signal: number[], options: {
    method?: 'local_deviation' | 'mean_diff';
    windowSize?: number;
    threshold?: number;
  } = {}): { index: number; value: number; outlierScore: number }[] {
    const { method = 'local_deviation', windowSize = 7, threshold = 3.0 } = options;

    let outlierScores: number[];

    switch (method) {
      case 'mean_diff': {
        // Scores outliers by their absolute difference from the local mean.
        const meanKernel = ConvolutionKernels.average1D(windowSize);
        const localMean = convolve1D(signal, meanKernel, { mode: 'same' }).values;
        outlierScores = signal.map((val, i) => Math.abs(val - localMean[i]));
        break;
      }

      case 'local_deviation': {
        // A robust method using a local z-score: how many local standard deviations away a point is.
        const avgKernel = ConvolutionKernels.average1D(windowSize);
        const localMeanValues = convolve1D(signal, avgKernel, { mode: 'same' }).values;
        const squaredDiffs = signal.map((val, i) => (val - localMeanValues[i]) ** 2);
        const localVar = convolve1D(squaredDiffs, avgKernel, { mode: 'same' }).values;
        outlierScores = signal.map((val, i) => {
          const std = Math.sqrt(localVar[i]);
          return std > 1e-6 ? Math.abs(val - localMeanValues[i]) / std : 0;
        });
        break;
      }

      default:
        throw new Error(`Unsupported outlier detection method: ${method}`);
    }

    // Find points exceeding the threshold
    const outliers: { index: number; value: number; outlierScore: number }[] = [];
    outlierScores.forEach((score, i) => {
      if (score > threshold) {
        outliers.push({
          index: i,
          value: signal[i],
          outlierScore: score
        });
      }
    });

    return outliers;
  }
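
  // --- Usage sketch (illustrative only) ---
  // Flag points far from a 7-point local mean in units of the local standard
  // deviation; the data below is hypothetical.
  //
  //   const data = [10, 11, 10, 12, 11, 50, 10, 11, 12, 10];
  //   const outliers = SignalProcessor.detectOutliersConvolution(data, {
  //     method: 'local_deviation',
  //     windowSize: 7,
  //     threshold: 2.0,
  //   });
  //   // The spike at index 5 (value 50) should carry by far the largest score.
  //   // Note the spike inflates its own local std, so very high thresholds can
  //   // miss it; a robust (median/MAD) variant would be less sensitive to this.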

  /**
   * [CORRECTED] Detects trend vertices (turning points) by finding all peaks and valleys.
   * This version fixes a bug that prevented valleys from being detected.
   */
  static detectTrendVertices(signal: number[], options: {
    smoothingWindow?: number;
    threshold?: number;
    minDistance?: number;
  } = {}): { index: number; value: number; type: 'peak' | 'valley' }[] {
    const {
      smoothingWindow = 5,
      threshold = 0, // CORRECTED: Changed default from -Infinity to a sensible 0
      minDistance = 3
    } = options;

    // Build the options to pass down, mapping `smoothingWindow` onto the
    // `smoothWindow` key the peak/valley detectors expect. The valley function
    // handles inverting the threshold itself.
    const detectionOptions = { smoothWindow: smoothingWindow, threshold, minDistance };

    const peaks = this.detectPeaksConvolution(signal, detectionOptions).map(p => ({ ...p, type: 'peak' as const }));
    const valleys = this.detectValleysConvolution(signal, detectionOptions).map(v => ({ ...v, type: 'valley' as const }));

    // Combine peaks and valleys and sort them by index to get the sequence of trend changes
    const vertices = [...peaks, ...valleys];
    vertices.sort((a, b) => a.index - b.index);

    return vertices;
  }
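
  // --- Usage sketch (illustrative only, hypothetical data) ---
  //   const vertices = SignalProcessor.detectTrendVertices([0, 3, 6, 2, -2, 1, 5, 2], {
  //     smoothingWindow: 3,
  //     minDistance: 2,
  //   });
  //   // With the default threshold of 0 (peaks above 0, valleys below 0),
  //   // expect entries such as a 'peak' near index 2 and a 'valley' near
  //   // index 4, sorted by index.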

  /**
   * Detect vertices using curvature (second derivative)
   */
  private static detectCurvatureVertices(
    signal: number[],
    threshold: number
  ): { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] {
    // Use second derivative kernel for curvature
    const curvatureKernel = [1, -2, 1]; // Discrete Laplacian
    const curvature = convolve1D(signal, curvatureKernel, { mode: 'same' }).values;

    const vertices: { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] = [];

    // Find zero crossings in curvature with sufficient magnitude
    for (let i = 1; i < curvature.length - 1; i++) {
      const prev = curvature[i - 1];
      const curr = curvature[i];
      const next = curvature[i + 1];

      // Zero crossing detection
      if ((prev > 0 && next < 0) || (prev < 0 && next > 0)) {
        const curvatureMagnitude = Math.abs(curr);

        if (curvatureMagnitude > threshold) {
          const type: 'peak' | 'valley' = prev > 0 ? 'peak' : 'valley';

          vertices.push({
            index: i,
            value: signal[i],
            type,
            curvature: curr
          });
        }
      }
    }

    return vertices;
  }
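
  // A quick sanity check for the [1, -2, 1] kernel (hypothetical values): on
  // f(x) = x^2 sampled as [0, 1, 4, 9, 16], each interior output
  // f[i-1] - 2*f[i] + f[i+1] equals 2, i.e. the constant second derivative
  // f''(x) = 2 is recovered exactly. The kernel is symmetric, so convolution
  // and correlation agree here.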

  /**
   * Detect vertices using gradient sign changes
   */
  private static detectSignChangeVertices(
    signal: number[],
    threshold: number
  ): { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] {
    // First derivative for gradient
    const gradientKernel = [-0.5, 0, 0.5]; // Central difference
    const gradient = convolve1D(signal, gradientKernel, { mode: 'same' }).values;

    // Second derivative for curvature
    const curvatureKernel = [1, -2, 1];
    const curvature = convolve1D(signal, curvatureKernel, { mode: 'same' }).values;

    const vertices: { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] = [];

    // Find gradient sign changes
    for (let i = 1; i < gradient.length - 1; i++) {
      const prevGrad = gradient[i - 1];
      const nextGrad = gradient[i + 1];

      // Check for sign change with sufficient gradient magnitude
      if (Math.abs(prevGrad) > threshold && Math.abs(nextGrad) > threshold) {
        if ((prevGrad > 0 && nextGrad < 0) || (prevGrad < 0 && nextGrad > 0)) {
          const type: 'peak' | 'valley' = prevGrad > 0 ? 'peak' : 'valley';

          vertices.push({
            index: i,
            value: signal[i],
            type,
            curvature: curvature[i]
          });
        }
      }
    }

    return vertices;
  }

  /**
   * Detect vertices using momentum changes
   */
  private static detectMomentumVertices(
    signal: number[],
    threshold: number
  ): { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] {
    // Create momentum kernel (difference over a larger window)
    const momentumKernel = [-1, 0, 0, 0, 1]; // 4-point difference
    const momentum = convolve1D(signal, momentumKernel, { mode: 'same' }).values;

    // Detect momentum reversals
    const momentumGradient = convolve1D(momentum, [-0.5, 0, 0.5], { mode: 'same' }).values;
    const curvature = convolve1D(signal, [1, -2, 1], { mode: 'same' }).values;

    const vertices: { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] = [];

    for (let i = 2; i < momentum.length - 2; i++) {
      const prevMomentum = momentum[i - 1];
      const nextMomentum = momentum[i + 1];

      // Check for a momentum reversal with sufficient gradient magnitude
      if (Math.abs(momentumGradient[i]) > threshold) {
        if ((prevMomentum > 0 && nextMomentum < 0) || (prevMomentum < 0 && nextMomentum > 0)) {
          const type: 'peak' | 'valley' = prevMomentum > 0 ? 'peak' : 'valley';

          vertices.push({
            index: i,
            value: signal[i],
            type,
            curvature: curvature[i]
          });
        }
      }
    }

    return vertices;
  }

  /**
   * Detect trend direction changes using convolution
   */
  static detectTrendChanges(signal: number[], options: {
    windowSize?: number;
    threshold?: number;
    minTrendLength?: number;
  } = {}): { index: number; fromTrend: 'up' | 'down' | 'flat'; toTrend: 'up' | 'down' | 'flat'; strength: number }[] {
    const { windowSize = 10, threshold = 0.01, minTrendLength = 5 } = options;

    // Calculate local trends by convolving with a linear, slope-estimating kernel
    const trendKernel = new Array(windowSize).fill(0).map((_, i) => {
      const center = windowSize / 2;
      return (i - center) / (windowSize * windowSize / 12); // Linear trend kernel
    });

    const trends = convolve1D(signal, trendKernel, { mode: 'same' }).values;

    // Classify trends
    const trendDirection = trends.map(t => {
      if (t > threshold) return 'up';
      if (t < -threshold) return 'down';
      return 'flat';
    });

    // Find trend changes
    const changes: { index: number; fromTrend: 'up' | 'down' | 'flat'; toTrend: 'up' | 'down' | 'flat'; strength: number }[] = [];

    let currentTrend: 'up' | 'down' | 'flat' = trendDirection[0];
    let trendStart = 0;

    for (let i = 1; i < trendDirection.length; i++) {
      if (trendDirection[i] !== currentTrend) {
        const trendLength = i - trendStart;

        if (trendLength >= minTrendLength) {
          changes.push({
            index: i,
            fromTrend: currentTrend,
            toTrend: trendDirection[i] as 'up' | 'down' | 'flat',
            strength: Math.abs(trends[i] - trends[trendStart])
          });
        }

        currentTrend = trendDirection[i] as 'up' | 'down' | 'flat';
        trendStart = i;
      }
    }

    return changes;
  }
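
  // --- Usage sketch (illustrative only, hypothetical data) ---
  //   const ramp = [...Array(30).keys()].map(i => (i < 15 ? i : 30 - i)); // up, then down
  //   const changes = SignalProcessor.detectTrendChanges(ramp, {
  //     windowSize: 6,
  //     threshold: 0.05,
  //     minTrendLength: 5,
  //   });
  //   // Expect roughly one change near index 15 with fromTrend 'up' and toTrend 'down'.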

  /**
   * Enforce minimum distance between vertices
   */
  private static enforceMinDistanceVertices(
    vertices: { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[],
    minDistance: number
  ): { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] {
    if (vertices.length <= 1) return vertices;

    // Sort by curvature magnitude (stronger vertices first)
    const sorted = [...vertices].sort((a, b) => Math.abs(b.curvature) - Math.abs(a.curvature));
    const result: { index: number; value: number; type: 'peak' | 'valley'; curvature: number }[] = [];

    for (const vertex of sorted) {
      let tooClose = false;

      for (const accepted of result) {
        if (Math.abs(vertex.index - accepted.index) < minDistance) {
          tooClose = true;
          break;
        }
      }

      if (!tooClose) {
        result.push(vertex);
      }
    }

    // Sort result by index
    return result.sort((a, b) => a.index - b.index);
  }

  /**
   * Enforce minimum distance between detected features
   */
  private static enforceMinDistanceConv(
    features: { index: number; value: number; strength: number }[],
    minDistance: number
  ): { index: number; value: number; strength: number }[] {
    if (features.length <= 1) return features;

    // Sort by strength (descending)
    const sorted = [...features].sort((a, b) => b.strength - a.strength);
    const result: { index: number; value: number; strength: number }[] = [];

    for (const feature of sorted) {
      let tooClose = false;

      for (const accepted of result) {
        if (Math.abs(feature.index - accepted.index) < minDistance) {
          tooClose = true;
          break;
        }
      }

      if (!tooClose) {
        result.push(feature);
      }
    }

    // Sort result by index
    return result.sort((a, b) => a.index - b.index);
  }
}
22 services/time-helper.ts Normal file
@@ -0,0 +1,22 @@
import { getISOWeek, getISODay, subYears, setISOWeek, setISODay, isValid } from 'date-fns';

export const getWeekNumber = (dateString: string): number => {
  const date = new Date(dateString);
  if (!isValid(date)) {
    throw new Error('Invalid date string provided.');
  }
  return getISOWeek(date);
};

export const getSameWeekDayLastYear = (dateString: string): string => {
  const baseDate = new Date(dateString);
  if (!isValid(baseDate)) {
    throw new Error('Invalid date string provided.');
  }
  const originalWeek = getISOWeek(baseDate);
  const originalDayOfWeek = getISODay(baseDate);
  const lastYearDate = subYears(baseDate, 1);
  const dateWithWeekSet = setISOWeek(lastYearDate, originalWeek);
  const finalDate = setISODay(dateWithWeekSet, originalDayOfWeek);
  return finalDate.toISOString().split('T')[0]; // Return as YYYY-MM-DD
};
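
// --- Usage sketch (illustrative only) ---
// ISO-week alignment: the same ISO week and ISO weekday one year earlier is
// often a better year-over-year comparison point than the same calendar date.
//
//   getWeekNumber('2024-01-04');          // 1 (Jan 4 is always in ISO week 1)
//   getSameWeekDayLastYear('2024-01-04'); // the Thursday of ISO week 1 of 2023,
//                                         // e.g. '2023-01-05'; the exact string
//                                         // can shift with the runtime timezone,
//                                         // since Date parsing here is UTC but
//                                         // date-fns arithmetic is local.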
346 services/timeseries.ts Normal file
@@ -0,0 +1,346 @@
// timeseries.ts - A library for time series analysis, focusing on ARIMA.

// ========================================
// TYPE DEFINITIONS
// ========================================

/**
 * Defines the parameters for an ARIMA model.
 * (p, d, q) are the non-seasonal components.
 * (P, D, Q, s) are the optional seasonal components for SARIMA.
 */
export interface ARIMAOptions {
  p: number; // AutoRegressive (AR) order
  d: number; // Differencing (I) order
  q: number; // Moving Average (MA) order
  P?: number; // Seasonal AR order
  D?: number; // Seasonal differencing order
  Q?: number; // Seasonal MA order
  s?: number; // Seasonal period length
}

/**
 * The result object from an ARIMA forecast.
 */
export interface ARIMAForecastResult {
  forecast: number[]; // The predicted future values
  residuals: number[]; // The errors of the model fit on the original data
  model: ARIMAOptions; // The model parameters used
}

/**
 * The result object from an STL decomposition.
 */
export interface STLDecomposition {
  seasonal: number[]; // The seasonal component of the series
  trend: number[]; // The trend component of the series
  residual: number[]; // The remainder/residual component
  original: number[]; // The original series, for comparison
}

/**
 * A class for performing time series analysis, including identification and forecasting.
 */
export class TimeSeriesAnalyzer {

  // ========================================
  // 1. IDENTIFICATION METHODS
  // ========================================

  /**
   * Calculates the difference of a time series.
   * This is the 'I' (Integrated) part of ARIMA, used to make a series stationary.
   * @param series The input data series.
   * @param lag The lag to difference by (usually 1).
   * @returns A new, differenced time series.
   */
  static difference(series: number[], lag: number = 1): number[] {
    if (lag < 1 || !Number.isInteger(lag)) {
      throw new Error('Lag must be a positive integer.');
    }
    if (series.length <= lag) {
      return [];
    }

    const differenced: number[] = [];
    for (let i = lag; i < series.length; i++) {
      differenced.push(series[i] - series[i - lag]);
    }
    return differenced;
  }
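
  // Worked example (hypothetical values): lag-1 differencing reduces a linear
  // trend to a near-constant series, and a lag equal to the seasonal period
  // removes a repeating pattern.
  //
  //   TimeSeriesAnalyzer.difference([2, 5, 9, 14, 20], 1);  // [3, 4, 5, 6]
  //   TimeSeriesAnalyzer.difference([1, 2, 3, 4, 5, 6], 3); // [3, 3, 3]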

  /**
   * Helper function to calculate the autocovariance of a series at a given lag.
   */
  private static autocovariance(series: number[], lag: number): number {
    const n = series.length;
    if (lag >= n) return 0;
    const mean = series.reduce((a, b) => a + b) / n;
    let sum = 0;
    for (let i = lag; i < n; i++) {
      sum += (series[i] - mean) * (series[i - lag] - mean);
    }
    return sum / n;
  }

  /**
   * Calculates the Autocorrelation Function (ACF) for a time series.
   * ACF helps in determining the 'q' parameter for an ARIMA model.
   * @param series The input data series.
   * @param maxLag The maximum number of lags to calculate.
   * @returns An array of correlation values from lag 1 to maxLag.
   */
  static calculateACF(series: number[], maxLag: number): number[] {
    if (series.length < 2) return [];

    const variance = this.autocovariance(series, 0);
    if (variance === 0) {
      return new Array(maxLag).fill(1);
    }

    const acf: number[] = [];
    for (let lag = 1; lag <= maxLag; lag++) {
      acf.push(this.autocovariance(series, lag) / variance);
    }
    return acf;
  }
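
  // --- Usage sketch (illustrative only, hypothetical data) ---
  // For a strongly seasonal series, the ACF spikes at multiples of the period.
  //
  //   const weekly = [...Array(56).keys()].map(i => (i % 7 === 0 ? 10 : 1));
  //   const acf = TimeSeriesAnalyzer.calculateACF(weekly, 14);
  //   // acf[6] (lag 7) and acf[13] (lag 14) should stand out from nearby lags,
  //   // which suggests s = 7 when choosing SARIMA orders.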

  /**
   * Calculates the Partial Autocorrelation Function (PACF) for a time series.
   * This uses a Durbin-Levinson-style recursion (an approximation that reuses
   * earlier PACF values as the AR coefficients at each step).
   * PACF helps in determining the 'p' parameter for an ARIMA model.
   * @param series The input data series.
   * @param maxLag The maximum number of lags to calculate.
   * @returns An array of partial correlation values from lag 1 to maxLag.
   */
  static calculatePACF(series: number[], maxLag: number): number[] {
    const acf = this.calculateACF(series, maxLag);
    const pacf: number[] = [];

    if (acf.length === 0) return [];

    pacf.push(acf[0]); // PACF at lag 1 is the same as ACF at lag 1

    for (let k = 2; k <= maxLag; k++) {
      let numerator = acf[k - 1];
      let denominator = 1;

      for (let j = 1; j < k; j++) {
        const factor = pacf[j - 1];
        numerator -= factor * acf[k - j - 1];
        denominator -= factor * acf[j - 1];
      }

      if (Math.abs(denominator) < 1e-9) { // Avoid division by zero
        pacf.push(0);
        continue;
      }

      pacf.push(numerator / denominator);
    }

    return pacf;
  }

  /**
   * Decomposes a time series using the robust Classical Additive method.
   * This version correctly isolates trend, seasonal, and residual components.
   * @param series The input data series.
   * @param period The seasonal period (e.g., 7 for daily data with a weekly cycle).
   * @returns An object containing the seasonal, trend, and residual series.
   */
  static stlDecomposition(series: number[], period: number): STLDecomposition {
    if (series.length < 2 * period) {
      throw new Error("Series must be at least twice the length of the seasonal period.");
    }

    // Helper for a centered moving average that shrinks at the edges.
    // For even windows the span is [i - w/2, i + w/2 - 1], so an even window
    // averages exactly `window` points rather than `window + 1`.
    const movingAverage = (data: number[], window: number) => {
      const result = [];
      const halfWindow = Math.floor(window / 2);
      for (let i = 0; i < data.length; i++) {
        const start = Math.max(0, i - halfWindow);
        const end = Math.min(data.length, i + halfWindow + (window % 2 === 0 ? 0 : 1));
        let sum = 0;
        for (let j = start; j < end; j++) {
          sum += data[j];
        }
        result.push(sum / (end - start));
      }
      return result;
    };

    // Step 1: Calculate the trend using a centered moving average.
    // If period is even, we use a 2x-MA to center it correctly.
    let trend: number[];
    if (period % 2 === 0) {
      const intermediate = movingAverage(series, period);
      trend = movingAverage(intermediate, 2);
    } else {
      trend = movingAverage(series, period);
    }

    // Step 2: Detrend the series
    const detrended = series.map((val, i) => val - trend[i]);

    // Step 3: Calculate the seasonal component by averaging the detrended values for each period
    const seasonalAverages = new Array(period).fill(0);
    const seasonalCounts = new Array(period).fill(0);
    for (let i = 0; i < series.length; i++) {
      if (!isNaN(detrended[i])) {
        const seasonIndex = i % period;
        seasonalAverages[seasonIndex] += detrended[i];
        seasonalCounts[seasonIndex]++;
      }
    }

    for (let i = 0; i < period; i++) {
      seasonalAverages[i] /= seasonalCounts[i];
    }

    // Center the seasonal component to have a mean of zero
    const seasonalMean = seasonalAverages.reduce((a, b) => a + b, 0) / period;
    const centeredSeasonalAverages = seasonalAverages.map(avg => avg - seasonalMean);

    const seasonal = new Array(series.length).fill(0);
    for (let i = 0; i < series.length; i++) {
      seasonal[i] = centeredSeasonalAverages[i % period];
    }

    // Step 4: Calculate the residual component
    const residual = detrended.map((val, i) => val - seasonal[i]);

    return {
      original: series,
      seasonal,
      trend,
      residual,
    };
  }
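
  // --- Usage sketch (illustrative only, hypothetical data) ---
  //   const series = [...Array(28).keys()].map(i => 0.5 * i + [5, 1, 0, 0, 1, 2, 8][i % 7]);
  //   const { trend, seasonal, residual } = TimeSeriesAnalyzer.stlDecomposition(series, 7);
  //   // trend should climb roughly linearly, seasonal should repeat the weekly
  //   // shape with mean ~0, and by construction (up to floating point):
  //   // series[i] === trend[i] + seasonal[i] + residual[i].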

  // ========================================
  // 2. FORECASTING METHODS
  // ========================================

  /**
   * [UPGRADED] Generates a forecast using a simplified SARIMA model.
   * This implementation handles both non-seasonal (p,d,q) and seasonal (P,D,Q,s) components.
   * @param series The input time series data.
   * @param options The SARIMA parameters.
   * @param forecastSteps The number of future steps to predict.
   * @returns An object containing the forecast and model residuals.
   */
  static arimaForecast(series: number[], options: ARIMAOptions, forecastSteps: number): ARIMAForecastResult {
    const { p, d, q, P = 0, D = 0, Q = 0, s = 0 } = options;

    if (series.length < p + d + (P + D) * s + q + Q * s) {
      throw new Error("Data series is too short for the specified SARIMA order.");
    }

    let differencedSeries = [...series];
    const diffLog: { lag: number, values: number[] }[] = [];

    // Step 1: Apply seasonal differencing 'D' times, recording the last 's'
    // pre-differencing values so the transform can be inverted later.
    for (let i = 0; i < D; i++) {
      diffLog.push({ lag: s, values: differencedSeries.slice(-s) });
      differencedSeries = this.difference(differencedSeries, s);
    }

    // Step 2: Apply non-seasonal differencing 'd' times
    for (let i = 0; i < d; i++) {
      diffLog.push({ lag: 1, values: differencedSeries.slice(-1) });
      differencedSeries = this.difference(differencedSeries, 1);
    }

    const n = differencedSeries.length;
    // Simplified (uniform) coefficients -- not estimated from the data
    const arCoeffs = p > 0 ? new Array(p).fill(1 / p) : [];
    const maCoeffs = q > 0 ? new Array(q).fill(1 / q) : [];
    const sarCoeffs = P > 0 ? new Array(P).fill(1 / P) : [];
    const smaCoeffs = Q > 0 ? new Array(Q).fill(1 / Q) : [];

    const residuals: number[] = new Array(n).fill(0);
    const fitted: number[] = new Array(n).fill(0);

    // Step 3: Fit the model
    const startIdx = Math.max(p, q, P * s, Q * s);
    for (let t = startIdx; t < n; t++) {
      // Non-seasonal AR
      let arVal = 0;
      for (let i = 0; i < p; i++) arVal += arCoeffs[i] * differencedSeries[t - 1 - i];

      // Non-seasonal MA
      let maVal = 0;
      for (let i = 0; i < q; i++) maVal += maCoeffs[i] * residuals[t - 1 - i];

      // Seasonal AR
      let sarVal = 0;
      for (let i = 0; i < P; i++) sarVal += sarCoeffs[i] * differencedSeries[t - s * (i + 1)];

      // Seasonal MA
      let smaVal = 0;
      for (let i = 0; i < Q; i++) smaVal += smaCoeffs[i] * residuals[t - s * (i + 1)];

      fitted[t] = arVal + maVal + sarVal + smaVal;
      residuals[t] = differencedSeries[t] - fitted[t];
    }

    // Step 4: Generate the forecast
    const forecastDifferenced: number[] = [];
    const extendedSeries = [...differencedSeries];
    const extendedResiduals = [...residuals];

    for (let f = 0; f < forecastSteps; f++) {
      const t = n + f;
      let nextForecast = 0;

      // AR
      for (let i = 0; i < p; i++) nextForecast += arCoeffs[i] * extendedSeries[t - 1 - i];
      // MA (future residuals are 0)
      for (let i = 0; i < q; i++) nextForecast += maCoeffs[i] * extendedResiduals[t - 1 - i];
      // SAR
      for (let i = 0; i < P; i++) nextForecast += sarCoeffs[i] * extendedSeries[t - s * (i + 1)];
      // SMA
      for (let i = 0; i < Q; i++) nextForecast += smaCoeffs[i] * extendedResiduals[t - s * (i + 1)];

      forecastDifferenced.push(nextForecast);
      extendedSeries.push(nextForecast);
      extendedResiduals.push(0);
    }

    // Step 5: Invert the differencing, walking the log in reverse order.
    // Each level is seeded with the last `lag` values of the series as it was
    // *before* that differencing step, which is exactly what diffLog recorded.
    let forecast = [...forecastDifferenced];
    for (let i = diffLog.length - 1; i >= 0; i--) {
      const { lag, values } = diffLog[i];
      const inverted: number[] = [];
      const history = [...values];
      for (const forecastVal of forecast) {
        // y_t = x_t - x_{t-lag}  =>  x_t = y_t + x_{t-lag}
        const invertedVal = forecastVal + history[history.length - lag];
        inverted.push(invertedVal);
        history.push(invertedVal);
      }
      forecast = inverted;
    }

    return {
      forecast,
      residuals,
      model: options,
    };
  }
}
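
// --- Usage sketch (illustrative only, hypothetical data) ---
//   const history = [...Array(60).keys()].map(i => 100 + i + 10 * Math.sin((2 * Math.PI * i) / 7));
//   const result = TimeSeriesAnalyzer.arimaForecast(
//     history,
//     { p: 1, d: 1, q: 1, P: 1, D: 1, Q: 0, s: 7 },
//     14 // two weeks ahead
//   );
//   // result.forecast has 14 entries. With uniform, non-estimated coefficients
//   // this is a rough baseline rather than a fitted statistical model.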