frontend/src/services/api/inference.ts
Coverage: statements 29.03% (9/31), branches 100% (0/0), functions 0% (0/5), lines 29.03% (9/31)

/**
 * Inference API Service
 *
 * Handles ML inference operations:
 * - Get model information
 * - Get performance metrics
 * - Make predictions
 */
 
import api from '@/lib/api';
import type { ModelInfo, ModelPerformanceMetrics, LocalizationResult } from './types';
 
/**
 * Get information about the active model
 */
export async function getModelInfo(): Promise<ModelInfo> {
    const response = await api.get<ModelInfo>('/api/v1/analytics/model/info');
    return response.data;
}
 
/**
 * Get model performance metrics
 */
export async function getModelPerformance(): Promise<ModelPerformanceMetrics> {
    const response = await api.get<ModelPerformanceMetrics>('/api/v1/analytics/model/performance');
    return response.data;
}
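
/*
 * Usage sketch for the two read endpoints above. Illustrative only: the
 * async caller (e.g. a React effect or a route loader) is assumed and is
 * not part of this module.
 *
 *   const info = await getModelInfo();
 *   const metrics = await getModelPerformance();
 *   console.log('Active model:', info, 'performance:', metrics);
 */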
 
/**
 * Prediction request interface
 */
export interface PredictionRequest {
    iq_data: number[][]; // IQ samples as [I, Q] pairs
    session_id?: string; // optional session identifier
    cache_enabled?: boolean; // optional: enable/disable result caching for this request
}
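
/*
 * Example request literal (sample values are illustrative only):
 *
 *   const request: PredictionRequest = {
 *       iq_data: [
 *           [0.12, -0.34], // one [I, Q] sample pair
 *           [0.56, 0.78],
 *       ],
 *       session_id: 'session-123', // optional
 *       cache_enabled: true,       // optional
 *   };
 */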
 
/**
 * Prediction response interface
 */
export interface PredictionResponse {
    position: {
        latitude: number;
        longitude: number;
    };
    uncertainty: {
        sigma_x: number;
        sigma_y: number;
        theta: number;
        confidence_interval: number;
    };
    confidence: number;
    model_version: string;
    inference_time_ms: number;
    timestamp: string;
    session_id: string;
    _cache_hit: boolean; // true when the result was served from cache
}
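
/*
 * Reading the uncertainty block, as a sketch only. It assumes sigma_x and
 * sigma_y are one-standard-deviation extents of an error ellipse and theta
 * is its rotation; confirm the backend contract before relying on this.
 *
 *   const { sigma_x, sigma_y, theta } = response.uncertainty;
 *   const semiMajor = Math.max(sigma_x, sigma_y); // longer ellipse axis
 *   const semiMinor = Math.min(sigma_x, sigma_y); // shorter ellipse axis
 */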
 
/**
 * Make a single localization prediction
 */
export async function predictLocalization(request: PredictionRequest): Promise<PredictionResponse> {
    const response = await api.post<PredictionResponse>('/api/v1/inference/predict', request);
    return response.data;
}
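
/*
 * End-to-end sketch (illustrative; `request` is a PredictionRequest as in
 * the example above, and error handling is up to the caller):
 *
 *   try {
 *       const result = await predictLocalization(request);
 *       console.log(result.position.latitude, result.position.longitude);
 *   } catch (err) {
 *       // API errors (network failures, 4xx/5xx) surface here
 *   }
 */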
 
/**
 * Batch prediction request interface
 */
export interface BatchPredictionRequest {
    predictions: PredictionRequest[];
}
 
/**
 * Batch prediction response interface
 */
export interface BatchPredictionResponse {
    predictions: PredictionResponse[];
    batch_id: string;
    total_time_ms: number;
}
 
/**
 * Make batch localization predictions
 */
export async function predictLocalizationBatch(request: BatchPredictionRequest): Promise<BatchPredictionResponse> {
    const response = await api.post<BatchPredictionResponse>('/api/v1/inference/predict/batch', request);
    return response.data;
}
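
/*
 * Batch usage sketch (illustrative): wrap several single-prediction
 * requests and send them in one round trip.
 *
 *   const batch = await predictLocalizationBatch({
 *       predictions: [requestA, requestB], // PredictionRequest values
 *   });
 *   console.log(batch.batch_id, batch.total_time_ms, batch.predictions.length);
 */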
 
/**
 * Get recent localization results
 */
export async function getRecentLocalizations(limit: number = 10): Promise<LocalizationResult[]> {
    const response = await api.get<LocalizationResult[]>('/api/v1/analytics/localizations/recent', {
        params: { limit }
    });
    return response.data;
}
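
/*
 * Usage sketch (illustrative): fetch the five most recent results instead
 * of the default ten.
 *
 *   const recent = await getRecentLocalizations(5);
 */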
 
const inferenceService = {
    getModelInfo,
    getModelPerformance,
    predictLocalization,
    predictLocalizationBatch,
    getRecentLocalizations,
};
 
export default inferenceService;
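
/*
 * Consumers can import either the named functions or the default service
 * object. The module path below is an assumption, inferred from the '@'
 * alias used for '@/lib/api' above:
 *
 *   import inferenceService from '@/services/api/inference';
 *   import { predictLocalization } from '@/services/api/inference';
 */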