Coverage for services/inference/src/models/schemas.py: 0%
66 statements
« prev ^ index » next coverage.py v7.11.0, created at 2025-10-25 16:18 +0000
« prev ^ index » next coverage.py v7.11.0, created at 2025-10-25 16:18 +0000
1"""Pydantic schemas for inference service API."""
from datetime import datetime, timezone
from typing import List, Optional

from pydantic import BaseModel, Field
class PredictionRequest(BaseModel):
    """Request for single prediction endpoint."""

    # IQ samples as interleaved [I, Q] float pairs.
    iq_data: List[List[float]] = Field(
        ...,
        examples=[[[1.0, 0.5], [0.8, 0.3]]],
        description="IQ data as [[I1, Q1], [I2, Q2], ...] or complex array",
    )
    # Optional correlation identifier; omitted requests are untracked.
    session_id: Optional[str] = Field(
        default=None,
        description="Optional session ID for tracking and audit trail",
    )
    # Callers may opt out of caching per request; defaults to on.
    cache_enabled: bool = Field(
        default=True,
        description="Enable Redis caching for this prediction",
    )

    class Config:
        # Example payload surfaced in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "iq_data": [[1.0, 0.5], [0.8, 0.3]],
                "session_id": "session-123",
                "cache_enabled": True,
            }
        }
class UncertaintyResponse(BaseModel):
    """Uncertainty ellipse parameters for visualization."""

    # Ellipse semi-axis spreads; both must be non-negative.
    sigma_x: float = Field(..., ge=0.0, description="Standard deviation in X direction (meters)")
    sigma_y: float = Field(..., ge=0.0, description="Standard deviation in Y direction (meters)")
    # Orientation of the ellipse; bounded to a half-turn either way.
    theta: float = Field(
        default=0.0,
        ge=-180.0,
        le=180.0,
        description="Rotation angle of ellipse in degrees (-180 to 180)",
    )
    # Fraction of probability mass the ellipse is drawn to contain.
    confidence_interval: float = Field(
        default=0.68,
        ge=0.0,
        le=1.0,
        description="Confidence interval (1-sigma = 68%, 2-sigma = 95%, etc.)",
    )
class PositionResponse(BaseModel):
    """Predicted position in geographic coordinates."""

    # WGS84-style decimal degrees; bounds enforce valid geographic range.
    latitude: float = Field(
        ...,
        ge=-90.0,
        le=90.0,
        description="Latitude in decimal degrees",
    )
    longitude: float = Field(
        ...,
        ge=-180.0,
        le=180.0,
        description="Longitude in decimal degrees",
    )
class PredictionResponse(BaseModel):
    """Response for single prediction endpoint.

    Bundles the predicted position with its uncertainty ellipse plus
    per-request metadata (model version, latency, timestamp).
    """

    position: PositionResponse = Field(
        ...,
        description="Predicted position",
    )
    uncertainty: UncertaintyResponse = Field(
        ...,
        description="Uncertainty parameters for visualization",
    )
    confidence: float = Field(
        ...,
        description="Model confidence (0-1, higher is more confident)",
        ge=0.0,
        le=1.0,
    )
    model_version: str = Field(
        ...,
        description="Version of model used for this prediction",
    )
    inference_time_ms: float = Field(
        ...,
        description="Inference latency in milliseconds",
        ge=0.0,
    )
    # datetime.utcnow() is deprecated (Python 3.12) and returns a *naive*
    # datetime; stamp with an explicit timezone-aware UTC time instead.
    timestamp: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="Timestamp when prediction was made",
    )

    class Config:
        schema_extra = {
            "example": {
                "position": {"latitude": 45.123, "longitude": 7.456},
                "uncertainty": {"sigma_x": 45.0, "sigma_y": 38.0, "theta": 25.0, "confidence_interval": 0.68},
                "confidence": 0.92,
                "model_version": "v1.2.0",
                "inference_time_ms": 125.5,
                "timestamp": "2025-10-22T10:30:00"
            }
        }
class BatchPredictionRequest(BaseModel):
    """Request for batch prediction endpoint."""

    # One entry per sample; batch size capped at 100 to bound latency.
    iq_samples: List[List[List[float]]] = Field(
        ...,
        min_items=1,
        max_items=100,
        description="List of IQ data samples, each as [[I1, Q1], [I2, Q2], ...]",
    )
    session_id: Optional[str] = Field(default=None, description="Optional session ID")
    cache_enabled: bool = Field(default=True, description="Enable Redis caching")

    class Config:
        # Example payload surfaced in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "iq_samples": [
                    [[1.0, 0.5], [0.8, 0.3]],
                    [[0.9, 0.4], [0.7, 0.2]],
                ],
                "session_id": "batch-session-456",
                "cache_enabled": True,
            }
        }
class BatchPredictionResponse(BaseModel):
    """Response for batch prediction endpoint."""

    # One result per submitted sample, in request order.
    predictions: List[PredictionResponse] = Field(..., description="List of prediction results")
    # Aggregate timing for the whole batch.
    total_time_ms: float = Field(..., ge=0.0, description="Total processing time in milliseconds")
    samples_per_second: float = Field(..., ge=0.0, description="Throughput: samples processed per second")
class ModelInfoResponse(BaseModel):
    """Response for model information endpoint."""

    # Identity of the deployed model.
    name: str = Field(..., description="Model name")
    version: str = Field(..., description="Model version (semantic versioning)")
    stage: str = Field(..., description="Current model stage (Production, Staging, None)")
    created_at: datetime = Field(..., description="Model creation timestamp")
    mlflow_run_id: str = Field(..., description="MLflow run ID for reproducibility")

    # Training-time metrics; absent when the registry has none recorded.
    accuracy_meters: Optional[float] = Field(
        default=None,
        description="Localization accuracy (sigma) in meters from training",
    )
    training_samples: Optional[int] = Field(None, description="Number of samples in training set")

    # Live service-side stats.
    last_reloaded: datetime = Field(..., description="Timestamp when model was last reloaded in service")
    inference_count: int = Field(..., ge=0, description="Total number of inferences performed")
    avg_latency_ms: float = Field(..., ge=0.0, description="Average inference latency in milliseconds")
    cache_hit_rate: float = Field(..., ge=0.0, le=1.0, description="Cache hit rate (0-1)")
    status: str = Field(..., description="Service status (ready, loading, error)")

    class Config:
        # Example payload surfaced in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "name": "localization_model",
                "version": "1.2.0",
                "stage": "Production",
                "created_at": "2025-10-20T15:30:00",
                "mlflow_run_id": "abc123def456",
                "accuracy_meters": 30.0,
                "training_samples": 5000,
                "last_reloaded": "2025-10-22T08:00:00",
                "inference_count": 1250,
                "avg_latency_ms": 145.5,
                "cache_hit_rate": 0.82,
                "status": "ready"
            }
        }
class HealthCheckResponse(BaseModel):
    """Response for health check endpoint.

    Reports service identity plus whether the model is loaded and ready
    to serve inference requests.
    """

    status: str = Field(..., description="Service status")
    service: str = Field(..., description="Service name")
    version: str = Field(..., description="Service version")
    model_ready: bool = Field(..., description="Is model ready for inference")
    # datetime.utcnow() is deprecated (Python 3.12) and returns a *naive*
    # datetime; stamp with an explicit timezone-aware UTC time instead.
    timestamp: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="Check timestamp",
    )

    class Config:
        schema_extra = {
            "example": {
                "status": "ok",
                "service": "inference",
                "version": "0.1.0",
                "model_ready": True,
                "timestamp": "2025-10-22T10:30:00"
            }
        }
class ErrorResponse(BaseModel):
    """Standard error response.

    Uniform error envelope returned by all endpoints on failure.
    """

    error: str = Field(..., description="Error type")
    message: str = Field(..., description="Error message")
    # datetime.utcnow() is deprecated (Python 3.12) and returns a *naive*
    # datetime; stamp with an explicit timezone-aware UTC time instead.
    timestamp: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="Error timestamp",
    )
    request_id: Optional[str] = Field(None, description="Request ID for tracing")

    class Config:
        schema_extra = {
            "example": {
                "error": "ValidationError",
                "message": "IQ data validation failed",
                "timestamp": "2025-10-22T10:30:00",
                "request_id": "req-789"
            }
        }