v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
7
core/precision/api/__init__.py
Normal file
7
core/precision/api/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""
|
||||
API Métriques - Fiche #10
|
||||
"""
|
||||
|
||||
from .metrics_api import MetricsAPI
|
||||
|
||||
__all__ = ['MetricsAPI']
|
||||
219
core/precision/api/metrics_api.py
Normal file
219
core/precision/api/metrics_api.py
Normal file
@@ -0,0 +1,219 @@
|
||||
"""
|
||||
API Métriques - Fiche #10 Patch D
|
||||
|
||||
API REST pour accès aux métriques de précision temps réel.
|
||||
|
||||
Auteur: Dom, Alice Kiro - 15 décembre 2024
|
||||
"""
|
||||
|
||||
import time
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
from ..metrics_engine import MetricsEngine
|
||||
from ..models.metric_models import MetricType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MetricsAPI:
    """
    REST-style API over real-time precision metrics (Fiche #10 Patch D).

    Wraps a MetricsEngine instance and exposes aggregation endpoints that
    return plain dictionaries, ready for JSON serialisation by a web layer.
    """

    def __init__(self, metrics_engine: 'MetricsEngine'):
        """
        Initialise the metrics API.

        Args:
            metrics_engine: MetricsEngine instance used as the data source.
        """
        self.engine = metrics_engine

    def get_precision_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        Real-time precision statistics.

        Args:
            time_range: Time window ("1h", "24h", "7d", ...).

        Returns:
            Dictionary with overall precision, per-strategy precision and
            duration statistics; a zeroed payload when no resolution metric
            exists in the window or on error.
        """
        try:
            time_range_seconds = self._parse_time_range(time_range)

            # Fetch resolution metrics for the requested window.
            resolution_metrics = self.engine.get_metrics(
                MetricType.RESOLUTION,
                limit=10000,
                time_range_seconds=time_range_seconds
            )

            if not resolution_metrics:
                # BUGFIX: propagate the requested range instead of the
                # hard-coded "1h" the empty payload used to report.
                return self._empty_precision_stats(time_range)

            # Overall success rate (list is non-empty here, so no /0 guard).
            total_resolutions = len(resolution_metrics)
            successful_resolutions = sum(
                1 for m in resolution_metrics if m.get('success', False)
            )
            precision_rate = successful_resolutions / total_resolutions

            # Per-strategy success counts.
            strategy_stats: Dict[str, Dict[str, Any]] = {}
            for metric in resolution_metrics:
                strategy = metric.get('resolution_strategy', 'unknown')
                stats = strategy_stats.setdefault(
                    strategy, {'total': 0, 'successful': 0}
                )
                stats['total'] += 1
                if metric.get('success', False):
                    stats['successful'] += 1

            # Per-strategy precision ('total' is always >= 1 by construction).
            for stats in strategy_stats.values():
                stats['precision_rate'] = stats['successful'] / stats['total']

            # Duration statistics. The p95 index int(n * 0.95) is always
            # strictly less than n, so it can never overflow the list.
            durations = [m.get('duration_ms', 0) for m in resolution_metrics]
            avg_duration = sum(durations) / len(durations)
            p95_duration = sorted(durations)[int(len(durations) * 0.95)]

            return {
                'time_range': time_range,
                'timestamp': time.time(),
                'precision': {
                    'overall_rate': precision_rate,
                    'total_resolutions': total_resolutions,
                    'successful_resolutions': successful_resolutions,
                    'failed_resolutions': total_resolutions - successful_resolutions
                },
                'performance': {
                    'avg_duration_ms': avg_duration,
                    'p95_duration_ms': p95_duration
                },
                'by_strategy': strategy_stats
            }

        except Exception as e:
            logger.error(f"Failed to get precision stats: {e}")
            return self._empty_precision_stats(time_range)

    def get_performance_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        System performance statistics.

        Args:
            time_range: Time window label (echoed back; the engine stats
                themselves are not windowed by this call).

        Returns:
            Dictionary with the raw engine stats, or {'error': ...} on failure.
        """
        try:
            engine_stats = self.engine.get_stats()

            return {
                'time_range': time_range,
                'timestamp': time.time(),
                'engine_stats': engine_stats
            }

        except Exception as e:
            logger.error(f"Failed to get performance stats: {e}")
            return {'error': str(e)}

    def get_error_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        System error statistics.

        Args:
            time_range: Time window ("1h", "24h", "7d", ...).

        Returns:
            Dictionary with total error count, errors-per-hour rate and the
            ten most recent errors, or {'error': ...} on failure.
        """
        try:
            time_range_seconds = self._parse_time_range(time_range)

            # Fetch error metrics for the requested window.
            error_metrics = self.engine.get_metrics(
                MetricType.ERROR,
                limit=1000,
                time_range_seconds=time_range_seconds
            )

            return {
                'time_range': time_range,
                'timestamp': time.time(),
                'summary': {
                    'total_errors': len(error_metrics),
                    # Errors per hour; windows shorter than 1h are clamped
                    # so the denominator never drops below 1.
                    'error_rate': len(error_metrics) / max(time_range_seconds / 3600, 1)
                },
                'recent_errors': error_metrics[-10:] if error_metrics else []
            }

        except Exception as e:
            logger.error(f"Failed to get error stats: {e}")
            return {'error': str(e)}

    def export_metrics(self, format: str = "json", time_range: str = "24h") -> Dict[str, Any]:
        """
        Export all metric groups for external monitoring.

        Args:
            format: Export format; only "json" (plain dicts) is produced.
                (Name kept for backward compatibility although it shadows
                the builtin.)
            time_range: Time window applied to every group.

        Returns:
            Dictionary with 'precision', 'performance' and 'errors' groups,
            or {'error': ...} on failure.
        """
        try:
            return {
                'precision': self.get_precision_stats(time_range),
                'performance': self.get_performance_stats(time_range),
                'errors': self.get_error_stats(time_range)
            }

        except Exception as e:
            logger.error(f"Failed to export metrics: {e}")
            return {'error': str(e)}

    def _parse_time_range(self, time_range: str) -> float:
        """
        Parse a time-range string ("30s", "15m", "1h", "7d") into seconds.

        Bare numbers are interpreted as hours. BUGFIX: malformed values with
        a valid suffix (e.g. "abch") used to raise an uncaught ValueError,
        and the fallback was an int despite the float return type; every
        parse failure now falls back to 3600.0 (one hour).
        """
        multipliers = {'s': 1.0, 'm': 60.0, 'h': 3600.0, 'd': 86400.0}
        text = time_range.lower().strip()
        try:
            if text and text[-1] in multipliers:
                return float(text[:-1]) * multipliers[text[-1]]
            # No recognised suffix: interpret as a number of hours.
            return float(text) * 3600.0
        except ValueError:
            return 3600.0  # 1 hour default

    def _empty_precision_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        Zeroed precision-stats payload.

        Args:
            time_range: Window label echoed in the payload (defaults to
                "1h" for backward compatibility with no-argument callers).
        """
        return {
            'time_range': time_range,
            'timestamp': time.time(),
            'precision': {
                'overall_rate': 0.0,
                'total_resolutions': 0,
                'successful_resolutions': 0,
                'failed_resolutions': 0
            },
            'performance': {
                'avg_duration_ms': 0.0,
                'p95_duration_ms': 0.0
            },
            'by_strategy': {}
        }
Reference in New Issue
Block a user