v1.0 - Stable release: multi-PC, UI-DETR-1 detection, 3 execution modes
- Frontend v4 reachable on the local network (192.168.1.40)
- Open ports: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU working
- Interactive self-healing
- Confidence dashboard

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
44 core/precision/__init__.py Normal file
@@ -0,0 +1,44 @@
"""
Precision Metrics Engine - Fiche #10

Real-time metrics collection system for RPA Vision V3,
with <1ms overhead and support for 1000+ metrics/second.

Author: Dom, Alice Kiro - 15 December 2024
"""

# Conditional imports to avoid errors during development
try:
    from .metrics_engine import MetricsEngine
except ImportError:
    MetricsEngine = None

try:
    from .models.metric_models import (
        ResolutionMetric,
        PerformanceMetric,
        ErrorMetric,
        MetricType
    )
except ImportError:
    ResolutionMetric = None
    PerformanceMetric = None
    ErrorMetric = None
    MetricType = None

try:
    from .api.metrics_api import MetricsAPI
except ImportError:
    MetricsAPI = None

# Export only the classes that are available
__all__ = [
    name for name in [
        'MetricsEngine',
        'ResolutionMetric',
        'PerformanceMetric',
        'ErrorMetric',
        'MetricType',
        'MetricsAPI'
    ] if globals().get(name) is not None
]
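Because the package only exports the classes that imported successfully, callers are expected to check for None before use. A minimal consumer-side sketch, not part of the commit itself (the guard style is an assumption):

# Hypothetical caller: guard against components that failed to import.
from core.precision import MetricsEngine

if MetricsEngine is not None:
    engine = MetricsEngine()   # defaults: in-memory storage, buffer_size=10000
else:
    engine = None              # metrics disabled while the module is incomplete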
7 core/precision/api/__init__.py Normal file
@@ -0,0 +1,7 @@
"""
Metrics API - Fiche #10
"""

from .metrics_api import MetricsAPI

__all__ = ['MetricsAPI']
219 core/precision/api/metrics_api.py Normal file
@@ -0,0 +1,219 @@
"""
Metrics API - Fiche #10 Patch D

REST API for accessing real-time precision metrics.

Author: Dom, Alice Kiro - 15 December 2024
"""

import time
import logging
from typing import Dict, Any, List, Optional

from ..metrics_engine import MetricsEngine
from ..models.metric_models import MetricType

logger = logging.getLogger(__name__)


class MetricsAPI:
    """
    REST API for precision metrics

    Provides endpoints optimized for real-time monitoring.
    """

    def __init__(self, metrics_engine: MetricsEngine):
        """
        Initialize the metrics API

        Args:
            metrics_engine: MetricsEngine instance
        """
        self.engine = metrics_engine

    def get_precision_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        Real-time precision statistics

        Args:
            time_range: Time range ("1h", "24h", "7d")

        Returns:
            Precision statistics dictionary
        """
        try:
            time_range_seconds = self._parse_time_range(time_range)

            # Fetch resolution metrics
            resolution_metrics = self.engine.get_metrics(
                MetricType.RESOLUTION,
                limit=10000,
                time_range_seconds=time_range_seconds
            )

            if not resolution_metrics:
                return self._empty_precision_stats()

            # Overall statistics
            total_resolutions = len(resolution_metrics)
            successful_resolutions = sum(1 for m in resolution_metrics if m.get('success', False))

            precision_rate = successful_resolutions / total_resolutions if total_resolutions > 0 else 0.0

            # Per-strategy statistics
            strategy_stats = {}
            for metric in resolution_metrics:
                strategy = metric.get('resolution_strategy', 'unknown')
                if strategy not in strategy_stats:
                    strategy_stats[strategy] = {'total': 0, 'successful': 0}

                strategy_stats[strategy]['total'] += 1
                if metric.get('success', False):
                    strategy_stats[strategy]['successful'] += 1

            # Per-strategy precision
            for strategy, stats in strategy_stats.items():
                stats['precision_rate'] = stats['successful'] / stats['total'] if stats['total'] > 0 else 0.0

            # Average and p95 durations
            durations = [m.get('duration_ms', 0) for m in resolution_metrics]
            avg_duration = sum(durations) / len(durations) if durations else 0.0
            p95_duration = sorted(durations)[int(len(durations) * 0.95)] if durations else 0.0

            return {
                'time_range': time_range,
                'timestamp': time.time(),
                'precision': {
                    'overall_rate': precision_rate,
                    'total_resolutions': total_resolutions,
                    'successful_resolutions': successful_resolutions,
                    'failed_resolutions': total_resolutions - successful_resolutions
                },
                'performance': {
                    'avg_duration_ms': avg_duration,
                    'p95_duration_ms': p95_duration
                },
                'by_strategy': strategy_stats
            }

        except Exception as e:
            logger.error(f"Failed to get precision stats: {e}")
            return self._empty_precision_stats()

    def get_performance_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        System performance statistics

        Args:
            time_range: Time range

        Returns:
            Performance statistics dictionary
        """
        try:
            # Fetch engine stats
            engine_stats = self.engine.get_stats()

            return {
                'time_range': time_range,
                'timestamp': time.time(),
                'engine_stats': engine_stats
            }

        except Exception as e:
            logger.error(f"Failed to get performance stats: {e}")
            return {'error': str(e)}

    def get_error_stats(self, time_range: str = "1h") -> Dict[str, Any]:
        """
        System error statistics

        Args:
            time_range: Time range

        Returns:
            Error statistics dictionary
        """
        try:
            time_range_seconds = self._parse_time_range(time_range)

            # Fetch error metrics
            error_metrics = self.engine.get_metrics(
                MetricType.ERROR,
                limit=1000,
                time_range_seconds=time_range_seconds
            )

            return {
                'time_range': time_range,
                'timestamp': time.time(),
                'summary': {
                    'total_errors': len(error_metrics),
                    'error_rate': len(error_metrics) / max(time_range_seconds / 3600, 1)
                },
                'recent_errors': error_metrics[-10:] if error_metrics else []
            }

        except Exception as e:
            logger.error(f"Failed to get error stats: {e}")
            return {'error': str(e)}

    def export_metrics(self, format: str = "json", time_range: str = "24h") -> Dict[str, Any]:
        """
        Export metrics for external monitoring

        Args:
            format: Export format ("json")
            time_range: Time range

        Returns:
            Formatted export data
        """
        try:
            return {
                'precision': self.get_precision_stats(time_range),
                'performance': self.get_performance_stats(time_range),
                'errors': self.get_error_stats(time_range)
            }

        except Exception as e:
            logger.error(f"Failed to export metrics: {e}")
            return {'error': str(e)}

    def _parse_time_range(self, time_range: str) -> float:
        """Parse a time range string into seconds"""
        time_range = time_range.lower().strip()

        if time_range.endswith('s'):
            return float(time_range[:-1])
        elif time_range.endswith('m'):
            return float(time_range[:-1]) * 60
        elif time_range.endswith('h'):
            return float(time_range[:-1]) * 3600
        elif time_range.endswith('d'):
            return float(time_range[:-1]) * 86400
        else:
            # Default to hours
            try:
                return float(time_range) * 3600
            except ValueError:
                return 3600  # 1 hour default

    def _empty_precision_stats(self) -> Dict[str, Any]:
        """Return empty precision stats"""
        return {
            'time_range': '1h',
            'timestamp': time.time(),
            'precision': {
                'overall_rate': 0.0,
                'total_resolutions': 0,
                'successful_resolutions': 0,
                'failed_resolutions': 0
            },
            'performance': {
                'avg_duration_ms': 0.0,
                'p95_duration_ms': 0.0
            },
            'by_strategy': {}
        }
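A minimal usage sketch for this API layer, assuming a MetricsEngine has already been populated elsewhere (variable names are illustrative, not part of the commit):

from core.precision.metrics_engine import MetricsEngine
from core.precision.api.metrics_api import MetricsAPI

engine = MetricsEngine()
api = MetricsAPI(engine)

stats = api.get_precision_stats(time_range="1h")   # overall and per-strategy precision
export = api.export_metrics(time_range="24h")      # precision + performance + errors in one payload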
24 core/precision/collectors/__init__.py Normal file
@@ -0,0 +1,24 @@
"""
Specialized metric collectors for operational excellence

This module provides the specialized metric collectors for the RPA Vision V3 system,
as part of the implementation of operational excellence fiches #10-15.

Available collectors:
- ResolutionCollector: collects UI element resolution metrics
- PerformanceCollector: collects system performance metrics
- ErrorCollector: collects and analyzes errors and failures

Authors: Dom, Alice Kiro
Date: 15 December 2024
"""

from .resolution_collector import ResolutionCollector
from .performance_collector import PerformanceCollector
from .error_collector import ErrorCollector

__all__ = [
    'ResolutionCollector',
    'PerformanceCollector',
    'ErrorCollector'
]
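The collectors are thin stubs at this stage; they normalize raw inputs into plain dicts. A short sketch of the intended call shapes (the argument values are illustrative):

from core.precision.collectors import PerformanceCollector, ErrorCollector

perf = PerformanceCollector().collect(operation_type="resolve", duration_ms=12.5)
err = ErrorCollector().collect("TimeoutError", "element not found", "target_resolver")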
24 core/precision/collectors/error_collector.py Normal file
@@ -0,0 +1,24 @@
"""
Error Metrics Collector - Fiche #10

Author: Dom, Alice Kiro - 15 December 2024
"""

import logging

logger = logging.getLogger(__name__)


class ErrorCollector:
    """Specialized collector for error metrics"""

    def __init__(self):
        pass

    def collect(self, error_type, error_message, component):
        """Collect error data"""
        return {
            'error_type': error_type,
            'error_message': error_message,
            'component': component
        }
23 core/precision/collectors/performance_collector.py Normal file
@@ -0,0 +1,23 @@
"""
Performance Metrics Collector - Fiche #10

Author: Dom, Alice Kiro - 15 December 2024
"""

import logging

logger = logging.getLogger(__name__)


class PerformanceCollector:
    """Specialized collector for performance metrics"""

    def __init__(self):
        pass

    def collect(self, operation_type, duration_ms):
        """Collect performance data"""
        return {
            'operation_type': operation_type,
            'duration_ms': duration_ms
        }
26 core/precision/collectors/resolution_collector.py Normal file
@@ -0,0 +1,26 @@
"""
Resolution Metrics Collector - Fiche #10

Specialized collector for UI target resolution metrics.

Author: Dom, Alice Kiro - 15 December 2024
"""

import logging

logger = logging.getLogger(__name__)


class ResolutionCollector:
    """Specialized collector for resolution metrics"""

    def __init__(self):
        pass

    def collect(self, target_spec, result, screen_state):
        """Collect resolution data"""
        return {
            'success': getattr(result, 'success', False),
            'confidence': getattr(result, 'confidence', 0.0),
            'strategy': getattr(result, 'strategy', 'unknown')
        }
334 core/precision/metrics_engine.py Normal file
@@ -0,0 +1,334 @@
"""
Precision Metrics Engine - Fiche #10 Patch B

Main real-time metrics collection engine, with <1ms overhead
and support for 1000+ metrics/second.

Author: Dom, Alice Kiro - 15 December 2024
"""

import time
import threading
import logging
from typing import Optional, List, Dict, Any
from collections import deque, defaultdict

from .models.metric_models import (
    MetricType,
    ResolutionMetric,
    PerformanceMetric,
    ErrorMetric,
    generate_target_spec_hash,
    generate_screen_state_hash,
    generate_environment_hash
)

logger = logging.getLogger(__name__)


class MetricsEngine:
    """
    Main real-time metrics collection engine

    Features:
    - <1ms collection overhead per metric
    - Supports 1000+ metrics/second
    - Asynchronous buffering for performance
    - Thread-safe for concurrent use
    """

    def __init__(self, storage_adapter=None, buffer_size=10000, flush_interval=1.0):
        """
        Initialize the metrics engine

        Args:
            storage_adapter: Storage adapter (default: in-memory)
            buffer_size: Metrics buffer size
            flush_interval: Buffer flush interval (seconds)
        """
        self.storage = storage_adapter or self._create_memory_storage()
        self.buffer_size = buffer_size
        self.flush_interval = flush_interval

        # Thread-safe buffers, one per metric type
        self._buffers = {
            MetricType.RESOLUTION: deque(maxlen=buffer_size),
            MetricType.PERFORMANCE: deque(maxlen=buffer_size),
            MetricType.ERROR: deque(maxlen=buffer_size)
        }

        # Locks for thread safety
        self._locks = {
            metric_type: threading.Lock()
            for metric_type in MetricType
        }

        # Internal statistics
        self._stats = {
            'metrics_collected': defaultdict(int),
            'collection_time_ms': deque(maxlen=1000)
        }

        # Environment hash cache for performance
        self._env_hash_cache = None
        self._env_cache_time = 0
        self._env_cache_ttl = 60.0  # 1 minute

        logger.info("MetricsEngine initialized with buffer_size=%d", buffer_size)

    def record_resolution(self, target_spec, result, duration_ms: float, screen_state) -> None:
        """
        Record a resolution metric with <1ms overhead

        Args:
            target_spec: Target specification
            result: Resolution result
            duration_ms: Resolution duration in ms
            screen_state: Screen state
        """
        start_time = time.perf_counter()

        try:
            # Optimized hash generation
            target_hash = generate_target_spec_hash(target_spec)
            screen_hash = generate_screen_state_hash(screen_state)
            env_hash = self._get_cached_environment_hash()

            # Extract result data
            success = getattr(result, 'success', False)
            confidence = getattr(result, 'confidence', 0.0)
            strategy = getattr(result, 'strategy', 'unknown')
            error_type = getattr(result, 'error_type', None) if not success else None

            # Advanced context (Fiches #6-8)
            sniper_score = getattr(result, 'sniper_score', None)
            anchor_id = getattr(result, 'anchor_element_id', None)
            context_hints = getattr(result, 'context_hints_used', None)
            candidates_count = getattr(result, 'candidates_count', 0)

            # Build the metric
            metric = ResolutionMetric(
                timestamp=time.time(),
                target_spec_hash=target_hash,
                resolution_strategy=strategy,
                success=success,
                duration_ms=duration_ms,
                confidence_score=confidence,
                environment_hash=env_hash,
                screen_state_hash=screen_hash,
                error_type=error_type,
                candidates_count=candidates_count,
                sniper_score=sniper_score,
                anchor_element_id=anchor_id,
                context_hints_used=context_hints
            )

            # Append to the thread-safe buffer
            with self._locks[MetricType.RESOLUTION]:
                self._buffers[MetricType.RESOLUTION].append(metric)

            # Statistics
            self._stats['metrics_collected'][MetricType.RESOLUTION] += 1

        except Exception as e:
            logger.warning("Failed to record resolution metric: %s", e)

        finally:
            # Measure collection overhead
            collection_time = (time.perf_counter() - start_time) * 1000
            self._stats['collection_time_ms'].append(collection_time)

            # Warn if overhead exceeds 1ms
            if collection_time > 1.0:
                logger.warning("Resolution metric collection took %.2fms (>1ms target)",
                               collection_time)

    def record_performance(self, operation_type: str, duration_ms: float,
                           memory_usage_mb: Optional[float] = None,
                           cpu_usage_percent: Optional[float] = None,
                           cache_hit: bool = False) -> None:
        """
        Record a performance metric

        Args:
            operation_type: Operation type
            duration_ms: Duration in ms
            memory_usage_mb: Memory usage
            cpu_usage_percent: CPU usage
            cache_hit: Cache hit flag
        """
        start_time = time.perf_counter()

        try:
            # Build the metric
            metric = PerformanceMetric(
                timestamp=time.time(),
                operation_type=operation_type,
                duration_ms=duration_ms,
                memory_usage_mb=memory_usage_mb or 0.0,
                cpu_usage_percent=cpu_usage_percent or 0.0,
                cache_hit=cache_hit
            )

            # Append to buffer
            with self._locks[MetricType.PERFORMANCE]:
                self._buffers[MetricType.PERFORMANCE].append(metric)

            self._stats['metrics_collected'][MetricType.PERFORMANCE] += 1

        except Exception as e:
            logger.warning("Failed to record performance metric: %s", e)

        finally:
            collection_time = (time.perf_counter() - start_time) * 1000
            self._stats['collection_time_ms'].append(collection_time)

    def record_error(self, error_type: str, error_message: str,
                     component: str = "unknown", severity: str = "medium",
                     context: Optional[Dict[str, Any]] = None) -> None:
        """
        Record an error metric

        Args:
            error_type: Error type
            error_message: Error message
            component: Source component
            severity: Severity (low/medium/high/critical)
            context: Additional context
        """
        start_time = time.perf_counter()

        try:
            # Build the metric
            metric = ErrorMetric(
                timestamp=time.time(),
                error_type=error_type,
                error_message=error_message,
                component=component,
                severity=severity,
                context=context
            )

            # Append to buffer
            with self._locks[MetricType.ERROR]:
                self._buffers[MetricType.ERROR].append(metric)

            self._stats['metrics_collected'][MetricType.ERROR] += 1

        except Exception as e:
            logger.warning("Failed to record error metric: %s", e)

        finally:
            collection_time = (time.perf_counter() - start_time) * 1000
            self._stats['collection_time_ms'].append(collection_time)

    def get_stats(self) -> Dict[str, Any]:
        """
        Get metrics engine statistics

        Returns:
            Statistics dictionary
        """
        collection_times = list(self._stats['collection_time_ms'])

        return {
            'metrics_collected': dict(self._stats['metrics_collected']),
            'buffer_sizes': {
                metric_type.value: len(self._buffers[metric_type])
                for metric_type in MetricType
                if metric_type in self._buffers
            },
            'collection_performance': {
                'avg_time_ms': sum(collection_times) / len(collection_times) if collection_times else 0,
                'max_time_ms': max(collection_times) if collection_times else 0,
                'p95_time_ms': sorted(collection_times)[int(len(collection_times) * 0.95)] if collection_times else 0
            }
        }

    def get_metrics(self, metric_type: MetricType, limit: int = 1000,
                    time_range_seconds: Optional[float] = None) -> List[Dict[str, Any]]:
        """
        Get metrics with filtering

        Args:
            metric_type: Metric type
            limit: Maximum number of results
            time_range_seconds: Time range (None = all)

        Returns:
            List of serialized metrics
        """
        try:
            # Flush the buffer so recent data is included
            self._flush_buffer_to_storage(metric_type)

            # Fetch from storage
            metrics = self.storage.get_metrics(metric_type, limit)

            return [metric.to_dict() if hasattr(metric, 'to_dict') else metric
                    for metric in metrics]

        except Exception as e:
            logger.error("Failed to get metrics: %s", e)
            return []

    def _flush_buffer_to_storage(self, metric_type: MetricType):
        """Flush a specific buffer to storage"""
        try:
            with self._locks[metric_type]:
                if not self._buffers[metric_type]:
                    return

                metrics_to_flush = list(self._buffers[metric_type])
                self._buffers[metric_type].clear()

            if metrics_to_flush:
                self.storage.store_metrics(metric_type, metrics_to_flush)
        except Exception as e:
            logger.error("Failed to flush buffer: %s", e)

    def _get_cached_environment_hash(self) -> str:
        """Get the environment hash, cached with a TTL"""
        current_time = time.time()

        if (self._env_hash_cache is None or
                current_time - self._env_cache_time > self._env_cache_ttl):
            self._env_hash_cache = generate_environment_hash()
            self._env_cache_time = current_time

        return self._env_hash_cache

    def _create_memory_storage(self):
        """Create simple in-memory storage"""
        return SimpleMemoryStorage()


class SimpleMemoryStorage:
    """Simple in-memory storage for development"""

    def __init__(self):
        self.metrics = defaultdict(list)

    def store_metrics(self, metric_type: MetricType, metrics: List):
        """Store metrics in memory"""
        self.metrics[metric_type].extend(metrics)

    def get_metrics(self, metric_type: MetricType, limit: int = 1000):
        """Get metrics from memory"""
        return self.metrics[metric_type][-limit:]


# Optional global instance for simple use
_global_metrics_engine: Optional[MetricsEngine] = None


def get_global_metrics_engine() -> Optional[MetricsEngine]:
    """Get the global MetricsEngine instance"""
    return _global_metrics_engine


def initialize_global_metrics_engine(**kwargs) -> MetricsEngine:
    """Initialize and set the global MetricsEngine instance"""
    global _global_metrics_engine
    _global_metrics_engine = MetricsEngine(**kwargs)
    return _global_metrics_engine
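A minimal end-to-end sketch of the engine, using only the simple-argument recorders; record_resolution needs real target/result/screen objects, so it is omitted here. Argument values are illustrative:

from core.precision.metrics_engine import initialize_global_metrics_engine, get_global_metrics_engine

engine = initialize_global_metrics_engine(buffer_size=5000)

engine.record_performance("cache_lookup", duration_ms=0.4, cache_hit=True)
engine.record_error("TimeoutError", "resolution timed out", component="target_resolver", severity="high")

print(get_global_metrics_engine().get_stats())   # counts, buffer sizes, collection overhead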
23 core/precision/models/__init__.py Normal file
@@ -0,0 +1,23 @@
"""
Data models for metrics - Fiche #10
"""

from .metric_models import (
    MetricType,
    ResolutionMetric,
    PerformanceMetric,
    ErrorMetric,
    generate_target_spec_hash,
    generate_screen_state_hash,
    generate_environment_hash
)

__all__ = [
    'MetricType',
    'ResolutionMetric',
    'PerformanceMetric',
    'ErrorMetric',
    'generate_target_spec_hash',
    'generate_screen_state_hash',
    'generate_environment_hash'
]
142 core/precision/models/metric_models.py Normal file
@@ -0,0 +1,142 @@
"""
Metric Data Models - Fiche #10 Patch A

Defines the data structures for real-time metrics
collection with optimized serialization.

Author: Dom, Alice Kiro - 15 December 2024
"""

import time
import hashlib
from dataclasses import dataclass, asdict
from typing import Optional, Dict, Any, List
from enum import Enum


class MetricType(str, Enum):
    """Types of collected metrics"""
    RESOLUTION = "resolution"
    PERFORMANCE = "performance"
    ERROR = "error"


@dataclass
class ResolutionMetric:
    """
    UI target resolution metric

    Collects performance and success data
    for each target resolution attempt.
    """
    timestamp: float
    target_spec_hash: str
    resolution_strategy: str  # "by_text", "by_role", "composite", "sniper"
    success: bool
    duration_ms: float
    confidence_score: float
    environment_hash: str
    screen_state_hash: str

    # Optional details
    error_type: Optional[str] = None
    error_message: Optional[str] = None
    candidates_count: int = 0
    cache_hit: bool = False

    # Resolution context
    sniper_score: Optional[float] = None
    anchor_element_id: Optional[str] = None
    context_hints_used: Optional[List[str]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Optimized serialization for storage"""
        return asdict(self)


@dataclass
class PerformanceMetric:
    """
    System performance metric

    Collects global system performance metrics
    (CPU, memory, latency).
    """
    timestamp: float
    operation_type: str  # "resolve", "execute", "cache_lookup", "embedding"
    duration_ms: float
    memory_usage_mb: float
    cpu_usage_percent: float

    # Cache metrics
    cache_hit: bool = False
    cache_size_mb: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        """Optimized serialization"""
        return asdict(self)


@dataclass
class ErrorMetric:
    """
    System error metric

    Collects errors and exceptions for
    analysis and continuous improvement.
    """
    timestamp: float
    error_type: str
    error_message: str
    component: str  # "target_resolver", "action_executor", "cache", etc.
    severity: str  # "low", "medium", "high", "critical"

    # Error context
    stack_trace: Optional[str] = None
    context: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Optimized serialization"""
        return asdict(self)


# Hash generation utilities
def generate_target_spec_hash(target_spec) -> str:
    """Generate a stable hash for a TargetSpec"""
    try:
        # Deterministic serialization
        spec_str = f"{target_spec.by_role}|{target_spec.by_text}|{target_spec.by_position}"
        if hasattr(target_spec, 'context_hints') and target_spec.context_hints:
            hints_str = "|".join(sorted(f"{k}:{v}" for k, v in target_spec.context_hints.items()))
            spec_str += f"|{hints_str}"

        return hashlib.md5(spec_str.encode()).hexdigest()[:16]
    except Exception:
        return "unknown"


def generate_screen_state_hash(screen_state) -> str:
    """Generate a stable hash for a ScreenState"""
    try:
        # Hash based on the main UI elements
        elements_str = ""
        if hasattr(screen_state, 'ui_elements') and screen_state.ui_elements:
            for elem in screen_state.ui_elements[:10]:  # Cap for performance
                elements_str += f"{elem.element_type}|{elem.text}|{elem.bbox}|"

        return hashlib.md5(elements_str.encode()).hexdigest()[:16]
    except Exception:
        return "unknown"


def generate_environment_hash() -> str:
    """Generate a stable hash for the current environment"""
    try:
        import platform

        # System information
        os_info = f"{platform.system()}|{platform.release()}"
        env_str = f"{os_info}"
        return hashlib.md5(env_str.encode()).hexdigest()[:16]
    except Exception:
        return "unknown"
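A short sketch of the models in isolation (the field values are illustrative):

import time
from core.precision.models.metric_models import ErrorMetric, generate_environment_hash

metric = ErrorMetric(
    timestamp=time.time(),
    error_type="ValueError",
    error_message="bad target spec",
    component="target_resolver",
    severity="low"
)
print(metric.to_dict())               # flat dict via dataclasses.asdict
print(generate_environment_hash())    # 16-char md5 prefix of "<OS>|<release>"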